code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M) |
---|---|---|
import numpy as np
def calculate_iou(bboxes1, bboxes2):
"""
This calculates the intersection over union (IoU) of N bounding boxes
in the form N x [left, top, right, bottom], e.g. for N=2:
>>> bb = [[21,34,45,67], [67,120, 89, 190]]
:param bboxes1: np array: N x 4 ground truth bounding boxes
:param bboxes2: np array: N x 4 target bounding boxes
:return: iou: ratio between 0 and 1
"""
if len(bboxes1.shape) == 1:
bboxes1 = bboxes1.reshape(1, bboxes1.shape[0])
if len(bboxes2.shape) == 1:
bboxes2 = bboxes2.reshape(1, bboxes2.shape[0])
if bboxes1.shape[0] != bboxes2.shape[0] or bboxes1.shape[1] != bboxes2.shape[1]:
raise ValueError('Bounding boxes must be of equal dimension')
left_intersection = np.maximum(bboxes1[:, 0], bboxes2[:, 0])
top_intersection = np.maximum(bboxes1[:, 1], bboxes2[:, 1])
right_intersection = np.minimum(bboxes1[:, 2], bboxes2[:, 2])
bottom_intersection = np.minimum(bboxes1[:, 3], bboxes2[:, 3])
# clamp the intersection extents at zero so disjoint boxes give area 0
# (otherwise two negative extents would multiply into a spurious positive area)
w_intersection = np.maximum(right_intersection - left_intersection, 0)
h_intersection = np.maximum(bottom_intersection - top_intersection, 0)
intersection_area = w_intersection * h_intersection
bboxes1_area = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
bboxes2_area = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
union_area = bboxes1_area + bboxes2_area - intersection_area
iou = np.clip(intersection_area/union_area, 0, 1)
return iou
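# Illustrative usage sketch (added for clarity, not part of the original snippet):
# the box coordinates are made-up values; the first pair overlaps, the second
# pair is disjoint and should therefore give an IoU of 0.
example_gt = np.array([[21, 34, 45, 67], [67, 120, 89, 190]])
example_pred = np.array([[25, 30, 45, 70], [200, 200, 220, 220]])
print(calculate_iou(example_gt, example_pred))  # per-box IoU values in [0, 1]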
|
[
"numpy.clip",
"numpy.maximum",
"numpy.minimum"
] |
[((770, 810), 'numpy.maximum', 'np.maximum', (['bboxes1[:, 0]', 'bboxes2[:, 0]'], {}), '(bboxes1[:, 0], bboxes2[:, 0])\n', (780, 810), True, 'import numpy as np\n'), ((834, 874), 'numpy.maximum', 'np.maximum', (['bboxes1[:, 1]', 'bboxes2[:, 1]'], {}), '(bboxes1[:, 1], bboxes2[:, 1])\n', (844, 874), True, 'import numpy as np\n'), ((900, 940), 'numpy.minimum', 'np.minimum', (['bboxes1[:, 2]', 'bboxes2[:, 2]'], {}), '(bboxes1[:, 2], bboxes2[:, 2])\n', (910, 940), True, 'import numpy as np\n'), ((967, 1007), 'numpy.minimum', 'np.minimum', (['bboxes1[:, 3]', 'bboxes2[:, 3]'], {}), '(bboxes1[:, 3], bboxes2[:, 3])\n', (977, 1007), True, 'import numpy as np\n'), ((1434, 1479), 'numpy.clip', 'np.clip', (['(intersection_area / union_area)', '(0)', '(1)'], {}), '(intersection_area / union_area, 0, 1)\n', (1441, 1479), True, 'import numpy as np\n')]
|
import h5py
import numpy as np
from code.model import UNetClassifier
def load_dataset(covid_file_path, normal_file_path):
covid = h5py.File(covid_file_path, 'r')['covid']
normal = h5py.File(normal_file_path, 'r')['normal']
all_images = np.expand_dims(np.concatenate([covid, normal]), axis=3)
all_labels = np.concatenate([[1]*covid.shape[0], [0]*normal.shape[0]])
shuffled_indices = np.random.permutation(np.arange(all_images.shape[0]))
all_images = all_images[shuffled_indices]
all_labels = all_labels[shuffled_indices]
return all_images, all_labels
if __name__ == '__main__':
model = UNetClassifier((512, 512, 1), 2, True)  # match the imported class name
all_images, all_labels = load_dataset('covid.h5', 'normal.h5')  # placeholder file paths
print(all_images.shape, all_labels.shape)
model.train(all_images, all_labels, 15, 16, 0.2)
|
[
"numpy.arange",
"numpy.concatenate",
"h5py.File"
] |
[((325, 386), 'numpy.concatenate', 'np.concatenate', (['[[1] * covid.shape[0], [0] * normal.shape[0]]'], {}), '([[1] * covid.shape[0], [0] * normal.shape[0]])\n', (339, 386), True, 'import numpy as np\n'), ((137, 168), 'h5py.File', 'h5py.File', (['covid_file_path', '"""r"""'], {}), "(covid_file_path, 'r')\n", (146, 168), False, 'import h5py\n'), ((191, 223), 'h5py.File', 'h5py.File', (['normal_file_path', '"""r"""'], {}), "(normal_file_path, 'r')\n", (200, 223), False, 'import h5py\n'), ((267, 298), 'numpy.concatenate', 'np.concatenate', (['[covid, normal]'], {}), '([covid, normal])\n', (281, 298), True, 'import numpy as np\n'), ((429, 459), 'numpy.arange', 'np.arange', (['all_images.shape[0]'], {}), '(all_images.shape[0])\n', (438, 459), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
from collections import Counter
import numpy as np
import theano
import theano.tensor as T
punctuation = set(string.punctuation)
punctuation.add('\n')
punctuation.add('\t')
punctuation.add(u'’')
punctuation.add(u'‘')
punctuation.add(u'“')
punctuation.add(u'”')
punctuation.add(u'´')
punctuation.add('')
def one_hot(X, n=None, negative_class=0.):
X = np.asarray(X).flatten()
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
def flatten(l):
return [item for sublist in l for item in sublist]
def lbf(l,b):
return [el for el, condition in zip(l, b) if condition]
def list_index(l, idxs):
return [l[idx] for idx in idxs]
def tokenize(text):
tokenized = []
w = ''
for t in text:
if t in punctuation:
tokenized.append(w)
tokenized.append(t)
w = ''
elif t == ' ':
tokenized.append(w)
w = ''
else:
w += t
if w != '':
tokenized.append(w)
tokenized = [token for token in tokenized if token]
return tokenized
def token_encoder(texts, max_features=9997, min_df=10):
df = {}
for text in texts:
tokens = set(text)
for token in tokens:
if token in df:
df[token] += 1
else:
df[token] = 1
k, v = df.keys(), np.asarray(df.values())
valid = v >= min_df
k = lbf(k, valid)
v = v[valid]
sort_mask = np.argsort(v)[::-1]
k = list_index(k, sort_mask)[:max_features]
v = v[sort_mask][:max_features]
xtoi = dict(zip(k, range(3, len(k)+3)))
return xtoi
def standardize_targets(Y, cost):
Y = np.asarray(Y)
ndim = len(Y.shape)
if ndim == 1:
Y = Y.reshape(-1, 1)
if Y.shape[1] == 1 and cost.__name__ == 'CategoricalCrossEntropy':
Y = one_hot(Y, negative_class=0.)
if Y.shape[1] == 1 and 'Hinge' in cost.__name__:
if len(np.unique(Y)) > 2:
Y = one_hot(Y, negative_class=-1.)
else:
Y[Y==0] -= 1
return Y
class Tokenizer(object):
"""
For converting lists of text into tokens used by Passage models.
max_features sets the maximum number of tokens (all others are mapped to UNK)
min_df sets the minimum number of documents a token must appear in to not get mapped to UNK
lowercase controls whether the text is lowercased or not
character sets whether the tokenizer works on a character or word level
Usage:
>>> from passage.preprocessing import Tokenizer
>>> example_text = ['This. is.', 'Example TEXT', 'is text']
>>> tokenizer = Tokenizer(min_df=1, lowercase=True, character=False)
>>> tokenized = tokenizer.fit_transform(example_text)
>>> tokenized
[[7, 5, 3, 5], [6, 4], [3, 4]]
>>> tokenizer.inverse_transform(tokenized)
['this . is .', 'example text', 'is text']
"""
def __init__(self, max_features=9997, min_df=10, lowercase=True, character=False):
self.max_features = max_features
self.min_df = min_df
self.lowercase = lowercase
self.character = character
def fit(self, texts):
if self.lowercase:
texts = [text.lower() for text in texts]
if self.character:
tokens = [list(text) for text in texts]
else:
tokens = [tokenize(text) for text in texts]
self.encoder = token_encoder(tokens, max_features=self.max_features-3, min_df=self.min_df)
self.encoder['PAD'] = 0
self.encoder['END'] = 1
self.encoder['UNK'] = 2
self.decoder = dict(zip(self.encoder.values(), self.encoder.keys()))
self.n_features = len(self.encoder)
return self
def transform(self, texts):
if self.lowercase:
texts = [text.lower() for text in texts]
if self.character:
texts = [list(text) for text in texts]
else:
texts = [tokenize(text) for text in texts]
tokens = [[self.encoder.get(token, 2) for token in text] for text in texts]
return tokens
def fit_transform(self, texts):
self.fit(texts)
tokens = self.transform(texts)
return tokens
def inverse_transform(self, codes):
if self.character:
joiner = ''
else:
joiner = ' '
return [joiner.join([self.decoder[token] for token in code]) for code in codes]
class LenFilter(object):
def __init__(self, max_len=1000, min_max_len=100, percentile=99):
self.max_len = max_len
self.percentile = percentile
self.min_max_len = min_max_len
def filter(self, *data):
lens = [len(seq) for seq in data[0]]
if self.percentile > 0:
max_len = np.percentile(lens, self.percentile)
max_len = np.clip(max_len, self.min_max_len, self.max_len)
else:
max_len = self.max_len
valid_idxs = [i for i, l in enumerate(lens) if l <= max_len]
if len(data) == 1:
return list_index(data[0], valid_idxs)
else:
return tuple([list_index(d, valid_idxs) for d in data])
|
[
"numpy.clip",
"numpy.unique",
"numpy.asarray",
"numpy.max",
"numpy.argsort",
"numpy.percentile"
] |
[((1789, 1802), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (1799, 1802), True, 'import numpy as np\n'), ((1582, 1595), 'numpy.argsort', 'np.argsort', (['v'], {}), '(v)\n', (1592, 1595), True, 'import numpy as np\n'), ((417, 430), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (427, 430), True, 'import numpy as np\n'), ((471, 480), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (477, 480), True, 'import numpy as np\n'), ((4865, 4901), 'numpy.percentile', 'np.percentile', (['lens', 'self.percentile'], {}), '(lens, self.percentile)\n', (4878, 4901), True, 'import numpy as np\n'), ((4924, 4972), 'numpy.clip', 'np.clip', (['max_len', 'self.min_max_len', 'self.max_len'], {}), '(max_len, self.min_max_len, self.max_len)\n', (4931, 4972), True, 'import numpy as np\n'), ((2055, 2067), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (2064, 2067), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from oddt.scoring.models import classifiers, regressors
@pytest.mark.filterwarnings('ignore:Stochastic Optimizer')
@pytest.mark.parametrize('cls',
[classifiers.svm(probability=True),
classifiers.neuralnetwork(random_state=42)])
def test_classifiers(cls):
# toy data
X = np.concatenate((np.zeros((5, 2)), np.ones((5, 2))))
Y = np.concatenate((np.ones(5), np.zeros(5)))
np.random.seed(42)
cls.fit(X, Y)
assert_array_equal(cls.predict(X), Y)
assert cls.score(X, Y) == 1.0
prob = cls.predict_proba(X)
assert_array_almost_equal(prob, [[0, 1]] * 5 + [[1, 0]] * 5, decimal=1)
log_prob = cls.predict_log_proba(X)
assert_array_almost_equal(np.log(prob), log_prob)
pickled = pickle.dumps(cls)
reloaded = pickle.loads(pickled)
prob_reloaded = reloaded.predict_proba(X)
assert_array_almost_equal(prob, prob_reloaded)
@pytest.mark.parametrize('reg',
[regressors.svm(C=10),
regressors.randomforest(random_state=42),
regressors.neuralnetwork(solver='lbfgs',
random_state=42,
hidden_layer_sizes=(20, 20)),
regressors.mlr()])
def test_regressors(reg):
X = np.vstack((np.arange(30, 10, -2, dtype='float64'),
np.arange(100, 90, -1, dtype='float64'))).T
Y = np.arange(10, dtype='float64')
np.random.seed(42)
reg.fit(X, Y)
pred = reg.predict(X)
assert (np.abs(pred.flatten() - Y) < 1).all()
assert reg.score(X, Y) > 0.9
pickled = pickle.dumps(reg)
reloaded = pickle.loads(pickled)
pred_reloaded = reloaded.predict(X)
assert_array_almost_equal(pred, pred_reloaded)
|
[
"oddt.scoring.models.regressors.randomforest",
"numpy.testing.assert_array_almost_equal",
"pytest.mark.filterwarnings",
"numpy.ones",
"oddt.scoring.models.regressors.neuralnetwork",
"pickle.dumps",
"numpy.log",
"oddt.scoring.models.classifiers.neuralnetwork",
"oddt.scoring.models.regressors.svm",
"numpy.zeros",
"numpy.random.seed",
"oddt.scoring.models.classifiers.svm",
"oddt.scoring.models.regressors.mlr",
"pickle.loads",
"numpy.arange"
] |
[((180, 237), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Stochastic Optimizer"""'], {}), "('ignore:Stochastic Optimizer')\n", (206, 237), False, 'import pytest\n'), ((559, 577), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (573, 577), True, 'import numpy as np\n'), ((711, 782), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['prob', '([[0, 1]] * 5 + [[1, 0]] * 5)'], {'decimal': '(1)'}), '(prob, [[0, 1]] * 5 + [[1, 0]] * 5, decimal=1)\n', (736, 782), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((892, 909), 'pickle.dumps', 'pickle.dumps', (['cls'], {}), '(cls)\n', (904, 909), False, 'import pickle\n'), ((925, 946), 'pickle.loads', 'pickle.loads', (['pickled'], {}), '(pickled)\n', (937, 946), False, 'import pickle\n'), ((997, 1043), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['prob', 'prob_reloaded'], {}), '(prob, prob_reloaded)\n', (1022, 1043), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1612, 1642), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': '"""float64"""'}), "(10, dtype='float64')\n", (1621, 1642), True, 'import numpy as np\n'), ((1648, 1666), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1662, 1666), True, 'import numpy as np\n'), ((1811, 1828), 'pickle.dumps', 'pickle.dumps', (['reg'], {}), '(reg)\n', (1823, 1828), False, 'import pickle\n'), ((1844, 1865), 'pickle.loads', 'pickle.loads', (['pickled'], {}), '(pickled)\n', (1856, 1865), False, 'import pickle\n'), ((1910, 1956), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['pred', 'pred_reloaded'], {}), '(pred, pred_reloaded)\n', (1935, 1956), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((853, 865), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (859, 865), True, 'import numpy as np\n'), ((296, 329), 'oddt.scoring.models.classifiers.svm', 'classifiers.svm', ([], {'probability': '(True)'}), '(probability=True)\n', (311, 329), False, 'from oddt.scoring.models import classifiers, regressors\n'), ((357, 399), 'oddt.scoring.models.classifiers.neuralnetwork', 'classifiers.neuralnetwork', ([], {'random_state': '(42)'}), '(random_state=42)\n', (382, 399), False, 'from oddt.scoring.models import classifiers, regressors\n'), ((1104, 1124), 'oddt.scoring.models.regressors.svm', 'regressors.svm', ([], {'C': '(10)'}), '(C=10)\n', (1118, 1124), False, 'from oddt.scoring.models import classifiers, regressors\n'), ((1152, 1192), 'oddt.scoring.models.regressors.randomforest', 'regressors.randomforest', ([], {'random_state': '(42)'}), '(random_state=42)\n', (1175, 1192), False, 'from oddt.scoring.models import classifiers, regressors\n'), ((1220, 1310), 'oddt.scoring.models.regressors.neuralnetwork', 'regressors.neuralnetwork', ([], {'solver': '"""lbfgs"""', 'random_state': '(42)', 'hidden_layer_sizes': '(20, 20)'}), "(solver='lbfgs', random_state=42,\n hidden_layer_sizes=(20, 20))\n", (1244, 1310), False, 'from oddt.scoring.models import classifiers, regressors\n'), ((1436, 1452), 'oddt.scoring.models.regressors.mlr', 'regressors.mlr', ([], {}), '()\n', (1450, 1452), False, 'from oddt.scoring.models import classifiers, regressors\n'), ((468, 484), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (476, 484), True, 'import numpy as np\n'), ((486, 501), 'numpy.ones', 'np.ones', (['(5, 2)'], {}), '((5, 2))\n', (493, 501), True, 'import numpy as np\n'), ((528, 
538), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (535, 538), True, 'import numpy as np\n'), ((540, 551), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (548, 551), True, 'import numpy as np\n'), ((1500, 1538), 'numpy.arange', 'np.arange', (['(30)', '(10)', '(-2)'], {'dtype': '"""float64"""'}), "(30, 10, -2, dtype='float64')\n", (1509, 1538), True, 'import numpy as np\n'), ((1559, 1598), 'numpy.arange', 'np.arange', (['(100)', '(90)', '(-1)'], {'dtype': '"""float64"""'}), "(100, 90, -1, dtype='float64')\n", (1568, 1598), True, 'import numpy as np\n')]
|
"""
Test the fits-module by loading a dumped rtfits result and performing
all actions again
"""
import unittest
import numpy as np
import cloudpickle
import matplotlib.pyplot as plt
import copy
import os
class TestDUMPS(unittest.TestCase):
def setUp(self):
self.sig0_dB_path = os.path.dirname(__file__) + os.sep + "sig0_dB.dump"
self.sig0_linear_path = os.path.dirname(__file__) + os.sep + "sig0_linear.dump"
def load_data(self, path):
with open(path, 'rb') as file:
fit = cloudpickle.load(file)
return fit
# self.assertTrue(
# err < errdict[key],
# msg='derived error' + str(err) + 'too high for ' + str(key))
def test_rtplots(self):
for path, msg in zip([self.sig0_dB_path, self.sig0_linear_path],
['dB', 'linear']):
print(f'testing plotfunctions for {msg} fit')
fit = self.load_data(path)
# call performfit to re-initialize _fnevals functions
# and evaluate intermediate results
# (they might have been removed if symengine has been used)
fit.lsq_kwargs['verbose'] = 0
fit.performfit(intermediate_results=True,
print_progress=True)
# get list of available plot-methods
method_list = [func for func in dir(fit.plot) if
callable(getattr(fit.plot, func)) and not func.startswith("__")]
for function_name in method_list:
print(f'... {function_name}')
if function_name == 'printsig0analysis':
# check 'dataset' index slider
f, s1, s2 = fit.plot.__getattribute__(function_name)(
range2=2, range1=1, use_index='dataset')
# check update functions
s1.set_val(1)
s2.set_val(1)
plt.close(f)
# check 'groups' index slider
f, s1, s2 = fit.plot.__getattribute__(function_name)(
range2=2, range1=1, use_index='groups')
# check update functions
s1.set_val(1)
s2.set_val(1)
plt.close(f)
elif function_name == 'analyzemodel':
f, sliders, txt_but = fit.plot.__getattribute__(
function_name)()
# check update functions
for key, s in sliders.items():
s.set_val((s.valmax - s.valmin)/2.)
for key, b in txt_but.items():
if key == 'buttons':
# the initial status is ALL OFF
stat = b.get_status()
for i in range(len(stat)):
b.set_active(i)
# now all should be ON
self.assertTrue(np.all(b.get_status()))
for i in range(len(stat)):
b.set_active(i)
# now all should be OFF again
self.assertTrue(~np.all(b.get_status()))
else:
# set the boundaries of the parameters
if 'min' in key:
b.set_val(0.02)
if 'max' in key:
b.set_val(0.99)
plt.close(f)
elif function_name == 'intermediate_residuals':
# check default (e.g. pandas datetime-offset)
f = fit.plot.__getattribute__(function_name)(fmt='%d.%b %Y')
plt.close(f)
# check grouping with respect to incidence angles and
# convert the labels to degrees
f = fit.plot.__getattribute__(function_name)(
grp=('inc', 10),
label_formatter=lambda x,y:round(np.rad2deg(x),2))
plt.close(f)
# check grouping with respect to datetimes
f = fit.plot.__getattribute__(function_name)(grp='groups')
plt.close(f)
# check grouping with respect to the dataset index
f = fit.plot.__getattribute__(function_name)(
grp='dataset', plottype='2D', fmt='%Y %b %d (%H:%M)')
plt.close(f)
else:
f = fit.plot.__getattribute__(function_name)()
plt.close(f)
def test_performfit(self):
for path, msg in zip([self.sig0_dB_path, self.sig0_linear_path],
['dB', 'linear']):
print(f'testing plotfunctions for {msg} fit')
fit = self.load_data(path)
old_results = fit.res_dict
# print model definition
fit.model_definition
print('testing performfit')
fit.lsq_kwargs['verbose'] = 0
fit.performfit(intermediate_results=True,
print_progress=True)
# call _cache_info() to make coveralls happy
fit._cache_info()
fit.R._cache_info()
# try to dump the file again (without fit-details)
fit.dump(os.path.join(os.path.dirname(__file__), 'testdump1.dump'),
mini=True)
# try to dump the file again (with fit-details)
fit.dump(os.path.join(os.path.dirname(__file__), 'testdump2.dump'),
mini=False)
for key, val in old_results.items():
self.assertTrue(np.allclose(fit.res_dict[key],
old_results[key], atol=1e-4, rtol=1e-4),
msg=f'fitted values for {msg} fit of {key} ' +
f'differ by {np.subtract(fit.res_dict[key], old_results[key]).mean()}')
if __name__ == "__main__":
unittest.main()
|
[
"cloudpickle.load",
"numpy.allclose",
"numpy.subtract",
"matplotlib.pyplot.close",
"os.path.dirname",
"unittest.main",
"numpy.rad2deg"
] |
[((6174, 6189), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6187, 6189), False, 'import unittest\n'), ((521, 543), 'cloudpickle.load', 'cloudpickle.load', (['file'], {}), '(file)\n', (537, 543), False, 'import cloudpickle\n'), ((292, 317), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'import os\n'), ((376, 401), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (391, 401), False, 'import os\n'), ((1958, 1970), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (1967, 1970), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2309), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (2306, 2309), True, 'import matplotlib.pyplot as plt\n'), ((5503, 5528), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5518, 5528), False, 'import os\n'), ((5675, 5700), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5690, 5700), False, 'import os\n'), ((5836, 5910), 'numpy.allclose', 'np.allclose', (['fit.res_dict[key]', 'old_results[key]'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(fit.res_dict[key], old_results[key], atol=0.0001, rtol=0.0001)\n', (5847, 5910), True, 'import numpy as np\n'), ((3595, 3607), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (3604, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3840, 3852), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (3849, 3852), True, 'import matplotlib.pyplot as plt\n'), ((4181, 4193), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (4190, 4193), True, 'import matplotlib.pyplot as plt\n'), ((4356, 4368), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (4365, 4368), True, 'import matplotlib.pyplot as plt\n'), ((4604, 4616), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (4613, 4616), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4739), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (4736, 4739), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4156), 'numpy.rad2deg', 'np.rad2deg', (['x'], {}), '(x)\n', (4153, 4156), True, 'import numpy as np\n'), ((6081, 6129), 'numpy.subtract', 'np.subtract', (['fit.res_dict[key]', 'old_results[key]'], {}), '(fit.res_dict[key], old_results[key])\n', (6092, 6129), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# coding: utf-8
import numpy as np
import pandas as pd
import mysql.connector
class mysqlTool():
"""
This is the API used to connect to a MySQL database.
"""
def __init__(self,databaseNameString:str,hostAddress:str,userName:str,passWord:str):
self.targetDB = mysql.connector.connect(
host = hostAddress,
user = userName,
passwd = passWord,
database = databaseNameString
# buffered = True
)
self.targetCursor = self.targetDB.cursor(buffered=True)
def getAllTables(self):
self.targetCursor.execute("SHOW TABLES")
return [i for i in self.targetCursor]
def getColNameOfTable(self,tableNameString:str):
sql = "SELECT * FROM "+tableNameString
self.targetCursor.execute(sql)
return [i for i in self.targetCursor.column_names]
def selectAllFromTable(self,tableNameString:str):
sql = "SELECT * FROM "+tableNameString
self.targetCursor.execute(sql)
result = self.targetCursor.fetchall()
df = pd.DataFrame(result,columns = self.targetCursor.column_names)
return df
def selectDictFromTable(self,tableNameString:str,colNameAsKey:str,colNameAsValue:str):
try:
sql = "SELECT "+colNameAsKey+","+colNameAsValue+" FROM "+tableNameString
self.targetCursor.execute(sql)
result = self.targetCursor.fetchall()
resultDict = dict(zip([i[0] for i in result],[i[1] for i in result]))
return resultDict
except Exception as e:
print(e)
return {}
def selectColFromTable(self,tableNameString:str,colNameList:list):
colNameString = "".join(["`"+i+"`," for i in colNameList]).strip(",")
sql = "SELECT "+colNameString+" FROM "+tableNameString
self.targetCursor.execute(sql)
result = self.targetCursor.fetchall()
df = pd.DataFrame(result,columns = self.targetCursor.column_names)
return df
def selectColFromTableWithCondition(self,tableNameString:str,colNameList:list,conditionString:str):
colNameString = "".join(["`"+i+"`," for i in colNameList]).strip(",")
sql = "SELECT "+colNameString+" FROM "+tableNameString+" WHERE "+conditionString
self.targetCursor.execute(sql)
result = self.targetCursor.fetchall()
df = pd.DataFrame(result,columns = self.targetCursor.column_names)
return df
def selectAllFromTableWithCondition(self,tableNameString:str,conditionString:str):
sql = "SELECT * FROM "+tableNameString+" WHERE "+conditionString
self.targetCursor.execute(sql)
result = self.targetCursor.fetchall()
df = pd.DataFrame(result,columns = self.targetCursor.column_names)
return df
def insertRowIntoTable(self,tableNameString:str,valuesTuple:tuple):
sql = "SELECT * FROM "+tableNameString
self.targetCursor.execute(sql)
colNameString = "".join(["`"+i+"`," for i in self.targetCursor.column_names]).strip(", ")
sql = "INSERT INTO "+tableNameString+" ("+colNameString+") VALUES (" + "".join(["%s, " for i in range(len(self.targetCursor.column_names))]).strip(", ")+")"
val = valuesTuple
self.targetCursor.execute(sql,val)
self.targetDB.commit()
print("Insert Finished")
def replaceRowsIntoTable(self,tableNameString:str,valuesTupleList:list):
sql = "SELECT * FROM "+tableNameString
self.targetCursor.execute(sql)
colNameString = "".join(["`"+i+"`," for i in self.targetCursor.column_names]).strip(", ")
sql = "REPLACE INTO "+tableNameString+" ("+colNameString+") VALUES (" + "".join(["%s, " for i in range(len(self.targetCursor.column_names))]).strip(", ")+")"
val = valuesTupleList
self.targetCursor.executemany(sql, val)
self.targetDB.commit()
print("Insert Finished")
def replaceDFIntoTable(self,tableNameString:str,dataFrame:pd.DataFrame):
try:
import numpy as np
DBTableColNameList = self.getColNameOfTable(tableNameString)
df = dataFrame[DBTableColNameList]
# convert to tuple
valuesTapleList = df.apply(lambda x: tuple([None if type(i)==type(np.nan) and np.isnan(i) else i for i in x]),axis=1).to_list()
sql = "SELECT * FROM "+tableNameString
self.targetCursor.execute(sql)
colNameString = "".join(["`"+i+"`," for i in self.targetCursor.column_names]).strip(", ")
sql = "REPLACE INTO "+tableNameString+" ("+colNameString+") VALUES (" + "".join(["%s, " for i in range(len(self.targetCursor.column_names))]).strip(", ")+")"
val = valuesTapleList
self.targetCursor.executemany(sql, val)
self.targetDB.commit()
print("Replace Finished")
except Exception as e:
print("Replace Failed, Error:",e)
class oracleTool():
"""
This is the API used to connect to an Oracle database.
"""
def __init__(self,databaseNameString:str,hostAddress:str,port:int,userName:str,passWord:str):
from sqlalchemy import create_engine
uri = f'oracle+cx_oracle://{userName}:{passWord}@{hostAddress}:{port}/{databaseNameString}'
self.engine = create_engine(uri)
def readSql(self,sql:str):
data = pd.read_sql(sql,con=self.engine)
return data
class neo4jTool():
"""
This is the API used to connect to a Neo4j database.
"""
def __init__(self, hostAddress:str,port:int,userName:str,password:str):
from py2neo import Graph
self.engine = Graph(hostAddress+":"+str(port),auth=(userName,password))
def readCypher(self,cypher:str):
data = self.engine.run(cypher)
return data
def convertDataType(self,x):
if isinstance(x,np.float64):
return float(x)
elif hasattr(x,'strftime'):
return x.strftime("%Y-%m-%d")
elif isinstance(x,list):
return [self.convertDataType(i) for i in x]
else:
return x
def updateDFToNode(self,nodeList:list,df:pd.DataFrame,colAsName:str):
nameWaitedToBeUpdated = df[colAsName].to_list()
nameList = [i for i in nodeList if i['name'] in nameWaitedToBeUpdated]
tmp = df.set_index(colAsName,drop=True)
[[node.update({j:self.convertDataType(tmp.loc[node['name']][j])}) for j in tmp.columns if j!= colAsName] for node in nameList]
def convertDFToNode(self, nodeType:str, df:pd.DataFrame, colAsName:str):
from py2neo import Node
nodeList = [Node(nodeType, name=df.iloc[i][colAsName]) for i in range(df.shape[0])]
[[nodeList[i].update({j:self.convertDataType(df.iloc[i][j])}) for j in df.columns if j!=colAsName] for i in range(df.shape[0])]
return nodeList
def addNodeFromDF(self, nodeType:str, df:pd.DataFrame, colAsName:str):
nodeList = self.convertDFToNode(nodeType, df, colAsName)
[self.engine.create(i) for i in nodeList]
return nodeList
def selectAllLabel(self):
labelList = self.readCypher("MATCH (res) RETURN distinct labels(res)")
return [i[0][0] for i in labelList]
def selectAllNode(self, nodeType:str):
nodeList = self.readCypher(f'''MATCH (res:`{nodeType}`) RETURN res''')
return [i['res'] for i in nodeList]
def selectAttrFromNode(self, nodeType:str, attrList:list):
if type(attrList)==type(''):
attrList = [attrList]
else:
pass
attr = "'],res['".join(attrList)
nodeList = self.readCypher(f"MATCH (res:`{nodeType}`) RETURN res['"+attr+"']")
return nodeList.to_data_frame().rename(columns=dict(zip(["res['"+i+"']" for i in attrList],attrList)))
def selectAllNodeWithCondition(self, nodeType: str, conditionString:str, resultVariableName:str = 'res'):
nodeList = self.readCypher(f'''MATCH ({resultVariableName}:`{nodeType}`) WHERE {conditionString} RETURN {resultVariableName}''')
return [i[resultVariableName] for i in nodeList]
def selectAttrFromNodeWithCondition(self, nodeType: str, attrList: list, conditionString:str, resultVariableName:str = 'res'):
if type(attrList) == type(''):
attrList = [attrList]
else:
pass
attr = "'],res['".join(attrList)
nodeList = self.readCypher(f"MATCH ({resultVariableName}:`{nodeType}`) WHERE {conditionString} RETURN {resultVariableName}['" + attr + "']")
return nodeList.to_data_frame().rename(columns=dict(zip([f"{resultVariableName}['" + i + "']" for i in attrList], attrList)))
def connectNodeByAttr(self, nodeTypeLeft:str, nodeTypeRight:str, attrNameLeft:str, attrNameRight:str, relationName:str):
from py2neo import Relationship
leftNode = self.selectAllNode(nodeTypeLeft)
rightNode = self.selectAllNode(nodeTypeRight)
pair = [(left,right) for left in leftNode for right in rightNode if left[attrNameLeft]==right[attrNameRight]]
relation = [Relationship(i[0],relationName,i[1]) for i in pair]
[self.engine.create(i) for i in relation]
def replaceNode(self, nodeObj):
self.engine.push(nodeObj)
def replaceNodeFromDF(self, nodeType:str, df:pd.DataFrame, colAsName:str):
nodeList = self.selectAllNodeWithCondition(nodeType,"res.name IN ['"+"','".join(df[colAsName].to_list())+"']")
self.updateDFToNode(nodeList,df,colAsName)
oldNode = [i['name'] for i in nodeList]
tmp = df[[(i not in oldNode) for i in df[colAsName]]]
self.addNodeFromDF(nodeType,tmp,colAsName)
[self.engine.push(i) for i in nodeList]
def deleteAllNode(self):
self.engine.delete_all()
print("All Nodes Have Been Deleted")
def deleteNode(self, nodeObj):
self.engine.delete(nodeObj)
|
[
"py2neo.Node",
"sqlalchemy.create_engine",
"numpy.isnan",
"pandas.DataFrame",
"py2neo.Relationship",
"pandas.read_sql"
] |
[((946, 1006), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'self.targetCursor.column_names'}), '(result, columns=self.targetCursor.column_names)\n', (958, 1006), True, 'import pandas as pd\n'), ((1689, 1749), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'self.targetCursor.column_names'}), '(result, columns=self.targetCursor.column_names)\n', (1701, 1749), True, 'import pandas as pd\n'), ((2100, 2160), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'self.targetCursor.column_names'}), '(result, columns=self.targetCursor.column_names)\n', (2112, 2160), True, 'import pandas as pd\n'), ((2406, 2466), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'self.targetCursor.column_names'}), '(result, columns=self.targetCursor.column_names)\n', (2418, 2466), True, 'import pandas as pd\n'), ((4703, 4721), 'sqlalchemy.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (4716, 4721), False, 'from sqlalchemy import create_engine\n'), ((4760, 4793), 'pandas.read_sql', 'pd.read_sql', (['sql'], {'con': 'self.engine'}), '(sql, con=self.engine)\n', (4771, 4793), True, 'import pandas as pd\n'), ((5862, 5904), 'py2neo.Node', 'Node', (['nodeType'], {'name': 'df.iloc[i][colAsName]'}), '(nodeType, name=df.iloc[i][colAsName])\n', (5866, 5904), False, 'from py2neo import Node\n'), ((8097, 8135), 'py2neo.Relationship', 'Relationship', (['i[0]', 'relationName', 'i[1]'], {}), '(i[0], relationName, i[1])\n', (8109, 8135), False, 'from py2neo import Relationship\n'), ((3813, 3824), 'numpy.isnan', 'np.isnan', (['i'], {}), '(i)\n', (3821, 3824), True, 'import numpy as np\n')]
|
import numpy as np
import random
N = 10
def null(a, rtol=1e-5):
u, s, v = np.linalg.svd(a)
rank = (s > rtol*s[0]).sum()
return rank, v[rank:].T.copy()
def gen_data(N, noisy=False):
lower = -1
upper = 1
dim = 2
X = np.random.rand(dim, N)*(upper-lower)+lower
while True:
Xsample = np.concatenate(
(np.ones((1, dim)), np.random.rand(dim, dim)*(upper-lower)+lower))
k, w = null(Xsample.T)
y = np.sign(np.dot(w.T, np.concatenate((np.ones((1, N)), X))))
if np.all(y):
break
return (X, y, w)
def change_label(y):
idx = random.sample(range(1, N), N // 10)  # random.sample requires an integer count
y[idx] = -y[idx]
return y
if __name__ == '__main__':
X, y, w = gen_data(10)
print(X)
|
[
"numpy.linalg.svd",
"numpy.all",
"numpy.ones",
"numpy.random.rand"
] |
[((82, 98), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {}), '(a)\n', (95, 98), True, 'import numpy as np\n'), ((535, 544), 'numpy.all', 'np.all', (['y'], {}), '(y)\n', (541, 544), True, 'import numpy as np\n'), ((249, 271), 'numpy.random.rand', 'np.random.rand', (['dim', 'N'], {}), '(dim, N)\n', (263, 271), True, 'import numpy as np\n'), ((356, 373), 'numpy.ones', 'np.ones', (['(1, dim)'], {}), '((1, dim))\n', (363, 373), True, 'import numpy as np\n'), ((375, 399), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (389, 399), True, 'import numpy as np\n'), ((501, 516), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (508, 516), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy
import itertools
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Structure
from crystal import fillcell, tikz_atoms
def dfh(single = True, defect = False):
if defect:
single = False
a = 5.43
fcc = Lattice([[a/2,a/2,0],[a/2,0,a/2],[0,a/2,a/2]])
dfh = Structure(fcc,['Si']*2,[[0.00,0.00,0.00],[0.25,0.25,0.25]])
# Make the orthogonal cubic
dfh.make_supercell([[0,0,1],[1,-1,0],[1,1,-1]])
# Rotate the cell
rt = 0.70710678118654746
symmop = SymmOp.from_rotation_and_translation([[0,rt,rt],[0,rt,-rt],[1,0,0]])
dfh.apply_operation(symmop)
# Make supercell
if single == True:
dfh.make_supercell([1,1,8])
else:
dfh.make_supercell([2,2,8])
# Insert Mn atoms
for i,atom in enumerate(dfh):
if abs(atom.frac_coords[2] - 0.5) < 0.01 and atom.specie.symbol == 'Si':
dfh.append('Mn',atom.frac_coords)
del dfh[i]
# Do defects
if defect == 1:
defectMn = numpy.array([0,0,0.5])
for i,atom in enumerate(dfh):
if numpy.linalg.norm(atom.frac_coords - defectMn) < 0.01 and atom.specie.symbol == 'Mn':
dfh.append('Si',defectMn)
del dfh[i]
if defect == 2:
defectMn = numpy.array([0.5,0.5,0.5])
for i,atom in enumerate(dfh):
if numpy.linalg.norm(atom.frac_coords - defectMn) < 0.01 and atom.specie.symbol == 'Mn':
del dfh[i]
if defect == 3:
defectMn = numpy.array([0.5,0.25,0.5-1./32])
for i,atom in enumerate(dfh):
if numpy.linalg.norm(atom.frac_coords - defectMn) < 0.01 and atom.specie.symbol == 'Si':
dfh.append('Mn',defectMn)
del dfh[i]
return dfh
atoms = dfh(single = True)
atoms_full = fillcell(atoms)
bondatoms = []
snsite = numpy.array([0.625,0.625,0.625])
for sitei,sitej in itertools.combinations(atoms_full,2):
radius = sitei.specie.atomic_radius + sitej.specie.atomic_radius
bondlength = sitei.distance_from_point(sitej.coords)
if bondlength <= 1.25 * radius:
bondatoms.append((sitei,sitej))
tikz = tikz_atoms(atoms_full, bondatoms, drawcell = True)
|
[
"pymatgen.core.structure.Structure",
"itertools.combinations",
"crystal.fillcell",
"numpy.array",
"pymatgen.core.lattice.Lattice",
"numpy.linalg.norm",
"pymatgen.core.operations.SymmOp.from_rotation_and_translation",
"crystal.tikz_atoms"
] |
[((1909, 1924), 'crystal.fillcell', 'fillcell', (['atoms'], {}), '(atoms)\n', (1917, 1924), False, 'from crystal import fillcell, tikz_atoms\n'), ((1949, 1983), 'numpy.array', 'numpy.array', (['[0.625, 0.625, 0.625]'], {}), '([0.625, 0.625, 0.625])\n', (1960, 1983), False, 'import numpy\n'), ((2001, 2038), 'itertools.combinations', 'itertools.combinations', (['atoms_full', '(2)'], {}), '(atoms_full, 2)\n', (2023, 2038), False, 'import itertools\n'), ((2261, 2309), 'crystal.tikz_atoms', 'tikz_atoms', (['atoms_full', 'bondatoms'], {'drawcell': '(True)'}), '(atoms_full, bondatoms, drawcell=True)\n', (2271, 2309), False, 'from crystal import fillcell, tikz_atoms\n'), ((329, 395), 'pymatgen.core.lattice.Lattice', 'Lattice', (['[[a / 2, a / 2, 0], [a / 2, 0, a / 2], [0, a / 2, a / 2]]'], {}), '([[a / 2, a / 2, 0], [a / 2, 0, a / 2], [0, a / 2, a / 2]])\n', (336, 395), False, 'from pymatgen.core.lattice import Lattice\n'), ((386, 451), 'pymatgen.core.structure.Structure', 'Structure', (['fcc', "(['Si'] * 2)", '[[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]]'], {}), "(fcc, ['Si'] * 2, [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])\n", (395, 451), False, 'from pymatgen.core.structure import Structure\n'), ((604, 680), 'pymatgen.core.operations.SymmOp.from_rotation_and_translation', 'SymmOp.from_rotation_and_translation', (['[[0, rt, rt], [0, rt, -rt], [1, 0, 0]]'], {}), '([[0, rt, rt], [0, rt, -rt], [1, 0, 0]])\n', (640, 680), False, 'from pymatgen.core.operations import SymmOp\n'), ((1108, 1132), 'numpy.array', 'numpy.array', (['[0, 0, 0.5]'], {}), '([0, 0, 0.5])\n', (1119, 1132), False, 'import numpy\n'), ((1378, 1406), 'numpy.array', 'numpy.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (1389, 1406), False, 'import numpy\n'), ((1610, 1650), 'numpy.array', 'numpy.array', (['[0.5, 0.25, 0.5 - 1.0 / 32]'], {}), '([0.5, 0.25, 0.5 - 1.0 / 32])\n', (1621, 1650), False, 'import numpy\n'), ((1184, 1230), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(atom.frac_coords - defectMn)'], {}), '(atom.frac_coords - defectMn)\n', (1201, 1230), False, 'import numpy\n'), ((1458, 1504), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(atom.frac_coords - defectMn)'], {}), '(atom.frac_coords - defectMn)\n', (1475, 1504), False, 'import numpy\n'), ((1697, 1743), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(atom.frac_coords - defectMn)'], {}), '(atom.frac_coords - defectMn)\n', (1714, 1743), False, 'import numpy\n')]
|
# coding: utf-8
"""
@brief test log(time=1s)
"""
import unittest
import pandas
import numpy
from scipy.sparse.linalg import lsqr as sparse_lsqr
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from pandas_streaming.df import pandas_groupby_nan, numpy_types
class TestPandasHelper(ExtTestCase):
def test_pandas_groupbynan(self):
self.assertTrue(sparse_lsqr is not None)
types = [(int, -10), (float, -20.2), (str, "e"),
(bytes, bytes("a", "ascii"))]
skip = (numpy.bool_, numpy.complex64, numpy.complex128)
types += [(_, _(5)) for _ in numpy_types() if _ not in skip]
for ty in types:
data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "tt2=" +
str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "row_for_nan"}]
df = pandas.DataFrame(data)
gr = pandas_groupby_nan(df, "value")
co = gr.sum()
li = list(co["value"])
try:
self.assertIsInstance(li[-1], float)
except AssertionError as e:
raise AssertionError("Issue with {0}".format(ty)) from e
try:
self.assertTrue(numpy.isnan(li[-1]))
except AssertionError as e:
raise AssertionError(
"Issue with value {}\n--df--\n{}\n--gr--\n{}\n--co--\n{}".format(
li, df, gr.count(), co)) from e
for ty in types:
data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "tt2=" +
str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "row_for_nan"}]
df = pandas.DataFrame(data)
try:
gr = pandas_groupby_nan(df, ("value", "this"))
t = True
raise Exception("---")
except TypeError:
t = False
if t:
co = gr.sum()
li = list(co["value"])
self.assertIsInstance(li[-1], float)
self.assertTrue(numpy.isnan(li[-1]))
try:
gr = pandas_groupby_nan(df, ["value", "this"])
t = True
except (TypeError, NotImplementedError):
t = False
if t:
co = gr.sum()
li = list(co["value"])
self.assertEqual(len(li), 2)
def test_pandas_groupbynan_tuple(self):
data = [dict(a="a", b="b", c="c", n=1), dict(
b="b", n=2), dict(a="a", n=3), dict(c="c", n=4)]
df = pandas.DataFrame(data)
gr = df.groupby(["a", "b", "c"]).sum()
self.assertEqual(gr.shape, (1, 1))
for nanback in [True, False]:
try:
gr2_ = pandas_groupby_nan(
df, ["a", "b", "c"], nanback=nanback, suffix="NAN")
except NotImplementedError:
continue
gr2 = gr2_.sum().sort_values("n")
self.assertEqual(gr2.shape, (4, 4))
d = gr2.to_dict("records")
self.assertEqual(d[0]["a"], "a")
self.assertEqual(d[0]["b"], "b")
self.assertEqual(d[0]["c"], "c")
self.assertEqual(d[0]["n"], 1)
self.assertEqual(d[1]["a"], "NAN")
def test_pandas_groupbynan_regular(self):
df = pandas.DataFrame([dict(a="a", b=1), dict(a="a", b=2)])
gr = df.groupby(["a"]).sum()
gr2_ = pandas_groupby_nan(df, ["a"]).sum()
self.assertEqualDataFrame(gr, gr2_)
def test_pandas_groupbynan_regular_nanback(self):
df = pandas.DataFrame([dict(a="a", b=1, cc=0), dict(a="a", b=2)])
gr = df.groupby(["a", "cc"]).sum()
self.assertEqual(len(gr), 1)
self.assertRaise(
lambda: pandas_groupby_nan(df, ["a", "cc"], nanback=True).sum(),
NotImplementedError)
def test_pandas_groupbynan_doc(self):
data = [dict(a=2, ind="a", n=1),
dict(a=2, ind="a"),
dict(a=3, ind="b"),
dict(a=30)]
df = pandas.DataFrame(data)
gr2 = pandas_groupby_nan(df, ["ind"]).sum()
ind = list(gr2['ind'])
self.assertTrue(numpy.isnan(ind[-1]))
val = list(gr2['a'])
self.assertEqual(val[-1], 30)
@ignore_warnings(UserWarning)
def test_pandas_groupbynan_doc2(self):
data = [dict(a=2, ind="a", n=1),
dict(a=2, ind="a"),
dict(a=3, ind="b"),
dict(a=30)]
df = pandas.DataFrame(data)
gr2 = pandas_groupby_nan(df, ["ind", "a"], nanback=False).sum()
ind = list(gr2['ind'])
self.assertEqual(ind[-1], "²nan")
def test_pandas_groupbynan_doc3(self):
data = [dict(a=2, ind="a", n=1),
dict(a=2, ind="a"),
dict(a=3, ind="b"),
dict(a=30)]
df = pandas.DataFrame(data)
self.assertRaise(lambda: pandas_groupby_nan(df, ["ind", "n"]).sum(),
NotImplementedError)
# ind = list(gr2['ind'])
# self.assertTrue(numpy.isnan(ind[-1]))
if __name__ == "__main__":
unittest.main()
|
[
"pandas.DataFrame",
"numpy.isnan",
"pandas_streaming.df.numpy_types",
"unittest.main",
"pyquickhelper.pycode.ignore_warnings",
"pandas_streaming.df.pandas_groupby_nan"
] |
[((4454, 4482), 'pyquickhelper.pycode.ignore_warnings', 'ignore_warnings', (['UserWarning'], {}), '(UserWarning)\n', (4469, 4482), False, 'from pyquickhelper.pycode import ExtTestCase, ignore_warnings\n'), ((5306, 5321), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5319, 5321), False, 'import unittest\n'), ((2733, 2755), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (2749, 2755), False, 'import pandas\n'), ((4229, 4251), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (4245, 4251), False, 'import pandas\n'), ((4680, 4702), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (4696, 4702), False, 'import pandas\n'), ((5046, 5068), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (5062, 5068), False, 'import pandas\n'), ((930, 952), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (946, 952), False, 'import pandas\n'), ((970, 1001), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', '"""value"""'], {}), "(df, 'value')\n", (988, 1001), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((1827, 1849), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {}), '(data)\n', (1843, 1849), False, 'import pandas\n'), ((4359, 4379), 'numpy.isnan', 'numpy.isnan', (['ind[-1]'], {}), '(ind[-1])\n', (4370, 4379), False, 'import numpy\n'), ((607, 620), 'pandas_streaming.df.numpy_types', 'numpy_types', ([], {}), '()\n', (618, 620), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((1888, 1929), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "('value', 'this')"], {}), "(df, ('value', 'this'))\n", (1906, 1929), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((2281, 2322), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['value', 'this']"], {}), "(df, ['value', 'this'])\n", (2299, 2322), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((2925, 2995), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['a', 'b', 'c']"], {'nanback': 'nanback', 'suffix': '"""NAN"""'}), "(df, ['a', 'b', 'c'], nanback=nanback, suffix='NAN')\n", (2943, 2995), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((3607, 3636), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['a']"], {}), "(df, ['a'])\n", (3625, 3636), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((4266, 4297), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['ind']"], {}), "(df, ['ind'])\n", (4284, 4297), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((4717, 4768), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['ind', 'a']"], {'nanback': '(False)'}), "(df, ['ind', 'a'], nanback=False)\n", (4735, 4768), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((1295, 1314), 'numpy.isnan', 'numpy.isnan', (['li[-1]'], {}), '(li[-1])\n', (1306, 1314), False, 'import numpy\n'), ((2222, 2241), 'numpy.isnan', 'numpy.isnan', (['li[-1]'], {}), '(li[-1])\n', (2233, 2241), False, 'import numpy\n'), ((3942, 3991), 'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['a', 'cc']"], {'nanback': '(True)'}), "(df, ['a', 'cc'], nanback=True)\n", (3960, 3991), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n'), ((5102, 5138), 
'pandas_streaming.df.pandas_groupby_nan', 'pandas_groupby_nan', (['df', "['ind', 'n']"], {}), "(df, ['ind', 'n'])\n", (5120, 5138), False, 'from pandas_streaming.df import pandas_groupby_nan, numpy_types\n')]
|
import shutil
import numpy as np
ALL_SYNTHS_LIST = 'synth_imgs.txt'
TRAIN_IMAGES_LIST = 'train_imgs.txt'
VAL_IMAGES_LIST = 'val_imgs.txt'
TEST_IMAGES_LIST = 'test_imgs.txt'
TRAIN_STOP = 342000
VAL_STOP = TRAIN_STOP + 38000
'''
390000 examples: 342000 train and 38000 val (a 90/10 split of 380000), 10000 test
'''
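# Sanity check of the split arithmetic described above (added for clarity):
# 90/10 of the 380000 labelled examples go to train/val, the remaining 10000 are test.
assert TRAIN_STOP == 380000 * 9 // 10
assert VAL_STOP == 380000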
with open(ALL_SYNTHS_LIST,'r') as img_list:
files = np.array(img_list.read().splitlines())
files = files[np.random.permutation(files.shape[0])]
with open(TRAIN_IMAGES_LIST,"w") as list_file:
for i in range(TRAIN_STOP):
shutil.copy(files[i],'./train_imgs/')
shutil.copy(files[i][:-4] + "_r.jpg",'./train_imgs/')
shutil.copy(files[i][:-4] + "_b.jpg",'./train_imgs/')
fname = files[i].split('/')
fname = fname[len(fname) - 1]
list_file.write('./train_imgs/' + fname)
list_file.write('\n')
print("Copying training examples ..." + str(i) + "/342000")
with open(VAL_IMAGES_LIST,"w") as list_file:
for i in range(TRAIN_STOP,VAL_STOP):
shutil.copy(files[i],'./val_imgs/')
shutil.copy(files[i][:-4] + "_r.jpg",'./val_imgs/')
shutil.copy(files[i][:-4] + "_b.jpg",'./val_imgs/')
fname = files[i].split('/')
fname = fname[len(fname) - 1]
list_file.write('./val_imgs/' + fname)
list_file.write('\n')
print("Copying validation examples ..." + str(i) + "/38000")
with open(TEST_IMAGES_LIST,"w") as list_file:
for i in range(VAL_STOP,files.shape[0]):
shutil.copy(files[i],'./test_imgs/')
shutil.copy(files[i][:-4] + "_r.jpg",'./test_imgs/')
shutil.copy(files[i][:-4] + "_b.jpg",'./test_imgs/')
fname = files[i].split('/')
fname = fname[len(fname) - 1]
list_file.write('./test_imgs/' + fname)
list_file.write('\n')
print("Copying testing examples ..." + str(i) + "/10000")
|
[
"shutil.copy",
"numpy.random.permutation"
] |
[((428, 465), 'numpy.random.permutation', 'np.random.permutation', (['files.shape[0]'], {}), '(files.shape[0])\n', (449, 465), True, 'import numpy as np\n'), ((556, 594), 'shutil.copy', 'shutil.copy', (['files[i]', '"""./train_imgs/"""'], {}), "(files[i], './train_imgs/')\n", (567, 594), False, 'import shutil\n'), ((602, 656), 'shutil.copy', 'shutil.copy', (["(files[i][:-4] + '_r.jpg')", '"""./train_imgs/"""'], {}), "(files[i][:-4] + '_r.jpg', './train_imgs/')\n", (613, 656), False, 'import shutil\n'), ((664, 718), 'shutil.copy', 'shutil.copy', (["(files[i][:-4] + '_b.jpg')", '"""./train_imgs/"""'], {}), "(files[i][:-4] + '_b.jpg', './train_imgs/')\n", (675, 718), False, 'import shutil\n'), ((1034, 1070), 'shutil.copy', 'shutil.copy', (['files[i]', '"""./val_imgs/"""'], {}), "(files[i], './val_imgs/')\n", (1045, 1070), False, 'import shutil\n'), ((1078, 1130), 'shutil.copy', 'shutil.copy', (["(files[i][:-4] + '_r.jpg')", '"""./val_imgs/"""'], {}), "(files[i][:-4] + '_r.jpg', './val_imgs/')\n", (1089, 1130), False, 'import shutil\n'), ((1138, 1190), 'shutil.copy', 'shutil.copy', (["(files[i][:-4] + '_b.jpg')", '"""./val_imgs/"""'], {}), "(files[i][:-4] + '_b.jpg', './val_imgs/')\n", (1149, 1190), False, 'import shutil\n'), ((1510, 1547), 'shutil.copy', 'shutil.copy', (['files[i]', '"""./test_imgs/"""'], {}), "(files[i], './test_imgs/')\n", (1521, 1547), False, 'import shutil\n'), ((1555, 1608), 'shutil.copy', 'shutil.copy', (["(files[i][:-4] + '_r.jpg')", '"""./test_imgs/"""'], {}), "(files[i][:-4] + '_r.jpg', './test_imgs/')\n", (1566, 1608), False, 'import shutil\n'), ((1616, 1669), 'shutil.copy', 'shutil.copy', (["(files[i][:-4] + '_b.jpg')", '"""./test_imgs/"""'], {}), "(files[i][:-4] + '_b.jpg', './test_imgs/')\n", (1627, 1669), False, 'import shutil\n')]
|
import importlib
import json
import os
import pdb
import sys
import fnet
import pandas as pd
import tifffile
import numpy as np
import torch  # used by pearson_loss below but missing from the original imports
from fnet.transforms import normalize
def pearson_loss(x, y):
#x = output
#y = target
vx = x - torch.mean(x)
vy = y - torch.mean(y)
cost = torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2)))
return cost
# code retrieved on 21.05.21 from: https://github.com/pytorch/pytorch/issues/1254
def pearsonr(x, y):
"""
Mimics `scipy.stats.pearsonr`
Arguments
---------
x : 1D torch.Tensor
y : 1D torch.Tensor
Returns
-------
r_val : float
pearsonr correlation coefficient between x and y
Scipy docs ref:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
Scipy code ref:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py#L2975-L3033
Example:
>>> x = np.random.randn(100)
>>> y = np.random.randn(100)
>>> sp_corr = scipy.stats.pearsonr(x, y)[0]
>>> th_corr = pearsonr(torch.from_numpy(x), torch.from_numpy(y))
>>> np.allclose(sp_corr, th_corr)
"""
x = x.detach().cpu().numpy().flatten() #pred
y = y.detach().cpu().numpy().flatten() #target
pearson_img = np.corrcoef(x,y)
r_val = pearson_img[0,1]
return r_val
def load_model(path_model, gpu_ids=0, module='fnet_model', in_channels=1, out_channels=1):
module_fnet_model = importlib.import_module('fnet.' + module)
if os.path.isdir(path_model):
path_model = os.path.join(path_model, 'model.p')
model = module_fnet_model.Model(in_channels=in_channels, out_channels=out_channels)
model.load_state(path_model, gpu_ids=gpu_ids)
return model
def load_model_from_dir(path_model_dir, gpu_ids=0, in_channels=1, out_channels=1):
assert os.path.isdir(path_model_dir)
path_model_state = os.path.join(path_model_dir, 'model.p')
model = fnet.fnet_model.Model(in_channels=in_channels, out_channels=out_channels)
model.load_state(path_model_state, gpu_ids=gpu_ids)
return model
def compute_dataset_min_max_ranges(train_path, val_path=None, norm=False):
df_train = pd.read_csv(train_path)
if val_path is not None:
df_val = pd.read_csv(val_path)
df=pd.concat([df_train, df_val])
else:
df=df_train
min_bright=[]
max_bright =[]
min_inf = []
max_inf = []
min_dapi = []
max_dapi = []
if df.iloc[0,:]['target_channel'] is None:
no_target = True
else:
no_target = False
if df.iloc[0,:]['dapi_channel'] is None:
no_dapi = True
else:
no_dapi = False
for index in range(len(df)):
element=df.iloc[index, :]
image = tifffile.imread(element['file'])
if not no_target:
image_infection = image[element['target_channel'],:,:]
min_inf.append(np.min(image_infection))
max_inf.append(np.max(image_infection))
if not no_dapi:
image_dapi = image[element['dapi_channel'],:,:]
min_dapi.append(np.min(image_dapi))
max_dapi.append(np.max(image_dapi))
image_bright = image[element['signal_channel'],:,:]
if norm:
image_bright = normalize(image_bright)
min_bright.append(np.min(image_bright))
max_bright.append(np.max(image_bright))
min_inf = np.min(np.array(min_inf)) if not no_target else None
max_inf = np.max(np.array(max_inf)) if not no_target else None
min_dapi = np.min(np.array(min_dapi)) if not no_dapi else None
max_dapi = np.max(np.array(max_dapi)) if not no_dapi else None
min_bright = np.min(np.array(min_bright))
max_bright = np.max(np.array(max_bright))
return [min_bright, max_bright], [min_inf, max_inf], [min_dapi, max_dapi]
|
[
"fnet.fnet_model.Model",
"importlib.import_module",
"pandas.read_csv",
"numpy.corrcoef",
"tifffile.imread",
"os.path.join",
"numpy.max",
"numpy.array",
"os.path.isdir",
"numpy.min",
"pandas.concat",
"fnet.transforms.normalize"
] |
[((1322, 1339), 'numpy.corrcoef', 'np.corrcoef', (['x', 'y'], {}), '(x, y)\n', (1333, 1339), True, 'import numpy as np\n'), ((1503, 1544), 'importlib.import_module', 'importlib.import_module', (["('fnet.' + module)"], {}), "('fnet.' + module)\n", (1526, 1544), False, 'import importlib\n'), ((1552, 1577), 'os.path.isdir', 'os.path.isdir', (['path_model'], {}), '(path_model)\n', (1565, 1577), False, 'import os\n'), ((1886, 1915), 'os.path.isdir', 'os.path.isdir', (['path_model_dir'], {}), '(path_model_dir)\n', (1899, 1915), False, 'import os\n'), ((1939, 1978), 'os.path.join', 'os.path.join', (['path_model_dir', '"""model.p"""'], {}), "(path_model_dir, 'model.p')\n", (1951, 1978), False, 'import os\n'), ((1991, 2064), 'fnet.fnet_model.Model', 'fnet.fnet_model.Model', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels'}), '(in_channels=in_channels, out_channels=out_channels)\n', (2012, 2064), False, 'import fnet\n'), ((2234, 2257), 'pandas.read_csv', 'pd.read_csv', (['train_path'], {}), '(train_path)\n', (2245, 2257), True, 'import pandas as pd\n'), ((1600, 1635), 'os.path.join', 'os.path.join', (['path_model', '"""model.p"""'], {}), "(path_model, 'model.p')\n", (1612, 1635), False, 'import os\n'), ((2304, 2325), 'pandas.read_csv', 'pd.read_csv', (['val_path'], {}), '(val_path)\n', (2315, 2325), True, 'import pandas as pd\n'), ((2337, 2366), 'pandas.concat', 'pd.concat', (['[df_train, df_val]'], {}), '([df_train, df_val])\n', (2346, 2366), True, 'import pandas as pd\n'), ((2825, 2857), 'tifffile.imread', 'tifffile.imread', (["element['file']"], {}), "(element['file'])\n", (2840, 2857), False, 'import tifffile\n'), ((3794, 3814), 'numpy.array', 'np.array', (['min_bright'], {}), '(min_bright)\n', (3802, 3814), True, 'import numpy as np\n'), ((3840, 3860), 'numpy.array', 'np.array', (['max_bright'], {}), '(max_bright)\n', (3848, 3860), True, 'import numpy as np\n'), ((3371, 3394), 'fnet.transforms.normalize', 'normalize', (['image_bright'], {}), '(image_bright)\n', (3380, 3394), False, 'from fnet.transforms import normalize\n'), ((3421, 3441), 'numpy.min', 'np.min', (['image_bright'], {}), '(image_bright)\n', (3427, 3441), True, 'import numpy as np\n'), ((3469, 3489), 'numpy.max', 'np.max', (['image_bright'], {}), '(image_bright)\n', (3475, 3489), True, 'import numpy as np\n'), ((3517, 3534), 'numpy.array', 'np.array', (['min_inf'], {}), '(min_inf)\n', (3525, 3534), True, 'import numpy as np\n'), ((3584, 3601), 'numpy.array', 'np.array', (['max_inf'], {}), '(max_inf)\n', (3592, 3601), True, 'import numpy as np\n'), ((3653, 3671), 'numpy.array', 'np.array', (['min_dapi'], {}), '(min_dapi)\n', (3661, 3671), True, 'import numpy as np\n'), ((3720, 3738), 'numpy.array', 'np.array', (['max_dapi'], {}), '(max_dapi)\n', (3728, 3738), True, 'import numpy as np\n'), ((2987, 3010), 'numpy.min', 'np.min', (['image_infection'], {}), '(image_infection)\n', (2993, 3010), True, 'import numpy as np\n'), ((3039, 3062), 'numpy.max', 'np.max', (['image_infection'], {}), '(image_infection)\n', (3045, 3062), True, 'import numpy as np\n'), ((3185, 3203), 'numpy.min', 'np.min', (['image_dapi'], {}), '(image_dapi)\n', (3191, 3203), True, 'import numpy as np\n'), ((3233, 3251), 'numpy.max', 'np.max', (['image_dapi'], {}), '(image_dapi)\n', (3239, 3251), True, 'import numpy as np\n')]
|
import numpy as np
import numpy.random as rand
from functools import reduce
class Network:
def __init__(self, layer_sizes):
# layer_sizes: list of numbers representing number of neurons per layer
# Create a numpy array of biases for each layer except the (first) input layer
self.biases = [rand.randn(l, 1) for l in layer_sizes[1:]]
# The weights are an array of matrices. 'Between' each two layers is one matrix.
        # Each row holds the incoming weights for one node of the next layer
self.weights = [rand.randn(y, x) for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]
def feed_forward(self, input):
# Perform a left fold
return reduce(lambda input, b_w: np.dot(b_w[1], input)+b_w[0], zip(self.biases, self.weights), input)
def sigmoid(z):
    # The sigmoid function (module-level, so sigmoid_deriv can call it directly)
    return 1.0 / (1.0 + np.exp(-z))
def sigmoid_deriv(z):
    # First-order derivative of the sigmoid function
    return sigmoid(z) * (1 - sigmoid(z))
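# A minimal usage sketch, assuming only the Network class and sigmoid helpers
# above: a 2-3-1 network maps a (2, 1) column vector to a (1, 1) output.  Note
# that feed_forward as written composes affine maps only; the sigmoid helpers
# are defined separately.
if __name__ == '__main__':
    net = Network([2, 3, 1])
    x = np.ones((2, 1))
    print(net.feed_forward(x).shape)    # expected: (1, 1)
    print(sigmoid(np.array([0.0])))      # expected: [0.5]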
|
[
"numpy.exp",
"numpy.dot",
"numpy.random.randn"
] |
[((322, 338), 'numpy.random.randn', 'rand.randn', (['l', '(1)'], {}), '(l, 1)\n', (332, 338), True, 'import numpy.random as rand\n'), ((539, 555), 'numpy.random.randn', 'rand.randn', (['y', 'x'], {}), '(y, x)\n', (549, 555), True, 'import numpy.random as rand\n'), ((857, 867), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (863, 867), True, 'import numpy as np\n'), ((715, 736), 'numpy.dot', 'np.dot', (['b_w[1]', 'input'], {}), '(b_w[1], input)\n', (721, 736), True, 'import numpy as np\n')]
|
import collections
import torch
import einops
import cached_property
import padertorch as pt
# loss: torch.Tensor = None,
# losses: dict = None,
# scalars: dict = None,
# histograms: dict = None,
# audios: dict = None,
# images: dict = None,
class ReviewSummary(collections.abc.Mapping):
"""
>>> review_summary = ReviewSummary()
>>> review_summary
ReviewSummary(prefix='', _data={})
"""
_keys = set(pt.train.hooks.SummaryHook.empty_summary_dict().keys()) | {
'loss', 'losses'
}
def __init__(self, prefix='', _data=None, sampling_rate=None, visible_dB=60):
if _data is None:
_data = {}
self.data = _data
self.prefix = prefix
self.sampling_rate = sampling_rate
self.visible_dB = visible_dB
def add_to_loss(self, value):
assert torch.isfinite(value), value
if 'loss' in self.data:
self.data['loss'] = self.data['loss'] + value
else:
self.data['loss'] = value
def add_scalar(self, name, *value):
# Save the mean of all added values
value = pt.data.batch.example_to_numpy(value, detach=True)
self.data.setdefault(
'scalars',
{}
).setdefault(
f'{self.prefix}{name}',
[]
).extend(value)
def add_audio(self, name, signal, sampling_rate=None, batch_first=None,
normalize=True):
if sampling_rate is None:
sampling_rate = self.sampling_rate
assert sampling_rate is not None, sampling_rate
audio = pt.summary.audio(
signal=signal, sampling_rate=sampling_rate,
batch_first=batch_first, normalize=normalize
)
self.data.setdefault(
'audios',
{}
)[f'{self.prefix}{name}'] = audio
def add_text(self, name, text):
assert isinstance(text, str), (type(text), text)
self.data.setdefault(
'texts',
{}
)[f'{self.prefix}{name}'] = text
def _rearrange(self, array, rearrange):
if rearrange is not None:
return einops.rearrange(array, rearrange)
else:
return array
def add_image(self, name, image):
# Save the last added value
image = pt.utils.to_numpy(image, detach=True)
if image.ndim != 3:
raise AssertionError(
'Did you forgot to call "pt.summary.*_to_image"?\n'
f'Expect ndim == 3, got shape {image.shape}.'
)
self.data.setdefault(
'images',
{}
)[f'{self.prefix}{name}'] = image
def add_stft_image(
self, name, signal,
*, batch_first=None, color='viridis', rearrange=None):
signal = self._rearrange(signal, rearrange)
image = pt.summary.stft_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB)
self.add_image(name, image)
def add_spectrogram_image(
self, name, signal,
*, batch_first=None, color='viridis', rearrange=None):
signal = self._rearrange(signal, rearrange)
image = pt.summary.spectrogram_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB)
self.add_image(name, image)
def add_mask_image(self, name, mask, *, batch_first=None, color='viridis', rearrange=None):
mask = self._rearrange(mask, rearrange)
image = pt.summary.mask_to_image(mask, batch_first=batch_first, color=color)
self.add_image(name, image)
def add_histogram(self, name, values):
value = pt.utils.to_numpy(values, detach=True)
self.data.setdefault(
'histograms',
{}
).setdefault(
f'{self.prefix}{name}',
[]
).append(value)
def __contains__(self, item):
return item in self.data
def __getitem__(self, key):
assert key in self._keys, (key, self._keys)
return self.data[key]
def __setitem__(self, key, value):
assert key in self._keys, (key, self._keys)
self.data[key] = value
def get(self, item, default):
if item in self:
return self.data[item]
else:
return default
def pop(self, *args, **kwargs):
"""pop(key[, default])"""
return self.data.pop(*args, **kwargs)
def setdefault(self, key, default):
self.data.setdefault(key, default)
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
return f'{self.__class__.__name__}(prefix={self.prefix!r}, _data={dict(self)!r})'
def _repr_pretty_(self, p, cycle):
"""
>>> review_summary = ReviewSummary()
>>> review_summary.add_to_loss(1)
>>> review_summary.add_scalar('abc', 2)
>>> review_summary
ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}})
>>> from IPython.lib.pretty import pprint
>>> pprint(review_summary)
ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}})
>>> pprint(review_summary, max_width=79-18)
ReviewSummary(
prefix='',
_data={'loss': 1, 'scalars': {'abc': [2]}}
)
>>> pprint(review_summary, max_width=79-40)
ReviewSummary(
prefix='',
_data={'loss': 1,
'scalars': {'abc': [2]}}
)
"""
if cycle:
p.text(f'{self.__class__.__name__}(...)')
else:
txt = f'{self.__class__.__name__}('
with p.group(4, txt, ''):
p.breakable(sep='')
p.text('prefix=')
p.pretty(self.prefix)
p.text(',')
p.breakable()
txt = '_data='
with p.group(len(txt), txt, ''):
p.pretty(dict(self))
p.breakable('')
p.text(')')
class _Plotter:
def __init__(self, review: 'ReviewSummary'):
self.review = review
def image(
self, key, origin='lower', **kwargs
):
import numpy as np
import matplotlib.pyplot as plt
kwargs = {
'origin': origin,
**kwargs,
}
if key not in self.review['images']:
from paderbox.utils.mapping import DispatchError
raise DispatchError(key, self.review['images'].keys())
X = np.einsum('chw->hwc', self.review['images'][key])
if origin == 'lower':
X = X[::-1]
else:
assert origin == 'upper'
# ToDo: Where is AxesImage defined?
ax: 'plt.AxesImage' = plt.imshow(
X,
**kwargs,
)
# ax.set_title(key)
plt.title(key)
plt.grid(False)
return ax
def images(
self,
columns=1,
font_scale=1.0,
line_width=3,
figure_size=(8.0, 6.0),
):
from paderbox.visualization import axes_context
from paderbox.visualization.context_manager import _AxesHandler
with axes_context(
columns=columns,
font_scale=font_scale,
line_width=line_width,
figure_size=figure_size,
) as axes:
axes: _AxesHandler
for k in self.review['images']:
axes.new.grid(False) # set gca
self.image(k)
@cached_property.cached_property
def plot(self):
return self._Plotter(self)
def play(self, key=None):
if key is None:
for k in self['audios'].keys():
self.play(k)
elif key in self['audios']:
from paderbox.io.play import play
data, sample_rate = self['audios'][key]
play(data, sample_rate=sample_rate, name=key)
else:
from paderbox.utils.mapping import DispatchError
raise DispatchError(key, self['audios'].keys())
|
[
"matplotlib.pyplot.imshow",
"padertorch.summary.spectrogram_to_image",
"matplotlib.pyplot.grid",
"padertorch.data.batch.example_to_numpy",
"padertorch.summary.mask_to_image",
"torch.isfinite",
"einops.rearrange",
"paderbox.visualization.axes_context",
"paderbox.io.play.play",
"numpy.einsum",
"padertorch.train.hooks.SummaryHook.empty_summary_dict",
"padertorch.summary.audio",
"matplotlib.pyplot.title",
"padertorch.summary.stft_to_image",
"padertorch.utils.to_numpy"
] |
[((834, 855), 'torch.isfinite', 'torch.isfinite', (['value'], {}), '(value)\n', (848, 855), False, 'import torch\n'), ((1106, 1156), 'padertorch.data.batch.example_to_numpy', 'pt.data.batch.example_to_numpy', (['value'], {'detach': '(True)'}), '(value, detach=True)\n', (1136, 1156), True, 'import padertorch as pt\n'), ((1587, 1698), 'padertorch.summary.audio', 'pt.summary.audio', ([], {'signal': 'signal', 'sampling_rate': 'sampling_rate', 'batch_first': 'batch_first', 'normalize': 'normalize'}), '(signal=signal, sampling_rate=sampling_rate, batch_first=\n batch_first, normalize=normalize)\n', (1603, 1698), True, 'import padertorch as pt\n'), ((2309, 2346), 'padertorch.utils.to_numpy', 'pt.utils.to_numpy', (['image'], {'detach': '(True)'}), '(image, detach=True)\n', (2326, 2346), True, 'import padertorch as pt\n'), ((2854, 2956), 'padertorch.summary.stft_to_image', 'pt.summary.stft_to_image', (['signal'], {'batch_first': 'batch_first', 'color': 'color', 'visible_dB': 'self.visible_dB'}), '(signal, batch_first=batch_first, color=color,\n visible_dB=self.visible_dB)\n', (2878, 2956), True, 'import padertorch as pt\n'), ((3188, 3298), 'padertorch.summary.spectrogram_to_image', 'pt.summary.spectrogram_to_image', (['signal'], {'batch_first': 'batch_first', 'color': 'color', 'visible_dB': 'self.visible_dB'}), '(signal, batch_first=batch_first, color=\n color, visible_dB=self.visible_dB)\n', (3219, 3298), True, 'import padertorch as pt\n'), ((3491, 3559), 'padertorch.summary.mask_to_image', 'pt.summary.mask_to_image', (['mask'], {'batch_first': 'batch_first', 'color': 'color'}), '(mask, batch_first=batch_first, color=color)\n', (3515, 3559), True, 'import padertorch as pt\n'), ((3656, 3694), 'padertorch.utils.to_numpy', 'pt.utils.to_numpy', (['values'], {'detach': '(True)'}), '(values, detach=True)\n', (3673, 3694), True, 'import padertorch as pt\n'), ((2144, 2178), 'einops.rearrange', 'einops.rearrange', (['array', 'rearrange'], {}), '(array, rearrange)\n', (2160, 2178), False, 'import einops\n'), ((6616, 6665), 'numpy.einsum', 'np.einsum', (['"""chw->hwc"""', "self.review['images'][key]"], {}), "('chw->hwc', self.review['images'][key])\n", (6625, 6665), True, 'import numpy as np\n'), ((6871, 6894), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X'], {}), '(X, **kwargs)\n', (6881, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6986, 7000), 'matplotlib.pyplot.title', 'plt.title', (['key'], {}), '(key)\n', (6995, 7000), True, 'import matplotlib.pyplot as plt\n'), ((7013, 7028), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (7021, 7028), True, 'import matplotlib.pyplot as plt\n'), ((7387, 7491), 'paderbox.visualization.axes_context', 'axes_context', ([], {'columns': 'columns', 'font_scale': 'font_scale', 'line_width': 'line_width', 'figure_size': 'figure_size'}), '(columns=columns, font_scale=font_scale, line_width=line_width,\n figure_size=figure_size)\n', (7399, 7491), False, 'from paderbox.visualization import axes_context\n'), ((8129, 8174), 'paderbox.io.play.play', 'play', (['data'], {'sample_rate': 'sample_rate', 'name': 'key'}), '(data, sample_rate=sample_rate, name=key)\n', (8133, 8174), False, 'from paderbox.io.play import play\n'), ((426, 473), 'padertorch.train.hooks.SummaryHook.empty_summary_dict', 'pt.train.hooks.SummaryHook.empty_summary_dict', ([], {}), '()\n', (471, 473), True, 'import padertorch as pt\n')]
|
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import HuberRegressor
import numpy as np
import pickle
from dataloader import HeadlineDataset
from csv import writer
import os, subprocess
import math
from collections import Counter
def get_weights(ys, countings):
total = sum(countings.values())
weights = []
for y in ys:
bin_num = int(y * 5)
weights.append(total / countings[bin_num])
print(ys[:10])
print(weights[:10])
return weights
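# Worked example of the weighting (hypothetical counts, not from the dataset):
# with countings = Counter({1: 8, 5: 2}) the total is 10, so a label of 0.2 falls
# in bin int(0.2 * 5) = 1 and gets weight 10 / 8 = 1.25, while a label of 1.0
# falls in bin 5 and gets weight 10 / 2 = 5.0; rarer bins are weighted up.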
def load_data(ds):
with open(f'data/{ds}_features.pkl', 'rb') as f:
train_features = pickle.load(f)
with open(f'data/{ds}-lstm.csv') as f:
lines = f.readlines()[1:]
for i, line in enumerate(lines):
lstm_prediction = float(line.split(',')[1])
train_features[i][0]['lstm-output'] = lstm_prediction
Xs = []
Ys = []
for features, y in train_features:
Ys.append(y)
x = []
for k in ["orig_score", "edit_score", "bert_sim", "glove_sim", "score_diff", "lstm-output"]:
x.append(features[k])
x = np.array(x)
Xs.append(x)
return Xs, Ys
# grouping bins
countings = Counter()
for i in range(30):
countings[i] += 1
dev_dataset = HeadlineDataset('dev')
train_dataset = HeadlineDataset('training')
for sample in dev_dataset:
bin_num = int(sample['label'] * 5)
countings[bin_num] += 1
for sample in train_dataset:
bin_num = int(sample['label'] * 5)
countings[bin_num] += 1
print('load data')
Xs, Ys = load_data('train')
train_weights = get_weights(Ys, countings)
dev_Xs, dev_Ys = load_data('dev')
dev_weights = get_weights(dev_Ys, countings)
model = GradientBoostingRegressor(
learning_rate=0.05, n_estimators=50,
subsample=0.5,
min_samples_split=2,
max_depth=3
)
print('train')
model.fit(Xs, Ys, train_weights)
print('trained')
print(model.feature_importances_)
pred_Ys = model.predict(dev_Xs)
dev_rmse = math.sqrt(mean_squared_error(dev_Ys, pred_Ys))
print(dev_rmse)
test_Xs, _ = load_data('test')
pred_Ys = model.predict(test_Xs)
test_dataset = HeadlineDataset('test')
with open('data/task-1-output.csv', 'w') as f:
output_writer = writer(f)
output_writer.writerow(('id', 'pred'))
for row, pred in zip(test_dataset, pred_Ys):
output_writer.writerow((row['id'], pred.item()))
os.chdir('data')
subprocess.run(['zip', 'task-1-output.zip', 'task-1-output.csv'])
os.chdir('..')
|
[
"subprocess.run",
"dataloader.HeadlineDataset",
"csv.writer",
"pickle.load",
"sklearn.metrics.mean_squared_error",
"collections.Counter",
"os.chdir",
"numpy.array",
"sklearn.ensemble.GradientBoostingRegressor"
] |
[((1300, 1309), 'collections.Counter', 'Counter', ([], {}), '()\n', (1307, 1309), False, 'from collections import Counter\n'), ((1366, 1388), 'dataloader.HeadlineDataset', 'HeadlineDataset', (['"""dev"""'], {}), "('dev')\n", (1381, 1388), False, 'from dataloader import HeadlineDataset\n'), ((1405, 1432), 'dataloader.HeadlineDataset', 'HeadlineDataset', (['"""training"""'], {}), "('training')\n", (1420, 1432), False, 'from dataloader import HeadlineDataset\n'), ((1804, 1920), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'learning_rate': '(0.05)', 'n_estimators': '(50)', 'subsample': '(0.5)', 'min_samples_split': '(2)', 'max_depth': '(3)'}), '(learning_rate=0.05, n_estimators=50, subsample=\n 0.5, min_samples_split=2, max_depth=3)\n', (1829, 1920), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((2222, 2245), 'dataloader.HeadlineDataset', 'HeadlineDataset', (['"""test"""'], {}), "('test')\n", (2237, 2245), False, 'from dataloader import HeadlineDataset\n'), ((2472, 2488), 'os.chdir', 'os.chdir', (['"""data"""'], {}), "('data')\n", (2480, 2488), False, 'import os, subprocess\n'), ((2489, 2554), 'subprocess.run', 'subprocess.run', (["['zip', 'task-1-output.zip', 'task-1-output.csv']"], {}), "(['zip', 'task-1-output.zip', 'task-1-output.csv'])\n", (2503, 2554), False, 'import os, subprocess\n'), ((2555, 2569), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (2563, 2569), False, 'import os, subprocess\n'), ((2089, 2124), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['dev_Ys', 'pred_Ys'], {}), '(dev_Ys, pred_Ys)\n', (2107, 2124), False, 'from sklearn.metrics import mean_squared_error\n'), ((2313, 2322), 'csv.writer', 'writer', (['f'], {}), '(f)\n', (2319, 2322), False, 'from csv import writer\n'), ((707, 721), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (718, 721), False, 'import pickle\n'), ((1218, 1229), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1226, 1229), True, 'import numpy as np\n')]
|
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
This module contains useful fudge math routines that do not fit into any other module.
"""
from pqu import PQU
from fudge.core.utilities import brb
try :
import numpy
numpyFloat64 = numpy.float64( 1. )
except :
numpyFloat64 = 1.
__metaclass__ = type
def runningZSum( data, xLabel = None, yLabel = None, zLabel = None, normalize = False ) :
"""Returns the running sum of dy * z (normalized to 1 of normalize is True) for each x as an endl3dmath object.
Data must be list of ( x, list of ( y, z ) )."""
d3 = []
for x_yz in data : d3.append( [ x_yz[0], runningYSum( x_yz[1], normalize = normalize ).data ] )
from brownies.legacy.endl import endl3dmathClasses
return endl3dmathClasses.endl3dmath(d3, xLabel = xLabel, yLabel = yLabel, zLabel = zLabel, checkDataType = 0)
def runningYSum( data, normalize = False ) :
"""Returns the running sum of dx * y (normalized to 1 of normalize is True) as an endl2dmath object.
Data must be list of ( x, y )."""
x1 = None
runningSum = []
for xy in data :
x2 = xy[0]
y2 = xy[1]
if ( x1 is None ) :
Sum = 0.
else :
Sum += 0.5 * ( y2 + y1 ) * ( x2 - x1 )
runningSum.append( [ x2, Sum ] )
x1 = x2
y1 = y2
if( normalize and ( Sum != 0. ) ) :
for xy in runningSum : xy[1] /= Sum
from brownies.legacy.endl import endl2dmathClasses
return endl2dmathClasses.endl2dmath(runningSum, checkDataType = 0)
def ZSum( data ) :
"""Returns the area under the curve z(y) for each x as an endl2dmath object. Data must be list of
( x, list of ( y, z ) )."""
d2 = []
for x_yz in data : d2.append( [ x_yz[0], YSum( x_yz[1] ) ] )
from brownies.legacy.endl import endl2dmathClasses
return endl2dmathClasses.endl2dmath(d2, checkDataType = 0)
def YSum( data ) :
"""Returns the area under the curve y(x). Data must be list of list( x, y )."""
x1 = None
for x2, y2 in data :
if ( x1 is None ) :
Sum = 0.
else :
Sum += ( y2 + y1 ) * ( x2 - x1 )
x1 = x2
y1 = y2
return 0.5 * Sum
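def _YSum_example( ) :
    """Illustrative self-check for YSum (helper added for clarity): the composite
    trapezoid rule applied to y = x sampled at x = 0, 1, 2 gives exactly 2.0."""
    assert YSum( [ [ 0, 0 ], [ 1, 1 ], [ 2, 2 ] ] ) == 2.0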
class fastSumOfManyAddends :
"""This class in designed to sum a lot of endl2dmath or fudge2dmath object together efficiently. For example,
consider the list f2d of 100,000 fudge2dmath objects that are to be summed. One way to do this is as
s = fudge2dmath( )
for f in f2d : s = s + f
    In general, this is very inefficient and will take a long time. Using this class as
fs = fastSumOfManyAddends( )
for f in f2d : fs.appendAddend( f )
s = fs.returnSum( )
is, in general, much more efficient (i.e., runs a lot faster) and it should never be less efficient.
While this class was designed for endl2dmath and fudge2dmath objects, it should work for any object
for which the '+' operation is defined."""
def __init__( self ) :
"""Constructor for fastSumOfManyAddends."""
self.clear( )
def appendAddend( self, addend ) :
"""Adds addend to current sum efficiently."""
n = len( self.list )
for i in range( n ) :
if( self.list[i] is None ) :
self.list[i] = addend
addend = None
break
else :
addend = addend + self.list[i]
self.list[i] = None
if( addend is not None ) : self.list.append( addend )
def clear( self ) :
"""Clears currently summed data."""
self.list = []
def returnSum( self ) :
"""Returns the current sum of all addends appended."""
s = None
for l in self.list :
if( l is not None ) :
if( s is None ) :
s = l
else :
s = s + l
return( s )
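def _fastSumOfManyAddends_example( ) :
    """Illustrative self-check (helper added for clarity): the class keeps a
    binary-counter style list of partial sums, so it works for any type that
    supports '+', including plain integers. Summing 1..100 gives 5050."""
    fs = fastSumOfManyAddends( )
    for v in range( 1, 101 ) : fs.appendAddend( v )
    assert fs.returnSum( ) == 5050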
def getValue( n ) :
if( isNumber( n ) ) : return( n )
if( isinstance( n, PQU.PQU ) ) : return( n.getValue( ) )
    raise Exception( 'Invalid number object = %s' % brb.getType( n ) )
|
[
"brownies.legacy.endl.endl2dmathClasses.endl2dmath",
"numpy.float64",
"fudge.core.utilities.brb.getType",
"brownies.legacy.endl.endl3dmathClasses.endl3dmath"
] |
[((390, 408), 'numpy.float64', 'numpy.float64', (['(1.0)'], {}), '(1.0)\n', (403, 408), False, 'import numpy\n'), ((902, 1001), 'brownies.legacy.endl.endl3dmathClasses.endl3dmath', 'endl3dmathClasses.endl3dmath', (['d3'], {'xLabel': 'xLabel', 'yLabel': 'yLabel', 'zLabel': 'zLabel', 'checkDataType': '(0)'}), '(d3, xLabel=xLabel, yLabel=yLabel, zLabel=\n zLabel, checkDataType=0)\n', (930, 1001), False, 'from brownies.legacy.endl import endl3dmathClasses\n'), ((1627, 1684), 'brownies.legacy.endl.endl2dmathClasses.endl2dmath', 'endl2dmathClasses.endl2dmath', (['runningSum'], {'checkDataType': '(0)'}), '(runningSum, checkDataType=0)\n', (1655, 1684), False, 'from brownies.legacy.endl import endl2dmathClasses\n'), ((1985, 2034), 'brownies.legacy.endl.endl2dmathClasses.endl2dmath', 'endl2dmathClasses.endl2dmath', (['d2'], {'checkDataType': '(0)'}), '(d2, checkDataType=0)\n', (2013, 2034), False, 'from brownies.legacy.endl import endl2dmathClasses\n'), ((4214, 4228), 'fudge.core.utilities.brb.getType', 'brb.getType', (['n'], {}), '(n)\n', (4225, 4228), False, 'from fudge.core.utilities import brb\n')]
|
import keras
import random
import numpy as np
from glob import glob
from keras.models import Model
from keras.utils import np_utils
from keras.models import load_model
import matplotlib.pyplot as plt
import os
import keras.backend as K
import tensorflow as tf
from keras.utils import to_categorical
from tqdm import tqdm
import sys
sys.path.append('..')
from helpers.losses import *
from helpers.utils import load_vol_brats
class intervention():
def __init__(self, model, test_path):
self.model = model
self.vol_path = glob(test_path)
self.test_image, self.gt = load_vol_brats(self.vol_path[3], slicen = 78, pad = 0)
def mean_swap(self, plot = True, save_path='/home/parth/Interpretable_ML/BioExp/results/RCT'):
channel = 3
f_index = 0
test_image, gt = load_vol_brats(self.vol_path[f_index], slicen = 78, pad = 0)
prediction = np.argmax(self.model.predict(test_image[None, ...]), axis = -1)[0]
n_classes = (len(np.unique(prediction)))
corr = np.zeros((n_classes, n_classes))
slices = [78]
plt.figure(figsize = (20,20))
for vol in range(len(test_path)):
for slicen in slices:
test_image, gt = load_vol_brats(self.vol_path[vol], slicen = slicen, pad = 0)
prediction = np.argmax(self.model.predict(test_image[None, ...]), axis = -1)[0]
print("Original Dice Whole:", dice_whole_coef(prediction, gt))
class_dict = {0:'bg', 1:'core', 2:'edema', 3:'enhancing'}
corr_temp = np.zeros((n_classes, n_classes))
for i in range(n_classes):
for j in range(n_classes):
new_mean = np.mean(test_image[gt == i], axis = 0)
old_mean = np.mean(test_image[gt == j], axis = 0)
test_image_intervention = np.copy(test_image)
test_image_intervention[gt == j] += (new_mean - old_mean)
prediction_intervention = np.argmax(self.model.predict(test_image_intervention[None, ...]), axis = -1)[0]
corr[i,j] += dice_label_coef(prediction, gt, (j,)) - dice_label_coef(prediction_intervention, gt, (j,))
corr_temp[i,j] += dice_label_coef(prediction, gt, (j,)) - dice_label_coef(prediction_intervention, gt, (j,))
if plot == True:
plt.subplot(n_classes, n_classes, 1+4*i+j)
plt.xticks([])
plt.yticks([])
plt.title("{} --> {}, Dice Change={}".format(class_dict[j], class_dict[i], "{0:.2f}".format(-corr[i,j])))
plt.imshow(prediction_intervention, cmap = plt.cm.RdBu, vmin = 0, vmax = 3)
plt.colorbar()
print(corr_temp)#/(vol*len(slices))
np.set_printoptions(precision = 2)
plt.rcParams.update({'font.size': 24})
intervention_importance = corr /(len(self.vol_path)*len(slices))
print(intervention_importance)
os.makedirs(save_path, exist_ok = True)
# np.save(save_path + '/mean_swap_all_images.npy', intervention_importance)
if plot == True:
plt.show()
def blocks(self):
test_image, gt = load_vol_brats(self.vol_path[1], slicen = 78, pad = 8)
prediction = np.argmax(self.model.predict(test_image[None, ...]), axis = -1)[0]
n_classes = (len(np.unique(prediction)))
corr = np.zeros((n_classes, n_classes))
slices = [78]
intervention_image = np.empty(test_image.shape)
for _modality in range(4):
for i in range(2):
for j in range(2):
try:
intervention_image[:,:,_modality][test_image.shape[0]//2*i:test_image.shape[0]//2*(i+1),
test_image.shape[1]//2*j:test_image.shape[1]//2*(j+1)].fill(np.mean(test_image[gt == 2*i+j], axis = 0)[_modality])
except Exception as e:
print(e)
prediction_intervention = model.predict(intervention_image[None, ...])
plt.imshow(intervention_image[:, :, 0])
plt.colorbar()
plt.show()
plt.imshow(np.argmax(prediction_intervention, axis = -1)[0], vmin=0, vmax=3)
plt.colorbar()
plt.show()
def adverserial(self, epochs=100, epsilon = 0.01, mode = 'gradient', plot=False, test_image=None, gt=None):
sess = K.get_session()
keras.layers.core.K.set_learning_phase(0)
image = test_image[None, ...] # if test_image is not None else self.test_image[None, ...]
gt = gt[None, ...] # if gt is not None else self.gt[None, ...]
noise = np.zeros_like(image)
adverserial_image = image.copy()
if mode == 'gradient':
loss = keras.losses.categorical_crossentropy(self.model.output, tf.convert_to_tensor(to_categorical(gt, num_classes=4)))
elif mode == 'random':
loss = -keras.losses.categorical_crossentropy(self.model.output,
tf.convert_to_tensor(self.generate_random_classification(mode='random')))
elif mode =='swap':
loss = -keras.losses.categorical_crossentropy(self.model.output,
tf.convert_to_tensor(self.generate_random_classification(mode='swap')))
grads = K.gradients(loss, self.model.input)
delta = K.sign(grads[0])
noise = noise + delta
adverserial_image = adverserial_image+epsilon*delta
adverserial_image, noise_ar, delta_ = sess.run([adverserial_image, noise, delta], feed_dict={self.model.input: image})
delta_image_perc = (np.mean(np.abs(image - adverserial_image))*100)/np.ptp(image)
delta_dice_perc = (dice_whole_coef(self.model.predict(image).argmax(axis=-1),
gt) - dice_whole_coef(self.model.predict(adverserial_image).argmax(axis=-1),
gt))*100/dice_whole_coef(self.model.predict(image).argmax(axis=-1),
gt)
# print("perc. change in image:{}, perc. change in dice:{}, Sensitivity:{}".format(delta_image_perc,
# delta_dice_perc, delta_dice_perc/delta_image_perc))
imshape = image.shape[1]
if plot==True:
plt.figure(figsize = (40,10))
plt.rcParams.update({'font.size': 34})
plt.subplot(1,4,1)
plt.title("Original image")
plt.imshow(image[:, :, :, 0].reshape((imshape, imshape)))
plt.xticks([])
plt.yticks([])
# plt.subplot(1,6,2)
# plt.title("Added Noise")
# plt.imshow(noise_ar[:, :, :, 0].reshape((imshape, imshape)))
# plt.xticks([])
# plt.yticks([])
plt.subplot(1,4,2)
plt.title("Image + Noise, % Change = {}".format("{0:.2f}".format(delta_image_perc)))
plt.imshow(adverserial_image[:, :, :, 0].reshape((imshape, imshape)))
plt.xticks([])
plt.yticks([])
# plt.subplot(1,6,4)
# plt.title("Ground Truth")
# plt.imshow(self.gt, vmin = 0, vmax=3)
# plt.xticks([])
# plt.yticks([])
plt.subplot(1,4,3)
plt.title("Old Seg, Dice = {}".format("{0:.2f}".format(dice_whole_coef(self.model.predict(image).argmax(axis=-1), gt))))
plt.imshow(np.argmax(self.model.predict(image), axis = -1).reshape((imshape, imshape)), vmin = 0, vmax=3)
plt.xticks([])
plt.yticks([])
plt.subplot(1,4,4)
plt.title("New Seg, Dice={}, Sensitivity={}".format("{0:.2f}".format(dice_whole_coef(self.model.predict(adverserial_image).argmax(axis=-1),
gt)), "{0:.2f}".format(delta_dice_perc/delta_image_perc)))
plt.imshow(np.argmax(self.model.predict(adverserial_image), axis = -1).reshape((imshape, imshape)), vmin = 0, vmax=3)
plt.xticks([])
plt.yticks([])
plt.tight_layout(pad=0)
plt.show()
# plt.savefig('/home/parth/Interpretable_ML/Adverserial Examples/adv_{}.png'.format(epsilon))
return(delta_image_perc, delta_dice_perc, delta_dice_perc/delta_image_perc)
def generate_random_classification(self, mode='random'):
if mode == 'random':
true_target = self.gt.flatten()
true_target[true_target==4] = 3
index_list = [0, 1, 2, 3]
adverserial_random = np.zeros_like(true_target)
for i in range(adverserial_random.shape[0]):
adverserial_random[i] = np.random.choice(np.setdiff1d(index_list, true_target[i]))
print("Target image")
plt.imshow(adverserial_random.reshape((256, 256)), vmin=0., vmax=3.)
plt.show()
return to_categorical(adverserial_random, num_classes=4).reshape(self.test_image.shape)
elif mode == 'swap':
true_target = self.gt.flatten()
true_target[true_target==4] = 3
index_list = [0, 1, 2, 3]
adverserial_random = np.zeros_like(true_target)
for i in index_list:
adverserial_random[true_target == i] = np.random.choice(np.setdiff1d(index_list, i))
print("Target image")
plt.imshow(adverserial_random.reshape((256, 256)), vmin=0., vmax=3.)
plt.show()
return to_categorical(adverserial_random, num_classes=4).reshape(self.test_image.shape)
if __name__ == "__main__":
model = load_model('/home/parth/Interpretable_ML/saved_models/SimUnet/model_lrsch.hdf5',
custom_objects={'gen_dice_loss':gen_dice_loss,
'dice_whole_metric':dice_whole_metric,
'dice_core_metric':dice_core_metric,
'dice_en_metric':dice_en_metric})
model.load_weights('/home/parth/Interpretable_ML/saved_models/SimUnet/SimUnet.40_0.060.hdf5')
I = intervention(model, '/media/parth/DATA/datasets/brats_2018/val/**')
test_path = glob('/media/parth/DATA/datasets/brats_2018/val/**')
average_change = []
for epsilon in [0.7]: #, 0.07, 0.21, 0.7]:
for i in tqdm(range(len(test_path))):
test_image, gt = load_vol_brats(test_path[i], slicen = 78, pad = 0)
if len(np.unique(gt)) == 4:
print(len(np.unique(gt)))
# I.blocks('/home/parth/Interpretable_ML/BioExp/sample_vol/brats/**')
adv = I.adverserial(epsilon = epsilon, mode='gradient', test_image=test_image, gt=gt)
if adv[1] > 0:
average_change.append(adv)
print(adv)
print(np.mean(average_change, axis = 0))
# I.generate_random_classification(mode='swap')
# I.mean_swap(plot = False)
|
[
"numpy.ptp",
"keras.backend.gradients",
"keras.utils.to_categorical",
"sys.path.append",
"keras.layers.core.K.set_learning_phase",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.empty",
"matplotlib.pyplot.yticks",
"glob.glob",
"numpy.abs",
"matplotlib.pyplot.xticks",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"keras.backend.sign",
"numpy.set_printoptions",
"numpy.copy",
"keras.models.load_model",
"numpy.unique",
"os.makedirs",
"matplotlib.pyplot.colorbar",
"helpers.utils.load_vol_brats",
"matplotlib.pyplot.rcParams.update",
"numpy.zeros",
"matplotlib.pyplot.figure",
"keras.backend.get_session",
"numpy.setdiff1d",
"matplotlib.pyplot.tight_layout",
"numpy.zeros_like"
] |
[((332, 353), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (347, 353), False, 'import sys\n'), ((8210, 8464), 'keras.models.load_model', 'load_model', (['"""/home/parth/Interpretable_ML/saved_models/SimUnet/model_lrsch.hdf5"""'], {'custom_objects': "{'gen_dice_loss': gen_dice_loss, 'dice_whole_metric': dice_whole_metric,\n 'dice_core_metric': dice_core_metric, 'dice_en_metric': dice_en_metric}"}), "('/home/parth/Interpretable_ML/saved_models/SimUnet/model_lrsch.hdf5'\n , custom_objects={'gen_dice_loss': gen_dice_loss, 'dice_whole_metric':\n dice_whole_metric, 'dice_core_metric': dice_core_metric,\n 'dice_en_metric': dice_en_metric})\n", (8220, 8464), False, 'from keras.models import load_model\n'), ((8745, 8797), 'glob.glob', 'glob', (['"""/media/parth/DATA/datasets/brats_2018/val/**"""'], {}), "('/media/parth/DATA/datasets/brats_2018/val/**')\n", (8749, 8797), False, 'from glob import glob\n'), ((526, 541), 'glob.glob', 'glob', (['test_path'], {}), '(test_path)\n', (530, 541), False, 'from glob import glob\n'), ((571, 621), 'helpers.utils.load_vol_brats', 'load_vol_brats', (['self.vol_path[3]'], {'slicen': '(78)', 'pad': '(0)'}), '(self.vol_path[3], slicen=78, pad=0)\n', (585, 621), False, 'from helpers.utils import load_vol_brats\n'), ((774, 830), 'helpers.utils.load_vol_brats', 'load_vol_brats', (['self.vol_path[f_index]'], {'slicen': '(78)', 'pad': '(0)'}), '(self.vol_path[f_index], slicen=78, pad=0)\n', (788, 830), False, 'from helpers.utils import load_vol_brats\n'), ((970, 1002), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (978, 1002), True, 'import numpy as np\n'), ((1022, 1050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1032, 1050), True, 'import matplotlib.pyplot as plt\n'), ((2476, 2508), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (2495, 2508), True, 'import numpy as np\n'), ((2513, 2551), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 24}"], {}), "({'font.size': 24})\n", (2532, 2551), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2692), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (2666, 2692), False, 'import os\n'), ((2846, 2896), 'helpers.utils.load_vol_brats', 'load_vol_brats', (['self.vol_path[1]'], {'slicen': '(78)', 'pad': '(8)'}), '(self.vol_path[1], slicen=78, pad=8)\n', (2860, 2896), False, 'from helpers.utils import load_vol_brats\n'), ((3036, 3068), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (3044, 3068), True, 'import numpy as np\n'), ((3109, 3135), 'numpy.empty', 'np.empty', (['test_image.shape'], {}), '(test_image.shape)\n', (3117, 3135), True, 'import numpy as np\n'), ((3557, 3596), 'matplotlib.pyplot.imshow', 'plt.imshow', (['intervention_image[:, :, 0]'], {}), '(intervention_image[:, :, 0])\n', (3567, 3596), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3613), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3611, 3613), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3626), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3624, 3626), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3722), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3720, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3733, 3735), True, 'import 
matplotlib.pyplot as plt\n'), ((3856, 3871), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (3869, 3871), True, 'import keras.backend as K\n'), ((3875, 3916), 'keras.layers.core.K.set_learning_phase', 'keras.layers.core.K.set_learning_phase', (['(0)'], {}), '(0)\n', (3913, 3916), False, 'import keras\n'), ((4086, 4106), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (4099, 4106), True, 'import numpy as np\n'), ((4643, 4678), 'keras.backend.gradients', 'K.gradients', (['loss', 'self.model.input'], {}), '(loss, self.model.input)\n', (4654, 4678), True, 'import keras.backend as K\n'), ((4690, 4706), 'keras.backend.sign', 'K.sign', (['grads[0]'], {}), '(grads[0])\n', (4696, 4706), True, 'import keras.backend as K\n'), ((937, 958), 'numpy.unique', 'np.unique', (['prediction'], {}), '(prediction)\n', (946, 958), True, 'import numpy as np\n'), ((2795, 2805), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2803, 2805), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3024), 'numpy.unique', 'np.unique', (['prediction'], {}), '(prediction)\n', (3012, 3024), True, 'import numpy as np\n'), ((4980, 4993), 'numpy.ptp', 'np.ptp', (['image'], {}), '(image)\n', (4986, 4993), True, 'import numpy as np\n'), ((5446, 5474), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(40, 10)'}), '(figsize=(40, 10))\n', (5456, 5474), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5518), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 34}"], {}), "({'font.size': 34})\n", (5499, 5518), True, 'import matplotlib.pyplot as plt\n'), ((5522, 5542), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (5533, 5542), True, 'import matplotlib.pyplot as plt\n'), ((5544, 5571), 'matplotlib.pyplot.title', 'plt.title', (['"""Original image"""'], {}), "('Original image')\n", (5553, 5571), True, 'import matplotlib.pyplot as plt\n'), ((5637, 5651), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5647, 5651), True, 'import matplotlib.pyplot as plt\n'), ((5655, 5669), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5665, 5669), True, 'import matplotlib.pyplot as plt\n'), ((5840, 5860), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(2)'], {}), '(1, 4, 2)\n', (5851, 5860), True, 'import matplotlib.pyplot as plt\n'), ((6024, 6038), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6034, 6038), True, 'import matplotlib.pyplot as plt\n'), ((6042, 6056), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6052, 6056), True, 'import matplotlib.pyplot as plt\n'), ((6198, 6218), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(3)'], {}), '(1, 4, 3)\n', (6209, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6453, 6467), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6463, 6467), True, 'import matplotlib.pyplot as plt\n'), ((6471, 6485), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6481, 6485), True, 'import matplotlib.pyplot as plt\n'), ((6489, 6509), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(4)'], {}), '(1, 4, 4)\n', (6500, 6509), True, 'import matplotlib.pyplot as plt\n'), ((6839, 6853), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6849, 6853), True, 'import matplotlib.pyplot as plt\n'), ((6857, 6871), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6867, 6871), True, 'import matplotlib.pyplot as plt\n'), ((6875, 6898), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)'}), '(pad=0)\n', (6891, 6898), True, 'import matplotlib.pyplot as plt\n'), ((6902, 6912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6910, 6912), True, 'import matplotlib.pyplot as plt\n'), ((7299, 7325), 'numpy.zeros_like', 'np.zeros_like', (['true_target'], {}), '(true_target)\n', (7312, 7325), True, 'import numpy as np\n'), ((7571, 7581), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7579, 7581), True, 'import matplotlib.pyplot as plt\n'), ((8926, 8972), 'helpers.utils.load_vol_brats', 'load_vol_brats', (['test_path[i]'], {'slicen': '(78)', 'pad': '(0)'}), '(test_path[i], slicen=78, pad=0)\n', (8940, 8972), False, 'from helpers.utils import load_vol_brats\n'), ((9278, 9309), 'numpy.mean', 'np.mean', (['average_change'], {'axis': '(0)'}), '(average_change, axis=0)\n', (9285, 9309), True, 'import numpy as np\n'), ((1136, 1192), 'helpers.utils.load_vol_brats', 'load_vol_brats', (['self.vol_path[vol]'], {'slicen': 'slicen', 'pad': '(0)'}), '(self.vol_path[vol], slicen=slicen, pad=0)\n', (1150, 1192), False, 'from helpers.utils import load_vol_brats\n'), ((1429, 1461), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (1437, 1461), True, 'import numpy as np\n'), ((3640, 3683), 'numpy.argmax', 'np.argmax', (['prediction_intervention'], {'axis': '(-1)'}), '(prediction_intervention, axis=-1)\n', (3649, 3683), True, 'import numpy as np\n'), ((7825, 7851), 'numpy.zeros_like', 'np.zeros_like', (['true_target'], {}), '(true_target)\n', (7838, 7851), True, 'import numpy as np\n'), ((8068, 8078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8076, 8078), True, 'import matplotlib.pyplot as plt\n'), ((4257, 4290), 'keras.utils.to_categorical', 'to_categorical', (['gt'], {'num_classes': '(4)'}), '(gt, num_classes=4)\n', (4271, 4290), False, 'from keras.utils import to_categorical\n'), ((4940, 4973), 'numpy.abs', 'np.abs', (['(image - adverserial_image)'], {}), '(image - adverserial_image)\n', (4946, 4973), True, 'import numpy as np\n'), ((7421, 7461), 'numpy.setdiff1d', 'np.setdiff1d', (['index_list', 'true_target[i]'], {}), '(index_list, true_target[i])\n', (7433, 7461), True, 'import numpy as np\n'), ((7593, 7642), 'keras.utils.to_categorical', 'to_categorical', (['adverserial_random'], {'num_classes': '(4)'}), '(adverserial_random, num_classes=4)\n', (7607, 7642), False, 'from keras.utils import to_categorical\n'), ((8987, 9000), 'numpy.unique', 'np.unique', (['gt'], {}), '(gt)\n', (8996, 9000), True, 'import numpy as np\n'), ((1542, 1578), 'numpy.mean', 'np.mean', (['test_image[gt == i]'], {'axis': '(0)'}), '(test_image[gt == i], axis=0)\n', (1549, 1578), True, 'import numpy as np\n'), ((1598, 1634), 'numpy.mean', 'np.mean', (['test_image[gt == j]'], {'axis': '(0)'}), '(test_image[gt == j], axis=0)\n', (1605, 1634), True, 'import numpy as np\n'), ((1669, 1688), 'numpy.copy', 'np.copy', (['test_image'], {}), '(test_image)\n', (1676, 1688), True, 'import numpy as np\n'), ((7938, 7965), 'numpy.setdiff1d', 'np.setdiff1d', (['index_list', 'i'], {}), '(index_list, i)\n', (7950, 7965), True, 'import numpy as np\n'), ((8090, 8139), 'keras.utils.to_categorical', 'to_categorical', (['adverserial_random'], {'num_classes': '(4)'}), '(adverserial_random, num_classes=4)\n', (8104, 8139), False, 'from keras.utils import to_categorical\n'), ((9022, 9035), 'numpy.unique', 'np.unique', (['gt'], {}), '(gt)\n', (9031, 9035), True, 'import numpy as np\n'), ((2128, 2176), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['n_classes', 'n_classes', '(1 + 4 * i + j)'], {}), '(n_classes, n_classes, 1 + 4 * i + j)\n', (2139, 2176), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2192), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2188, 2192), True, 'import matplotlib.pyplot as plt\n'), ((2200, 2214), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2210, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2404), 'matplotlib.pyplot.imshow', 'plt.imshow', (['prediction_intervention'], {'cmap': 'plt.cm.RdBu', 'vmin': '(0)', 'vmax': '(3)'}), '(prediction_intervention, cmap=plt.cm.RdBu, vmin=0, vmax=3)\n', (2345, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2418, 2432), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2430, 2432), True, 'import matplotlib.pyplot as plt\n'), ((3383, 3427), 'numpy.mean', 'np.mean', (['test_image[gt == 2 * i + j]'], {'axis': '(0)'}), '(test_image[gt == 2 * i + j], axis=0)\n', (3390, 3427), True, 'import numpy as np\n')]
|
import pytest
grblas = pytest.importorskip("grblas")
from metagraph.tests.util import default_plugin_resolver
from . import RoundTripper
from metagraph.plugins.numpy.types import NumpyMatrixType
from metagraph.plugins.graphblas.types import GrblasMatrixType
import numpy as np
def test_matrix_roundtrip_dense_square(default_plugin_resolver):
rt = RoundTripper(default_plugin_resolver)
mat = np.array([[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3]])
rt.verify_round_trip(mat)
rt.verify_round_trip(mat.astype(int))
rt.verify_round_trip(mat.astype(bool))
def test_matrix_roundtrip_dense_rect(default_plugin_resolver):
rt = RoundTripper(default_plugin_resolver)
mat = np.array(
[[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3], [-1.1, 2.7, 3.3]]
)
rt.verify_round_trip(mat)
rt.verify_round_trip(mat.astype(int))
rt.verify_round_trip(mat.astype(bool))
def test_numpy_2_grblas(default_plugin_resolver):
dpr = default_plugin_resolver
x = np.array([[1, 2, 3], [3, 3, 9], [3, 0, 3], [4, 2, 2]])
assert x.shape == (4, 3)
# Convert numpy -> grblas.Matrix
intermediate = grblas.Matrix.from_values(
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
[1, 2, 3, 3, 3, 9, 3, 0, 3, 4, 2, 2],
nrows=4,
ncols=3,
dtype=grblas.dtypes.INT64,
)
y = dpr.translate(x, grblas.Matrix)
dpr.assert_equal(y, intermediate)
# Convert numpy <- grblas.Matrix
x2 = dpr.translate(y, NumpyMatrixType)
dpr.assert_equal(x, x2)
|
[
"numpy.array",
"pytest.importorskip"
] |
[((24, 53), 'pytest.importorskip', 'pytest.importorskip', (['"""grblas"""'], {}), "('grblas')\n", (43, 53), False, 'import pytest\n'), ((403, 465), 'numpy.array', 'np.array', (['[[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3]]'], {}), '([[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3]])\n', (411, 465), True, 'import numpy as np\n'), ((703, 788), 'numpy.array', 'np.array', (['[[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3], [-1.1, 2.7, 3.3]]'], {}), '([[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3], [-1.1, 2.7, 3.3]]\n )\n', (711, 788), True, 'import numpy as np\n'), ((1007, 1061), 'numpy.array', 'np.array', (['[[1, 2, 3], [3, 3, 9], [3, 0, 3], [4, 2, 2]]'], {}), '([[1, 2, 3], [3, 3, 9], [3, 0, 3], [4, 2, 2]])\n', (1015, 1061), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
""" This file is a Python translation of the MATLAB file acm.m
Python version by RDL 29 Mar 2012
Copyright notice from acm.m:
copyright 1996, by <NAME>. For use with the book
"Statistical Digital Signal Processing and Modeling"
(John Wiley & Sons, 1996).
"""
from __future__ import print_function,division
import numpy as np
from convm import convm
def acm(x,p):
""" Find an all-pole model using the autocorrelation method
Usage: a,err = acm(x,p)
The input sequence x is modeled as the unit sample response of
a filter having a system function of the form
H(z) = b(0)/A(z)
where the coefficients of A(z) are contained in the vector
a=[1, a(1), ... a(p)]
The input p defines the number of poles in the model.
The modeling error is returned in err.
The numerator b(0) is typically set equal to the square
root of err.
"""
x = x.flatten()
N = len(x)
    if p > N:
        raise ValueError('model order too large')
else:
X = convm(x, p+1)
Xq = X[0:N+p-1,0:p]
xq1 = -X[1:N+p, 0]
a = np.linalg.lstsq(Xq, xq1)[0]
a = np.insert(a, 0, 1)
err = np.dot(X[0:N+p,0].conj().T, X)
err = np.dot(err, a)
err = np.abs(err)
return a, err
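# A small usage sketch: fit a first-order all-pole model to the decaying
# exponential x(n) = 0.5**n.  For this signal the autocorrelation method should
# return a close to [1, -0.5] (the estimate is slightly biased towards zero
# because the data record is finite).
if __name__ == '__main__':
    x = 0.5 ** np.arange(32)
    a, err = acm(x, 1)
    print(a, err)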
|
[
"numpy.insert",
"numpy.abs",
"numpy.dot",
"numpy.linalg.lstsq",
"convm.convm"
] |
[((995, 1010), 'convm.convm', 'convm', (['x', '(p + 1)'], {}), '(x, p + 1)\n', (1000, 1010), False, 'from convm import convm\n'), ((1117, 1135), 'numpy.insert', 'np.insert', (['a', '(0)', '(1)'], {}), '(a, 0, 1)\n', (1126, 1135), True, 'import numpy as np\n'), ((1195, 1209), 'numpy.dot', 'np.dot', (['err', 'a'], {}), '(err, a)\n', (1201, 1209), True, 'import numpy as np\n'), ((1224, 1235), 'numpy.abs', 'np.abs', (['err'], {}), '(err)\n', (1230, 1235), True, 'import numpy as np\n'), ((1077, 1101), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['Xq', 'xq1'], {}), '(Xq, xq1)\n', (1092, 1101), True, 'import numpy as np\n')]
|
import numpy as np
class RegularizeOrthogonal(object):
"""
Orthogonal
"""
def __init__(self, coeff_lambda=0.0):
self.coeff_lambda = coeff_lambda
def cost(self, layers):
c = 0.0
for layer in layers:
wt = layer.w.transpose()
for j in range(layer.output_size):
wtj = wt[j] / np.sqrt(wt[j].dot(wt[j]))
for k in range(layer.output_size):
if j == k:
continue
wtk = wt[k] / np.sqrt(wt[k].dot(wt[k]))
c += np.abs(wtj.dot(wtk))
return self.coeff_lambda * c
def cost_gradient(self, layers, dc_db, dc_dw):
for l, layer in enumerate(layers):
wt = layer.w.transpose()
tmp = np.zeros_like(wt)
for j in range(layer.output_size):
dj = np.sqrt(wt[j].dot(wt[j]))
wtj = wt[j] / dj
# TODO: simplify this
s = 2 * (np.eye(len(wtj)) - np.outer(wtj, wtj)) / dj
for k in range(layer.output_size):
if j == k:
continue
dk = np.sqrt(wt[k].dot(wt[k]))
wtk = wt[k] / dk
tmp[j] += wtk.dot(s) * np.sign(wtj.dot(wtk))
dc_dw[l] += self.coeff_lambda * tmp.transpose()
return dc_db, dc_dw
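# A minimal illustration with a stand-in layer object (the real layer class lives
# elsewhere in this project; only .w and .output_size are used here): orthogonal
# weight columns give zero cost, identical columns give the maximal cost.
if __name__ == '__main__':
    class _FakeLayer(object):
        def __init__(self, w):
            self.w = w                      # shape (input_size, output_size)
            self.output_size = w.shape[1]
    reg = RegularizeOrthogonal(coeff_lambda=1.0)
    print(reg.cost([_FakeLayer(np.eye(3))]))       # 0.0 for orthogonal weights
    print(reg.cost([_FakeLayer(np.ones((3, 3)))]))  # ~6.0 for identical weights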
|
[
"numpy.outer",
"numpy.zeros_like"
] |
[((795, 812), 'numpy.zeros_like', 'np.zeros_like', (['wt'], {}), '(wt)\n', (808, 812), True, 'import numpy as np\n'), ((1022, 1040), 'numpy.outer', 'np.outer', (['wtj', 'wtj'], {}), '(wtj, wtj)\n', (1030, 1040), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from pandas.util import testing as pdt
import pytest
from spandex import TableFrame
from spandex.io import db_to_df, df_to_db
def test_tableframe(loader):
table = loader.tables.sample.hf_bg
for cache in [False, True]:
tf = TableFrame(table, index_col='gid', cache=cache)
assert isinstance(tf.index, pd.Index)
num_rows = len(tf)
assert num_rows > 1
assert set(tf.columns) == set(table.__table__.columns.keys())
for column_name in tf.columns:
if column_name != 'gid':
if cache:
assert column_name not in tf._cached.keys()
assert isinstance(tf[column_name], pd.Series)
if cache:
assert column_name in tf._cached.keys()
assert isinstance(getattr(tf, column_name), pd.Series)
df = tf[['objectid']]
assert isinstance(df, pd.DataFrame)
assert len(df) == num_rows
assert set(df.columns) == set(['objectid'])
assert np.issubdtype(df.objectid.dtype, int)
def test_sim_export(loader):
# Try importing the UrbanSim simulation framework, otherwise skip test.
sim = pytest.importorskip('urbansim.sim.simulation')
# Register input parcels table.
parcels = loader.tables.sample.heather_farms
parcels_in = TableFrame(parcels, index_col='gid')
sim.add_table('parcels_in', parcels_in, copy_col=False)
# Register output parcels table.
@sim.table()
def parcels_out(parcels_in):
return pd.DataFrame(index=parcels_in.parcel_id)
# Specify default table for output columns as decorator.
out = sim.column('parcels_out')
# Specify some output columns.
@out
def apn(apn='parcels_in.puid'):
return apn.groupby(parcels_in.parcel_id).first().astype(str)
@out
def county_id():
return 13
@out
def area(acr='parcels_in.parcel_acr'):
return 4047. * acr.groupby(parcels_in.parcel_id).median()
# Register model to export output table to database.
@sim.model()
def export(parcels_out):
schema = loader.tables.sample
df_to_db(parcels_out.to_frame(), 'parcels_out', schema=schema)
# Inspect output table.
column_names = ['apn', 'county_id', 'area']
parcels_out_df1 = sim.get_table('parcels_out').to_frame()
assert set(parcels_out_df1.columns) == set(column_names)
assert parcels_out_df1.county_id.unique() == [13]
# Export table to database and import back to compare.
sim.run(['export'])
parcels_out_table = loader.tables.sample.parcels_out
parcels_out_df2 = db_to_df(parcels_out_table, index_col='parcel_id')
pdt.assert_frame_equal(parcels_out_df1[column_names],
parcels_out_df2[column_names])
|
[
"spandex.io.db_to_df",
"numpy.issubdtype",
"spandex.TableFrame",
"pytest.importorskip",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal"
] |
[((1216, 1262), 'pytest.importorskip', 'pytest.importorskip', (['"""urbansim.sim.simulation"""'], {}), "('urbansim.sim.simulation')\n", (1235, 1262), False, 'import pytest\n'), ((1366, 1402), 'spandex.TableFrame', 'TableFrame', (['parcels'], {'index_col': '"""gid"""'}), "(parcels, index_col='gid')\n", (1376, 1402), False, 'from spandex import TableFrame\n'), ((2653, 2703), 'spandex.io.db_to_df', 'db_to_df', (['parcels_out_table'], {'index_col': '"""parcel_id"""'}), "(parcels_out_table, index_col='parcel_id')\n", (2661, 2703), False, 'from spandex.io import db_to_df, df_to_db\n'), ((2708, 2797), 'pandas.util.testing.assert_frame_equal', 'pdt.assert_frame_equal', (['parcels_out_df1[column_names]', 'parcels_out_df2[column_names]'], {}), '(parcels_out_df1[column_names], parcels_out_df2[\n column_names])\n', (2730, 2797), True, 'from pandas.util import testing as pdt\n'), ((281, 328), 'spandex.TableFrame', 'TableFrame', (['table'], {'index_col': '"""gid"""', 'cache': 'cache'}), "(table, index_col='gid', cache=cache)\n", (291, 328), False, 'from spandex import TableFrame\n'), ((1061, 1098), 'numpy.issubdtype', 'np.issubdtype', (['df.objectid.dtype', 'int'], {}), '(df.objectid.dtype, int)\n', (1074, 1098), True, 'import numpy as np\n'), ((1566, 1606), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'parcels_in.parcel_id'}), '(index=parcels_in.parcel_id)\n', (1578, 1606), True, 'import pandas as pd\n')]
|
# encoding: UTF-8
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
name = 'crrCython',
ext_modules = cythonize("crrCython.pyx"),
include_dirs = [numpy.get_include()]
)
|
[
"Cython.Build.cythonize",
"numpy.get_include"
] |
[((146, 172), 'Cython.Build.cythonize', 'cythonize', (['"""crrCython.pyx"""'], {}), "('crrCython.pyx')\n", (155, 172), False, 'from Cython.Build import cythonize\n'), ((192, 211), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (209, 211), False, 'import numpy\n')]
|
import pandas as pd
import pandas
import numpy as np
#provide local path
testfile='../input/test.csv'
data = open(testfile).readlines()
sequences={} #(key, value) = (id , sequence)
for i in range(1,len(data)):
line=data[i]
line =line.replace('"','')
line = line[:-1].split(',')
id = int(line[0])
sequence=[int(x) for x in line[1:]];
sequences[id]=sequence
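# Note on the assumed test.csv layout: a header row followed by lines such as
#   3,"1,2,4,8,16"
# which, after stripping the quotes and the trailing newline, are parsed into
# sequences[3] = [1, 2, 4, 8, 16].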
# In[ ]:
def checkRecurrence(seq, order= 2, minlength = 7):
"""
:type seq: List[int]
:type order: int
:type minlength: int
:rtype: List[int]
Check whether the input sequence is a recurrence sequence with given order.
If it is, return the coefficients for the recurrenec relation.
If not, return None.
"""
if len(seq)< max((2*order+1), minlength):
return None
################ Set up the system of equations
A,b = [], []
for i in range(order):
A.append(seq[i:i+order])
b.append(seq[i+order])
A,b =np.array(A), np.array(b)
try:
if np.linalg.det(A)==0:
return None
except TypeError:
return None
############# Solve for the coefficients (c0, c1, c2, ...)
coeffs = np.linalg.inv(A).dot(b)
############ Check if the next terms satisfy recurrence relation
for i in range(2*order, len(seq)):
predict = np.sum(coeffs*np.array(seq[i-order:i]))
if abs(predict-seq[i])>10**(-2):
return None
return list(coeffs)
def predictNextTerm(seq, coeffs):
"""
:type seq: List[int]
:type coeffs: List[int]
:rtype: int
Given a sequence and coefficienes, compute the next term for the sequence.
"""
order = len(coeffs)
predict = np.sum(coeffs*np.array(seq[-order:]))
return int(round(predict))
# ## Example: ##
# * Given a sequence [1,5,11,21,39,73,139,269,527].
# * We verify that it is a 3rd order recurrence sequence and find the coefficients (2,-5,4).
# * We then predict the next term using the last 3 terms and the relation $a_{n+3} = 2a_{n}-5a_{n+1}+4a_{n+2}$.
# In[ ]:
seq = [1,5,11,21,39,73,139,269,527]
print (checkRecurrence(seq,3))
print (predictNextTerm(seq, [2,-5,4]))
# # Find 2nd order sequences in the test set #
# In[ ]:
order2Seq={} #(key, value) = (sequence id, [prediction, coefficients])
for id in sequences:
seq = sequences[id]
coeff = checkRecurrence(seq,2)
if coeff!=None:
predict = predictNextTerm(seq, coeff)
order2Seq[id]=(predict,coeff)
print ("We found %d sequences\n" %len(order2Seq))
print ("Some examples\n")
print ("ID, Prediction, Coefficients")
for key in sorted(order2Seq)[0:5]:
value = order2Seq[key]
print ("%s, %s, %s" %(key, value[0], [int(round(x)) for x in value[1]]))
# # Find 3rd order sequences in the test set #
# In[ ]:
order3Seq={}
for id in sequences:
if id in order2Seq:
continue
seq = sequences[id]
coeff = checkRecurrence(seq,3)
if coeff!=None:
predict = predictNextTerm(seq, coeff)
order3Seq[id]=(predict,coeff)
print ("We found %d sequences\n" %len(order3Seq))
print ("Some examples\n")
print ("ID, Prediction, Coefficients")
for key in sorted(order3Seq)[0:5]:
value = order3Seq[key]
print ("%s, %s, %s" %(key, value[0], [int(round(x)) for x in value[1]]))
# # Find 4th order sequences in the test set #
# In[ ]:
order4Seq={}
for id in sequences:
if id in order2Seq or id in order3Seq:
continue
seq = sequences[id]
coeff = checkRecurrence(seq,4)
if coeff!=None:
predict = predictNextTerm(seq, coeff)
order4Seq[id]=(predict,coeff)
print ("We found %d sequences \n" %len(order4Seq))
print ("Some examples\n")
print ("ID, Prediction, Coefficients")
for key in sorted(order4Seq)[4:5]:
value = order4Seq[key]
print ("%s, %s, %s" %(key, value[0], [int(round(x)) for x in value[1]]))
print (sequences[239][0:17])
# ## Recurrence relations not included in OEIS ##
# In the previous cells,
# * We find that Sequence 239 is a 4th order sequence and predict the next term as 5662052980.
# * We check OEIS https://oeis.org/A000773, which confirms the prediction is correct.
# * We observe that this recurrence relation is not described in OEIS. (There are more such sequences.)
# In[ ]:
print("Conclusion:")
print("Number of sequences in the test set:", len(sequences))
print("Number of 2nd order sequences:", len(order2Seq))
print("Number of 3rd order sequences:", len(order3Seq))
print("Number of 4th order sequences:", len(order4Seq))
|
[
"numpy.array",
"numpy.linalg.inv",
"numpy.linalg.det"
] |
[((1019, 1030), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (1027, 1030), True, 'import numpy as np\n'), ((1032, 1043), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1040, 1043), True, 'import numpy as np\n'), ((1067, 1083), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (1080, 1083), True, 'import numpy as np\n'), ((1241, 1257), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (1254, 1257), True, 'import numpy as np\n'), ((1816, 1838), 'numpy.array', 'np.array', (['seq[-order:]'], {}), '(seq[-order:])\n', (1824, 1838), True, 'import numpy as np\n'), ((1417, 1443), 'numpy.array', 'np.array', (['seq[i - order:i]'], {}), '(seq[i - order:i])\n', (1425, 1443), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sample 10^6 particles from anisotropic Hernquist DF.
Created: February 2021
Author: <NAME>
"""
import sys
from emcee import EnsembleSampler as Sampler
import numpy as np
sys.path.append("../src")
from constants import G, M_sun, kpc
from hernquist import calc_DF_aniso
def hernquist_df_aniso(theta, M, a):
"""
Evaluate anisotropic Hernquist distribution function.
Calculates log-probability of given phase space position theta. Functional
    form is Eq. (44) in Naik et al. (2020).
Parameters
----------
theta: array-like, shape (6,)
Array containing phase space position (x, y, z, vx, vy, vz). UNITS:
metres and metres/second for positions/velocities respectively.
M: float
Total mass of Hernquist blob. UNITS: kilograms.
a: float
Scale radius of Hernquist blob. UNITS: metres.
Returns
-------
lnf: float
Unnormalised ln-probability associated with phase space position.
"""
q = theta[:3]
p = theta[3:]
f = calc_DF_aniso(q, p, M, a)
if f == 0:
return -1e+20
else:
lnf = np.log(f)
return lnf
def sample(N, M, a):
"""
    Sample N particles from the anisotropic Hernquist distribution function.
    The DF is parametrised by mass M and scale radius a.
    The sampler uses 100 MCMC walkers, each taking N iterations (after
    burn-in). These samples are then thinned by an interval of 100, giving N
    quasi-independent samples.
Parameters
----------
N: int
        Number of particles to sample. Note: this needs to be a multiple of 100 (the walker count).
M: float
Total mass of Hernquist blob. UNITS: kilograms.
a: float
Scale radius of Hernquist blob. UNITS: metres.
Returns
-------
pos: (N, 3) array
Positions of sampled particles, in Cartesian coordinates. UNITS:
metres.
vel: (N, 3) array
Velocities of sampled particles, in Cartesian coordinates. UNITS:
metres/second.
"""
# set up sampler
df_function = hernquist_df_aniso
nwalkers, ndim = 100, 6
n_burnin = 1000
assert N % nwalkers == 0
n_iter = N
s = Sampler(nwalkers, ndim, df_function, args=[M, a])
# set up initial walker positions
v_sig = 0.5 * np.sqrt(G * M / a) / np.sqrt(3)
sig = np.array([0.3 * a, 0.3 * a, 0.3 * a, v_sig, v_sig, v_sig])
p0 = -sig + 2 * sig * np.random.rand(nwalkers, ndim)
# burn in
print("\nBurning in...", flush=True)
s.run_mcmc(p0, n_burnin, progress=True)
# take final sample
p0 = s.chain[:, -1, :]
s.reset()
print("\n\nTaking final sample...", flush=True)
s.run_mcmc(p0, n_iter, progress=True, thin=100)
pos = s.flatchain[:, :3]
vel = s.flatchain[:, 3:]
return pos, vel
def downsample(pos, vel, a, x_truncation):
"""Downsample from truncated Hernquist."""
r = np.linalg.norm(pos, axis=-1)
allowed = np.where(r < x_truncation * a)[0]
inds = np.random.choice(allowed, size=N)
pos = pos[inds]
vel = vel[inds]
return pos, vel
if __name__ == '__main__':
M = 1e+10 * M_sun
a = 5 * kpc
N = 1000000
pos, vel = sample(2 * N, M, a)
pos, vel = downsample(pos, vel, a, x_truncation=200)
np.savez("hq_aniso_orig", pos=pos, vel=vel)
|
[
"numpy.savez",
"numpy.sqrt",
"numpy.random.rand",
"numpy.random.choice",
"numpy.where",
"numpy.log",
"emcee.EnsembleSampler",
"numpy.array",
"numpy.linalg.norm",
"hernquist.calc_DF_aniso",
"sys.path.append"
] |
[((223, 248), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (238, 248), False, 'import sys\n'), ((1068, 1093), 'hernquist.calc_DF_aniso', 'calc_DF_aniso', (['q', 'p', 'M', 'a'], {}), '(q, p, M, a)\n', (1081, 1093), False, 'from hernquist import calc_DF_aniso\n'), ((2235, 2284), 'emcee.EnsembleSampler', 'Sampler', (['nwalkers', 'ndim', 'df_function'], {'args': '[M, a]'}), '(nwalkers, ndim, df_function, args=[M, a])\n', (2242, 2284), True, 'from emcee import EnsembleSampler as Sampler\n'), ((2384, 2442), 'numpy.array', 'np.array', (['[0.3 * a, 0.3 * a, 0.3 * a, v_sig, v_sig, v_sig]'], {}), '([0.3 * a, 0.3 * a, 0.3 * a, v_sig, v_sig, v_sig])\n', (2392, 2442), True, 'import numpy as np\n'), ((2949, 2977), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(-1)'}), '(pos, axis=-1)\n', (2963, 2977), True, 'import numpy as np\n'), ((3037, 3070), 'numpy.random.choice', 'np.random.choice', (['allowed'], {'size': 'N'}), '(allowed, size=N)\n', (3053, 3070), True, 'import numpy as np\n'), ((3314, 3357), 'numpy.savez', 'np.savez', (['"""hq_aniso_orig"""'], {'pos': 'pos', 'vel': 'vel'}), "('hq_aniso_orig', pos=pos, vel=vel)\n", (3322, 3357), True, 'import numpy as np\n'), ((1155, 1164), 'numpy.log', 'np.log', (['f'], {}), '(f)\n', (1161, 1164), True, 'import numpy as np\n'), ((2363, 2373), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2370, 2373), True, 'import numpy as np\n'), ((2992, 3022), 'numpy.where', 'np.where', (['(r < x_truncation * a)'], {}), '(r < x_truncation * a)\n', (3000, 3022), True, 'import numpy as np\n'), ((2342, 2360), 'numpy.sqrt', 'np.sqrt', (['(G * M / a)'], {}), '(G * M / a)\n', (2349, 2360), True, 'import numpy as np\n'), ((2469, 2499), 'numpy.random.rand', 'np.random.rand', (['nwalkers', 'ndim'], {}), '(nwalkers, ndim)\n', (2483, 2499), True, 'import numpy as np\n')]
|
# coding: utf-8
import os, pickle, csv, json
import subprocess
from typing import NamedTuple, List, TextIO, Tuple, Dict, Optional, Union, Iterable, Hashable
import numpy as np
import pandas as pd
from scipy import stats
from itertools import product, groupby, takewhile
from collections import namedtuple, Counter
import multiprocessing
import logging
import string
import matplotlib
matplotlib.use("Agg")
# pids with missing data (i.e., pdbs missing for either sid, eid, and/or gid)
pids_missing_data = {'2000524',
'2001234',
'2001249',
'2001255',
'2001287',
'2001291',
'2001306',
'2001308',
'2001311',
'2002239',
'2002243',
'2002247',
'2002255',
'2002713',
'2002963',
'2002990',
'2002992',
'2003008',
'2003011',
'2003015',
'997529',
'996023'}
unfetched_pids = {'2000659',
'2001302',
'2002102',
'2002465',
'2002809',
'2002833',
'2002850',
'2003001',
'2003047',
'2003059',
'2003078',
'2003126',
'2003183',
'996313',
'996492',
'996508',
'997542',
'997940',
'998465',
'998529',
'998574'}
# fetched, but corrupt
bad_pids = {'1998935',
'2000659',
'2001302',
'2002102',
'2002465',
'2002809',
'2002833',
'2002850',
'2003078',
'2003126',
'2003183',
'2003763',
'2003832',
'997766'}
# stopped early due to crashes or errors
stopped_pids = {'2003699',
'2003183',
'2002494',
'2002247',
'2002912',
'2003801'}
# restarted version of stopped puzzle
restarted_pids = {'2003704',
'2002499',
'2002255',
'2002914',
'2003806'}
pids_missing_energies = {'996547'}
pids_missing_pdl_actions = {'998071',
'1998729',
'998219'}
skip_pids = pids_missing_energies.union(pids_missing_pdl_actions).union(bad_pids)
class EnergyComponent(NamedTuple):
name: str
weight: float
energy: float
class PDB_Info(NamedTuple):
sid: str
pid: str
uid: str
gid: str
sharing_gid: str
scoretype: str
pdl: Dict
energy: float
energy_components: List[EnergyComponent]
timestamp: int
parent_sid: Optional[str]
tmscore: float
deviations: np.ndarray
class SnapshotDelta(NamedTuple):
sid: str
parent_sid: Optional[str]
timestamp: int
action_diff: Counter
macro_diff: Counter
action_count: int
energy_diff: float
class SolvingLineVariant(NamedTuple):
action_count: int
time: int
indices: List[int]
class SolvingLine(NamedTuple):
action_count: int
time: int
pdb_infos: List[PDB_Info]
variants: List[SolvingLineVariant]
@property
def energies(self):
return [x.energy for x in self.pdb_infos]
class EvolvingLine(NamedTuple):
source: Dict
pdb_infos: List[PDB_Info]
@property
def energies(self):
return [x.energy for x in self.pdb_infos]
class PuzzleMeta(NamedTuple):
pid: str
best_tmscores: Dict
pfront: np.ndarray
upload_baseline: float
energy_baseline: float
structure: Dict
class PatternInstance(NamedTuple):
cid: int
uid: str
pid: str
start_idx: int
end_idx: int
class PatternInstanceExt(NamedTuple):
cid: int
uid: str
pid: str
start_idx: int
end_idx: int
start_pdb: PDB_Info
end_pdb: PDB_Info
pre_best: PDB_Info
post_best: PDB_Info
class SubPatternInstance(NamedTuple):
p: PatternInstance
label: str
start_idx: int
end_idx: int
class SubLookup(NamedTuple):
clusters: Dict[str, Dict[int, Dict[int, Dict[int, np.ndarray]]]] # (user to k to cid to sub_k to cluster labels)
mrfs: Dict[str, Dict[int, Dict[int, Dict[int, Dict[int, np.ndarray]]]]] # (user to k to cid to sub_k to mrf dictionary (cluster label to mrf))
models: Dict[str, Dict[int, Dict[int, Dict[int, Dict]]]] # (user to k to cid to sub_k to dict of ticc model parameters)
bics: Dict[str, Dict[int, Dict[int, Dict[int, float]]]] # (user to k to cid to sub_k to bic)
class SubSeriesLookup(NamedTuple):
patterns: Dict[Hashable, np.ndarray] # e.g., (uid, pid, start index) -> series for that pattern
series: np.ndarray
idx_lookup: Dict[Hashable, Tuple[int, int]]
class SubclusterSeries(NamedTuple):
labels: List[str]
series: np.ndarray
# type aliases
SubClusters = Dict[int, Dict[int, Dict[int, np.ndarray]]]
SubMRFs = Dict[int, Dict[int, Dict[int, Dict[int, np.ndarray]]]]
PatternLookup = Union[Dict[str, Iterable[PatternInstance]], Dict[int, Dict[int, Iterable[PatternInstance]]]]
@pd.api.extensions.register_series_accessor("foldit")
class FolditSeriesAccessor:
def __init__(self, pandas_obj: pd.Series):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj: pd.Series):
        # verify the series exposes 'lines' and 'evol_lines' (or is itself one of them)
if ('lines' not in obj.index or 'evol_lines' not in obj.index) and (obj.name != "lines" and obj.name != "evol_lines"):
raise AttributeError("Must have 'lines' and 'evol_lines'.")
@property
def solo_pdbs(self):
return [p for l in self._obj.lines for p in l.pdb_infos] if self._obj.lines else []
@property
def evol_pdbs(self):
return [p for l in self._obj.evol_lines for p in l.pdb_infos] if self._obj.evol_lines else []
@property
def solo_energies(self):
return [p.energy for p in self._obj.foldit.solo_pdbs]
@property
def evol_energies(self):
return [p.energy for p in self._obj.foldit.evol_pdbs]
@pd.api.extensions.register_dataframe_accessor("foldit")
class FolditAccessor:
    def __init__(self, pandas_obj: pd.DataFrame):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
    def _validate(obj: pd.DataFrame):
        # verify the frame has 'lines' and 'evol_lines' columns
if 'lines' not in obj.columns or 'evol_lines' not in obj.columns:
raise AttributeError("Must have 'lines' and 'evol_lines'.")
@property
def solo_pdbs(self):
return self._obj.apply(lambda r: r.foldit.solo_pdbs, axis=1)
@property
def evol_pdbs(self):
return self._obj.apply(lambda r: r.foldit.evol_pdbs, axis=1)
@property
def solo_energies(self):
return self._obj.apply(lambda r: r.foldit.solo_energies, axis=1)
@property
def evol_energies(self):
return self._obj.apply(lambda r: r.foldit.evol_energies, axis=1)
# @property
# def pdbs(self):
ROOT_NID = ('00000000-0000-0000-0000-000000000000', 0)
category_lookup = {
'overall': '992758',
'beginner': '992759',
'prediction': '992760',
'design': '992761',
'electron': '994237',
'contacts': '997946',
'symmetry': '992769',
'casp10': '992762',
'casp11': '997398',
'casp_roll': '993715',
'hand_folding': '994890',
'small_molecule_design': '2002074',
"pilot": "2004148",
'all': 'all', # dummy to allow select of all categorized puzzles
}
action_types = {
'optimize': {'ActionGlobalMinimize', 'ActionGlobalMinimizeBackbone', 'ActionGlobalMinimizeSidechains',
'ActionLocalMinimize', 'ActionRepack'},
'hybrid': {'ActionLocalMinimizePull', 'LoopHash', 'ActionBuild', 'ActionPullSidechain', 'ActionTweak',
'ActionRebuild'},
'manual': {'ActionSetPhiPsi', 'ActionJumpWidget', 'ActionRotamerCycle', 'ActionRotamerSelect'},
'guiding': {'ActionInsertCut', 'ActionLockToggle', 'ActionCopyToggle', 'ActionSecStructAssignHelix',
'ActionSecStructAssignLoop', 'ActionSecStructAssignSheet', 'ActionSecStructDSSP', 'ActionSecStructDrag',
'ActionBandAddAtomAtom', 'ActionBandAddDrag', 'ActionBandAddResRes', 'ActionBandDrag',
'ActionBandLength', 'ActionBandStrength'},
}
action_types['deliberate'] = action_types['hybrid'].union(action_types['manual']).union(action_types['guiding'])
def rmse(predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
def iden(x):
return x
def get_ranks(datafile):
puzzles = {}
with open("{}.csv".format(datafile)) as fp:
ranks_in = csv.DictReader(fp)
for row in ranks_in:
row['energy'] = float(row['best_score'])
row['best_score'] = max(float(row['best_score']) * -10 + 8000, 0)
pid = row['pid']
if pid not in puzzles:
puzzles[pid] = {
'groups': {},
'soloists': [],
'evolvers': [],
'categories': []
}
if row['gid'] == '0':
row['gid'] = 'NULL' # no sense in having both 0 and NULL for no group
gid = row['gid']
if gid != 'NULL':
gs = puzzles[pid]['groups']
if gid not in gs:
gs[gid] = {
'score': row['best_score'],
'type': row['type'],
'gid': gid,
'uid': row['uid'],
}
if gs[gid]['score'] < row['best_score']:
gs[gid]['score'] = row['best_score']
gs[gid]['type'] = row['type']
gs[gid]['uid'] = row['uid']
if row['type'] == '1':
puzzles[pid]['soloists'].append(row)
if row['type'] == '2':
puzzles[pid]['evolvers'].append(row)
for pid in puzzles:
p = puzzles[pid]
p['groups'] = list(p['groups'].values())
# reverse sorts to put them in descending order (top ranked should be first)
p['groups'].sort(key=lambda x: x['score'], reverse=True)
for i, g in enumerate(p['groups']):
g['rank'] = i
g['norm_rank'] = i / len(p['groups'])
p['soloists'].sort(key=lambda x: x['best_score'], reverse=True)
for i, s in enumerate(p['soloists']):
s['rank'] = i
s['norm_rank'] = i / len(p['soloists'])
p['evolvers'].sort(key=lambda x: x['best_score'], reverse=True)
for i, e in enumerate(p['evolvers']):
e['rank'] = i
e['norm_rank'] = i / len(p['evolvers'])
return puzzles
def get_ranks_labeled():
puzzles = get_ranks("data/rprp_puzzle_ranks_latest")
with open("data/puzzle_categories_latest.csv") as fp:
cat_in = csv.DictReader(fp)
for r in cat_in:
pid = r['nid']
if pid in puzzles:
puzzles[pid]['categories'] = r['categories'].split(',')
puzzles[pid]['categories'].append('all')
with open("data/puzzle_labels_latest.json") as fp:
lab_in = json.load(fp)
for r in lab_in:
pid = r['pid']
if pid in puzzles:
assert r['title'] is not None
puzzles[pid]['title'] = r['title']
if r['desc'] is not None:
puzzles[pid]['desc'] = r['desc']
return puzzles
def add_pdbs_to_ranks(puzzles):
print("loading pdbs")
with open("data/top_pdbs.pickle", 'rb') as pdb_fp:
pdbs = pickle.load(pdb_fp)
pdbs = [p for p in pdbs if 'PID' in p and len(p['PDL']) > 0]
print("grouping pdbs")
pdbs_by_pid = {pid: list(g) for pid, g in groupby(pdbs, lambda p: p['PID'])}
for pid in pids_missing_data.union(unfetched_pids):
pid in puzzles and puzzles.pop(pid)
for pid in puzzles.copy():
pid not in pdbs_by_pid and puzzles.pop(pid)
for pid, ps in pdbs_by_pid.items():
if pid in puzzles:
puzzles[pid]['pdbs'] = ps
def sig_test(a, b, fstr="{} (n={}) {} (n={})", normal=False, thresholds=frozenset()):
if normal:
t, p = stats.ttest_ind(a, b, equal_var=False)
else:
U2, p = stats.mannwhitneyu(np.array(a), np.array(b), use_continuity=True, alternative='two-sided')
U = min(U2, len(a) * len(b) - U2)
N = len(a) * len(b)
f = len(list(filter(lambda xy: xy[0] > xy[1], product(a, b)))) / N
u = len(list(filter(lambda xy: xy[0] < xy[1], product(a, b)))) / N
if ('p' not in thresholds or p < thresholds['p']) and ('r' not in thresholds or abs(f - u) > thresholds['r']):
print(fstr.format("mean={:.6f}, median={:.6f}, std={:.6f}".format(np.mean(a), np.median(a), np.std(a)), len(a),
"mean={:.6f}, median={:.6f}, std={:.6f}".format(np.mean(b), np.median(b), np.std(b)), len(b)))
if normal:
print("test statistic t: {:.6f}".format(t))
else:
print("<NAME> U: {:.6f}".format(U))
print("significance (two-tailed): {:.6f}".format(p))
print("rank-biserial correlation: {:.3f}".format(f - u))
return p, f - u
def get_atoms(pdb):
raw = [[float(x) for x in s.strip(' "[]').split(" ")] for s in pdb['ca'].split(",")]
if all(k == 0 for k in raw[-1]):
return np.array(raw[:-1])
# remove spurious atom at 0 0 0 that appears at the end of each of these
return np.array(raw)
def rmsd(X, Y):
# center of mass
X = X - X.mean(axis=0)
Y = Y - Y.mean(axis=0)
# covariance matrix
R = np.dot(X.T, Y)
V, S, Wt = np.linalg.svd(R)
d = (np.linalg.det(V) * np.linalg.det(Wt)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
U = np.dot(V, Wt)
Xp = np.dot(X, U)
deviations = np.linalg.norm(Xp - Y, axis=1)
return (deviations ** 2).sum() ** 0.5, deviations
# https://github.com/charnley/rmsd
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1471868/
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4321859/
def weighted_rmsd(X, Y, p=50):
weights = np.array([[1]] * len(Y))
wrmsd = 0
wrmsd_old = float('inf')
i = 0
# there may be rare cases where this doesn't converge, so limit to 1000 iterations just in case
while abs(wrmsd - wrmsd_old) > 1e-6 and i < 1000:
i += 1
wrmsd_old = wrmsd
# weighted center of mass
X = X - (weights * X).mean(axis=0)
Y = Y - (weights * Y).mean(axis=0)
# weighted covariance matrix
R = np.dot(X.T, weights * Y)
V, S, Wt = np.linalg.svd(R)
d = (np.linalg.det(V) * np.linalg.det(Wt)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
U = np.dot(V, Wt)
Xp = np.dot(X, U)
deviations = np.linalg.norm(Xp - Y, axis=1)
wrmsd = ((weights.flatten() * deviations ** 2).sum() / weights.sum()) ** 0.5
dp = np.percentile(deviations, p)
weights = np.exp(-deviations ** 2 / dp ** 2).reshape((len(deviations), 1))
return wrmsd, weights, deviations
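# Hedged sanity check for the Kabsch-style rmsd() above (synthetic coordinates, not Foldit data);
# kept behind a __main__ guard so importing this module is unaffected.
if __name__ == '__main__':
    _X = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., 1., 1.]])
    _c, _s = np.cos(0.3), np.sin(0.3)
    _R = np.array([[_c, -_s, 0.], [_s, _c, 0.], [0., 0., 1.]])  # rotation about z
    _dist, _dev = rmsd(_X @ _R.T, _X)
    assert _dist < 1e-8  # a pure rotation should be recovered exactly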
# take in index i and series of Unix-style timestamps
# return indices start, end representing the period containing i with no gaps of size break_threshold or larger
# break_threshold given in seconds
def expand_seed(i, timestamps, break_threshold=900):
start = end = i
i = end + 1
while i < len(timestamps) and timestamps[i] - timestamps[end] < break_threshold:
end = i
i += 1
return start, end
# takes in list of Unix-style timestamps
def get_sessions(timestamps):
sessions = []
i = 0
while i < len(timestamps):
sessions.append(expand_seed(i, timestamps))
i = sessions[-1][1] + 1
return sessions
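# Worked example (synthetic timestamps, added for clarity): with the default 900 s threshold,
# [0, 100, 200, 2000, 2100] contains a single 1800 s gap, so
# get_sessions([0, 100, 200, 2000, 2100]) == [(0, 2), (3, 4)].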
def time_splits_helper(timestamps, chunk, splits):
sessions = get_sessions(timestamps)
start_idx = end_idx = 0
time_left = chunk
session_idx = 0
ret = []
times = []
for i in range(splits):
logging.debug('split {}'.format(i))
while time_left > 0 and session_idx < len(sessions):
logging.debug('time left {}'.format(time_left))
ses = sessions[session_idx]
session_start, session_end = ses
if session_duration(ses, timestamps) <= time_left:
logging.debug('session {} {} fits'.format(session_idx, sessions[session_idx]))
end_idx = session_end
time_left -= session_duration(ses, timestamps)
session_idx += 1
if session_idx == len(sessions):
logging.debug('adding {} to the end'.format(start_idx))
ret.append((start_idx, len(timestamps)))
times.append(sum(session_duration((s, e), timestamps) for s, e in sessions if s >= start_idx))
logging.debug('time: {}'.format(times[-1]))
else:
ns, ne = sessions[session_idx]
minimal_addition = session_duration((ns, ne), timestamps) if ns == ne else timestamps[ns + 1] - \
timestamps[ns]
# the minimum we could add to the current split would put us further away than we currently are
if abs(time_left - minimal_addition) > abs(time_left):
times.append(session_duration((session_start, end_idx), timestamps) + sum(
session_duration((s, e), timestamps) for s, e in sessions if
s >= start_idx and e < session_start))
if start_idx == end_idx:
end_idx += 1
logging.debug("close as we can get, adding {} up to {}".format(start_idx, end_idx))
ret.append((start_idx, end_idx))
logging.debug('time: {}'.format(times[-1]))
start_idx = end_idx
time_left = 0
else:
if session_start == session_end:
end_idx = session_end
else:
end_idx = session_start + 1
while session_duration((session_start, end_idx), timestamps) < time_left:
end_idx += 1
if abs(time_left - (timestamps[end_idx] - timestamps[session_start])) > abs(
time_left - (
timestamps[end_idx - 1] - timestamps[session_start])) and end_idx > start_idx + 1:
end_idx -= 1
logging.debug('splitting session at {}'.format(end_idx))
sessions[session_idx] = (end_idx, session_end)
times.append(session_duration((session_start, end_idx), timestamps) + sum(
session_duration((s, e), timestamps) for s, e in sessions if s >= start_idx and e < session_start))
if start_idx == end_idx:
end_idx += 1
logging.debug('adding {} up to {}'.format(start_idx, end_idx))
ret.append((start_idx, end_idx))
logging.debug('time: {}'.format(times[-1]))
start_idx = end_idx
time_left = 0
time_left = chunk
return ret, times
def get_time_splits(time, timestamps, splits):
chunk = time / splits
ret, times = time_splits_helper(timestamps, chunk, splits)
while len(ret) < splits - 1 and len(timestamps) >= 2 * splits:
chunk *= 0.9
logging.debug("bad split possibly due to degenerate chunk size, trying with {}".format(chunk))
ret, times = time_splits_helper(timestamps, chunk, splits)
if len(ret) == splits - 1 and any(e - s > 0 for s, e in ret):
idx = np.argmax([t if s != e else 0 for (s, e), t in zip(ret, times)])
shifted = ret[:idx] + [(ret[idx][0], ret[idx][1] - 1)] + [(s - 1, e - 1) for s, e in ret[idx + 1:]] + [
(ret[-1][1] - 1, ret[-1][1])]
logging.debug("short one slice, shifting everything to make another ({} to {})".format(ret, shifted))
ret = shifted
if ret[-1][1] < len(timestamps):
logging.debug("extending final slice to the end ({} to {})".format(ret[-1][1], len(timestamps)))
ret = ret[:-1] + [(ret[-1][0], len(timestamps))]
assert len(ret) == splits or len(timestamps) < 2 * splits, "{} -- wanted {} splits, got {}".format(ret, splits,
len(ret))
# assert len(ret) == splits or splits > 12, "{} -- wanted {} splits, got {}".format(ret, splits, len(ret))
assert all(e1 == s2 for (s1, e1), (s2, e2) in zip(ret, ret[1:]))
covered_timestamps = np.concatenate([np.arange(s, e) for s, e in ret])
assert all(x in covered_timestamps for x in range(len(timestamps))), \
"{} -- not all timestamp indices accounted for: {}".format(ret, [x for x in range(len(timestamps)) if
x not in covered_timestamps])
# allowed_deviation = max([max(np.diff(timestamps[s:e]), default=0) for s, e in get_sessions(timestamps) if s != e], default=300) / 2
# assert all(abs(t - chunk) < max(allowed_deviation, chunk * 0.1) for t in times[:-1]) or len(timestamps) <= 2 * splits, \
# "{} -- splits deviate too far from target size ({} ± {}): {}".format(ret, chunk, max(allowed_deviation, chunk * 0.1), times)
return ret
def session_duration(session, timestamps):
st, en = session
if st == en:
# assume 5 minutes of work where we have just a single snapshot for the session, based on standard upload rate
return 300
return timestamps[en] - timestamps[st]
def time_played(timestamps):
return sum(session_duration(ses, timestamps) for ses in get_sessions(timestamps))
def align_timestamps(timestamps):
sessions = [slice(s, e + 1) for s, e in get_sessions(timestamps)]
ts_aligned = timestamps
for ses1, ses2 in zip(sessions, sessions[1:]):
adjustment = ts_aligned[ses2.start] - ts_aligned[ses1.stop - 1]
ts_aligned = np.concatenate((ts_aligned[:ses2.start], ts_aligned[ses2.start:] - adjustment + 900))
assert (np.diff(ts_aligned) <= 900).all()
return ts_aligned
def get_children(nid, history):
uuid, count = nid
main = list(filter(lambda x: x['uuid'] == uuid, history[nid])) if nid in history else []
if len(main) == 0 and count != 0:
main = list(filter(lambda x: x['uuid'] == uuid and x['count'] > count, history[(uuid, 0)]))
other = list(
filter(lambda x: x['uuid'] != uuid and x['parent_count'] == count, history[nid])) if nid in history else []
children = []
if len(main) > 0:
c = min(main, key=lambda x: x['count'])
children.append((c['uuid'], c['count']))
for r in other:
children.append((r['uuid'], r['count']))
return children
def get_nid(s):
return (s['uuid'], int(s['count']))
def output_atoms(atoms: np.ndarray, fp: TextIO) -> None:
i = 1
for ca in atoms:
fp.write("ATOM {:6d} CA XXX A {:3d} {:8.3f}{:8.3f}{:8.3f} 1.00 0.00\n".format(i, i, *ca))
i += 1
def tmscore(pairs: List[Tuple[str, str]], tmp_input_name: str, atoms_lookup: Dict) -> Dict:
if len(pairs) == 0:
return {}
logging.debug("{}: batch computing {} tmscores".format(tmp_input_name, len(pairs)))
if not os.path.exists(tmp_input_name):
os.makedirs(tmp_input_name)
# write the necessary atom files
sids = {s for ss in pairs for s in ss}
for sid in sids:
with open("{}/{}.atoms".format(tmp_input_name, sid), 'w') as fp:
output_atoms(atoms_lookup[sid], fp)
# empirically derived formula for chunksize to equalize batch time and spawning time
# based on estimates that batches run 100 scores in ~1.5s, and Python starts ~6 batches per second
chunksize = max(100, (len(pairs) / 0.09) ** 0.5)
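    # Illustrative numbers (added, not from the original authors): for 1e6 pairs this gives
    # chunksize ~= sqrt(1e6 / 0.09) ~= 3333 (~300 batches); the cap below then raises chunksize
    # so that at most ~cpu_count/4 batches are spawned.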
if len(pairs) // chunksize > (multiprocessing.cpu_count() / 4):
chunksize = len(pairs) / (
multiprocessing.cpu_count() / 4) # avoid spawning huge numbers of batches as this kills the performance
splits = np.array_split(pairs, len(pairs) // chunksize if len(pairs) > chunksize else 1)
ps = []
for i, split in enumerate(splits):
input_name = "{}/{}.tmscore_input".format(tmp_input_name, i)
with open(input_name, 'w') as fp:
for a, b in split:
fp.write("{} {}\n".format(a, b))
ps.append((subprocess.Popen(['./tmscore_batch.zsh', input_name], stdout=subprocess.PIPE, encoding='utf-8'),
input_name))
scores = []
for p, fname in ps:
scores.extend([s.split() for s in p.communicate()[0].splitlines()])
subprocess.run(['rm', fname])
subprocess.run(["rsync", "-a", "--delete", "tmp_data/empty_dir/", "{}/".format(tmp_input_name)])
return {(a, b): float(s) for a, b, s in scores}
def get_overlap(segment, target):
seg_sessions = get_sessions(segment)
tar_sessions = get_sessions(target)
tar_adj = []
for s, e in tar_sessions:
cands = [ses for ses in seg_sessions if target[s] < segment[ses[1]] and target[e] > segment[ses[0]]]
if len(cands) > 0:
start = s
while all(target[start] < segment[cs] for cs, ce in cands):
start += 1
# assert start < e
end = e
while all(target[end] > segment[ce] for cs, ce in cands):
end -= 1
# assert end >= start
if start <= end:
tar_adj.append((start, end))
return tar_adj
def load_frame(datafile):
df = pd.read_hdf(datafile, 'df')
bts = pd.read_hdf(datafile, 'bts')
puz = pd.read_hdf(datafile, 'puz').iloc[0] # tuple gets wrapped in a pandas data structure, so unwrap it here
logging.debug(datafile)
return df, bts, puz
def collect_pdl_entries(soln):
entries = list(takewhile(lambda x: x['header']['uid'] == soln.uid, soln.pdl[::-1]))
actions = {}
macros = {}
for e in entries:
for a, c in e['actions'].items():
actions[a] = actions.get(a, 0) + c
for m, c in e['macros'].items():
macros[m] = macros.get(m, 0) + c
return actions, macros
def get_data_value(uid, pid, key, data):
r = data[(data.uid == uid) & (data.pid == pid)]
return r[key].iloc[0]
def get_action_labels():
return ['band', 'build', 'cut', 'global_min', 'idealize', 'local_min', 'lock',
'rebuild', 'repack', 'assign_loop', 'save', 'reset', 'ss_load', 'ss_save']
def get_action_keys():
"""
index: action type
0: banding
1: build
2: cuts
3: global minimize
4: idealize
5: local minimize
6: locking
7: rebuild
8: repack
9: assign secondary structure loop
10: quicksave
11: reset recent best
12: load secondary structure
    13: save secondary structure
"""
actionset_band = {'ActionBandAddAtomAtom',
'ActionBandAddDrag',
'ActionBandAddResRes',
'ActionBandDrag',
'ActionBandLength',
'ActionBandStrength',
'ActionBandDelete',
'ActionBandDisableToggle'}
actionset_cut = {'ActionDeleteCut',
'ActionInsertCut'}
actionset_global = {'ActionGlobalMinimize',
'ActionGlobalMinimizeBackbone',
'ActionGlobalMinimizeSidechains'}
actionset_save = {'ActionStandaloneQuicksave', 'ActionNoviceQuicksave'}
actionset_load = {'ActionStandaloneResetRecentBest', 'ActionNoviceResetRecentBest'}
actionset_ss_save = {'ActionStandaloneSecstructSave', 'ActionNoviceSecstructSave'}
actionset_ss_load = {'ActionStandaloneSecstructLoad', 'ActionNoviceSecstructLoad'}
return [actionset_band, {'ActionBuild'}, actionset_cut, actionset_global, {'ActionIdealize'},
{'ActionLocalMinimize'},
{'ActionLockToggle'}, {'ActionRebuild'}, {'ActionRepack'}, {'ActionSecStructAssignLoop'}, actionset_save,
actionset_load, actionset_ss_load, actionset_ss_save]
def get_action_stream(action_diff: Counter):
keys = get_action_keys()
return [sum(action_diff.get(a, 0) for a in k) for k in keys]
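# Hedged example of the action-stream encoding (synthetic counts, not Foldit data); the indices
# follow the mapping documented in get_action_keys. Guarded so importing is unaffected.
if __name__ == '__main__':
    _diff = Counter({'ActionGlobalMinimize': 3, 'ActionBandAddDrag': 2, 'ActionRepack': 1})
    _stream = get_action_stream(_diff)
    assert _stream[0] == 2 and _stream[3] == 3 and _stream[8] == 1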
def get_pattern_label(p, cid, sub_k):
if sub_k == 0:
assert p.cid == cid
return str(cid)
return str(cid) + string.ascii_uppercase[p.cid]
|
[
"csv.DictReader",
"logging.debug",
"multiprocessing.cpu_count",
"numpy.array",
"scipy.stats.ttest_ind",
"numpy.linalg.norm",
"pandas.api.extensions.register_series_accessor",
"numpy.arange",
"os.path.exists",
"numpy.mean",
"subprocess.Popen",
"subprocess.run",
"itertools.product",
"numpy.diff",
"numpy.exp",
"numpy.dot",
"numpy.concatenate",
"pandas.read_hdf",
"matplotlib.use",
"itertools.takewhile",
"pickle.load",
"numpy.std",
"numpy.linalg.svd",
"numpy.median",
"itertools.groupby",
"os.makedirs",
"numpy.linalg.det",
"pandas.api.extensions.register_dataframe_accessor",
"json.load",
"numpy.percentile"
] |
[((386, 407), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (400, 407), False, 'import matplotlib\n'), ((5594, 5646), 'pandas.api.extensions.register_series_accessor', 'pd.api.extensions.register_series_accessor', (['"""foldit"""'], {}), "('foldit')\n", (5636, 5646), True, 'import pandas as pd\n'), ((6597, 6652), 'pandas.api.extensions.register_dataframe_accessor', 'pd.api.extensions.register_dataframe_accessor', (['"""foldit"""'], {}), "('foldit')\n", (6642, 6652), True, 'import pandas as pd\n'), ((14076, 14089), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (14084, 14089), True, 'import numpy as np\n'), ((14215, 14229), 'numpy.dot', 'np.dot', (['X.T', 'Y'], {}), '(X.T, Y)\n', (14221, 14229), True, 'import numpy as np\n'), ((14245, 14261), 'numpy.linalg.svd', 'np.linalg.svd', (['R'], {}), '(R)\n', (14258, 14261), True, 'import numpy as np\n'), ((14385, 14398), 'numpy.dot', 'np.dot', (['V', 'Wt'], {}), '(V, Wt)\n', (14391, 14398), True, 'import numpy as np\n'), ((14408, 14420), 'numpy.dot', 'np.dot', (['X', 'U'], {}), '(X, U)\n', (14414, 14420), True, 'import numpy as np\n'), ((14438, 14468), 'numpy.linalg.norm', 'np.linalg.norm', (['(Xp - Y)'], {'axis': '(1)'}), '(Xp - Y, axis=1)\n', (14452, 14468), True, 'import numpy as np\n'), ((26446, 26473), 'pandas.read_hdf', 'pd.read_hdf', (['datafile', '"""df"""'], {}), "(datafile, 'df')\n", (26457, 26473), True, 'import pandas as pd\n'), ((26484, 26512), 'pandas.read_hdf', 'pd.read_hdf', (['datafile', '"""bts"""'], {}), "(datafile, 'bts')\n", (26495, 26512), True, 'import pandas as pd\n'), ((26632, 26655), 'logging.debug', 'logging.debug', (['datafile'], {}), '(datafile)\n', (26645, 26655), False, 'import logging\n'), ((9203, 9221), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (9217, 9221), False, 'import os, pickle, csv, json\n'), ((11456, 11474), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (11470, 11474), False, 'import os, pickle, csv, json\n'), ((11759, 11772), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (11768, 11772), False, 'import os, pickle, csv, json\n'), ((12197, 12216), 'pickle.load', 'pickle.load', (['pdb_fp'], {}), '(pdb_fp)\n', (12208, 12216), False, 'import os, pickle, csv, json\n'), ((12801, 12839), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['a', 'b'], {'equal_var': '(False)'}), '(a, b, equal_var=False)\n', (12816, 12839), False, 'from scipy import stats\n'), ((13969, 13987), 'numpy.array', 'np.array', (['raw[:-1]'], {}), '(raw[:-1])\n', (13977, 13987), True, 'import numpy as np\n'), ((15159, 15183), 'numpy.dot', 'np.dot', (['X.T', '(weights * Y)'], {}), '(X.T, weights * Y)\n', (15165, 15183), True, 'import numpy as np\n'), ((15203, 15219), 'numpy.linalg.svd', 'np.linalg.svd', (['R'], {}), '(R)\n', (15216, 15219), True, 'import numpy as np\n'), ((15363, 15376), 'numpy.dot', 'np.dot', (['V', 'Wt'], {}), '(V, Wt)\n', (15369, 15376), True, 'import numpy as np\n'), ((15390, 15402), 'numpy.dot', 'np.dot', (['X', 'U'], {}), '(X, U)\n', (15396, 15402), True, 'import numpy as np\n'), ((15424, 15454), 'numpy.linalg.norm', 'np.linalg.norm', (['(Xp - Y)'], {'axis': '(1)'}), '(Xp - Y, axis=1)\n', (15438, 15454), True, 'import numpy as np\n'), ((15553, 15581), 'numpy.percentile', 'np.percentile', (['deviations', 'p'], {}), '(deviations, p)\n', (15566, 15581), True, 'import numpy as np\n'), ((22856, 22945), 'numpy.concatenate', 'np.concatenate', (['(ts_aligned[:ses2.start], ts_aligned[ses2.start:] - adjustment + 900)'], {}), '((ts_aligned[:ses2.start], 
ts_aligned[ses2.start:] -\n adjustment + 900))\n', (22870, 22945), True, 'import numpy as np\n'), ((24161, 24191), 'os.path.exists', 'os.path.exists', (['tmp_input_name'], {}), '(tmp_input_name)\n', (24175, 24191), False, 'import os, pickle, csv, json\n'), ((24201, 24228), 'os.makedirs', 'os.makedirs', (['tmp_input_name'], {}), '(tmp_input_name)\n', (24212, 24228), False, 'import os, pickle, csv, json\n'), ((25532, 25561), 'subprocess.run', 'subprocess.run', (["['rm', fname]"], {}), "(['rm', fname])\n", (25546, 25561), False, 'import subprocess\n'), ((26732, 26799), 'itertools.takewhile', 'takewhile', (["(lambda x: x['header']['uid'] == soln.uid)", 'soln.pdl[::-1]'], {}), "(lambda x: x['header']['uid'] == soln.uid, soln.pdl[::-1])\n", (26741, 26799), False, 'from itertools import product, groupby, takewhile\n'), ((12357, 12390), 'itertools.groupby', 'groupby', (['pdbs', "(lambda p: p['PID'])"], {}), "(pdbs, lambda p: p['PID'])\n", (12364, 12390), False, 'from itertools import product, groupby, takewhile\n'), ((12885, 12896), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (12893, 12896), True, 'import numpy as np\n'), ((12898, 12909), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (12906, 12909), True, 'import numpy as np\n'), ((14271, 14287), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (14284, 14287), True, 'import numpy as np\n'), ((14290, 14307), 'numpy.linalg.det', 'np.linalg.det', (['Wt'], {}), '(Wt)\n', (14303, 14307), True, 'import numpy as np\n'), ((21460, 21475), 'numpy.arange', 'np.arange', (['s', 'e'], {}), '(s, e)\n', (21469, 21475), True, 'import numpy as np\n'), ((24731, 24758), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (24756, 24758), False, 'import multiprocessing\n'), ((26523, 26551), 'pandas.read_hdf', 'pd.read_hdf', (['datafile', '"""puz"""'], {}), "(datafile, 'puz')\n", (26534, 26551), True, 'import pandas as pd\n'), ((15233, 15249), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (15246, 15249), True, 'import numpy as np\n'), ((15252, 15269), 'numpy.linalg.det', 'np.linalg.det', (['Wt'], {}), '(Wt)\n', (15265, 15269), True, 'import numpy as np\n'), ((15600, 15634), 'numpy.exp', 'np.exp', (['(-deviations ** 2 / dp ** 2)'], {}), '(-deviations ** 2 / dp ** 2)\n', (15606, 15634), True, 'import numpy as np\n'), ((22954, 22973), 'numpy.diff', 'np.diff', (['ts_aligned'], {}), '(ts_aligned)\n', (22961, 22973), True, 'import numpy as np\n'), ((24820, 24847), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (24845, 24847), False, 'import multiprocessing\n'), ((25279, 25379), 'subprocess.Popen', 'subprocess.Popen', (["['./tmscore_batch.zsh', input_name]"], {'stdout': 'subprocess.PIPE', 'encoding': '"""utf-8"""'}), "(['./tmscore_batch.zsh', input_name], stdout=subprocess.\n PIPE, encoding='utf-8')\n", (25295, 25379), False, 'import subprocess\n'), ((13073, 13086), 'itertools.product', 'product', (['a', 'b'], {}), '(a, b)\n', (13080, 13086), False, 'from itertools import product, groupby, takewhile\n'), ((13144, 13157), 'itertools.product', 'product', (['a', 'b'], {}), '(a, b)\n', (13151, 13157), False, 'from itertools import product, groupby, takewhile\n'), ((13355, 13365), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (13362, 13365), True, 'import numpy as np\n'), ((13367, 13379), 'numpy.median', 'np.median', (['a'], {}), '(a)\n', (13376, 13379), True, 'import numpy as np\n'), ((13381, 13390), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (13387, 13390), True, 'import numpy as np\n'), 
((13475, 13485), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (13482, 13485), True, 'import numpy as np\n'), ((13487, 13499), 'numpy.median', 'np.median', (['b'], {}), '(b)\n', (13496, 13499), True, 'import numpy as np\n'), ((13501, 13510), 'numpy.std', 'np.std', (['b'], {}), '(b)\n', (13507, 13510), True, 'import numpy as np\n')]
|
import os
import argparse
import numpy as np
import scipy
import imageio
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import graphkke.generate_graphs.graph_generation as graph_generation
import graphkke.generate_graphs.generate_SDE as generate_SDE
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str,
default='/home/katerynam/work/data/artificial/test/')
parser.add_argument('--n_graphs', type=int,
default=500)
parser.add_argument('--n_nodes', type=int,
default=300)
parser.add_argument('--radius', type=float,
default=0.6)
parser.add_argument('--n_wells', type=int,
default=3)
parser.add_argument('--out_state', type=int,
default=0.1)
parser.add_argument('--if_plot', type=bool,
default=True)
parser.add_argument('--seed', type=int,
default=7)
args = parser.parse_args()
def randb(n, b):
return b[0] + (b[1] - b[0]) * scipy.rand(1, n)
def rand(n, bounds, boxes):
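    # Note (added): `boxes` is only used for its element count (the dimensionality d);
    # the per-dimension sampling intervals come from `bounds`.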
d = boxes.size
x = np.zeros([d, n])
for i in range(d):
x[i, :] = randb(n, bounds[i, :])
return x
if __name__ == '__main__':
lm = generate_SDE.LemonSlice2D([0.9, 0.9], args.n_graphs, 2, args.n_wells)
x = rand(1, np.asarray([[-0.5, 0.5], [-0.5, 0.5]]), np.asarray([10, 10]))
sde_traj = np.asarray(lm.sim_determ_system(x[:, 0]))
k_means = KMeans(n_clusters=args.n_wells).fit(sde_traj)
graph_states = k_means.labels_
# sde_traj = np.load(args.input_dir + 'traj.npy')
# graph_states = np.load(args.input_dir + 'graph_states.npy')
plt.scatter(sde_traj[:, 0], sde_traj[:, 1], c=graph_states)
plt.show()
sim_graph = graph_generation.LemonGraph(args.radius, args.n_graphs, args.n_nodes,
graph_states)
graphs, images, node_points = sim_graph.create_adj_matrix(sde_traj, args.out_state, args.if_plot)
for i, image in enumerate(images):
imageio.imwrite(args.input_dir + f'/traj_{i}.png', image)
imageio.mimsave(args.input_dir + '/anim.gif', images, fps=2)
np.save(os.path.join(args.input_dir + 'traj.npy'), sde_traj)
np.save(os.path.join(args.input_dir + 'graphs.npy'), graphs)
np.save(os.path.join(args.input_dir + 'graph_states.npy'), graph_states)
np.save(os.path.join(args.input_dir + 'node_points.npy'), node_points)
|
[
"sklearn.cluster.KMeans",
"graphkke.generate_graphs.generate_SDE.LemonSlice2D",
"argparse.ArgumentParser",
"imageio.imwrite",
"numpy.asarray",
"os.path.join",
"numpy.zeros",
"graphkke.generate_graphs.graph_generation.LemonGraph",
"matplotlib.pyplot.scatter",
"imageio.mimsave",
"scipy.rand",
"matplotlib.pyplot.show"
] |
[((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n'), ((1114, 1130), 'numpy.zeros', 'np.zeros', (['[d, n]'], {}), '([d, n])\n', (1122, 1130), True, 'import numpy as np\n'), ((1246, 1315), 'graphkke.generate_graphs.generate_SDE.LemonSlice2D', 'generate_SDE.LemonSlice2D', (['[0.9, 0.9]', 'args.n_graphs', '(2)', 'args.n_wells'], {}), '([0.9, 0.9], args.n_graphs, 2, args.n_wells)\n', (1271, 1315), True, 'import graphkke.generate_graphs.generate_SDE as generate_SDE\n'), ((1674, 1733), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sde_traj[:, 0]', 'sde_traj[:, 1]'], {'c': 'graph_states'}), '(sde_traj[:, 0], sde_traj[:, 1], c=graph_states)\n', (1685, 1733), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1853), 'graphkke.generate_graphs.graph_generation.LemonGraph', 'graph_generation.LemonGraph', (['args.radius', 'args.n_graphs', 'args.n_nodes', 'graph_states'], {}), '(args.radius, args.n_graphs, args.n_nodes,\n graph_states)\n', (1793, 1853), True, 'import graphkke.generate_graphs.graph_generation as graph_generation\n'), ((2106, 2166), 'imageio.mimsave', 'imageio.mimsave', (["(args.input_dir + '/anim.gif')", 'images'], {'fps': '(2)'}), "(args.input_dir + '/anim.gif', images, fps=2)\n", (2121, 2166), False, 'import imageio\n'), ((1333, 1371), 'numpy.asarray', 'np.asarray', (['[[-0.5, 0.5], [-0.5, 0.5]]'], {}), '([[-0.5, 0.5], [-0.5, 0.5]])\n', (1343, 1371), True, 'import numpy as np\n'), ((1373, 1393), 'numpy.asarray', 'np.asarray', (['[10, 10]'], {}), '([10, 10])\n', (1383, 1393), True, 'import numpy as np\n'), ((2044, 2101), 'imageio.imwrite', 'imageio.imwrite', (["(args.input_dir + f'/traj_{i}.png')", 'image'], {}), "(args.input_dir + f'/traj_{i}.png', image)\n", (2059, 2101), False, 'import imageio\n'), ((2180, 2221), 'os.path.join', 'os.path.join', (["(args.input_dir + 'traj.npy')"], {}), "(args.input_dir + 'traj.npy')\n", (2192, 2221), False, 'import os\n'), ((2245, 2288), 'os.path.join', 'os.path.join', (["(args.input_dir + 'graphs.npy')"], {}), "(args.input_dir + 'graphs.npy')\n", (2257, 2288), False, 'import os\n'), ((2310, 2359), 'os.path.join', 'os.path.join', (["(args.input_dir + 'graph_states.npy')"], {}), "(args.input_dir + 'graph_states.npy')\n", (2322, 2359), False, 'import os\n'), ((2387, 2435), 'os.path.join', 'os.path.join', (["(args.input_dir + 'node_points.npy')"], {}), "(args.input_dir + 'node_points.npy')\n", (2399, 2435), False, 'import os\n'), ((1040, 1056), 'scipy.rand', 'scipy.rand', (['(1)', 'n'], {}), '(1, n)\n', (1050, 1056), False, 'import scipy\n'), ((1467, 1498), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'args.n_wells'}), '(n_clusters=args.n_wells)\n', (1473, 1498), False, 'from sklearn.cluster import KMeans\n')]
|
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_finance import candlestick_ohlc
# if a GUI backend is needed: import matplotlib as mpl; mpl.use('TkAgg')
import pandas as pd
import numpy as np
from datetime import datetime
df = pd.read_csv('BitMEX-OHLCV-1d.csv')
df.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
chart_figure = plt.figure(figsize=(10, 5))
chart_figure.set_facecolor('w')
chart_gridspec = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axes = []
axes.append(plt.subplot(chart_gridspec[0]))
axes.append(plt.subplot(chart_gridspec[1], sharex=axes[0]))
axes[0].get_xaxis().set_visible(False)
x = np.arange(len(df.index))
ohlc = df[['open', 'high', 'low', 'close']].astype(int).values
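# candlestick_ohlc expects rows of (x, open, high, low, close), so prepend the integer x index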
dohlc = np.hstack((np.reshape(x, (-1, 1)), ohlc))
candlestick_ohlc(axes[0], dohlc, width=0.5, colorup='r', colordown='b')
axes[1].bar(x, df.volume, color='k', width=0.6, align='center')
plt.tight_layout()
plt.show()
|
[
"mpl_finance.candlestick_ohlc",
"numpy.reshape",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((236, 270), 'pandas.read_csv', 'pd.read_csv', (['"""BitMEX-OHLCV-1d.csv"""'], {}), "('BitMEX-OHLCV-1d.csv')\n", (247, 270), True, 'import pandas as pd\n'), ((351, 378), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (361, 378), True, 'import matplotlib.pyplot as plt\n'), ((428, 473), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[3, 1]'}), '(2, 1, height_ratios=[3, 1])\n', (445, 473), True, 'import matplotlib.gridspec as gridspec\n'), ((770, 841), 'mpl_finance.candlestick_ohlc', 'candlestick_ohlc', (['axes[0]', 'dohlc'], {'width': '(0.5)', 'colorup': '"""r"""', 'colordown': '"""b"""'}), "(axes[0], dohlc, width=0.5, colorup='r', colordown='b')\n", (786, 841), False, 'from mpl_finance import candlestick_ohlc\n'), ((908, 926), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (924, 926), True, 'import matplotlib.pyplot as plt\n'), ((927, 937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (935, 937), True, 'import matplotlib.pyplot as plt\n'), ((496, 526), 'matplotlib.pyplot.subplot', 'plt.subplot', (['chart_gridspec[0]'], {}), '(chart_gridspec[0])\n', (507, 526), True, 'import matplotlib.pyplot as plt\n'), ((540, 586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['chart_gridspec[1]'], {'sharex': 'axes[0]'}), '(chart_gridspec[1], sharex=axes[0])\n', (551, 586), True, 'import matplotlib.pyplot as plt\n'), ((739, 761), 'numpy.reshape', 'np.reshape', (['x', '(-1, 1)'], {}), '(x, (-1, 1))\n', (749, 761), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from __future__ import division, print_function
import numpy as np
import rospy
from rospkg.rospack import RosPack
from copy import deepcopy
from tf2_ros import TransformListener, Buffer
from bopt_grasp_quality.srv import bopt, boptResponse
from bayesian_optimization import Random_Explorer
from bayesian_optimization.opt_nodes import RS_Node
# from math import nan
from geometry_msgs.msg import PoseStamped, Pose, Transform
def TF2Pose(TF_msg):
new_pose = PoseStamped()
new_pose.header = TF_msg.header
new_pose.pose.position.x = TF_msg.transform.translation.x
new_pose.pose.position.y = TF_msg.transform.translation.y
new_pose.pose.position.z = TF_msg.transform.translation.z
new_pose.pose.orientation.x = TF_msg.transform.rotation.x
new_pose.pose.orientation.y = TF_msg.transform.rotation.y
new_pose.pose.orientation.z = TF_msg.transform.rotation.z
new_pose.pose.orientation.w = TF_msg.transform.rotation.w
return new_pose
if __name__ == "__main__":
rospy.init_node('ros_bo')
# lb_y = rospy.get_param('~lb_x', -.2)
# ub_y = rospy.get_param('~ub_x', .2)
lb_x = [float(xx) for xx in rospy.get_param('~lb_x', [-.2, 0., -.2])]
ub_x = [float(xx) for xx in rospy.get_param('~ub_x', [.2, 0., .2])]
ee_link = rospy.get_param('~ee_link', 'hand_root')
base_link = rospy.get_param('~base_link', 'world')
service_name = rospy.get_param('~commander_service', 'bayes_optimization')
n_iter = rospy.get_param('~search_iters', 20)
resolution = rospy.get_param('~resolution', .001)
tf_buffer = Buffer(rospy.Duration(50))
tf_listener = TransformListener(tf_buffer)
rospy.loginfo(rospy.get_name().split('/')[1] + ': Initialization....')
rospy.loginfo(rospy.get_name().split('/')[1] + ': Getting current pose....')
rospy.sleep(0.5)
try:
ARM_TF = tf_buffer.lookup_transform(base_link, ee_link, rospy.Time().now(), rospy.Duration(0.1))
current_pose = TF2Pose(ARM_TF)
except Exception as e:
rospy.logerr('error in finding the arm...')
rospy.logerr('Starting at (0, 0, 0), (0, 0, 0, 1)')
current_pose = PoseStamped()
current_pose.pose.orientation.w = 1.
pose = [
[current_pose.pose.position.x, current_pose.pose.position.y, current_pose.pose.position.z],
[current_pose.pose.orientation.x, current_pose.pose.orientation.y, current_pose.pose.orientation.z, current_pose.pose.orientation.w]]
rospy.loginfo(
rospy.get_name().split('/')[1] + ': starting at: ({:.3f}, {:.3f}, {:.3f})-({:.3f}, {:.3f}, {:.3f}, {:.3f})'.format(*pose[0] + pose[1])
)
n = len(lb_x)
init_pos = np.array([
current_pose.pose.position.x,
current_pose.pose.position.y,
current_pose.pose.position.z])
assert(len(lb_x) == len(ub_x))
params = {
Random_Explorer.PARAMS.iters :n_iter,
Random_Explorer.PARAMS.init_pos : init_pos,
Random_Explorer.PARAMS.sampling : [resolution] * n}
# lb = current_pose.pose.position.y + lb_x * np.ones((n,))
# ub = current_pose.pose.position.y + ub_x * np.ones((n,))
lb = init_pos[np.arange(len(lb_x))] + lb_x - 1e-10
ub = init_pos[np.arange(len(ub_x))] + ub_x
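    # Note (added): the bounds are centred on the measured start pose; the 1e-10 offset on the
    # lower bound presumably keeps the initial position strictly inside the search box.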
RS_Node(n, params, lb=lb, ub=ub, init_pose=current_pose.pose, service_name=service_name)
|
[
"rospy.logerr",
"tf2_ros.TransformListener",
"rospy.init_node",
"rospy.get_param",
"numpy.array",
"rospy.Time",
"geometry_msgs.msg.PoseStamped",
"rospy.get_name",
"rospy.Duration",
"rospy.sleep",
"bayesian_optimization.opt_nodes.RS_Node"
] |
[((486, 499), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (497, 499), False, 'from geometry_msgs.msg import PoseStamped, Pose, Transform\n'), ((1025, 1050), 'rospy.init_node', 'rospy.init_node', (['"""ros_bo"""'], {}), "('ros_bo')\n", (1040, 1050), False, 'import rospy\n'), ((1297, 1337), 'rospy.get_param', 'rospy.get_param', (['"""~ee_link"""', '"""hand_root"""'], {}), "('~ee_link', 'hand_root')\n", (1312, 1337), False, 'import rospy\n'), ((1354, 1392), 'rospy.get_param', 'rospy.get_param', (['"""~base_link"""', '"""world"""'], {}), "('~base_link', 'world')\n", (1369, 1392), False, 'import rospy\n'), ((1412, 1471), 'rospy.get_param', 'rospy.get_param', (['"""~commander_service"""', '"""bayes_optimization"""'], {}), "('~commander_service', 'bayes_optimization')\n", (1427, 1471), False, 'import rospy\n'), ((1485, 1521), 'rospy.get_param', 'rospy.get_param', (['"""~search_iters"""', '(20)'], {}), "('~search_iters', 20)\n", (1500, 1521), False, 'import rospy\n'), ((1539, 1576), 'rospy.get_param', 'rospy.get_param', (['"""~resolution"""', '(0.001)'], {}), "('~resolution', 0.001)\n", (1554, 1576), False, 'import rospy\n'), ((1638, 1666), 'tf2_ros.TransformListener', 'TransformListener', (['tf_buffer'], {}), '(tf_buffer)\n', (1655, 1666), False, 'from tf2_ros import TransformListener, Buffer\n'), ((1827, 1843), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (1838, 1843), False, 'import rospy\n'), ((2689, 2793), 'numpy.array', 'np.array', (['[current_pose.pose.position.x, current_pose.pose.position.y, current_pose.\n pose.position.z]'], {}), '([current_pose.pose.position.x, current_pose.pose.position.y,\n current_pose.pose.position.z])\n', (2697, 2793), True, 'import numpy as np\n'), ((3267, 3360), 'bayesian_optimization.opt_nodes.RS_Node', 'RS_Node', (['n', 'params'], {'lb': 'lb', 'ub': 'ub', 'init_pose': 'current_pose.pose', 'service_name': 'service_name'}), '(n, params, lb=lb, ub=ub, init_pose=current_pose.pose, service_name=\n service_name)\n', (3274, 3360), False, 'from bayesian_optimization.opt_nodes import RS_Node\n'), ((1600, 1618), 'rospy.Duration', 'rospy.Duration', (['(50)'], {}), '(50)\n', (1614, 1618), False, 'import rospy\n'), ((1169, 1212), 'rospy.get_param', 'rospy.get_param', (['"""~lb_x"""', '[-0.2, 0.0, -0.2]'], {}), "('~lb_x', [-0.2, 0.0, -0.2])\n", (1184, 1212), False, 'import rospy\n'), ((1243, 1284), 'rospy.get_param', 'rospy.get_param', (['"""~ub_x"""', '[0.2, 0.0, 0.2]'], {}), "('~ub_x', [0.2, 0.0, 0.2])\n", (1258, 1284), False, 'import rospy\n'), ((1937, 1956), 'rospy.Duration', 'rospy.Duration', (['(0.1)'], {}), '(0.1)\n', (1951, 1956), False, 'import rospy\n'), ((2032, 2075), 'rospy.logerr', 'rospy.logerr', (['"""error in finding the arm..."""'], {}), "('error in finding the arm...')\n", (2044, 2075), False, 'import rospy\n'), ((2084, 2135), 'rospy.logerr', 'rospy.logerr', (['"""Starting at (0, 0, 0), (0, 0, 0, 1)"""'], {}), "('Starting at (0, 0, 0), (0, 0, 0, 1)')\n", (2096, 2135), False, 'import rospy\n'), ((2160, 2173), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2171, 2173), False, 'from geometry_msgs.msg import PoseStamped, Pose, Transform\n'), ((1917, 1929), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (1927, 1929), False, 'import rospy\n'), ((1685, 1701), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1699, 1701), False, 'import rospy\n'), ((1760, 1776), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1774, 1776), False, 'import rospy\n'), ((2510, 2526), 'rospy.get_name', 'rospy.get_name', 
([], {}), '()\n', (2524, 2526), False, 'import rospy\n')]
|
#!/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as nl
import scipy.linalg as sl
import scipy.stats as ss
import time
aca = np.ascontiguousarray
def nul(n):
return np.zeros((n, n))
def iuc(x, y):
"""
Checks if pair of generalized EVs x,y is inside the unit circle. Here for legacy reasons
"""
out = np.empty_like(x, dtype=bool)
nonzero = y != 0
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = abs(x[nonzero] / y[nonzero]) < 1.0
return out
def ouc(x, y):
"""
Check if pair of generalized EVs x,y is outside the unit circle. Here for legacy reasons
"""
# stolen from scipy and inverted
out = np.empty_like(x, dtype=bool)
nonzero = y != 0
# handles (x, y) = (0, 0) too
out[~nonzero] = True
out[nonzero] = abs(x[nonzero] / y[nonzero]) > 1.0
return out
def klein(A, B=None, nstates=None, verbose=False, force=False):
"""
Klein's method
"""
st = time.time()
if B is None:
B = np.eye(A.shape[0])
SS, TT, alp, bet, Q, Z = sl.ordqz(A, B, sort="ouc")
if np.any(np.isclose(alp, bet)):
mess = " Warning: unit root detected!"
else:
mess = ""
# check for precision
if not fast0(Q @ SS @ Z.T - A, 2):
raise ValueError("Numerical errors in QZ")
if verbose > 1:
out = np.empty_like(alp)
nonzero = bet != 0
out[~nonzero] = np.inf * np.abs(alp[~nonzero])
out[nonzero] = alp[nonzero] / bet[nonzero]
print(
"[RE solver:]".ljust(15, " ") + " Generalized EVs:\n", np.sort(np.abs(out))
)
# check for Blanchard-Kahn
out = ouc(alp, bet)
if not nstates:
nstates = sum(out)
else:
if not nstates == sum(out):
mess = (
"B-K condition not satisfied: %s states but %s Evs inside the unit circle."
% (nstates, sum(out))
+ mess
)
if not force:
raise ValueError(mess)
elif verbose:
print(mess)
S11 = SS[:nstates, :nstates]
T11 = TT[:nstates, :nstates]
Z11 = Z[:nstates, :nstates]
Z21 = Z[nstates:, :nstates]
# changed from sl to nl because of stability:
omg = Z21 @ nl.inv(Z11)
lam = Z11 @ nl.inv(S11) @ T11 @ nl.inv(Z11)
if verbose:
print(
"[RE solver:]".ljust(15, " ")
+ " Done in %s. Determinant of `Z11` is %1.2e. There are %s EVs o.u.c. (of %s)."
% (np.round((time.time() - st), 5), nl.det(Z11), sum(out), len(out))
+ mess
)
return omg, lam
# def re_bk(A, B=None, d_endo=None, verbose=False, force=False):
# """
# Klein's method
# """
# # TODO: rename this
# print('[RE solver:]'.ljust(15, ' ') +
# ' `re_bk` is depreciated. Use `klein` instead.')
# if B is None:
# B = np.eye(A.shape[0])
# MM, PP, alp, bet, Q, Z = sl.ordqz(A, B, sort='iuc')
# if not fast0(Q @ MM @ Z.T - A, 2):
# raise ValueError('Numerical errors in QZ')
# if verbose > 1:
# print('[RE solver:]'.ljust(15, ' ') +
# ' Pairs of `alp` and `bet`:\n', np.vstack((alp, bet)).T)
# out = ouc(alp, bet)
# if not d_endo:
# d_endo = sum(out)
# else:
# if sum(out) > d_endo:
# mess = 'B-K condition not satisfied: %s EVs outside the unit circle for %s forward looking variables.' % (
# sum(out), d_endo)
# elif sum(out) < d_endo:
# mess = 'B-K condition not satisfied: %s EVs outside the unit circle for %s forward looking variables.' % (
# sum(out), d_endo)
# else:
# mess = ''
# if mess and not force:
# raise ValueError(mess)
# elif mess and verbose:
# print(mess)
# Z21 = Z.T[-d_endo:, :d_endo]
# Z22 = Z.T[-d_endo:, d_endo:]
# if verbose:
# print('[RE solver:]'.ljust(
# 15, ' ')+' Determinant of `Z21` is %1.2e. There are %s EVs o.u.c.' % (nl.det(Z21), sum(out)))
# return -nl.inv(Z21) @ Z22
def lti(AA, BB, CC, dimp, dimq, tol=1e-6, check=False, verbose=False):
"""standard linear time iteration"""
if check:
pass
g = np.eye(dimq + dimp)
norm = tol + 1
icnt = 0
while norm > tol:
gn = g
g = -nl.solve(BB + AA @ g, CC)
norm = np.max(np.abs(gn - g))
icnt += 1
if verbose:
print(icnt)
omg = g[dimq:, :dimq]
lam = g[:dimq, :dimq]
return omg, lam
def speed_kills(A, B, dimp, dimq, selector=None, tol=1e-6, check=False, verbose=False):
"""Improved linear time iteration"""
q, A = nl.qr(A)
B = q.T @ B
B11i = nl.inv(B[dimq:, dimq:])
A[dimq:] = B11i @ A[dimq:]
B[dimq:] = B11i @ B[dimq:]
A[:dimq] -= B[:dimq, dimq:] @ A[dimq:]
B[:dimq, :dimq] -= B[:dimq, dimq:] @ B[dimq:, :dimq]
B[:dimq, dimq:] = 0
B[dimq:, dimq:] = np.eye(dimp)
A1 = A[:dimq, :dimq]
A3 = A[dimq:, dimq:]
A2 = A[:dimq, dimq:]
B1 = B[:dimq, :dimq]
B2 = B[dimq:, :dimq]
g = -B2
norm = tol + 1
icnt = 0
    max_iter = 1000  # iteration cap checked below; value assumed here since the original leaves max_iter undefined
while norm > tol:
gn = g
g = A3 @ g @ nl.solve(A1 + A2 @ g, B1) - B2
if selector is not None:
norm = np.max(np.abs(gn - g)[selector])
else:
norm = np.max(np.abs(gn - g))
icnt += 1
if verbose:
print(icnt)
if icnt == max_iter:
raise Exception("iteration did not converge")
return g, -nl.inv(A[:dimq, :dimq] + A2 @ g) @ B1
def fast0(A, mode=-1, tol=1e-08):
con = abs(A) < tol
if mode == -1:
return con
elif mode == 0:
return con.all(axis=0)
elif mode == 1:
return con.all(axis=1)
else:
return con.all()
def map2arr(iterator, return_np_array=True, check_nones=True):
"""Function to cast result from `map` to a tuple of stacked results
    By default, this returns numpy arrays. Automatically checks if the map object is a
    tuple, and if not, just one object is returned (instead of a tuple). Be warned, this
    does not work if the result of interest of the mapped function is a single tuple.
Parameters
----------
iterator : iter
the iterator returning from `map`
Returns
-------
numpy array (optional: list)
"""
res = ()
mode = 0
for obj in iterator:
if check_nones and obj is None:
continue
if not mode:
if isinstance(obj, tuple):
for entry in obj:
res = res + ([entry],)
mode = 1
else:
res = [obj]
mode = 2
else:
if mode == 1:
for no, entry in enumerate(obj):
res[no].append(entry)
else:
res.append(obj)
if return_np_array:
if mode == 1:
res = tuple(np.array(tupo) for tupo in res)
else:
res = np.array(res)
return res
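# Hedged usage sketch for map2arr (added; not part of the original module):
if __name__ == '__main__':
    _xs, _squares = map2arr(map(lambda x: (x, x ** 2), range(4)))
    assert (_squares == _xs ** 2).all()  # two stacked numpy arrays, one per tuple slot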
def napper(cond, interval=0.1):
import time
start_time = time.time()
while not cond():
elt = round(time.time() - start_time, 3)
print("Zzzz... " + str(elt) + "s", end="\r", flush=True)
time.sleep(interval)
print("Zzzz... " + str(elt) + "s.")
def timeprint(s, round_to=5, full=False):
if s < 60:
if full:
return str(np.round(s, round_to)) + " seconds"
return str(np.round(s, round_to)) + "s"
m, s = divmod(s, 60)
if m < 60:
if full:
return "%s minutes, %s seconds" % (int(m), int(s))
return "%sm%ss" % (int(m), int(s))
h, m = divmod(m, 60)
if full:
return "%s hours, %s minutes, %s seconds" % (int(h), int(m), int(s))
return "%sh%sm%ss" % (int(h), int(m), int(s))
def shuffle(a, axis=-1):
"""Shuffle along single axis"""
shape = a.shape
res = a.reshape(-1, a.shape[axis])
np.random.shuffle(res)
return res.reshape(shape)
def print_dict(d):
for k in d.keys():
print(str(k) + ":", d[k])
return 0
def sabs(x, eps=1e-10):
"""absolute value but smooth around 0"""
return np.sqrt(x ** 2 + eps)
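# e.g. sabs(np.array([-3., 0.])) -> approx. [3., 1e-05]; unlike np.abs, the derivative is defined at 0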
# aliases
map2list = map2arr
indof = np.searchsorted
|
[
"numpy.abs",
"numpy.eye",
"numpy.linalg.solve",
"numpy.linalg.qr",
"numpy.sqrt",
"numpy.isclose",
"time.sleep",
"numpy.linalg.det",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.empty_like",
"scipy.linalg.ordqz",
"time.time",
"numpy.round",
"numpy.random.shuffle"
] |
[((200, 216), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (208, 216), True, 'import numpy as np\n'), ((354, 382), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (367, 382), True, 'import numpy as np\n'), ((709, 737), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (722, 737), True, 'import numpy as np\n'), ((999, 1010), 'time.time', 'time.time', ([], {}), '()\n', (1008, 1010), False, 'import time\n'), ((1090, 1116), 'scipy.linalg.ordqz', 'sl.ordqz', (['A', 'B'], {'sort': '"""ouc"""'}), "(A, B, sort='ouc')\n", (1098, 1116), True, 'import scipy.linalg as sl\n'), ((4041, 4060), 'numpy.eye', 'np.eye', (['(dimq + dimp)'], {}), '(dimq + dimp)\n', (4047, 4060), True, 'import numpy as np\n'), ((4481, 4489), 'numpy.linalg.qr', 'nl.qr', (['A'], {}), '(A)\n', (4486, 4489), True, 'import numpy.linalg as nl\n'), ((4518, 4541), 'numpy.linalg.inv', 'nl.inv', (['B[dimq:, dimq:]'], {}), '(B[dimq:, dimq:])\n', (4524, 4541), True, 'import numpy.linalg as nl\n'), ((4753, 4765), 'numpy.eye', 'np.eye', (['dimp'], {}), '(dimp)\n', (4759, 4765), True, 'import numpy as np\n'), ((6922, 6933), 'time.time', 'time.time', ([], {}), '()\n', (6931, 6933), False, 'import time\n'), ((7785, 7807), 'numpy.random.shuffle', 'np.random.shuffle', (['res'], {}), '(res)\n', (7802, 7807), True, 'import numpy as np\n'), ((8014, 8035), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + eps)'], {}), '(x ** 2 + eps)\n', (8021, 8035), True, 'import numpy as np\n'), ((1041, 1059), 'numpy.eye', 'np.eye', (['A.shape[0]'], {}), '(A.shape[0])\n', (1047, 1059), True, 'import numpy as np\n'), ((1132, 1152), 'numpy.isclose', 'np.isclose', (['alp', 'bet'], {}), '(alp, bet)\n', (1142, 1152), True, 'import numpy as np\n'), ((1382, 1400), 'numpy.empty_like', 'np.empty_like', (['alp'], {}), '(alp)\n', (1395, 1400), True, 'import numpy as np\n'), ((2305, 2316), 'numpy.linalg.inv', 'nl.inv', (['Z11'], {}), '(Z11)\n', (2311, 2316), True, 'import numpy.linalg as nl\n'), ((2353, 2364), 'numpy.linalg.inv', 'nl.inv', (['Z11'], {}), '(Z11)\n', (2359, 2364), True, 'import numpy.linalg as nl\n'), ((7080, 7100), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (7090, 7100), False, 'import time\n'), ((1461, 1482), 'numpy.abs', 'np.abs', (['alp[~nonzero]'], {}), '(alp[~nonzero])\n', (1467, 1482), True, 'import numpy as np\n'), ((4145, 4170), 'numpy.linalg.solve', 'nl.solve', (['(BB + AA @ g)', 'CC'], {}), '(BB + AA @ g, CC)\n', (4153, 4170), True, 'import numpy.linalg as nl\n'), ((4193, 4207), 'numpy.abs', 'np.abs', (['(gn - g)'], {}), '(gn - g)\n', (4199, 4207), True, 'import numpy as np\n'), ((6823, 6836), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6831, 6836), True, 'import numpy as np\n'), ((1625, 1636), 'numpy.abs', 'np.abs', (['out'], {}), '(out)\n', (1631, 1636), True, 'import numpy as np\n'), ((2333, 2344), 'numpy.linalg.inv', 'nl.inv', (['S11'], {}), '(S11)\n', (2339, 2344), True, 'import numpy.linalg as nl\n'), ((5010, 5035), 'numpy.linalg.solve', 'nl.solve', (['(A1 + A2 @ g)', 'B1'], {}), '(A1 + A2 @ g, B1)\n', (5018, 5035), True, 'import numpy.linalg as nl\n'), ((5166, 5180), 'numpy.abs', 'np.abs', (['(gn - g)'], {}), '(gn - g)\n', (5172, 5180), True, 'import numpy as np\n'), ((5333, 5365), 'numpy.linalg.inv', 'nl.inv', (['(A[:dimq, :dimq] + A2 @ g)'], {}), '(A[:dimq, :dimq] + A2 @ g)\n', (5339, 5365), True, 'import numpy.linalg as nl\n'), ((6978, 6989), 'time.time', 'time.time', ([], {}), '()\n', (6987, 6989), False, 'import time\n'), ((7297, 
7318), 'numpy.round', 'np.round', (['s', 'round_to'], {}), '(s, round_to)\n', (7305, 7318), True, 'import numpy as np\n'), ((5100, 5114), 'numpy.abs', 'np.abs', (['(gn - g)'], {}), '(gn - g)\n', (5106, 5114), True, 'import numpy as np\n'), ((6759, 6773), 'numpy.array', 'np.array', (['tupo'], {}), '(tupo)\n', (6767, 6773), True, 'import numpy as np\n'), ((7242, 7263), 'numpy.round', 'np.round', (['s', 'round_to'], {}), '(s, round_to)\n', (7250, 7263), True, 'import numpy as np\n'), ((2580, 2591), 'numpy.linalg.det', 'nl.det', (['Z11'], {}), '(Z11)\n', (2586, 2591), True, 'import numpy.linalg as nl\n'), ((2557, 2568), 'time.time', 'time.time', ([], {}), '()\n', (2566, 2568), False, 'import time\n')]
|
import collections
import logging
from event_model import DocumentRouter, RunRouter
import numpy
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import matplotlib
from qtpy.QtWidgets import ( # noqa
QLabel,
QWidget,
QVBoxLayout,
)
from traitlets.traitlets import Bool, List, Set
from traitlets.config import Configurable
from .hints import hinted_fields, guess_dimensions # noqa
from .image import LatestFrameImageManager
from ..utils import load_config
matplotlib.use('Qt5Agg') # must set before importing matplotlib.pyplot
import matplotlib.pyplot as plt # noqa
log = logging.getLogger('bluesky_browser')
class LinePlotManager(Configurable):
"""
Manage the line plots for one FigureManager.
"""
omit_single_point_plot = Bool(True, config=True)
def __init__(self, fig_manager, dimensions):
self.update_config(load_config())
self.fig_manager = fig_manager
self.start_doc = None
self.dimensions = dimensions
self.dim_streams = set(stream for _, stream in self.dimensions)
if len(self.dim_streams) > 1:
raise NotImplementedError
def __call__(self, name, start_doc):
self.start_doc = start_doc
return [], [self.subfactory]
def subfactory(self, name, descriptor_doc):
if self.omit_single_point_plot and self.start_doc.get('num_points') == 1:
return []
if len(self.dimensions) > 1:
return [] # This is a job for Grid.
fields = set(hinted_fields(descriptor_doc))
# Filter out the fields with a data type or shape that we cannot
# represent in a line plot.
for field in list(fields):
dtype = descriptor_doc['data_keys'][field]['dtype']
if dtype not in ('number', 'integer'):
fields.discard(field)
ndim = len(descriptor_doc['data_keys'][field]['shape'] or [])
if ndim != 0:
fields.discard(field)
callbacks = []
dim_stream, = self.dim_streams # TODO Handle multiple dim_streams.
if descriptor_doc.get('name') == dim_stream:
dimension, = self.dimensions
x_keys, stream_name = dimension
fields -= set(x_keys)
assert stream_name == dim_stream # TODO Handle multiple dim_streams.
for x_key in x_keys:
figure_label = f'Scalars v {x_key}'
fig = self.fig_manager.get_figure(
('line', x_key, tuple(fields)), figure_label, len(fields), sharex=True)
for y_key, ax in zip(fields, fig.axes):
log.debug('plot %s against %s', y_key, x_key)
ylabel = y_key
y_units = descriptor_doc['data_keys'][y_key].get('units')
                    if y_units:
                        ylabel += f' [{y_units}]'
                    ax.set_ylabel(ylabel)
# Set xlabel only on lowest axes, outside for loop below.
def func(event_page, y_key=y_key):
"""
Extract x points and y points to plot out of an EventPage.
This will be passed to LineWithPeaks.
"""
y_data = event_page['data'][y_key]
if x_key == 'time':
t0 = self.start_doc['time']
x_data = numpy.asarray(event_page['time']) - t0
elif x_key == 'seq_num':
x_data = event_page['seq_num']
else:
x_data = event_page['data'][x_key]
return x_data, y_data
line = Line(func, ax=ax)
callbacks.append(line)
if fields:
# Set the xlabel on the bottom-most axis.
if x_key == 'time':
xlabel = x_key
x_units = 's'
elif x_key == 'seq_num':
xlabel = 'sequence number'
x_units = None
else:
xlabel = x_key
x_units = descriptor_doc['data_keys'][x_key].get('units')
if x_units:
xlabel += f' [{x_units}]'
                    ax.set_xlabel(xlabel)
fig.tight_layout()
# TODO Plot other streams against time.
for callback in callbacks:
callback('start', self.start_doc)
callback('descriptor', descriptor_doc)
return callbacks
class Line(DocumentRouter):
"""
    Draw a matplotlib Line Artist and update it for each Event.
Parameters
----------
func : callable
This must accept an EventPage and return two lists of floats
(x points and y points). The two lists must contain an equal number of
items, but that number is arbitrary. That is, a given document may add
one new point to the plot, no new points, or multiple new points.
label_template : string
This string will be formatted with the RunStart document. Any missing
values will be filled with '?'. If the keyword argument 'label' is
given, this argument will be ignored.
ax : matplotlib Axes, optional
If None, a new Figure and Axes are created.
**kwargs
Passed through to :meth:`Axes.plot` to style Line object.
"""
def __init__(self, func, *, label_template='{scan_id} [{uid:.8}]', ax=None, **kwargs):
self.func = func
if ax is None:
import matplotlib.pyplot as plt
_, ax = plt.subplots()
self.ax = ax
self.line, = ax.plot([], [], **kwargs)
self.x_data = []
self.y_data = []
self.label_template = label_template
self.label = kwargs.get('label')
def start(self, doc):
if self.label is None:
d = collections.defaultdict(lambda: '?')
d.update(**doc)
label = self.label_template.format_map(d)
else:
label = self.label
if label:
self.line.set_label(label)
self.ax.legend(loc='best')
def event_page(self, doc):
x, y = self.func(doc)
self._update(x, y)
def _update(self, x, y):
"""
Takes in new x and y points and redraws plot if they are not empty.
"""
if not len(x) == len(y):
raise ValueError("User function is expected to provide the same "
"number of x and y points. Got {len(x)} x points "
"and {len(y)} y points.")
if not x:
# No new data. Short-circuit.
return
self.x_data.extend(x)
self.y_data.extend(y)
self.line.set_data(self.x_data, self.y_data)
self.ax.relim(visible_only=True)
self.ax.autoscale_view(tight=True)
self.ax.figure.canvas.draw_idle()
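# A minimal sketch of a user-supplied `func` (hypothetical field name): extract the x and y
# points from one EventPage, here plotting a detector reading against time.
#   def xy_from_event_page(event_page):
#       return event_page['time'], event_page['data']['det_reading']
#   line = Line(xy_from_event_page)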
class Grid(DocumentRouter):
"""
    Draw a matplotlib AxesImage Artist and update it for each Event.
    The purpose of this callback is to create (on initialization) a
    matplotlib grid image and then update it with new data for every `event`.
NOTE: Some important parameters are fed in through **kwargs like `extent`
which defines the axes min and max and `origin` which defines if the grid
co-ordinates start in the bottom left or top left of the plot. For more
info see https://matplotlib.org/tutorials/intermediate/imshow_extent.html
or https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.imshow.html#matplotlib.axes.Axes.imshow
Parameters
----------
func : callable
This must accept a BulkEvent and return three lists of floats (x
grid co-ordinates, y grid co-ordinates and grid position intensity
values). The three lists must contain an equal number of items, but
that number is arbitrary. That is, a given document may add one new
point, no new points or multiple new points to the plot.
shape : tuple
The (row, col) shape of the grid.
ax : matplotlib Axes, optional.
if ``None``, a new Figure and Axes are created.
**kwargs
Passed through to :meth:`Axes.imshow` to style the AxesImage object.
"""
def __init__(self, func, shape, *, ax=None, **kwargs):
self.func = func
self.shape = shape
if ax is None:
_, ax = plt.subplots()
self.ax = ax
self.grid_data = numpy.full(self.shape, numpy.nan)
        self.image = ax.imshow(self.grid_data, **kwargs)
def event_page(self, doc):
'''
Takes in a bulk_events document and updates grid_data with the values
returned from self.func(doc)
Parameters
----------
doc : dict
The bulk event dictionary that contains the 'data' and 'timestamps'
associated with the bulk event.
Returns
-------
x_coords, y_coords, I_vals : Lists
These are lists of x co-ordinate, y co-ordinate and intensity
values arising from the bulk event.
'''
x_coords, y_coords, I_vals = self.func(doc)
self._update(x_coords, y_coords, I_vals)
def _update(self, x_coords, y_coords, I_vals):
'''
Updates self.grid_data with the values from the lists x_coords,
y_coords, I_vals.
Parameters
----------
x_coords, y_coords, I_vals : Lists
These are lists of x co-ordinate, y co-ordinate and intensity
values arising from the event. The length of all three lists must
be the same.
'''
if not len(x_coords) == len(y_coords) == len(I_vals):
raise ValueError("User function is expected to provide the same "
"number of x, y and I points. Got {0} x points, "
"{1} y points and {2} I values."
"".format(len(x_coords), len(y_coords),
len(I_vals)))
if not x_coords:
# No new data, Short-circuit.
return
# Update grid_data and the plot.
self.grid_data[x_coords, y_coords] = I_vals
self.image.set_array(self.grid_data)
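# A minimal sketch of a user-supplied `func` (hypothetical field names): map one EventPage
# onto grid rows, columns and intensity values for a 10x10 raster.
#   def rci_from_event_page(event_page):
#       data = event_page['data']
#       return data['row'], data['col'], data['intensity']
#   grid = Grid(rci_from_event_page, shape=(10, 10), extent=(0, 9, 0, 9), origin='lower')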
class FigureManager(Configurable):
"""
    For a given Viewer, encapsulate the matplotlib Figures and associated tabs.
"""
factories = List([
LinePlotManager,
LatestFrameImageManager],
config=True)
enabled = Bool(True, config=True)
exclude_streams = Set([], config=True)
def __init__(self, add_tab):
self.update_config(load_config())
self.add_tab = add_tab
self._figures = {}
def get_figure(self, key, label, *args, **kwargs):
try:
return self._figures[key]
except KeyError:
return self._add_figure(key, label, *args, **kwargs)
def _add_figure(self, key, label, *args, **kwargs):
tab = QWidget()
fig, _ = plt.subplots(*args, **kwargs)
canvas = FigureCanvas(fig)
canvas.setMinimumWidth(640)
canvas.setParent(tab)
toolbar = NavigationToolbar(canvas, tab)
tab_label = QLabel(label)
tab_label.setMaximumHeight(20)
layout = QVBoxLayout()
layout.addWidget(tab_label)
layout.addWidget(canvas)
layout.addWidget(toolbar)
tab.setLayout(layout)
self.add_tab(tab, label)
self._figures[key] = fig
return fig
def __call__(self, name, start_doc):
if not self.enabled:
return [], []
dimensions = start_doc.get('hints', {}).get('dimensions', guess_dimensions(start_doc))
rr = RunRouter(
[factory(self, dimensions) for factory in self.factories])
rr('start', start_doc)
return [rr], []
|
[
"logging.getLogger",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"traitlets.traitlets.Set",
"qtpy.QtWidgets.QVBoxLayout",
"matplotlib.use",
"qtpy.QtWidgets.QLabel",
"qtpy.QtWidgets.QWidget",
"numpy.asarray",
"traitlets.traitlets.Bool",
"traitlets.traitlets.List",
"collections.defaultdict",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"numpy.full",
"matplotlib.pyplot.subplots"
] |
[((562, 586), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (576, 586), False, 'import matplotlib\n'), ((682, 718), 'logging.getLogger', 'logging.getLogger', (['"""bluesky_browser"""'], {}), "('bluesky_browser')\n", (699, 718), False, 'import logging\n'), ((852, 875), 'traitlets.traitlets.Bool', 'Bool', (['(True)'], {'config': '(True)'}), '(True, config=True)\n', (856, 875), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((10662, 10723), 'traitlets.traitlets.List', 'List', (['[LinePlotManager, LatestFrameImageManager]'], {'config': '(True)'}), '([LinePlotManager, LatestFrameImageManager], config=True)\n', (10666, 10723), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((10763, 10786), 'traitlets.traitlets.Bool', 'Bool', (['(True)'], {'config': '(True)'}), '(True, config=True)\n', (10767, 10786), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((10809, 10829), 'traitlets.traitlets.Set', 'Set', (['[]'], {'config': '(True)'}), '([], config=True)\n', (10812, 10829), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((8707, 8740), 'numpy.full', 'numpy.full', (['self.shape', 'numpy.nan'], {}), '(self.shape, numpy.nan)\n', (8717, 8740), False, 'import numpy\n'), ((11232, 11241), 'qtpy.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (11239, 11241), False, 'from qtpy.QtWidgets import QLabel, QWidget, QVBoxLayout\n'), ((11259, 11288), 'matplotlib.pyplot.subplots', 'plt.subplots', (['*args'], {}), '(*args, **kwargs)\n', (11271, 11288), True, 'import matplotlib.pyplot as plt\n'), ((11306, 11323), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (11318, 11323), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((11408, 11438), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['canvas', 'tab'], {}), '(canvas, tab)\n', (11425, 11438), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((11459, 11472), 'qtpy.QtWidgets.QLabel', 'QLabel', (['label'], {}), '(label)\n', (11465, 11472), False, 'from qtpy.QtWidgets import QLabel, QWidget, QVBoxLayout\n'), ((11530, 11543), 'qtpy.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (11541, 11543), False, 'from qtpy.QtWidgets import QLabel, QWidget, QVBoxLayout\n'), ((5824, 5838), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5836, 5838), True, 'import matplotlib.pyplot as plt\n'), ((6117, 6154), 'collections.defaultdict', 'collections.defaultdict', (["(lambda : '?')"], {}), "(lambda : '?')\n", (6140, 6154), False, 'import collections\n'), ((8646, 8660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8658, 8660), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3569), 'numpy.asarray', 'numpy.asarray', (["event_page['time']"], {}), "(event_page['time'])\n", (3549, 3569), False, 'import numpy\n')]
|
#!/usr/bin/env python
# Break up idstr file into separate measid/objectid lists per exposure on /data0
import os
import sys
import numpy as np
import time
from dlnpyutils import utils as dln, db
from astropy.io import fits
import sqlite3
import socket
from argparse import ArgumentParser
def breakup_idstr(dbfile):
""" Break-up idstr file into separate measid/objectid lists per exposure on /data0."""
t00 = time.time()
outdir = '/data0/dnidever/nsc/instcal/v3/idstr/'
# Load the exposures table
expcat = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz',1)
# Make sure it's a list
if type(dbfile) is str: dbfile=[dbfile]
print('Breaking up '+str(len(dbfile))+' database files')
# Loop over files
for i,dbfile1 in enumerate(dbfile):
print(str(i+1)+' '+dbfile1)
if os.path.exists(dbfile1):
t0 = time.time()
dbbase1 = os.path.basename(dbfile1)[0:-9] # remove _idstr.db ending
# Get existing index names for this database
d = sqlite3.connect(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = d.cursor()
cmd = 'select measid,exposure,objectid from idstr'
t1 = time.time()
data = cur.execute(cmd).fetchall()
print(' '+str(len(data))+' rows read in %5.1f sec. ' % (time.time()-t1))
# Break up data into lists
measid,exposure,objectid = list(zip(*data))
measid = np.array(measid)
objectid = np.array(objectid)
exposure = np.array(exposure)
eindex = dln.create_index(exposure)
# Match exposures to exposure catalog
ind1,ind2 = dln.match(expcat['EXPOSURE'],eindex['value'])
# Loop over exposures and write output files
nexp = len(eindex['value'])
print(' '+str(nexp)+' exposures')
measid_maxlen = np.max(dln.strlen(measid))
objectid_maxlen = np.max(dln.strlen(objectid))
df = np.dtype([('measid',np.str,measid_maxlen+1),('objectid',np.str,objectid_maxlen+1)])
# Loop over the exposures and write out the files
for k in range(nexp):
if nexp>100:
if k % 100 == 0: print(' '+str(k+1))
ind = eindex['index'][eindex['lo'][k]:eindex['hi'][k]+1]
cat = np.zeros(len(ind),dtype=df)
cat['measid'] = measid[ind]
cat['objectid'] = objectid[ind]
instcode = expcat['INSTRUMENT'][ind1[k]]
dateobs = expcat['DATEOBS'][ind1[k]]
night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
if os.path.exists(outdir+instcode+'/'+night+'/'+eindex['value'][k]) is False:
# Sometimes this crashes because another process is making the directory at the same time
try:
os.makedirs(outdir+instcode+'/'+night+'/'+eindex['value'][k])
except:
pass
outfile = outdir+instcode+'/'+night+'/'+eindex['value'][k]+'/'+eindex['value'][k]+'__'+dbbase1+'.npy'
np.save(outfile,cat)
print(' dt = %6.1f sec. ' % (time.time()-t0))
else:
print(' '+dbfile1+' NOT FOUND')
print('dt = %6.1f sec.' % (time.time()-t00))
if __name__ == "__main__":
parser = ArgumentParser(description='Break up idstr into separate lists per exposure.')
parser.add_argument('dbfile', type=str, nargs=1, help='Database filename')
args = parser.parse_args()
hostname = socket.gethostname()
host = hostname.split('.')[0]
dbfile = args.dbfile[0]
# Input is a list
if dbfile[0]=='@':
listfile = dbfile[1:]
if os.path.exists(listfile):
dbfile = dln.readlines(listfile)
else:
print(listfile+' NOT FOUND')
sys.exit()
breakup_idstr(dbfile)
|
[
"os.path.exists",
"dlnpyutils.utils.match",
"sys.exit",
"sqlite3.connect",
"argparse.ArgumentParser",
"os.makedirs",
"dlnpyutils.utils.strlen",
"dlnpyutils.utils.create_index",
"numpy.array",
"astropy.io.fits.getdata",
"dlnpyutils.utils.readlines",
"os.path.basename",
"time.time",
"numpy.dtype",
"socket.gethostname",
"numpy.save"
] |
[((421, 432), 'time.time', 'time.time', ([], {}), '()\n', (430, 432), False, 'import time\n'), ((532, 624), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz"""', '(1)'], {}), "(\n '/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz', 1)\n", (544, 624), False, 'from astropy.io import fits\n'), ((3464, 3542), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Break up idstr into separate lists per exposure."""'}), "(description='Break up idstr into separate lists per exposure.')\n", (3478, 3542), False, 'from argparse import ArgumentParser\n'), ((3669, 3689), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3687, 3689), False, 'import socket\n'), ((864, 887), 'os.path.exists', 'os.path.exists', (['dbfile1'], {}), '(dbfile1)\n', (878, 887), False, 'import os\n'), ((3839, 3863), 'os.path.exists', 'os.path.exists', (['listfile'], {}), '(listfile)\n', (3853, 3863), False, 'import os\n'), ((906, 917), 'time.time', 'time.time', ([], {}), '()\n', (915, 917), False, 'import time\n'), ((1072, 1164), 'sqlite3.connect', 'sqlite3.connect', (['dbfile1'], {'detect_types': '(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)'}), '(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.\n PARSE_COLNAMES)\n', (1087, 1164), False, 'import sqlite3\n'), ((1267, 1278), 'time.time', 'time.time', ([], {}), '()\n', (1276, 1278), False, 'import time\n'), ((1528, 1544), 'numpy.array', 'np.array', (['measid'], {}), '(measid)\n', (1536, 1544), True, 'import numpy as np\n'), ((1568, 1586), 'numpy.array', 'np.array', (['objectid'], {}), '(objectid)\n', (1576, 1586), True, 'import numpy as np\n'), ((1610, 1628), 'numpy.array', 'np.array', (['exposure'], {}), '(exposure)\n', (1618, 1628), True, 'import numpy as np\n'), ((1650, 1676), 'dlnpyutils.utils.create_index', 'dln.create_index', (['exposure'], {}), '(exposure)\n', (1666, 1676), True, 'from dlnpyutils import utils as dln, db\n'), ((1751, 1797), 'dlnpyutils.utils.match', 'dln.match', (["expcat['EXPOSURE']", "eindex['value']"], {}), "(expcat['EXPOSURE'], eindex['value'])\n", (1760, 1797), True, 'from dlnpyutils import utils as dln, db\n'), ((2072, 2169), 'numpy.dtype', 'np.dtype', (["[('measid', np.str, measid_maxlen + 1), ('objectid', np.str, \n objectid_maxlen + 1)]"], {}), "([('measid', np.str, measid_maxlen + 1), ('objectid', np.str, \n objectid_maxlen + 1)])\n", (2080, 2169), True, 'import numpy as np\n'), ((3887, 3910), 'dlnpyutils.utils.readlines', 'dln.readlines', (['listfile'], {}), '(listfile)\n', (3900, 3910), True, 'from dlnpyutils import utils as dln, db\n'), ((3978, 3988), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3986, 3988), False, 'import sys\n'), ((940, 965), 'os.path.basename', 'os.path.basename', (['dbfile1'], {}), '(dbfile1)\n', (956, 965), False, 'import os\n'), ((1976, 1994), 'dlnpyutils.utils.strlen', 'dln.strlen', (['measid'], {}), '(measid)\n', (1986, 1994), True, 'from dlnpyutils import utils as dln, db\n'), ((2033, 2053), 'dlnpyutils.utils.strlen', 'dln.strlen', (['objectid'], {}), '(objectid)\n', (2043, 2053), True, 'from dlnpyutils import utils as dln, db\n'), ((3234, 3255), 'numpy.save', 'np.save', (['outfile', 'cat'], {}), '(outfile, cat)\n', (3241, 3255), True, 'import numpy as np\n'), ((3405, 3416), 'time.time', 'time.time', ([], {}), '()\n', (3414, 3416), False, 'import time\n'), ((2747, 2821), 'os.path.exists', 'os.path.exists', (["(outdir + instcode + '/' + night + '/' + eindex['value'][k])"], {}), "(outdir + 
instcode + '/' + night + '/' + eindex['value'][k])\n", (2761, 2821), False, 'import os\n'), ((2981, 3052), 'os.makedirs', 'os.makedirs', (["(outdir + instcode + '/' + night + '/' + eindex['value'][k])"], {}), "(outdir + instcode + '/' + night + '/' + eindex['value'][k])\n", (2992, 3052), False, 'import os\n'), ((3297, 3308), 'time.time', 'time.time', ([], {}), '()\n', (3306, 3308), False, 'import time\n'), ((1395, 1406), 'time.time', 'time.time', ([], {}), '()\n', (1404, 1406), False, 'import time\n')]
|
import autograd.numpy as anp
import numpy as np
from autograd import value_and_grad
from pymoo.factory import normalize
from pymoo.util.ref_dirs.energy import squared_dist
from pymoo.util.ref_dirs.optimizer import Adam
from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions
class LayerwiseRieszEnergyReferenceDirectionFactory(ReferenceDirectionFactory):
def __init__(self,
n_dim,
partitions,
return_as_tuple=False,
n_max_iter=1000,
verbose=False,
X=None,
**kwargs):
super().__init__(n_dim, **kwargs)
self.scalings = None
self.n_max_iter = n_max_iter
self.verbose = verbose
self.return_as_tuple = return_as_tuple
self.X = X
self.partitions = partitions
def _step(self, optimizer, X, scalings):
obj, grad = value_and_grad(calc_potential_energy)(scalings, X)
scalings = optimizer.next(scalings, np.array(grad))
scalings = normalize(scalings, xl=0, xu=scalings.max())
return scalings, obj
def _solve(self, X, scalings):
# initialize the optimizer for the run
optimizer = Adam()
# for each iteration of gradient descent
for i in range(self.n_max_iter):
# execute one optimization step
_scalings, _obj = self._step(optimizer, X, scalings)
# evaluate how much the points have moved
delta = np.abs(_scalings - scalings).sum()
if self.verbose:
print(i, "objective", _obj, "delta", delta)
# if there was only a little delta during the last iteration -> terminate
if delta < 1e-5:
scalings = _scalings
break
# otherwise use the new points for the next iteration
scalings = _scalings
self.scalings = scalings
return get_points(X, scalings)
def do(self):
X = []
scalings = []
for k, p in enumerate(self.partitions):
if p > 1:
val = np.linspace(0, 1, p + 1)[1:-1]
_X = []
for i in range(self.n_dim):
for j in range(i + 1, self.n_dim):
x = np.zeros((len(val), self.n_dim))
x[:, i] = val
x[:, j] = 1 - val
_X.append(x)
X.append(np.row_stack(_X + [np.eye(self.n_dim)]))
elif p == 1:
X.append(np.eye(self.n_dim))
else:
X.append(np.full(self.n_dim, 1 / self.n_dim)[None, :])
scalings.append(1 - k / len(self.partitions))
scalings = np.array(scalings)
X = self._solve(X, scalings)
return X
# ---------------------------------------------------------------------------------------------------------
# Energy Functions
# ---------------------------------------------------------------------------------------------------------
def get_points(X, scalings):
vals = []
for i in range(len(X)):
vals.append(scale_reference_directions(X[i], scalings[i]))
X = anp.row_stack(vals)
return X
def calc_potential_energy(scalings, X):
X = get_points(X, scalings)
i, j = anp.triu_indices(len(X), 1)
D = squared_dist(X, X)[i, j]
if np.any(D < 1e-12):
return np.nan, np.nan
return (1 / D).mean()
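# Usage sketch (hypothetical layer setup): reference directions for a 3-dimensional
# objective space built from three layers, refined by the Riesz-energy criterion.
#   ref_dirs = LayerwiseRieszEnergyReferenceDirectionFactory(3, partitions=[9, 5, 1]).do()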
|
[
"numpy.abs",
"numpy.eye",
"autograd.numpy.row_stack",
"pymoo.util.ref_dirs.optimizer.Adam",
"numpy.any",
"numpy.array",
"pymoo.util.reference_direction.scale_reference_directions",
"numpy.linspace",
"pymoo.util.ref_dirs.energy.squared_dist",
"autograd.value_and_grad",
"numpy.full"
] |
[((3256, 3275), 'autograd.numpy.row_stack', 'anp.row_stack', (['vals'], {}), '(vals)\n', (3269, 3275), True, 'import autograd.numpy as anp\n'), ((3444, 3461), 'numpy.any', 'np.any', (['(D < 1e-12)'], {}), '(D < 1e-12)\n', (3450, 3461), True, 'import numpy as np\n'), ((1252, 1258), 'pymoo.util.ref_dirs.optimizer.Adam', 'Adam', ([], {}), '()\n', (1256, 1258), False, 'from pymoo.util.ref_dirs.optimizer import Adam\n'), ((2797, 2815), 'numpy.array', 'np.array', (['scalings'], {}), '(scalings)\n', (2805, 2815), True, 'import numpy as np\n'), ((3411, 3429), 'pymoo.util.ref_dirs.energy.squared_dist', 'squared_dist', (['X', 'X'], {}), '(X, X)\n', (3423, 3429), False, 'from pymoo.util.ref_dirs.energy import squared_dist\n'), ((944, 981), 'autograd.value_and_grad', 'value_and_grad', (['calc_potential_energy'], {}), '(calc_potential_energy)\n', (958, 981), False, 'from autograd import value_and_grad\n'), ((1039, 1053), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (1047, 1053), True, 'import numpy as np\n'), ((3201, 3246), 'pymoo.util.reference_direction.scale_reference_directions', 'scale_reference_directions', (['X[i]', 'scalings[i]'], {}), '(X[i], scalings[i])\n', (3227, 3246), False, 'from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions\n'), ((1535, 1563), 'numpy.abs', 'np.abs', (['(_scalings - scalings)'], {}), '(_scalings - scalings)\n', (1541, 1563), True, 'import numpy as np\n'), ((2159, 2183), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(p + 1)'], {}), '(0, 1, p + 1)\n', (2170, 2183), True, 'import numpy as np\n'), ((2609, 2627), 'numpy.eye', 'np.eye', (['self.n_dim'], {}), '(self.n_dim)\n', (2615, 2627), True, 'import numpy as np\n'), ((2672, 2707), 'numpy.full', 'np.full', (['self.n_dim', '(1 / self.n_dim)'], {}), '(self.n_dim, 1 / self.n_dim)\n', (2679, 2707), True, 'import numpy as np\n'), ((2536, 2554), 'numpy.eye', 'np.eye', (['self.n_dim'], {}), '(self.n_dim)\n', (2542, 2554), True, 'import numpy as np\n')]
|
import numpy as np
import math
import fatpack
import matplotlib.pyplot as plt
import pandas as pd
# Create a function that returns the Goodman correction:
def Goodman_method_correction(M_a,M_m,M_max):
M_u = 1.5*M_max
M_ar = M_a/(1-M_m/M_u)
return M_ar
def Equivalent_bending_moment(M_ar,Neq,m):
P = M_ar.shape
M_sum = 0
j = P[0]
for i in range(j):
M_sum = math.pow(M_ar[i],m) + M_sum
M_eq = math.pow((M_sum/Neq),(1/m))
return M_eq
def get_DEL(y,Neq,m):
S, Sm = fatpack.find_rainflow_ranges(y.flatten(), return_means=True, k=256)
data_arr = np.array([Sm , S ]).T
M_ar = Goodman_method_correction(data_arr[:,1],data_arr[:,0],np.max(S))
print(sum(M_ar.shape))
M_eq = Equivalent_bending_moment(M_ar,Neq,m)
return M_eq
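# Usage sketch (hypothetical load signal): damage-equivalent moment for Neq = 1e7 cycles
# and a Woehler exponent m = 10, based on the rainflow count of the time series.
#   t = np.linspace(0, 600, 60001)
#   M = 1e3 * np.sin(2 * np.pi * 0.5 * t) + 50.0 * np.random.randn(t.size)
#   M_eq = get_DEL(M, Neq=1e7, m=10)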
|
[
"math.pow",
"numpy.array",
"numpy.max"
] |
[((433, 461), 'math.pow', 'math.pow', (['(M_sum / Neq)', '(1 / m)'], {}), '(M_sum / Neq, 1 / m)\n', (441, 461), False, 'import math\n'), ((596, 613), 'numpy.array', 'np.array', (['[Sm, S]'], {}), '([Sm, S])\n', (604, 613), True, 'import numpy as np\n'), ((683, 692), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (689, 692), True, 'import numpy as np\n'), ((394, 414), 'math.pow', 'math.pow', (['M_ar[i]', 'm'], {}), '(M_ar[i], m)\n', (402, 414), False, 'import math\n')]
|
# %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
dataset = pd.read_csv(r"C:\Users\yxie367\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
#dataset = pd.read_csv(r"C:\Users\xieya\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
# %% [markdown]
# ### View Data and Informations
# %%
dataset.head()
# %%
dataset.info()
# %%
edible, poisonous = dataset['class'].value_counts()
# print("Edible:\t ", edible,"\nPoisonous:", poisonous)
# %%
# Categorical to numerical
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
#print("0 - Edible: ", edible,"\n1 - Poisonous:", poisonous)
# %% [markdown]
# # NN1 Stalk Root - Rooted (r)
# %% [markdown]
# ### Split Dataset
# %% [markdown]
# #### Get the Labels
# %%
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
#print("X:",X.shape,"\ny:",y.shape)
# %% [markdown]
# #### Train Set and Test Set
total_error_1 = 0
total_error_2 = 0
total_error_comb = 0
randnum = np.arange(2,44,4)
num_trials = len(randnum)
record = ""
wrong_record = ""
run = 1
# %% Data cleaning
from sklearn.model_selection import train_test_split
X_white = pd.DataFrame()
X_not_white = pd.DataFrame()
y_white = pd.Series(dtype='float64')
y_not_white = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"stalk-root"] == "r":
X_white = X_white.append(X.iloc[i,:])
y_white = y_white.append(pd.Series(y.iloc[i]))
else:
X_not_white = X_not_white.append(X.iloc[i,:])
y_not_white = y_not_white.append(pd.Series(y.iloc[i]))
# %% Data cleaning pt2
X_green = pd.DataFrame()
X_not_green = pd.DataFrame()
y_green = pd.Series(dtype='float64')
y_not_green = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"odor"] == "a":
X_green = X_green.append(X.iloc[i,:])
y_green = y_green.append(pd.Series(y.iloc[i]))
else:
X_not_green = X_not_green.append(X.iloc[i,:])
y_not_green = y_not_green.append(pd.Series(y.iloc[i]))
# %%
for j in randnum:
X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=j)
X_train_not_green, X_test_not_green, y_train_not_green, y_test_not_green = train_test_split(X_not_green, y_not_green, test_size=1-(6905/(8124-len(X_green))), random_state=j)
X_train_green = (X_train_not_green)
y_train_green = (y_train_not_green)
X_train_white = (X_train_not_white)
y_train_white = (y_train_not_white)
# %%
from sklearn.utils import shuffle
X_train_full1 = shuffle(X_train_white, random_state=j)
X_test = shuffle(X, random_state=j).iloc[4000:8000]
y_train_full1 = shuffle(y_train_white, random_state=j)
y_test = shuffle(y, random_state=j).iloc[4000:8000]
# %% [markdown]
# #### Validation Set
# %%
X_valid1, X_train1 = X_train_full1[:500], X_train_full1[500:]
y_valid1, y_train1 = y_train_full1[:500], y_train_full1[500:]
# print("X_train:", X_train1.shape[0], "y_train", y_train1.shape[0])
# print("X_valid: ", X_valid1.shape[0], "y_valid ", y_valid1.shape[0])
# print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
# %% [markdown]
# ### Prepare the Data
# %% [markdown]
# #### Data Transformation
# %%
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train1 = pipeline.fit_transform(X_train1)
X_valid1 = pipeline.fit_transform(X_valid1)
X_test1 = pipeline.fit_transform(X_test)
# %% [markdown]
# ### Neural Network
# %% [markdown]
# #### Model
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
# %%
# tf.random.set_seed(j)
tf.random.set_random_seed(j)
# %%
model1 = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
# %%
#model1.summary()
# %% [markdown]
# #### Compile the Model
# %%
model1.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# %% [markdown]
# #### Prepare Callbacks
# %%
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
# %% [markdown]
# ### Training
# %%
train_model1 = model1.fit(X_train1, y_train1,
epochs=100,
validation_data=(X_valid1, y_valid1),
callbacks=[checkpoint_cb, early_stopping_cb])
# %% [markdown]
# ### Evaluate the Best Model on Test Set
# %%
results1 = model1.evaluate(X_test1, y_test)
# print("test loss, test acc:", results1)
# %% [markdown]
# ### Make Some Predictions
# %%
X_new1 = X_test1[:5]
y_prob1 = model1.predict(X_new1)
# print(y_prob.round(3))
# %%
y_pred1 = (model1.predict(X_new1) > 0.5).astype("int32")
# print(y_pred)
y_test_pred = (model1.predict(X_test1) > 0.5).astype("int32")
# %% [markdown]
# ## KL Divergence
# %%
# X_new = X_test[:5]
X_df1 = pd.DataFrame(model1.predict(X_test1))
y_test_pred1 = pd.DataFrame(y_test_pred).reset_index(drop=True)
X_df1 = pd.concat([X_df1, y_test_pred1], axis=1)
y_test1 = y_test.reset_index(drop=True)
X_df1 = pd.concat([X_df1, y_test1], axis=1)
X_df1.columns = ["X_pred","y_pred","y_actual"]
#print(X_df1)
# %%
import math
table1 = pd.DataFrame(columns=["KL_div","abs_distance","correctness"])
for i in range(0,len(X_df1)):
# KL divergence
p = X_df1.loc[i,"X_pred"]
try:
kl = -(p*math.log(p) + (1-p)*math.log(1-p))
except:
kl = 0
table1.loc[i,"KL_div"] = kl
# absolute distance
abs_dist = 2*abs(0.5-p)
table1.loc[i,"abs_distance"] = abs_dist
# correctness
y_pred1 = X_df1.loc[i,"y_pred"]
y_act1 = X_df1.loc[i,"y_actual"]
if y_pred1 == y_act1:
table1.loc[i,"correctness"] = 1 # correct prediction
else:
table1.loc[i,"correctness"] = 0 # wrong prediction
table1.loc[i,"y_pred"] = y_pred1
#print(table1)
# %%
table1["count"] = 1
correctness1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
correctness1["percent"] = 100*(correctness1["correctness"]/correctness1["count"])
#print(correctness1)
# %%
index = []
for i in (correctness1.index):
index.append(str(i))
plt.bar(index,correctness1["percent"], width=0.7)
for index,data in enumerate(correctness1["percent"]):
plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
plt.ylim(0,120)
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
# %% [markdown]
# ### Confidence
# %%
kl1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.80, 0.1))).apply(sum)
kl1["percent"] = (kl1["correctness"]/kl1["count"])
kl1.dropna(inplace=True)
plt.scatter(np.arange(0, 0.70, 0.1), kl1["percent"])
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
# %%
# Linear Regression
from sklearn.linear_model import LinearRegression
x_reg1 = np.arange(0, 0.70, 0.1).reshape((-1, 1))
y_reg1 = kl1["percent"]
reg_model1 = LinearRegression().fit(x_reg1,y_reg1)
# %%
# print('intercept(alpha):', reg_model1.intercept_)
# print('slope(theta):', reg_model1.coef_)
# %% [markdown]
# # NN2 Odor - Almond (a)
# %% [markdown]
# #### Train Set and Test Set
# %%
from sklearn.utils import shuffle
X_train_full2 = shuffle(X_train_green, random_state=j)
# X_test2 = shuffle(X_test_green, random_state=j)
y_train_full2 = shuffle(y_train_green, random_state=j)
# y_test2 = shuffle(y_test_green, random_state=j)
# %% [markdown]
# #### Validation Set
# %%
X_valid2, X_train2 = X_train_full2[:500], X_train_full2[500:]
y_valid2, y_train2 = y_train_full2[:500], y_train_full2[500:]
# print("X_train:", X_train2.shape[0], "y_train", y_train2.shape[0])
# print("X_valid: ", X_valid2.shape[0], "y_valid ", y_valid2.shape[0])
# print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
# %% [markdown]
# ### Prepare the Data
# %% [markdown]
# #### Data Transformation
# %%
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train2 = pipeline.fit_transform(X_train2)
X_valid2 = pipeline.fit_transform(X_valid2)
X_test2 = pipeline.fit_transform(X_test)
y_test2 = y_test
# %% [markdown]
# ### Neural Network
# %% [markdown]
# #### Model
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
tf.random.set_random_seed(j)
# %%
model2 = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
# %%
#model2.summary()
# %% [markdown]
# #### Compile the Model
# %%
model2.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# %% [markdown]
# #### Prepare Callbacks
# %%
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
# %% [markdown]
# ### Training
# %%
train_model2 = model2.fit(X_train2, y_train2,
epochs=100,
validation_data=(X_valid2, y_valid2),
callbacks=[checkpoint_cb, early_stopping_cb])
# %% [markdown]
# ### Evaluate the Best Model on Test Set
# %%
results2 = model2.evaluate(X_test2, y_test2)
# print("test loss, test acc:", results2)
# %% [markdown]
# ### Make Some Predictions
# %%
# y_pred2 = (model2.predict(X_new2) > 0.5).astype("int32")
# print(y_pred2)
y_test_pred2 = (model2.predict(X_test2) > 0.5).astype("int32")
# %% [markdown]
# ## KL Divergence
# %%
# X_new = X_test[:5]
X_df2 = pd.DataFrame(model2.predict(X_test2))
y_test_pred2 = pd.DataFrame(y_test_pred2).reset_index(drop=True)
X_df2 = pd.concat([X_df2, y_test_pred2], axis=1)
y_test2 = y_test2.reset_index(drop=True)
X_df2 = pd.concat([X_df2, y_test2], axis=1)
X_df2.columns = ["X_pred","y_pred","y_actual"]
#print(X_df2)
# %%
import math
table2 = pd.DataFrame(columns=["KL_div","abs_distance","y_pred","correctness"])
for i in range(0,len(X_df2)):
# KL divergence
p = X_df2.loc[i,"X_pred"]
if p > 0:
kl = -(p*math.log(p) + (1-p)*math.log(1-p))
else:
kl = 1
table2.loc[i,"KL_div"] = kl
# absolute distance
abs_dist = 2*abs(0.5-p)
table2.loc[i,"abs_distance"] = abs_dist
# correctness
y_pred = X_df2.loc[i,"y_pred"]
y_act = X_df2.loc[i,"y_actual"]
if y_pred == y_act:
table2.loc[i,"correctness"] = 1 # correct prediction
else:
table2.loc[i,"correctness"] = 0 # wrong prediction
table2.loc[i,"y_pred"] = y_pred
#print(table2)
# %%
table2["count"] = 1
correctness2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
correctness2["percent"] = 100*(correctness2["correctness"]/correctness2["count"])
#print(correctness2)
# %%
index = []
for i in (correctness2.index):
index.append(str(i))
plt.bar(index,correctness2["percent"], width=0.7)
for index,data in enumerate(correctness2["percent"]):
plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
plt.ylim(0,120)
plt.xlabel("KL Divergence")
plt.ylabel("% correct")
# %% [markdown]
# ### Confidence
# %%
kl2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
kl2["percent"] = (kl2["correctness"]/kl2["count"])
kl2.dropna(inplace=True)
plt.scatter(np.arange(0, 0.70, 0.1), kl2["percent"])
# print(kl)
# print(np.arange(0, 0.7, 0.05))
# %%
# Linear Regression
from sklearn.linear_model import LinearRegression
x_reg2 = np.arange(0, 0.7, 0.1).reshape((-1, 1))
y_reg2 = kl2["percent"]
reg_model2 = LinearRegression().fit(x_reg2,y_reg2)
# %%
# print('intercept(alpha):', reg_model2.intercept_)
# print('slope(theta):', reg_model2.coef_)
# %% [markdown]
# ## Algorithm C: It = argmax(Ct,i)
# %%
# Correct answer
ans = pd.DataFrame(X_df2["y_actual"])
# NN1
alpha1 = reg_model1.intercept_
theta1 = reg_model1.coef_
# NN2
alpha2 = reg_model2.intercept_
theta2 = reg_model2.coef_
# %%
# Creating NN tables
nn1 = table1.drop(["abs_distance","correctness"], axis=1)
nn1["conf"] = 1 + theta1 * nn1["KL_div"]
nn2 = table2.drop(["abs_distance","correctness"], axis=1)
nn2["conf"] = 1 + theta2 * nn2["KL_div"]
# nn2
# %%
    # Determining the higher-confidence NN and choosing that arm
for i in range(0,len(nn1)):
if nn1.loc[i,"conf"] > nn2.loc[i,"conf"]:
ans.loc[i,"y_pred"] = nn1.loc[i,"y_pred"]
ans.loc[i,"NN"] = 1
ans.loc[i,"conf"] = nn1.loc[i,"conf"]
else:
ans.loc[i,"y_pred"] = nn2.loc[i,"y_pred"]
ans.loc[i,"NN"] = 2
ans.loc[i,"conf"] = nn2.loc[i,"conf"]
# ans
# %% [markdown]
# #### Comparing performance
# %%
# NN1 performance
cost1 = 0
for i in range(0,len(nn1)):
if nn1.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
cost1 += 1
else:
pass
# NN2 performance
cost2 = 0
for i in range(0,len(nn2)):
if nn2.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
cost2 += 1
else:
pass
# Combined performance
cost3 = 0
for i in range(0,len(nn1)):
nn = ans.loc[i,"NN"]
nn_conf = ans.loc[i,"conf"]
if ans.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
cost3 += 1
wrong_record = wrong_record + (f"Run:{run} - Wrong NN:{nn}, Conf:{nn_conf}") + "\n"
else:
pass
# %%
record = record+(f"Run:{run} - Error count for NN1:{cost1}, NN2:{cost2}, Combined:{cost3}") + "\n"
total_error_1 += cost1
total_error_2 += cost2
total_error_comb += cost3
print(f"Run {run} complete!")
run+=1
print(record)
print(f"Average error count for NN1:{total_error_1/num_trials}, NN2:{total_error_2/num_trials}, Combined:{total_error_comb/num_trials}")
#%%
# print(wrong_record)
# %%
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"math.log",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"sklearn.compose.ColumnTransformer",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.InputLayer",
"tensorflow.random.set_random_seed",
"sklearn.preprocessing.OrdinalEncoder",
"sklearn.linear_model.LinearRegression",
"pandas.Series",
"sklearn.utils.shuffle",
"matplotlib.pyplot.bar",
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.concat"
] |
[((299, 388), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\yxie367\\\\Documents\\\\GitHub\\\\Mushrooms\\\\DATA\\\\mushrooms.csv"""'], {}), "(\n 'C:\\\\Users\\\\yxie367\\\\Documents\\\\GitHub\\\\Mushrooms\\\\DATA\\\\mushrooms.csv')\n", (310, 388), True, 'import pandas as pd\n'), ((1241, 1260), 'numpy.arange', 'np.arange', (['(2)', '(44)', '(4)'], {}), '(2, 44, 4)\n', (1250, 1260), True, 'import numpy as np\n'), ((1406, 1420), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1418, 1420), True, 'import pandas as pd\n'), ((1435, 1449), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1447, 1449), True, 'import pandas as pd\n'), ((1460, 1486), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1469, 1486), True, 'import pandas as pd\n'), ((1501, 1527), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1510, 1527), True, 'import pandas as pd\n'), ((1854, 1868), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1866, 1868), True, 'import pandas as pd\n'), ((1883, 1897), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1895, 1897), True, 'import pandas as pd\n'), ((1908, 1934), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1917, 1934), True, 'import pandas as pd\n'), ((1949, 1975), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1958, 1975), True, 'import pandas as pd\n'), ((2871, 2909), 'sklearn.utils.shuffle', 'shuffle', (['X_train_white'], {'random_state': 'j'}), '(X_train_white, random_state=j)\n', (2878, 2909), False, 'from sklearn.utils import shuffle\n'), ((2986, 3024), 'sklearn.utils.shuffle', 'shuffle', (['y_train_white'], {'random_state': 'j'}), '(y_train_white, random_state=j)\n', (2993, 3024), False, 'from sklearn.utils import shuffle\n'), ((3900, 3967), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('cat_attr_pipeline', cat_attr_pipeline, cols)]"], {}), "([('cat_attr_pipeline', cat_attr_pipeline, cols)])\n", (3917, 3967), False, 'from sklearn.compose import ColumnTransformer\n'), ((4395, 4423), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['j'], {}), '(j)\n', (4420, 4423), True, 'import tensorflow as tf\n'), ((5003, 5071), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""../SavedModels/best_model.h5"""'], {'save_best_only': '(True)'}), "('../SavedModels/best_model.h5', save_best_only=True)\n", (5018, 5071), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((5133, 5185), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)', 'restore_best_weights': '(True)'}), '(patience=3, restore_best_weights=True)\n', (5146, 5185), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((6195, 6235), 'pandas.concat', 'pd.concat', (['[X_df1, y_test_pred1]'], {'axis': '(1)'}), '([X_df1, y_test_pred1], axis=1)\n', (6204, 6235), True, 'import pandas as pd\n'), ((6292, 6327), 'pandas.concat', 'pd.concat', (['[X_df1, y_test1]'], {'axis': '(1)'}), '([X_df1, y_test1], axis=1)\n', (6301, 6327), True, 'import pandas as pd\n'), ((6436, 6499), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['KL_div', 'abs_distance', 'correctness']"}), "(columns=['KL_div', 'abs_distance', 'correctness'])\n", (6448, 6499), True, 'import pandas as pd\n'), ((7532, 7582), 'matplotlib.pyplot.bar', 'plt.bar', (['index', "correctness1['percent']"], {'width': '(0.7)'}), "(index, 
correctness1['percent'], width=0.7)\n", (7539, 7582), True, 'import matplotlib.pyplot as plt\n'), ((7746, 7762), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(120)'], {}), '(0, 120)\n', (7754, 7762), True, 'import matplotlib.pyplot as plt\n'), ((7766, 7793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL Divergence"""'], {}), "('KL Divergence')\n", (7776, 7793), True, 'import matplotlib.pyplot as plt\n'), ((7798, 7821), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% correct"""'], {}), "('% correct')\n", (7808, 7821), True, 'import matplotlib.pyplot as plt\n'), ((8131, 8158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL Divergence"""'], {}), "('KL Divergence')\n", (8141, 8158), True, 'import matplotlib.pyplot as plt\n'), ((8163, 8186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% correct"""'], {}), "('% correct')\n", (8173, 8186), True, 'import matplotlib.pyplot as plt\n'), ((8704, 8742), 'sklearn.utils.shuffle', 'shuffle', (['X_train_green'], {'random_state': 'j'}), '(X_train_green, random_state=j)\n', (8711, 8742), False, 'from sklearn.utils import shuffle\n'), ((8817, 8855), 'sklearn.utils.shuffle', 'shuffle', (['y_train_green'], {'random_state': 'j'}), '(y_train_green, random_state=j)\n', (8824, 8855), False, 'from sklearn.utils import shuffle\n'), ((9729, 9796), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('cat_attr_pipeline', cat_attr_pipeline, cols)]"], {}), "([('cat_attr_pipeline', cat_attr_pipeline, cols)])\n", (9746, 9796), False, 'from sklearn.compose import ColumnTransformer\n'), ((10208, 10236), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['j'], {}), '(j)\n', (10233, 10236), True, 'import tensorflow as tf\n'), ((10816, 10884), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""../SavedModels/best_model.h5"""'], {'save_best_only': '(True)'}), "('../SavedModels/best_model.h5', save_best_only=True)\n", (10831, 10884), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((10946, 10998), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)', 'restore_best_weights': '(True)'}), '(patience=3, restore_best_weights=True)\n', (10959, 10998), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((11914, 11954), 'pandas.concat', 'pd.concat', (['[X_df2, y_test_pred2]'], {'axis': '(1)'}), '([X_df2, y_test_pred2], axis=1)\n', (11923, 11954), True, 'import pandas as pd\n'), ((12012, 12047), 'pandas.concat', 'pd.concat', (['[X_df2, y_test2]'], {'axis': '(1)'}), '([X_df2, y_test2], axis=1)\n', (12021, 12047), True, 'import pandas as pd\n'), ((12156, 12229), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['KL_div', 'abs_distance', 'y_pred', 'correctness']"}), "(columns=['KL_div', 'abs_distance', 'y_pred', 'correctness'])\n", (12168, 12229), True, 'import pandas as pd\n'), ((13259, 13309), 'matplotlib.pyplot.bar', 'plt.bar', (['index', "correctness2['percent']"], {'width': '(0.7)'}), "(index, correctness2['percent'], width=0.7)\n", (13266, 13309), True, 'import matplotlib.pyplot as plt\n'), ((13473, 13489), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(120)'], {}), '(0, 120)\n', (13481, 13489), True, 'import matplotlib.pyplot as plt\n'), ((13493, 13520), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL Divergence"""'], {}), "('KL Divergence')\n", (13503, 13520), True, 'import matplotlib.pyplot as plt\n'), ((13525, 13548), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% correct"""'], {}), "('% correct')\n", 
(13535, 13548), True, 'import matplotlib.pyplot as plt\n'), ((14346, 14377), 'pandas.DataFrame', 'pd.DataFrame', (["X_df2['y_actual']"], {}), "(X_df2['y_actual'])\n", (14358, 14377), True, 'import pandas as pd\n'), ((8086, 8108), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (8095, 8108), True, 'import numpy as np\n'), ((13812, 13834), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (13821, 13834), True, 'import numpy as np\n'), ((1670, 1690), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (1679, 1690), True, 'import pandas as pd\n'), ((1797, 1817), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (1806, 1817), True, 'import pandas as pd\n'), ((2112, 2132), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (2121, 2132), True, 'import pandas as pd\n'), ((2239, 2259), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (2248, 2259), True, 'import pandas as pd\n'), ((2923, 2949), 'sklearn.utils.shuffle', 'shuffle', (['X'], {'random_state': 'j'}), '(X, random_state=j)\n', (2930, 2949), False, 'from sklearn.utils import shuffle\n'), ((3038, 3064), 'sklearn.utils.shuffle', 'shuffle', (['y'], {'random_state': 'j'}), '(y, random_state=j)\n', (3045, 3064), False, 'from sklearn.utils import shuffle\n'), ((4468, 4497), 'tensorflow.keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(22,)'}), '(input_shape=(22,))\n', (4478, 4497), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((4525, 4553), 'tensorflow.keras.layers.Dense', 'Dense', (['(45)'], {'activation': '"""relu"""'}), "(45, activation='relu')\n", (4530, 4553), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((4582, 4612), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4587, 4612), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((6134, 6159), 'pandas.DataFrame', 'pd.DataFrame', (['y_test_pred'], {}), '(y_test_pred)\n', (6146, 6159), True, 'import pandas as pd\n'), ((8289, 8311), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (8298, 8311), True, 'import numpy as np\n'), ((8375, 8393), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8391, 8393), False, 'from sklearn.linear_model import LinearRegression\n'), ((10281, 10310), 'tensorflow.keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(22,)'}), '(input_shape=(22,))\n', (10291, 10310), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((10338, 10366), 'tensorflow.keras.layers.Dense', 'Dense', (['(45)'], {'activation': '"""relu"""'}), "(45, activation='relu')\n", (10343, 10366), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((10395, 10425), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (10400, 10425), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((11852, 11878), 'pandas.DataFrame', 'pd.DataFrame', (['y_test_pred2'], {}), '(y_test_pred2)\n', (11864, 11878), True, 'import pandas as pd\n'), ((14008, 14030), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (14017, 14030), True, 'import numpy as np\n'), ((14093, 14111), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (14109, 14111), False, 'from sklearn.linear_model import LinearRegression\n'), ((3820, 3836), 
'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (3834, 3836), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((9649, 9665), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (9663, 9665), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((7292, 7314), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (7301, 7314), True, 'import numpy as np\n'), ((7949, 7971), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (7958, 7971), True, 'import numpy as np\n'), ((13019, 13041), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (13028, 13041), True, 'import numpy as np\n'), ((13676, 13698), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (13685, 13698), True, 'import numpy as np\n'), ((6624, 6635), 'math.log', 'math.log', (['p'], {}), '(p)\n', (6632, 6635), False, 'import math\n'), ((6644, 6659), 'math.log', 'math.log', (['(1 - p)'], {}), '(1 - p)\n', (6652, 6659), False, 'import math\n'), ((12358, 12369), 'math.log', 'math.log', (['p'], {}), '(p)\n', (12366, 12369), False, 'import math\n'), ((12378, 12393), 'math.log', 'math.log', (['(1 - p)'], {}), '(1 - p)\n', (12386, 12393), False, 'import math\n')]
|
#!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from conversion import read_imgs_masks
from os.path import isfile, basename
XERR=0.1
ELINEWIDTH=3
CAPSIZE=5
CAPTHICK=3
FMT='cD'
def harm_plot(ydata, labels, outPrefix, bshell_b):
    '''
    :param ydata: list of series [y1, y2, y3, ...] where each yi is a list of per-subject values
    :param labels: list of strings, one label per series in ydata
    :param outPrefix: prefix of the output image file
    :param bshell_b: b-shell value, appended to outPrefix as _b{bshell_b}
    :return: path of the saved errorbar plot
    '''
outPrefix += f'_b{bshell_b}'
labels= list(labels)
num_series, num_sub= np.shape(ydata)
iter_obj= [i for i in range(num_series)]
# errorbar plot
plt.figure(1)
plt.grid(True)
for i in iter_obj:
x= list(i*np.ones((num_sub,)))
y= ydata[i]
plt.plot(x, y, 'r*')
plt.errorbar([i], np.mean(y), xerr=XERR, yerr=np.std(y),
ecolor='k', capsize=CAPSIZE, capthick=CAPTHICK, elinewidth=ELINEWIDTH, fmt=FMT)
plt.xticks(iter_obj, labels)
plt.title('Comparison of meanFA before and after harmonization')
plt.ylabel('meanFA over IIT_mean_FA_skeleton')
plt.savefig(outPrefix+'_ebarplot.png')
# plt.show()
# box plot
# plt.figure(2)
# plt.grid(True)
# for i in iter_obj:
# x = list(i * np.ones((num_sub,)))
# y = ydata[i]
# plt.plot(x, y, 'r*')
#
# plt.boxplot(ydata, labels=labels, positions=iter_obj,
# boxprops=dict(linewidth=4),
# medianprops=dict(linewidth=4),
# whiskerprops=dict(linewidth=2))
#
#
# plt.title(f'Comparison of boxplot before and after harmonization for b{bshell_b}')
# plt.ylabel('meanFA over IIT_mean_FA_skeleton')
# plt.savefig(outPrefix+'_boxplot.png')
# plt.show()
# return (outPrefix+'_ebarplot.png', outPrefix+'_boxplot.png')
return outPrefix+'_ebarplot.png'
def generate_csv(imgs, site_means, outPrefix, bshell_b):
    try:
        # if imgs is a caselist file, extract the image paths from it;
        # otherwise keep imgs as given (e.g. an already-built list of names)
        imgs, _ = read_imgs_masks(imgs)
    except:
        pass
statFile = outPrefix + '_stat.csv'
if isfile(statFile):
df= pd.read_csv(statFile)
df= df.assign(**{f'meanFA b{bshell_b}':site_means})
else:
stat = {'subject': [basename(f) for f in imgs], f'meanFA b{bshell_b}': site_means}
df = pd.DataFrame(stat)
df.to_csv(statFile, index=False)
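# Each call appends one column to the same CSV, e.g. running generate_csv()
# for bshell_b=1000 and then bshell_b=2000 with outPrefix '/tmp/abc' leaves
# /tmp/abc_stat.csv with columns: subject, meanFA b1000, meanFA b2000
# (paths and b-values here are illustrative only).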
if __name__=='__main__':
sub=['hi','hello','go','come']
ref_mean= [0.46, 0.49, 0.44, 0.40]
target_mean_before= [0.42, 0.58, 0.43, 0.66]
target_mean_after= [0.5 , 0.45, 0.40, 0.55]
labels=['Reference','Target before','Target after']
bshell_b= 2000
harm_plot([ref_mean, target_mean_before, target_mean_after], labels, '/tmp/abc', bshell_b)
# harm_plot([ref_mean], ['Reference'], '/tmp/abc', bshell_b)
generate_csv(sub, ref_mean, '/tmp/abc', bshell_b)
|
[
"conversion.read_imgs_masks",
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"os.path.isfile",
"matplotlib.pyplot.figure",
"os.path.basename",
"numpy.std",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"numpy.shape"
] |
[((60, 81), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (74, 81), False, 'import matplotlib\n'), ((553, 568), 'numpy.shape', 'np.shape', (['ydata'], {}), '(ydata)\n', (561, 568), True, 'import numpy as np\n'), ((640, 653), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (650, 653), True, 'import matplotlib.pyplot as plt\n'), ((658, 672), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (666, 672), True, 'import matplotlib.pyplot as plt\n'), ((956, 984), 'matplotlib.pyplot.xticks', 'plt.xticks', (['iter_obj', 'labels'], {}), '(iter_obj, labels)\n', (966, 984), True, 'import matplotlib.pyplot as plt\n'), ((989, 1053), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison of meanFA before and after harmonization"""'], {}), "('Comparison of meanFA before and after harmonization')\n", (998, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1104), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""meanFA over IIT_mean_FA_skeleton"""'], {}), "('meanFA over IIT_mean_FA_skeleton')\n", (1068, 1104), True, 'import matplotlib.pyplot as plt\n'), ((1109, 1149), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outPrefix + '_ebarplot.png')"], {}), "(outPrefix + '_ebarplot.png')\n", (1120, 1149), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2077), 'os.path.isfile', 'isfile', (['statFile'], {}), '(statFile)\n', (2067, 2077), False, 'from os.path import isfile, basename\n'), ((763, 783), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r*"""'], {}), "(x, y, 'r*')\n", (771, 783), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1983), 'conversion.read_imgs_masks', 'read_imgs_masks', (['imgs'], {}), '(imgs)\n', (1977, 1983), False, 'from conversion import read_imgs_masks\n'), ((2091, 2112), 'pandas.read_csv', 'pd.read_csv', (['statFile'], {}), '(statFile)\n', (2102, 2112), True, 'import pandas as pd\n'), ((2287, 2305), 'pandas.DataFrame', 'pd.DataFrame', (['stat'], {}), '(stat)\n', (2299, 2305), True, 'import pandas as pd\n'), ((811, 821), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (818, 821), True, 'import numpy as np\n'), ((714, 733), 'numpy.ones', 'np.ones', (['(num_sub,)'], {}), '((num_sub,))\n', (721, 733), True, 'import numpy as np\n'), ((839, 848), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (845, 848), True, 'import numpy as np\n'), ((2211, 2222), 'os.path.basename', 'basename', (['f'], {}), '(f)\n', (2219, 2222), False, 'from os.path import isfile, basename\n')]
|
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pandas as pd
import numpy as np
import warnings
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri, pandas2ri, Formula
from rpy2.robjects.packages import importr
pandas2ri.activate()
numpy2ri.activate()
# import R libraries
DESeq2 = importr('DESeq2')
edgeR = importr('edgeR')
Limma = importr('limma')
stats = importr('stats')
to_dataframe = robjects.r('function(x) data.frame(x)')
class DE_rpy2:
"""
Running DESeq2, edgeR, limma through rpy2
input:
count_matrix: a pandas dataframe with each column as count
(float values in FPKM/RPKM are also acceptable as internal rounding will be done)
, and a id column for gene id
example:
id sampleA sampleB
geneA 5.1 1
geneB 4.2 5
geneC 1 2
design_matrix: a pandas dataframe with each column as a condition, and one row for one sample
Note that the sample name must be the index not a column
condition
sampleA1 treated
sampleA2 treated
sampleB1 untreated
sampleB2 untreated
design_formula: default to be the column name of design matrix, example: "~ condition""
If it contains multiple conditions, this formula must be customised,
or the DESeq2 will only consider the first condition.
gene_column: column name of gene id columns in count_matrix, default = 'id'
"""
def __init__(self, count_matrix, design_matrix, design_formula=None, gene_column='id'):
assert gene_column in count_matrix, \
'column: \'%s\', not found in count matrix' % gene_column
assert count_matrix.shape[1] - 1 == design_matrix.shape[0], \
'The number of rows in design matrix must ' \
'be equal to the number of samples in count matrix'
        assert not pd.isna(count_matrix).values.any(), \
            'Null values are found in count matrix. ' \
            'Please check it'
        assert len(design_matrix.columns), \
            'Column names are needed in design matrix'
        if any(dtype.kind == 'f' for dtype in count_matrix.drop(gene_column, axis=1).dtypes):
warnings.warn('DESeq2 and edgeR only accept integer counts\n'
'The values in count matrix are automatically rounded\n'
'In fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n')
# parameters used in DESeq2
self.count_matrix = pandas2ri.py2ri(count_matrix.drop(gene_column, axis=1).astype('int'))
self.design_matrix = pandas2ri.py2ri(design_matrix)
self.gene_ids = count_matrix[gene_column]
self.gene_column = gene_column
self.deseq2_result = None
self.deseq2_label = None
if design_formula is None:
condition = design_matrix.columns[0]
if len(design_matrix.columns) > 1:
warnings.warn('Multiple conditions are set in design matrix,\n'
'you\'d better customise the design formula.\n'
'Here it only considers the first condition\n')
self.design_formula = Formula('~ ' + condition)
else:
self.design_formula = Formula(design_formula)
# parameters used in edgeR
self.edgeR_group = numpy2ri.py2ri(design_matrix.iloc[:, 0].values)
self.edgeR_gene_names = numpy2ri.py2ri(count_matrix[gene_column].values)
self.edgeR_result = None
self.edgeR_label = None
# parameters used in limma
self.limma_result = None
self.limma_label = None
self.final_label = None
def deseq2(self, threshold=0.05, **kwargs):
"""
Run the standard DESeq2 workflow.
Get the DESeq2 results as DataFrame.
Return the label of each gene: 0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the adjusted p-value.
default = 0.05.
:param kwargs: parameters of DESeq2 functions.
See official instructions for details:
http://www.bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# Run DESeq2 workflow
dds = DESeq2.DESeqDataSetFromMatrix(countData=self.count_matrix,
colData=self.design_matrix,
design=self.design_formula)
dds = DESeq2.DESeq(dds, **kwargs)
res = DESeq2.results(dds, **kwargs)
# Store the output matrix as DataFrame
self.deseq2_result = pandas2ri.ri2py(to_dataframe(res))
self.deseq2_result[self.gene_column] = self.gene_ids
# The adjusted p-value in the DESeq2 results
# may contain NAN
if any(pd.isna(self.deseq2_result['padj'].values)):
warnings.warn('There exist NAN in the adjusted p-value\n'
'see https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/'
'inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n')
# Reject the H0 hypothesis if p-value < threshold
labels = [int(x) for x in (self.deseq2_result['padj'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.deseq2_label = label
return label
def edger(self, threshold=0.05):
"""
Run the standard edgeR workflow.
Get the edgR results as DataFrame.
Return the label of each gene:
0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the p-value.
default = 0.05.
See official instructions for details:
https://www.bioconductor.org/packages/release/bioc/vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# run edgeR workflow
# Create the DGEList object
dgList = edgeR.DGEList(counts=self.count_matrix, group=self.edgeR_group, genes=self.edgeR_gene_names)
# Normalize
dgList = edgeR.calcNormFactors(dgList, method="TMM")
# Setting up the model
robjects.r.assign('edgeR_group', self.edgeR_group)
designMat = stats.model_matrix(Formula('~ edgeR_group'))
# Estimating Dispersions
dgList = edgeR.estimateGLMCommonDisp(dgList, design=designMat)
dgList = edgeR.estimateGLMTrendedDisp(dgList, design=designMat)
dgList = edgeR.estimateGLMTagwiseDisp(dgList, design=designMat)
# Differential Expression
fit = edgeR.glmQLFit(dgList, designMat)
test = edgeR.glmQLFTest(fit)
res = edgeR.topTags(test, n=self.count_matrix.nrow)
res_df = pandas2ri.ri2py(to_dataframe(res))
# Sort the result on gene ids
gene_df = pd.DataFrame({'genes': self.gene_ids})
self.edgeR_result = pd.merge(gene_df, res_df, how='left')
# Reject the H0 hypothesis
labels = [int(x) for x in (self.edgeR_result['PValue'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.edgeR_label = label
return label
def limma(self, threshold=0.05):
"""
Run the standard limma workflow.
Get the limma results as DataFrame.
Return the label of each gene:
0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the p-value.
default = 0.05.
See official instructions for details:
https://ucdavis-bioinformatics-training.github.io/2018-June-RNA-Seq-Workshop/thursday/DE.html
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# Create the DGEList object
dgList = edgeR.DGEList(counts=self.count_matrix, group=self.edgeR_group, genes=self.edgeR_gene_names)
# Normalize
dgList = edgeR.calcNormFactors(dgList, method="TMM")
# Setting up the model
robjects.r.assign('edgeR_group', self.edgeR_group)
designMat = stats.model_matrix(Formula('~ edgeR_group'))
# voom
v = Limma.voom(dgList, designMat)
# fitting
fit = Limma.lmFit(v, designMat)
fit = Limma.eBayes(fit)
res = Limma.topTable(fit, n=self.count_matrix.nrow)
res_df = pandas2ri.ri2py(to_dataframe(res))
# Sort the result on gene ids
gene_df = pd.DataFrame({'genes': self.gene_ids})
self.limma_result = pd.merge(gene_df, res_df, how='left')
# Reject the H0 hypothesis
labels = [int(x) for x in (self.limma_result['adj.P.Val'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.limma_label = label
return label
def plot_label_difference(self):
"""
Plot the Venn diagram of the 3 label output.
Since we only interest in the differentially expressed genes.
The number on Venn diagram shows the number of samples labeled as 1.
Say differentially expressed genes.
"""
if self.limma_label is None:
warnings.warn('Seems you haven\'t get limma label\n'
'Automatically running limma...')
self.limma_label = self.limma()
if self.deseq2_label is None:
warnings.warn('Seems you haven\'t get DESeq2 label\n'
'Automatically running DESeq2...')
self.deseq2_label = self.deseq2()
if self.edgeR_label is None:
warnings.warn('Seems you haven\'t get edgeR label\n'
'Automatically running edgeR...')
self.edgeR_label = self.edger()
# Import the plot package
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
labels = np.array([self.deseq2_label['label'].values, self.edgeR_label['label'].values,
self.limma_label['label'].values]).T
names = ['DESeq2', 'edgeR', 'limma']
venn_df = pd.DataFrame(data=labels, columns=names)
sets = {'000': 0, '001': 0, '010': 0, '011': 0, '100': 0, '101': 0, '110': 0, '111': 0}
for i in range(venn_df.shape[0]):
loc = [str(num) for num in venn_df.iloc[i, :]]
loc = loc[0] + loc[1] + loc[2]
sets[loc] += 1
venn3(sets, set_labels=names)
plt.show()
return sets
def get_final_label(self, method='inner'):
"""
There are 2 methods availabel:
inner: set those genes as differentially expressed,
say label 1, if all 3 tools agreed
vote: set those genes as differentially expressed,
say label 1, if all 2 out of the 3 tools agreed
union: set those genes as differentially expressed,
say label 1, as long as 1 tool agreed
"""
label = None
menu = ['inner', 'vote', 'union']
assert method in menu, \
'Please choose the correct method'
if self.limma_label is None:
warnings.warn('Seems you haven\'t get limma label\n'
'Automatically running limma...')
self.limma_label = self.limma()
if self.deseq2_label is None:
warnings.warn('Seems you haven\'t get DESeq2 label\n'
'Automatically running DESeq2...')
self.deseq2_label = self.deseq2()
if self.edgeR_label is None:
warnings.warn('Seems you haven\'t get edgeR label\n'
'Automatically running edgeR...')
self.edgeR_label = self.edger()
labels = self.deseq2_label['label'].values + self.edgeR_label['label'].values + self.limma_label['label'].values
if method == 'inner':
label = [int(x) for x in (labels == 3)]
if method == 'vote':
label = [int(x) for x in (labels >= 2)]
if method == 'union':
label = [int(x) for x in (labels >= 1)]
self.final_label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': label})
return self.final_label
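# A typical end-to-end run might look like this (hypothetical inputs, shown
# here only as a usage sketch):
#
#   de = DE_rpy2(count_matrix, design_matrix)
#   deseq2_label = de.deseq2(threshold=0.05)
#   edger_label = de.edger(threshold=0.05)
#   limma_label = de.limma(threshold=0.05)
#   consensus = de.get_final_label(method='vote')
#   de.plot_label_difference()  # Venn diagram of the three label sets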
|
[
"rpy2.robjects.pandas2ri.activate",
"rpy2.robjects.pandas2ri.py2ri",
"rpy2.robjects.r.assign",
"rpy2.robjects.numpy2ri.py2ri",
"pandas.merge",
"matplotlib_venn.venn3",
"rpy2.robjects.Formula",
"rpy2.robjects.packages.importr",
"numpy.array",
"pandas.DataFrame",
"warnings.warn",
"pandas.isna",
"rpy2.robjects.numpy2ri.activate",
"rpy2.robjects.r",
"matplotlib.pyplot.show"
] |
[((1288, 1308), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (1306, 1308), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((1310, 1329), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (1327, 1329), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((1364, 1381), 'rpy2.robjects.packages.importr', 'importr', (['"""DESeq2"""'], {}), "('DESeq2')\n", (1371, 1381), False, 'from rpy2.robjects.packages import importr\n'), ((1391, 1407), 'rpy2.robjects.packages.importr', 'importr', (['"""edgeR"""'], {}), "('edgeR')\n", (1398, 1407), False, 'from rpy2.robjects.packages import importr\n'), ((1417, 1433), 'rpy2.robjects.packages.importr', 'importr', (['"""limma"""'], {}), "('limma')\n", (1424, 1433), False, 'from rpy2.robjects.packages import importr\n'), ((1443, 1459), 'rpy2.robjects.packages.importr', 'importr', (['"""stats"""'], {}), "('stats')\n", (1450, 1459), False, 'from rpy2.robjects.packages import importr\n'), ((1478, 1517), 'rpy2.robjects.r', 'robjects.r', (['"""function(x) data.frame(x)"""'], {}), "('function(x) data.frame(x)')\n", (1488, 1517), True, 'import rpy2.robjects as robjects\n'), ((3682, 3712), 'rpy2.robjects.pandas2ri.py2ri', 'pandas2ri.py2ri', (['design_matrix'], {}), '(design_matrix)\n', (3697, 3712), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((4447, 4494), 'rpy2.robjects.numpy2ri.py2ri', 'numpy2ri.py2ri', (['design_matrix.iloc[:, 0].values'], {}), '(design_matrix.iloc[:, 0].values)\n', (4461, 4494), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((4528, 4576), 'rpy2.robjects.numpy2ri.py2ri', 'numpy2ri.py2ri', (['count_matrix[gene_column].values'], {}), '(count_matrix[gene_column].values)\n', (4542, 4576), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((6570, 6634), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': labels}"], {}), "({self.gene_column: self.gene_ids, 'label': labels})\n", (6582, 6634), True, 'import pandas as pd\n'), ((7618, 7668), 'rpy2.robjects.r.assign', 'robjects.r.assign', (['"""edgeR_group"""', 'self.edgeR_group'], {}), "('edgeR_group', self.edgeR_group)\n", (7635, 7668), True, 'import rpy2.robjects as robjects\n'), ((8281, 8319), 'pandas.DataFrame', 'pd.DataFrame', (["{'genes': self.gene_ids}"], {}), "({'genes': self.gene_ids})\n", (8293, 8319), True, 'import pandas as pd\n'), ((8349, 8386), 'pandas.merge', 'pd.merge', (['gene_df', 'res_df'], {'how': '"""left"""'}), "(gene_df, res_df, how='left')\n", (8357, 8386), True, 'import pandas as pd\n'), ((8520, 8584), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': labels}"], {}), "({self.gene_column: self.gene_ids, 'label': labels})\n", (8532, 8584), True, 'import pandas as pd\n'), ((9536, 9586), 'rpy2.robjects.r.assign', 'robjects.r.assign', (['"""edgeR_group"""', 'self.edgeR_group'], {}), "('edgeR_group', self.edgeR_group)\n", (9553, 9586), True, 'import rpy2.robjects as robjects\n'), ((9978, 10016), 'pandas.DataFrame', 'pd.DataFrame', (["{'genes': self.gene_ids}"], {}), "({'genes': self.gene_ids})\n", (9990, 10016), True, 'import pandas as pd\n'), ((10046, 10083), 'pandas.merge', 'pd.merge', (['gene_df', 'res_df'], {'how': '"""left"""'}), "(gene_df, res_df, how='left')\n", (10054, 10083), True, 'import pandas as pd\n'), ((10220, 10284), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': labels}"], {}), "({self.gene_column: self.gene_ids, 'label': 
labels})\n", (10232, 10284), True, 'import pandas as pd\n'), ((11640, 11680), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'labels', 'columns': 'names'}), '(data=labels, columns=names)\n', (11652, 11680), True, 'import pandas as pd\n'), ((11962, 11991), 'matplotlib_venn.venn3', 'venn3', (['sets'], {'set_labels': 'names'}), '(sets, set_labels=names)\n', (11967, 11991), False, 'from matplotlib_venn import venn3\n'), ((12001, 12011), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12009, 12011), True, 'import matplotlib.pyplot as plt\n'), ((13674, 13737), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': label}"], {}), "({self.gene_column: self.gene_ids, 'label': label})\n", (13686, 13737), True, 'import pandas as pd\n'), ((2976, 2997), 'pandas.isna', 'pd.isna', (['count_matrix'], {}), '(count_matrix)\n', (2983, 2997), True, 'import pandas as pd\n'), ((3269, 3464), 'warnings.warn', 'warnings.warn', (['"""DESeq2 and edgeR only accept integer counts\nThe values in count matrix are automatically rounded\nIn fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n"""'], {}), '(\n """DESeq2 and edgeR only accept integer counts\nThe values in count matrix are automatically rounded\nIn fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n"""\n )\n', (3282, 3464), False, 'import warnings\n'), ((4281, 4306), 'rpy2.robjects.Formula', 'Formula', (["('~ ' + condition)"], {}), "('~ ' + condition)\n", (4288, 4306), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((4357, 4380), 'rpy2.robjects.Formula', 'Formula', (['design_formula'], {}), '(design_formula)\n', (4364, 4380), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((6116, 6158), 'pandas.isna', 'pd.isna', (["self.deseq2_result['padj'].values"], {}), "(self.deseq2_result['padj'].values)\n", (6123, 6158), True, 'import pandas as pd\n'), ((6174, 6366), 'warnings.warn', 'warnings.warn', (['"""There exist NAN in the adjusted p-value\nsee https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n"""'], {}), '(\n """There exist NAN in the adjusted p-value\nsee https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n"""\n )\n', (6187, 6366), False, 'import warnings\n'), ((7709, 7733), 'rpy2.robjects.Formula', 'Formula', (['"""~ edgeR_group"""'], {}), "('~ edgeR_group')\n", (7716, 7733), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((9627, 9651), 'rpy2.robjects.Formula', 'Formula', (['"""~ edgeR_group"""'], {}), "('~ edgeR_group')\n", (9634, 9651), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((10708, 10798), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get limma label\nAutomatically running limma..."""'], {}), '(\n """Seems you haven\'t get limma label\nAutomatically running limma...""")\n', (10721, 10798), False, 'import warnings\n'), ((10919, 11011), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2..."""'], {}), '(\n """Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2...""")\n', (10932, 11011), False, 'import warnings\n'), ((11133, 11223), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get edgeR label\nAutomatically running edgeR..."""'], {}), '(\n """Seems you haven\'t get edgeR label\nAutomatically running edgeR...""")\n', (11146, 11223), False, 'import warnings\n'), ((11431, 11549), 
'numpy.array', 'np.array', (["[self.deseq2_label['label'].values, self.edgeR_label['label'].values, self.\n limma_label['label'].values]"], {}), "([self.deseq2_label['label'].values, self.edgeR_label['label'].\n values, self.limma_label['label'].values])\n", (11439, 11549), True, 'import numpy as np\n'), ((12679, 12769), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get limma label\nAutomatically running limma..."""'], {}), '(\n """Seems you haven\'t get limma label\nAutomatically running limma...""")\n', (12692, 12769), False, 'import warnings\n'), ((12890, 12982), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2..."""'], {}), '(\n """Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2...""")\n', (12903, 12982), False, 'import warnings\n'), ((13104, 13194), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get edgeR label\nAutomatically running edgeR..."""'], {}), '(\n """Seems you haven\'t get edgeR label\nAutomatically running edgeR...""")\n', (13117, 13194), False, 'import warnings\n'), ((4024, 4187), 'warnings.warn', 'warnings.warn', (['"""Multiple conditions are set in design matrix,\nyou\'d better customise the design formula.\nHere it only considers the first condition\n"""'], {}), '(\n """Multiple conditions are set in design matrix,\nyou\'d better customise the design formula.\nHere it only considers the first condition\n"""\n )\n', (4037, 4187), False, 'import warnings\n')]
|
"""
Takes the MNIST dataset as input (images and labels separated)
and creates a new dataset only with the digits 0 to 4
"""
import numpy as np
DATA_PATH = "data/raw/"
OUTPUT_PATH = "data/processed/mnist/"
X = np.loadtxt(DATA_PATH + "mnist2500_X.txt")
labels = np.loadtxt(DATA_PATH + "mnist2500_labels.txt")
X_new = []
labels_new = []
for i,label in enumerate(labels):
if label < 5:
labels_new.append(label)
X_new.append(X[i])
if i%100 == 0:
print(f"{i} labels passed")
np.savetxt(OUTPUT_PATH + "mnist2500_X_01234.txt",X_new)
np.savetxt(OUTPUT_PATH +"mnist2500_labels_01234.txt",labels_new)
|
[
"numpy.loadtxt",
"numpy.savetxt"
] |
[((206, 247), 'numpy.loadtxt', 'np.loadtxt', (["(DATA_PATH + 'mnist2500_X.txt')"], {}), "(DATA_PATH + 'mnist2500_X.txt')\n", (216, 247), True, 'import numpy as np\n'), ((257, 303), 'numpy.loadtxt', 'np.loadtxt', (["(DATA_PATH + 'mnist2500_labels.txt')"], {}), "(DATA_PATH + 'mnist2500_labels.txt')\n", (267, 303), True, 'import numpy as np\n'), ((503, 559), 'numpy.savetxt', 'np.savetxt', (["(OUTPUT_PATH + 'mnist2500_X_01234.txt')", 'X_new'], {}), "(OUTPUT_PATH + 'mnist2500_X_01234.txt', X_new)\n", (513, 559), True, 'import numpy as np\n'), ((559, 625), 'numpy.savetxt', 'np.savetxt', (["(OUTPUT_PATH + 'mnist2500_labels_01234.txt')", 'labels_new'], {}), "(OUTPUT_PATH + 'mnist2500_labels_01234.txt', labels_new)\n", (569, 625), True, 'import numpy as np\n')]
|
from pykalman import KalmanFilter
import numpy as np
kf = KalmanFilter(transition_matrices=np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]),
observation_matrices=np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]),
transition_covariance=0.003 * np.eye(6, dtype=float)) # TODO: change this constant
t = 0
means = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
covariances = np.eye(6, dtype=float)
def kalman_filter(measurement):
global t, means, covariances
new_filtered_means, new_filtered_covariances = (kf.filter_update(means, covariances, measurement))
means, covariances = new_filtered_means, new_filtered_covariances
t = t + 1.0
# print(means[:3]);
return means[:3]
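# Minimal usage sketch (the measurements below are made-up 3-D positions).
# The state is [x, y, z, vx, vy, vz] with a unit time step: the transition
# matrix above adds each velocity component to its position on every call.
if __name__ == '__main__':
    for z in ([0.0, 0.0, 0.0], [0.1, 0.2, 0.0], [0.2, 0.4, 0.1]):
        print(kalman_filter(np.array(z)))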
|
[
"numpy.array",
"numpy.eye"
] |
[((899, 939), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (907, 939), True, 'import numpy as np\n'), ((954, 976), 'numpy.eye', 'np.eye', (['(6)'], {'dtype': 'float'}), '(6, dtype=float)\n', (960, 976), True, 'import numpy as np\n'), ((92, 304), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0,\n 1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n', (100, 304), True, 'import numpy as np\n'), ((575, 686), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])\n', (583, 686), True, 'import numpy as np\n'), ((829, 851), 'numpy.eye', 'np.eye', (['(6)'], {'dtype': 'float'}), '(6, dtype=float)\n', (835, 851), True, 'import numpy as np\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import gzip
import numpy as np
import matplotlib.pyplot as plt
from Bio import SeqIO, SeqUtils
# -
# !rm -f atroparvus.fa.gz gambiae.fa.gz 2>/dev/null
# !wget https://vectorbase.org/common/downloads/Current_Release/AgambiaePEST/fasta/data/VectorBase-55_AgambiaePEST_Genome.fasta -O gambiae.fa
# !gzip -9 gambiae.fa
# !wget https://vectorbase.org/common/downloads/Current_Release/AatroparvusEBRO/fasta/data/VectorBase-55_AatroparvusEBRO_Genome.fasta -O atroparvus.fa
# !gzip -9 atroparvus.fa
gambiae_name = 'gambiae.fa.gz'
atroparvus_name = 'atroparvus.fa.gz'
recs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')
for rec in recs:
print(rec.description)
#Do not do this with atroparvus
recs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')
chrom_Ns = {}
chrom_sizes = {}
for rec in recs:
if rec.description.find('supercontig') > -1:
continue
print(rec.description, rec.id, rec)
chrom = rec.id.split('_')[1]
if chrom in ['UNKN']:#, 'Y_unplaced']:
continue
chrom_Ns[chrom] = []
on_N = False
curr_size = 0
for pos, nuc in enumerate(rec.seq):
if nuc in ['N', 'n']:
curr_size += 1
on_N = True
else:
if on_N:
chrom_Ns[chrom].append(curr_size)
curr_size = 0
on_N = False
if on_N:
chrom_Ns[chrom].append(curr_size)
chrom_sizes[chrom] = len(rec.seq)
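# The loop above records one entry per run of N/n bases: e.g. for a toy
# sequence 'ACGTNNNACNN' chrom_Ns would get [3, 2], the trailing 'if on_N'
# catching a run that ends at the end of the chromosome.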
for chrom, Ns in chrom_Ns.items():
size = chrom_sizes[chrom]
if len(Ns) > 0:
max_Ns = max(Ns)
else:
max_Ns = 'NA'
print(f'{chrom} ({size}): %Ns ({round(100 * sum(Ns) / size, 1)}), num Ns: {len(Ns)}, max N: {max_Ns}')
# ## Atroparvus super-contigs
recs = SeqIO.parse(gzip.open(atroparvus_name, 'rt', encoding='utf-8'), 'fasta')
sizes = []
size_N = []
for rec in recs:
size = len(rec.seq)
sizes.append(size)
count_N = 0
for nuc in rec.seq:
if nuc in ['n', 'N']:
count_N += 1
size_N.append((size, count_N / size))
print(len(sizes), np.median(sizes), np.mean(sizes), max(sizes), min(sizes),
np.percentile(sizes, 10), np.percentile(sizes, 90))
small_split = 4800
large_split = 540000
fig, axs = plt.subplots(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)
xs, ys = zip(*[(x, 100 * y) for x, y in size_N if x <= small_split])
axs[0, 0].plot(xs, ys, '.')
xs, ys = zip(*[(x, 100 * y) for x, y in size_N if x > small_split and x <= large_split])
axs[0, 1].plot(xs, ys, '.')
axs[0, 1].set_xlim(small_split, large_split)
xs, ys = zip(*[(x, 100 * y) for x, y in size_N if x > large_split])
axs[0, 2].plot(xs, ys, '.')
axs[0, 0].set_ylabel('Fraction of Ns', fontsize=12)
axs[0, 1].set_xlabel('Contig size', fontsize=12)
fig.suptitle('Fraction of Ns per contig size', fontsize=26)
fig.savefig('frac.png')
|
[
"numpy.mean",
"numpy.median",
"gzip.open",
"numpy.percentile",
"matplotlib.pyplot.subplots"
] |
[((2511, 2583), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(16, 9)', 'dpi': '(300)', 'squeeze': '(False)', 'sharey': '(True)'}), '(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)\n', (2523, 2583), True, 'import matplotlib.pyplot as plt\n'), ((865, 912), 'gzip.open', 'gzip.open', (['gambiae_name', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(gambiae_name, 'rt', encoding='utf-8')\n", (874, 912), False, 'import gzip\n'), ((1019, 1066), 'gzip.open', 'gzip.open', (['gambiae_name', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(gambiae_name, 'rt', encoding='utf-8')\n", (1028, 1066), False, 'import gzip\n'), ((2039, 2089), 'gzip.open', 'gzip.open', (['atroparvus_name', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(atroparvus_name, 'rt', encoding='utf-8')\n", (2048, 2089), False, 'import gzip\n'), ((2343, 2359), 'numpy.median', 'np.median', (['sizes'], {}), '(sizes)\n', (2352, 2359), True, 'import numpy as np\n'), ((2361, 2375), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (2368, 2375), True, 'import numpy as np\n'), ((2407, 2431), 'numpy.percentile', 'np.percentile', (['sizes', '(10)'], {}), '(sizes, 10)\n', (2420, 2431), True, 'import numpy as np\n'), ((2433, 2457), 'numpy.percentile', 'np.percentile', (['sizes', '(90)'], {}), '(sizes, 90)\n', (2446, 2457), True, 'import numpy as np\n')]
|
import argparse
import itertools
import os
import os.path as osp
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from scipy.stats import norm
import matplotlib
# matplotlib.use('agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import subprocess
import shutil
import chainer
from chainer import training
from chainer.training import extensions
from chainer.dataset import concat_examples
from chainer.backends.cuda import to_cpu
import chainer.functions as F
from chainer import serializers
import net_200x200 as net
import data_generator
from config_parser import ConfigParser
from utils import *
def save_reconstruction_arrays(data, model, folder_name="."):
print("Clear Images from Last Reconstructions\n")
all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
list(map(lambda x : os.remove(folder_name + x), all_files))
print("Saving Array RECONSTRUCTIONS\n")
(train_b0, train_b1) = data
no_images = 10
train_ind = np.linspace(0, len(train_b0) - 1, no_images, dtype=int)
result = model(train_b0[train_ind], train_b1[train_ind])
gt_b0 = np.swapaxes(train_b0[train_ind], 1, 3)
gt_b1 = np.swapaxes(train_b1[train_ind], 1, 3)
rec_b0 = np.swapaxes(result[0].data, 1, 3)
rec_b1 = np.swapaxes(result[1].data, 1, 3)
output = {"gt_b0": gt_b0, "gt_b1": gt_b1, 'rec_b0': rec_b0, 'rec_b1': rec_b1}
np.savez(os.path.join("result", "reconstruction_arrays/train" + ".npz"), **output)
def eval_seen_data(data, model, groups, folder_name=".", pairs=None):
print("Clear Images from Last Seen Scatter\n")
all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
list(map(lambda x : os.remove(folder_name + x), all_files))
print("Evaluating on SEEN data\n")
(data_b0, data_b1) = data
n = 100
    every_nth = len(data_b0) // n  # integer stride for the slicing below
if every_nth == 0:
every_nth = 1
axis_ranges = [-5, 5]
for group_key in groups:
for label in groups[group_key]:
print(("Visualising label:\t{0}, Group:\t{1}".format(label, group_key)))
            # NOTE: train_labels here is the module-level array loaded in __main__
            indices = [i for i, x in enumerate(train_labels) if x == label]
            filtered_data_b0 = data_b0.take(indices, axis=0)[::every_nth]
            filtered_data_b1 = data_b1.take(indices, axis=0)[::every_nth]
latent_mu = model.get_latent(filtered_data_b0, filtered_data_b1).data
pairs = [(0,1), (0,2), (1,2)]
for pair in pairs:
plt.scatter(latent_mu[:, pair[0]], latent_mu[:, pair[1]], c='red', label=label, alpha=0.75)
plt.grid()
# major axes
plt.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
plt.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
plt.xlim(axis_ranges[0], axis_ranges[1])
plt.ylim(axis_ranges[0], axis_ranges[1])
plt.xlabel("Z_" + str(pair[0]))
plt.ylabel("Z_" + str(pair[1]))
plt.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
plt.savefig(osp.join(folder_name, "group_" + str(group_key) + "_" + label + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
plt.close()
def eval_seen_data_single(data, model, labels=[], folder_name=".", pairs=None):
print("Clear Images from Last Seen Scatter Single\n")
all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
list(map(lambda x : os.remove(folder_name + x), all_files))
print("Evaluating on SEEN SINGLE data\n")
(data_b0, data_b1) = data
axis_ranges = [-15, 15]
# pairs = [(0,1)]
n = 100
    every_nth = len(data_b0) // n  # integer stride for the slicing below
if every_nth == 0:
every_nth = 1
filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[::every_nth]
filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[::every_nth]
labels = labels[::every_nth]
latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
for i in range(0, len(latent[0]), 33):
fig = plt.figure()
fig.canvas.set_window_title(labels[i])
ax = fig.add_subplot(1, len(pairs) + 1, 1, projection='3d')
points = filtered_data_b0[i].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_0 = filtered_points[...,0][::3]
ys_0 = filtered_points[...,1][::3]
zs_0 = filtered_points[...,2][::3]
ax.scatter(xs_0, ys_0, zs_0, c='r', alpha=0.5)
points = filtered_data_b1[i].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_1 = filtered_points[...,0][::3]
ys_1 = filtered_points[...,1][::3]
zs_1 = filtered_points[...,2][::3]
ax.scatter(xs_1, ys_1, zs_1, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
for j, pair in enumerate(pairs):
ax = fig.add_subplot(1, len(pairs) + 1, j + 2)
ax.scatter(latent[pair[0], i], latent[pair[1], i], c='red', label="unseen", alpha=0.75)
ax.grid()
# major axes
ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
ax.set_xlim(axis_ranges[0], axis_ranges[1])
ax.set_ylim(axis_ranges[0], axis_ranges[1])
ax.set_xlabel("Z_" + str(pair[0]))
ax.set_ylabel("Z_" + str(pair[1]))
# ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
# plt.savefig(osp.join(folder_name, str(i) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
# plt.close()
plt.show()
def eval_unseen_data(data, model, folder_name=".", pairs=None):
print("Clear Images from Last Unseen Scatter\n")
all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
list(map(lambda x : os.remove(folder_name + x), all_files))
print("Evaluating on UNSEEN data\n")
(data_b0, data_b1) = data
axis_ranges = [-5, 5]
# pairs = [(0,1), (0,2), (1,2)]
# pairs = [(0,1)]
# n = 100
# every_nth = len(data_b0) / n
# if every_nth == 0:
# every_nth = 1
every_nth = 2
filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[::every_nth]
filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[::every_nth]
latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
latent_flipped = np.array(model.get_latent(filtered_data_b1, filtered_data_b0))
filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
for i in range(len(filtered_data_b0)):
print(("{0}/{1}".format(i, len(latent[0]))))
fig = plt.figure()
ax = fig.add_subplot(2, 4, 1, projection='3d')
points = filtered_data_b0[i].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_0 = filtered_points[...,0][::3]
ys_0 = filtered_points[...,1][::3]
zs_0 = filtered_points[...,2][::3]
ax.scatter(xs_0, ys_0, zs_0, c='r', alpha=0.5)
points = filtered_data_b1[i].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_1 = filtered_points[...,0][::3]
ys_1 = filtered_points[...,1][::3]
zs_1 = filtered_points[...,2][::3]
ax.scatter(xs_1, ys_1, zs_1, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
for j, pair in enumerate(pairs):
ax = fig.add_subplot(2, 4, j + 2)
ax.scatter(latent[pair[0], i], latent[pair[1], i], c='red', label="unseen", alpha=0.75)
ax.grid()
# major axes
ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
# ax.set_xlim(axis_ranges[0], axis_ranges[1])
# ax.set_ylim(axis_ranges[0], axis_ranges[1])
ax.set_xlabel("Z_" + str(pair[0]))
ax.set_ylabel("Z_" + str(pair[1]))
# ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
ax = fig.add_subplot(2, 4, 5, projection='3d')
ax.scatter(xs_1, ys_1, zs_1, c='r', alpha=0.5)
ax.scatter(xs_0, ys_0, zs_0, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
for j, pair in enumerate(pairs):
ax = fig.add_subplot(2, 4, j + 6)
ax.scatter(latent_flipped[pair[0], i], latent_flipped[pair[1], i], c='red', label="unseen", alpha=0.75)
ax.grid()
# major axes
ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
# ax.set_xlim(axis_ranges[0], axis_ranges[1])
# ax.set_ylim(axis_ranges[0], axis_ranges[1])
ax.set_xlabel("Z_" + str(pair[0]))
ax.set_ylabel("Z_" + str(pair[1]))
# ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
# plt.savefig(osp.join(folder_name, str(i) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
# plt.close()
plt.show()
def eval_unseen_time(data, model, folder_name=".", pairs=None):
print("Clear Images from Last Unseen Scatter\n")
all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
list(map(lambda x : os.remove(folder_name + x), all_files))
print("Evaluating on UNSEEN data through time\n")
cmap = plt.cm.get_cmap('cool')
(data_b0, data_b1) = data
axis_ranges = [-20, 20]
# pairs = [(0,1), (0,2), (1,2)]
pairs = [(0,1), (2,3)]
npz_size = 50
npz_files = 4
for k in range(npz_files):
filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[k * npz_size : (k+1) * npz_size - 1]
filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[k * npz_size : (k+1) * npz_size - 1]
latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
latent_flipped = np.array(model.get_latent(filtered_data_b1, filtered_data_b0))
filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
print(("{0}/{1}".format(k, npz_files)))
fig = plt.figure()
###################
#### FIRST ROW ####
###################
ax = fig.add_subplot(2, len(pairs) + 2, 1, projection='3d')
points = filtered_data_b0[1].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_0_first = filtered_points[...,0][::3]
ys_0_first = filtered_points[...,1][::3]
zs_0_first = filtered_points[...,2][::3]
ax.scatter(xs_0_first, ys_0_first, zs_0_first, c='r', alpha=0.5)
points = filtered_data_b1[1].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_1_first = filtered_points[...,0][::3]
ys_1_first = filtered_points[...,1][::3]
zs_1_first = filtered_points[...,2][::3]
ax.scatter(xs_1_first, ys_1_first, zs_1_first, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
ax = fig.add_subplot(2, len(pairs) + 2, 2, projection='3d')
points = filtered_data_b0[-1].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_0_last = filtered_points[...,0][::3]
ys_0_last = filtered_points[...,1][::3]
zs_0_last = filtered_points[...,2][::3]
ax.scatter(xs_0_last, ys_0_last, zs_0_last, c='r', alpha=0.5)
points = filtered_data_b1[-1].reshape(200*200,3)
filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
xs_1_last = filtered_points[...,0][::3]
ys_1_last = filtered_points[...,1][::3]
zs_1_last = filtered_points[...,2][::3]
ax.scatter(xs_1_last, ys_1_last, zs_1_last, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
for j, pair in enumerate(pairs):
ax = fig.add_subplot(2, len(pairs) + 2, j + 3)
for i in range(len(latent[0])):
x = (latent[pair[0], i], latent[pair[1], i])
rgba = cmap(i/float(npz_size))
ax.scatter(x[0], x[1], c=[rgba[:3]], label="unseen", s=30, alpha=0.75)
ax.grid()
# major axes
ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
ax.set_xlabel("Z_" + str(pair[0]))
ax.set_ylabel("Z_" + str(pair[1]))
ax.set_xlim(axis_ranges[0], axis_ranges[1])
ax.set_ylim(axis_ranges[0], axis_ranges[1])
##################
### SECOND ROW ###
##################
ax = fig.add_subplot(2, len(pairs) + 2, len(pairs) + 3, projection='3d')
ax.scatter(xs_1_first, ys_1_first, zs_1_first, c='r', alpha=0.5)
ax.scatter(xs_0_first, ys_0_first, zs_0_first, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
ax = fig.add_subplot(2, len(pairs) + 2, len(pairs) + 4, projection='3d')
ax.scatter(xs_1_last, ys_1_last, zs_1_last, c='r', alpha=0.5)
ax.scatter(xs_0_last, ys_0_last, zs_0_last, c='c', alpha=0.5)
ax.set_xlabel('X', fontweight="bold")
ax.set_ylabel('Y', fontweight="bold")
ax.set_zlabel('Z', fontweight="bold")
for j, pair in enumerate(pairs):
ax = fig.add_subplot(2, len(pairs) + 2, j + len(pairs) + 5)
for i in range(len(latent_flipped[0])):
x = (latent_flipped[pair[0], i], latent_flipped[pair[1], i])
rgba = cmap(i/float(npz_size))
ax.scatter(x[0], x[1], c=[rgba[:3]], label="unseen", s=30, alpha=0.75)
ax.grid()
# major axes
ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
ax.set_xlabel("Z_" + str(pair[0]))
ax.set_ylabel("Z_" + str(pair[1]))
ax.set_xlim(axis_ranges[0], axis_ranges[1])
ax.set_ylim(axis_ranges[0], axis_ranges[1])
# plt.savefig(osp.join(folder_name, "npz_" + str(k) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
# plt.close()
plt.show()
if __name__ == "__main__":
ignore = ["unlabelled", "train"]
generator = data_generator.DataGenerator()
train_b0, train_b1, train_labels, train_concat, train_vectors, test_b0, test_b1, test_labels, test_concat, test_vectors, unseen_b0, unseen_b1,\
unseen_labels, groups = generator.generate_dataset(ignore=ignore, args=None)
print('\n###############################################')
print("DATA_LOADED")
print(("# Training Branch 0: \t\t{0}".format(train_b0.shape)))
print(("# Training Branch 1: \t\t{0}".format(train_b1.shape)))
print(("# Training labels: \t{0}".format(set(train_labels))))
print(("# Training labels: \t{0}".format(train_labels.shape)))
print(("# Training concat: \t{0}".format(len(train_concat))))
print(("# Training vectors: \t{0}".format(train_vectors.shape)))
print(("# Testing Branch 0: \t\t{0}".format(test_b0.shape)))
print(("# Testing Branch 1: \t\t{0}".format(test_b1.shape)))
print(("# Testing labels: \t{0}".format(set(test_labels))))
print(("# Testing concat: \t{0}".format(len(test_concat))))
print(("# Testing labels: \t{0}".format(test_labels.shape)))
print(("# Testing vectors: \t{0}".format(test_vectors.shape)))
print(("# Unseen Branch 0: \t\t{0}".format(unseen_b0.shape)))
print(("# Unseen Branch 1: \t\t{0}".format(unseen_b1.shape)))
print(("# Unseen labels: \t{0}".format(set(unseen_labels))))
print(("\n# Groups: \t{0}".format(groups)))
print('###############################################\n')
model = net.Conv_Siam_VAE(train_b0.shape[1], train_b1.shape[1], n_latent=8, groups=groups, alpha=1, beta=1, gamma=1)
serializers.load_npz("result/models/final.model", model)
model.to_cpu()
pairs = list(itertools.combinations(list(range(len(groups))), 2))
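    # With three groups this gives pairs = [(0, 1), (0, 2), (1, 2)]: every
    # 2-D combination of group indices that the eval_* scatter plots project onto.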
# save the pointcloud reconstructions
# save_reconstruction_arrays((train_b0, train_b0), model, folder_name="result/reconstruction_arrays/")
# evaluate on the data that was seen during trainig
# eval_seen_data((train_b0, train_b1), model, groups, folder_name="eval/scatter/seen/", pairs=pairs)
# evaluate on the data that was seen during trainig one by one + 3D
# eval_seen_data_single((test_b0, test_b1), model, labels=test_labels, folder_name="eval/scatter/seen_single/", pairs=pairs)
# evaluate on the data that was NOT seen during trainig
# eval_unseen_data((unseen_b0, unseen_b1), model, folder_name="eval/scatter/unseen/", pairs=pairs)
# evaluate the unseen data through time
eval_unseen_time((unseen_b0, unseen_b1), model, folder_name="eval/scatter/unseen_time/", pairs=pairs)
|
[
"net_200x200.Conv_Siam_VAE",
"os.listdir",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.swapaxes",
"os.remove",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"data_generator.DataGenerator",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"chainer.serializers.load_npz",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1128, 1166), 'numpy.swapaxes', 'np.swapaxes', (['train_b0[train_ind]', '(1)', '(3)'], {}), '(train_b0[train_ind], 1, 3)\n', (1139, 1166), True, 'import numpy as np\n'), ((1176, 1214), 'numpy.swapaxes', 'np.swapaxes', (['train_b1[train_ind]', '(1)', '(3)'], {}), '(train_b1[train_ind], 1, 3)\n', (1187, 1214), True, 'import numpy as np\n'), ((1226, 1259), 'numpy.swapaxes', 'np.swapaxes', (['result[0].data', '(1)', '(3)'], {}), '(result[0].data, 1, 3)\n', (1237, 1259), True, 'import numpy as np\n'), ((1270, 1303), 'numpy.swapaxes', 'np.swapaxes', (['result[1].data', '(1)', '(3)'], {}), '(result[1].data, 1, 3)\n', (1281, 1303), True, 'import numpy as np\n'), ((3781, 3816), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b0', '(1)', '(3)'], {}), '(filtered_data_b0, 1, 3)\n', (3792, 3816), True, 'import numpy as np\n'), ((3837, 3872), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b1', '(1)', '(3)'], {}), '(filtered_data_b1, 1, 3)\n', (3848, 3872), True, 'import numpy as np\n'), ((6370, 6405), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b0', '(1)', '(3)'], {}), '(filtered_data_b0, 1, 3)\n', (6381, 6405), True, 'import numpy as np\n'), ((6426, 6461), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b1', '(1)', '(3)'], {}), '(filtered_data_b1, 1, 3)\n', (6437, 6461), True, 'import numpy as np\n'), ((9265, 9288), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""cool"""'], {}), "('cool')\n", (9280, 9288), True, 'import matplotlib.pyplot as plt\n'), ((14066, 14096), 'data_generator.DataGenerator', 'data_generator.DataGenerator', ([], {}), '()\n', (14094, 14096), False, 'import data_generator\n'), ((15463, 15576), 'net_200x200.Conv_Siam_VAE', 'net.Conv_Siam_VAE', (['train_b0.shape[1]', 'train_b1.shape[1]'], {'n_latent': '(8)', 'groups': 'groups', 'alpha': '(1)', 'beta': '(1)', 'gamma': '(1)'}), '(train_b0.shape[1], train_b1.shape[1], n_latent=8, groups=\n groups, alpha=1, beta=1, gamma=1)\n', (15480, 15576), True, 'import net_200x200 as net\n'), ((15573, 15629), 'chainer.serializers.load_npz', 'serializers.load_npz', (['"""result/models/final.model"""', 'model'], {}), "('result/models/final.model', model)\n", (15593, 15629), False, 'from chainer import serializers\n'), ((1394, 1456), 'os.path.join', 'os.path.join', (['"""result"""', "('reconstruction_arrays/train' + '.npz')"], {}), "('result', 'reconstruction_arrays/train' + '.npz')\n", (1406, 1456), False, 'import os\n'), ((3923, 3935), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3933, 3935), True, 'import matplotlib.pyplot as plt\n'), ((5511, 5521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5519, 5521), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6571), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6569, 6571), True, 'import matplotlib.pyplot as plt\n'), ((8927, 8937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8935, 8937), True, 'import matplotlib.pyplot as plt\n'), ((9851, 9886), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b0', '(1)', '(3)'], {}), '(filtered_data_b0, 1, 3)\n', (9862, 9886), True, 'import numpy as np\n'), ((9908, 9943), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b1', '(1)', '(3)'], {}), '(filtered_data_b1, 1, 3)\n', (9919, 9943), True, 'import numpy as np\n'), ((9995, 10007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10005, 10007), True, 'import matplotlib.pyplot as plt\n'), ((13979, 13989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13987, 13989), True, 'import matplotlib.pyplot as plt\n'), 
((797, 820), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (807, 820), False, 'import os\n'), ((863, 889), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (872, 889), False, 'import os\n'), ((1633, 1656), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (1643, 1656), False, 'import os\n'), ((1699, 1725), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (1708, 1725), False, 'import os\n'), ((2373, 2469), 'matplotlib.pyplot.scatter', 'plt.scatter', (['latent_mu[:, pair[0]]', 'latent_mu[:, pair[1]]'], {'c': '"""red"""', 'label': 'label', 'alpha': '(0.75)'}), "(latent_mu[:, pair[0]], latent_mu[:, pair[1]], c='red', label=\n label, alpha=0.75)\n", (2384, 2469), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2479), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2477, 2479), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2557), 'matplotlib.pyplot.plot', 'plt.plot', (['[axis_ranges[0], axis_ranges[1]]', '[0, 0]', '"""k"""'], {}), "([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')\n", (2510, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2616), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[axis_ranges[0], axis_ranges[1]]', '"""k"""'], {}), "([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')\n", (2569, 2616), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2661), 'matplotlib.pyplot.xlim', 'plt.xlim', (['axis_ranges[0]', 'axis_ranges[1]'], {}), '(axis_ranges[0], axis_ranges[1])\n', (2629, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2706), 'matplotlib.pyplot.ylim', 'plt.ylim', (['axis_ranges[0]', 'axis_ranges[1]'], {}), '(axis_ranges[0], axis_ranges[1])\n', (2674, 2706), True, 'import matplotlib.pyplot as plt\n'), ((2785, 2849), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'bbox_to_anchor': '(1, 1)', 'fontsize': '(14)'}), "(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)\n", (2795, 2849), True, 'import matplotlib.pyplot as plt\n'), ((3001, 3012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3010, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3219), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (3206, 3219), False, 'import os\n'), ((3262, 3288), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (3271, 3288), False, 'import os\n'), ((5684, 5707), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (5694, 5707), False, 'import os\n'), ((5750, 5776), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (5759, 5776), False, 'import os\n'), ((9100, 9123), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (9110, 9123), False, 'import os\n'), ((9166, 9192), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (9175, 9192), False, 'import os\n')]
|
# utils/test_kronecker.py
"""Tests for rom_operator_inference.utils._kronecker."""
import pytest
import numpy as np
import rom_operator_inference as opinf
# Index generation for fast self-product kronecker evaluation =================
def test_kron2c_indices(n_tests=100):
"""Test utils._kronecker.kron2c_indices()."""
mask = opinf.utils.kron2c_indices(4)
assert np.all(mask == np.array([[0, 0],
[1, 0], [1, 1],
[2, 0], [2, 1], [2, 2],
[3, 0], [3, 1], [3, 2], [3, 3]],
dtype=int))
submask = opinf.utils.kron2c_indices(3)
assert np.allclose(submask, mask[:6])
r = 10
_r2 = r * (r + 1) // 2
mask = opinf.utils.kron2c_indices(r)
assert mask.shape == (_r2, 2)
assert np.all(mask[0] == 0)
assert np.all(mask[-1] == r - 1)
assert mask.sum(axis=0)[0] == sum(i*(i+1) for i in range(r))
# Ensure consistency with utils.kron2c().
for _ in range(n_tests):
x = np.random.random(r)
assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron2c(x))
def test_kron3c_indices(n_tests=100):
"""Test utils._kronecker.kron3c_indices()."""
mask = opinf.utils.kron3c_indices(2)
assert np.all(mask == np.array([[0, 0, 0],
[1, 0, 0], [1, 1, 0], [1, 1, 1]],
dtype=int))
r = 10
mask = opinf.utils.kron3c_indices(r)
_r3 = r * (r + 1) * (r + 2) // 6
mask = opinf.utils.kron3c_indices(r)
assert mask.shape == (_r3, 3)
assert np.all(mask[0] == 0)
assert np.all(mask[-1] == r - 1)
# Ensure consistency with utils.kron3c().
for _ in range(n_tests):
x = np.random.random(r)
assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron3c(x))
# Kronecker (Khatri-Rao) products =============================================
# utils.kron2c() --------------------------------------------------------------
def _test_kron2c_single_vector(n):
"""Do one vector test of utils._kronecker.kron2c()."""
x = np.random.random(n)
x2 = opinf.utils.kron2c(x)
assert x2.ndim == 1
assert x2.shape[0] == n*(n+1)//2
for i in range(n):
assert np.allclose(x2[i*(i+1)//2:(i+1)*(i+2)//2], x[i]*x[:i+1])
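# Added note (not part of the original test suite): for x = [a, b, c],
# kron2c(x) stacks the blocks x[i] * x[:i+1], giving
# [a*a, b*a, b*b, c*a, c*b, c*c] of length n*(n+1)//2 = 6 -- exactly the
# slices checked by the assertion above.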
def _test_kron2c_single_matrix(n):
"""Do one matrix test of utils._kronecker.kron2c()."""
X = np.random.random((n,n))
X2 = opinf.utils.kron2c(X)
assert X2.ndim == 2
assert X2.shape[0] == n*(n+1)//2
assert X2.shape[1] == n
for i in range(n):
assert np.allclose(X2[i*(i+1)//2:(i+1)*(i+2)//2], X[i]*X[:i+1])
def test_kron2c(n_tests=100):
"""Test utils._kronecker.kron2c()."""
# Try with bad input.
with pytest.raises(ValueError) as exc:
opinf.utils.kron2c(np.random.random((3,3,3)), checkdim=True)
assert exc.value.args[0] == "x must be one- or two-dimensional"
# Correct inputs.
for n in np.random.randint(2, 100, n_tests):
_test_kron2c_single_vector(n)
_test_kron2c_single_matrix(n)
# utils.kron3c() --------------------------------------------------------------
def _test_kron3c_single_vector(n):
"""Do one vector test of utils._kronecker.kron3c()."""
x = np.random.random(n)
x3 = opinf.utils.kron3c(x)
assert x3.ndim == 1
assert x3.shape[0] == n*(n+1)*(n+2)//6
for i in range(n):
assert np.allclose(x3[i*(i+1)*(i+2)//6:(i+1)*(i+2)*(i+3)//6],
x[i]*opinf.utils.kron2c(x[:i+1]))
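# Added note (not part of the original test suite): kron3c generalises the
# compressed product to cubic terms, e.g. kron3c([a, b]) gives
# [a*a*a, b*a*a, b*b*a, b*b*b] of length n*(n+1)*(n+2)//6 = 4, matching the
# kron3c_indices mask tested earlier.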
def _test_kron3c_single_matrix(n):
"""Do one matrix test of utils._kronecker.kron3c()."""
X = np.random.random((n,n))
X3 = opinf.utils.kron3c(X)
assert X3.ndim == 2
assert X3.shape[0] == n*(n+1)*(n+2)//6
assert X3.shape[1] == n
for i in range(n):
assert np.allclose(X3[i*(i+1)*(i+2)//6:(i+1)*(i+2)*(i+3)//6],
X[i]*opinf.utils.kron2c(X[:i+1]))
def test_kron3c(n_tests=50):
"""Test utils._kronecker.kron3c()."""
# Try with bad input.
with pytest.raises(ValueError) as exc:
opinf.utils.kron3c(np.random.random((2,4,3)), checkdim=True)
assert exc.value.args[0] == "x must be one- or two-dimensional"
# Correct inputs.
for n in np.random.randint(2, 30, n_tests):
_test_kron3c_single_vector(n)
_test_kron3c_single_matrix(n)
# Matricized tensor management ================================================
# utils.expand_quadratic() ----------------------------------------------------
def _test_expand_quadratic_single(r):
"""Do one test of utils._kronecker.expand_quadratic()."""
x = np.random.random(r)
# Do a valid expand_quadratic() calculation and check dimensions.
s = r*(r+1)//2
Hc = np.random.random((r,s))
H = opinf.utils.expand_quadratic(Hc)
assert H.shape == (r,r**2)
# Check that Hc(x^2) == H(x⊗x).
Hxx = H @ np.kron(x,x)
assert np.allclose(Hc @ opinf.utils.kron2c(x), Hxx)
# Check properties of the tensor for H.
Htensor = H.reshape((r,r,r))
assert np.allclose(Htensor @ x @ x, Hxx)
for subH in H:
assert np.allclose(subH, subH.T)
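# Added note (not part of the original test suite): expand_quadratic maps the
# compact operator Hc, of shape (r, r*(r+1)//2), to the full operator H, of
# shape (r, r**2), so that Hc @ kron2c(x) == H @ np.kron(x, x) for every x;
# each r x r slice of the expanded tensor is symmetric, which is what the
# subH == subH.T loop above verifies.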
def test_expand_quadratic(n_tests=100):
"""Test utils._kronecker.expand_quadratic()."""
# Try to do expand_quadratic() with a bad second dimension.
r = 5
sbad = r*(r+3)//2
Hc = np.random.random((r, sbad))
with pytest.raises(ValueError) as exc:
opinf.utils.expand_quadratic(Hc)
assert exc.value.args[0] == \
f"invalid shape (r,s) = {(r,sbad)} with s != r(r+1)/2"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 100, n_tests):
_test_expand_quadratic_single(r)
# utils.compress_quadratic() --------------------------------------------------
def _test_compress_quadratic_single(r):
"""Do one test of utils._kronecker.compress_quadratic()."""
x = np.random.random(r)
# Do a valid compress_quadratic() calculation and check dimensions.
H = np.random.random((r,r**2))
s = r*(r+1)//2
Hc = opinf.utils.compress_quadratic(H)
assert Hc.shape == (r,s)
# Check that Hc(x^2) == H(x⊗x).
Hxx = H @ np.kron(x,x)
assert np.allclose(Hxx, Hc @ opinf.utils.kron2c(x))
# Check that expand_quadratic() and compress_quadratic()
# are inverses up to symmetry.
H2 = opinf.utils.expand_quadratic(Hc)
Ht = H.reshape((r,r,r))
Htnew = np.empty_like(Ht)
for i in range(r):
Htnew[i] = (Ht[i] + Ht[i].T) / 2
assert np.allclose(H2, Htnew.reshape(H.shape))
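# Added note (not part of the original test suite): compress_quadratic is only
# a one-sided inverse of expand_quadratic -- expanding the compressed operator
# recovers the symmetrised slices (Ht[i] + Ht[i].T) / 2 rather than H itself,
# because only the symmetric part of each slice contributes to
# H @ np.kron(x, x).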
def test_compress_quadratic(n_tests=100):
"""Test utils._kronecker.compress_quadratic()."""
# Try to do compress_quadratic() with a bad second dimension.
r = 5
r2bad = r**2 + 1
H = np.random.random((r, r2bad))
with pytest.raises(ValueError) as exc:
opinf.utils.compress_quadratic(H)
assert exc.value.args[0] == \
f"invalid shape (r,a) = {(r,r2bad)} with a != r**2"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 100, n_tests):
_test_compress_quadratic_single(r)
# utils.expand_cubic() --------------------------------------------------------
def _test_expand_cubic_single(r):
"""Do one test of utils._kronecker.expand_cubic()."""
x = np.random.random(r)
# Do a valid expand_cubic() calculation and check dimensions.
s = r*(r+1)*(r+2)//6
Gc = np.random.random((r,s))
G = opinf.utils.expand_cubic(Gc)
assert G.shape == (r,r**3)
# Check that Gc(x^3) == G(x⊗x⊗x).
Gxxx = G @ np.kron(x,np.kron(x,x))
assert np.allclose(Gc @ opinf.utils.kron3c(x), Gxxx)
# Check properties of the tensor for G.
Gtensor = G.reshape((r,r,r,r))
assert np.allclose(Gtensor @ x @ x @ x, Gxxx)
for subG in G:
assert np.allclose(subG, subG.T)
def test_expand_cubic(n_tests=50):
"""Test utils._kronecker.expand_cubic()."""
# Try to do expand_cubic() with a bad second dimension.
r = 5
sbad = r*(r+1)*(r+3)//6
Gc = np.random.random((r, sbad))
with pytest.raises(ValueError) as exc:
opinf.utils.expand_cubic(Gc)
assert exc.value.args[0] == \
f"invalid shape (r,s) = {(r,sbad)} with s != r(r+1)(r+2)/6"
    # Do n_tests test cases of varying dimensions.
for r in np.random.randint(2, 30, n_tests):
_test_expand_cubic_single(r)
# utils.compress_cubic() ------------------------------------------------------
def _test_compress_cubic_single(r):
"""Do one test of utils._kronecker.compress_cubic()."""
x = np.random.random(r)
# Do a valid compress_cubic() calculation and check dimensions.
G = np.random.random((r,r**3))
s = r*(r+1)*(r+2)//6
Gc = opinf.utils.compress_cubic(G)
assert Gc.shape == (r,s)
# Check that Gc(x^3) == G(x⊗x⊗x).
Gxxx = G @ np.kron(x,np.kron(x,x))
assert np.allclose(Gxxx, Gc @ opinf.utils.kron3c(x))
# Check that expand_cubic() and compress_cubic() are "inverses."
G_new = opinf.utils.expand_cubic(Gc)
assert np.allclose(Gc, opinf.utils.compress_cubic(G_new))
def test_compress_cubic(n_tests=50):
"""Test utils._kronecker.compress_cubic()."""
# Try to do compress_cubic() with a bad second dimension.
r = 5
r3bad = r**3 + 1
G = np.random.random((r, r3bad))
with pytest.raises(ValueError) as exc:
opinf.utils.compress_cubic(G)
assert exc.value.args[0] == \
f"invalid shape (r,a) = {(r,r3bad)} with a != r**3"
    # Do n_tests test cases of varying dimensions.
for r in np.random.randint(2, 30, n_tests):
_test_compress_cubic_single(r)
|
[
"rom_operator_inference.utils.kron3c",
"numpy.prod",
"numpy.allclose",
"rom_operator_inference.utils.kron2c",
"numpy.random.random",
"numpy.kron",
"numpy.array",
"numpy.random.randint",
"rom_operator_inference.utils.expand_cubic",
"rom_operator_inference.utils.kron3c_indices",
"numpy.empty_like",
"pytest.raises",
"rom_operator_inference.utils.compress_cubic",
"numpy.all",
"rom_operator_inference.utils.compress_quadratic",
"rom_operator_inference.utils.expand_quadratic",
"rom_operator_inference.utils.kron2c_indices"
] |
[((338, 367), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['(4)'], {}), '(4)\n', (364, 367), True, 'import rom_operator_inference as opinf\n'), ((654, 683), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['(3)'], {}), '(3)\n', (680, 683), True, 'import rom_operator_inference as opinf\n'), ((695, 725), 'numpy.allclose', 'np.allclose', (['submask', 'mask[:6]'], {}), '(submask, mask[:6])\n', (706, 725), True, 'import numpy as np\n'), ((776, 805), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['r'], {}), '(r)\n', (802, 805), True, 'import rom_operator_inference as opinf\n'), ((851, 871), 'numpy.all', 'np.all', (['(mask[0] == 0)'], {}), '(mask[0] == 0)\n', (857, 871), True, 'import numpy as np\n'), ((883, 908), 'numpy.all', 'np.all', (['(mask[-1] == r - 1)'], {}), '(mask[-1] == r - 1)\n', (889, 908), True, 'import numpy as np\n'), ((1259, 1288), 'rom_operator_inference.utils.kron3c_indices', 'opinf.utils.kron3c_indices', (['(2)'], {}), '(2)\n', (1285, 1288), True, 'import rom_operator_inference as opinf\n'), ((1476, 1505), 'rom_operator_inference.utils.kron3c_indices', 'opinf.utils.kron3c_indices', (['r'], {}), '(r)\n', (1502, 1505), True, 'import rom_operator_inference as opinf\n'), ((1554, 1583), 'rom_operator_inference.utils.kron3c_indices', 'opinf.utils.kron3c_indices', (['r'], {}), '(r)\n', (1580, 1583), True, 'import rom_operator_inference as opinf\n'), ((1629, 1649), 'numpy.all', 'np.all', (['(mask[0] == 0)'], {}), '(mask[0] == 0)\n', (1635, 1649), True, 'import numpy as np\n'), ((1661, 1686), 'numpy.all', 'np.all', (['(mask[-1] == r - 1)'], {}), '(mask[-1] == r - 1)\n', (1667, 1686), True, 'import numpy as np\n'), ((2135, 2154), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (2151, 2154), True, 'import numpy as np\n'), ((2164, 2185), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (2182, 2185), True, 'import rom_operator_inference as opinf\n'), ((2446, 2470), 'numpy.random.random', 'np.random.random', (['(n, n)'], {}), '((n, n))\n', (2462, 2470), True, 'import numpy as np\n'), ((2479, 2500), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['X'], {}), '(X)\n', (2497, 2500), True, 'import rom_operator_inference as opinf\n'), ((3001, 3035), 'numpy.random.randint', 'np.random.randint', (['(2)', '(100)', 'n_tests'], {}), '(2, 100, n_tests)\n', (3018, 3035), True, 'import numpy as np\n'), ((3297, 3316), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3313, 3316), True, 'import numpy as np\n'), ((3326, 3347), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (3344, 3347), True, 'import rom_operator_inference as opinf\n'), ((3673, 3697), 'numpy.random.random', 'np.random.random', (['(n, n)'], {}), '((n, n))\n', (3689, 3697), True, 'import numpy as np\n'), ((3706, 3727), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['X'], {}), '(X)\n', (3724, 3727), True, 'import rom_operator_inference as opinf\n'), ((4292, 4325), 'numpy.random.randint', 'np.random.randint', (['(2)', '(30)', 'n_tests'], {}), '(2, 30, n_tests)\n', (4309, 4325), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (4689, 4692), True, 'import numpy as np\n'), ((4792, 4816), 'numpy.random.random', 'np.random.random', (['(r, s)'], {}), '((r, s))\n', (4808, 4816), True, 'import numpy as np\n'), ((4824, 4856), 
'rom_operator_inference.utils.expand_quadratic', 'opinf.utils.expand_quadratic', (['Hc'], {}), '(Hc)\n', (4852, 4856), True, 'import rom_operator_inference as opinf\n'), ((5097, 5130), 'numpy.allclose', 'np.allclose', (['(Htensor @ x @ x)', 'Hxx'], {}), '(Htensor @ x @ x, Hxx)\n', (5108, 5130), True, 'import numpy as np\n'), ((5390, 5417), 'numpy.random.random', 'np.random.random', (['(r, sbad)'], {}), '((r, sbad))\n', (5406, 5417), True, 'import numpy as np\n'), ((5660, 5694), 'numpy.random.randint', 'np.random.randint', (['(2)', '(100)', 'n_tests'], {}), '(2, 100, n_tests)\n', (5677, 5694), True, 'import numpy as np\n'), ((5931, 5950), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (5947, 5950), True, 'import numpy as np\n'), ((6032, 6061), 'numpy.random.random', 'np.random.random', (['(r, r ** 2)'], {}), '((r, r ** 2))\n', (6048, 6061), True, 'import numpy as np\n'), ((6087, 6120), 'rom_operator_inference.utils.compress_quadratic', 'opinf.utils.compress_quadratic', (['H'], {}), '(H)\n', (6117, 6120), True, 'import rom_operator_inference as opinf\n'), ((6376, 6408), 'rom_operator_inference.utils.expand_quadratic', 'opinf.utils.expand_quadratic', (['Hc'], {}), '(Hc)\n', (6404, 6408), True, 'import rom_operator_inference as opinf\n'), ((6449, 6466), 'numpy.empty_like', 'np.empty_like', (['Ht'], {}), '(Ht)\n', (6462, 6466), True, 'import numpy as np\n'), ((6785, 6813), 'numpy.random.random', 'np.random.random', (['(r, r2bad)'], {}), '((r, r2bad))\n', (6801, 6813), True, 'import numpy as np\n'), ((7054, 7088), 'numpy.random.randint', 'np.random.randint', (['(2)', '(100)', 'n_tests'], {}), '(2, 100, n_tests)\n', (7071, 7088), True, 'import numpy as np\n'), ((7315, 7334), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (7331, 7334), True, 'import numpy as np\n'), ((7436, 7460), 'numpy.random.random', 'np.random.random', (['(r, s)'], {}), '((r, s))\n', (7452, 7460), True, 'import numpy as np\n'), ((7468, 7496), 'rom_operator_inference.utils.expand_cubic', 'opinf.utils.expand_cubic', (['Gc'], {}), '(Gc)\n', (7492, 7496), True, 'import rom_operator_inference as opinf\n'), ((7754, 7792), 'numpy.allclose', 'np.allclose', (['(Gtensor @ x @ x @ x)', 'Gxxx'], {}), '(Gtensor @ x @ x @ x, Gxxx)\n', (7765, 7792), True, 'import numpy as np\n'), ((8045, 8072), 'numpy.random.random', 'np.random.random', (['(r, sbad)'], {}), '((r, sbad))\n', (8061, 8072), True, 'import numpy as np\n'), ((8316, 8349), 'numpy.random.randint', 'np.random.randint', (['(2)', '(30)', 'n_tests'], {}), '(2, 30, n_tests)\n', (8333, 8349), True, 'import numpy as np\n'), ((8574, 8593), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (8590, 8593), True, 'import numpy as np\n'), ((8671, 8700), 'numpy.random.random', 'np.random.random', (['(r, r ** 3)'], {}), '((r, r ** 3))\n', (8687, 8700), True, 'import numpy as np\n'), ((8732, 8761), 'rom_operator_inference.utils.compress_cubic', 'opinf.utils.compress_cubic', (['G'], {}), '(G)\n', (8758, 8761), True, 'import rom_operator_inference as opinf\n'), ((9008, 9036), 'rom_operator_inference.utils.expand_cubic', 'opinf.utils.expand_cubic', (['Gc'], {}), '(Gc)\n', (9032, 9036), True, 'import rom_operator_inference as opinf\n'), ((9289, 9317), 'numpy.random.random', 'np.random.random', (['(r, r3bad)'], {}), '((r, r3bad))\n', (9305, 9317), True, 'import numpy as np\n'), ((9554, 9587), 'numpy.random.randint', 'np.random.randint', (['(2)', '(30)', 'n_tests'], {}), '(2, 30, n_tests)\n', (9571, 9587), True, 'import numpy as np\n'), ((1062, 1081), 
'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (1078, 1081), True, 'import numpy as np\n'), ((1775, 1794), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (1791, 1794), True, 'import numpy as np\n'), ((2285, 2359), 'numpy.allclose', 'np.allclose', (['x2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]', '(x[i] * x[:i + 1])'], {}), '(x2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2], x[i] * x[:i + 1])\n', (2296, 2359), True, 'import numpy as np\n'), ((2628, 2702), 'numpy.allclose', 'np.allclose', (['X2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]', '(X[i] * X[:i + 1])'], {}), '(X2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2], X[i] * X[:i + 1])\n', (2639, 2702), True, 'import numpy as np\n'), ((2794, 2819), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2807, 2819), False, 'import pytest\n'), ((4085, 4110), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4098, 4110), False, 'import pytest\n'), ((4939, 4952), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (4946, 4952), True, 'import numpy as np\n'), ((5165, 5190), 'numpy.allclose', 'np.allclose', (['subH', 'subH.T'], {}), '(subH, subH.T)\n', (5176, 5190), True, 'import numpy as np\n'), ((5427, 5452), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5440, 5452), False, 'import pytest\n'), ((5469, 5501), 'rom_operator_inference.utils.expand_quadratic', 'opinf.utils.expand_quadratic', (['Hc'], {}), '(Hc)\n', (5497, 5501), True, 'import rom_operator_inference as opinf\n'), ((6201, 6214), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (6208, 6214), True, 'import numpy as np\n'), ((6823, 6848), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6836, 6848), False, 'import pytest\n'), ((6865, 6898), 'rom_operator_inference.utils.compress_quadratic', 'opinf.utils.compress_quadratic', (['H'], {}), '(H)\n', (6895, 6898), True, 'import rom_operator_inference as opinf\n'), ((7827, 7852), 'numpy.allclose', 'np.allclose', (['subG', 'subG.T'], {}), '(subG, subG.T)\n', (7838, 7852), True, 'import numpy as np\n'), ((8082, 8107), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8095, 8107), False, 'import pytest\n'), ((8124, 8152), 'rom_operator_inference.utils.expand_cubic', 'opinf.utils.expand_cubic', (['Gc'], {}), '(Gc)\n', (8148, 8152), True, 'import rom_operator_inference as opinf\n'), ((9064, 9097), 'rom_operator_inference.utils.compress_cubic', 'opinf.utils.compress_cubic', (['G_new'], {}), '(G_new)\n', (9090, 9097), True, 'import rom_operator_inference as opinf\n'), ((9327, 9352), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9340, 9352), False, 'import pytest\n'), ((9369, 9398), 'rom_operator_inference.utils.compress_cubic', 'opinf.utils.compress_cubic', (['G'], {}), '(G)\n', (9395, 9398), True, 'import rom_operator_inference as opinf\n'), ((394, 500), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1], [3, 2], [3, 3]\n ]'], {'dtype': 'int'}), '([[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1], [\n 3, 2], [3, 3]], dtype=int)\n', (402, 500), True, 'import numpy as np\n'), ((1109, 1133), 'numpy.prod', 'np.prod', (['x[mask]'], {'axis': '(1)'}), '(x[mask], axis=1)\n', (1116, 1133), True, 'import numpy as np\n'), ((1135, 1156), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (1153, 1156), True, 'import rom_operator_inference as opinf\n'), ((1315, 1380), 'numpy.array', 'np.array', 
(['[[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]]'], {'dtype': 'int'}), '([[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]], dtype=int)\n', (1323, 1380), True, 'import numpy as np\n'), ((1822, 1846), 'numpy.prod', 'np.prod', (['x[mask]'], {'axis': '(1)'}), '(x[mask], axis=1)\n', (1829, 1846), True, 'import numpy as np\n'), ((1848, 1869), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (1866, 1869), True, 'import rom_operator_inference as opinf\n'), ((2855, 2882), 'numpy.random.random', 'np.random.random', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (2871, 2882), True, 'import numpy as np\n'), ((4146, 4173), 'numpy.random.random', 'np.random.random', (['(2, 4, 3)'], {}), '((2, 4, 3))\n', (4162, 4173), True, 'import numpy as np\n'), ((4980, 5001), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (4998, 5001), True, 'import rom_operator_inference as opinf\n'), ((6247, 6268), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (6265, 6268), True, 'import rom_operator_inference as opinf\n'), ((7592, 7605), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (7599, 7605), True, 'import numpy as np\n'), ((7634, 7655), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (7652, 7655), True, 'import rom_operator_inference as opinf\n'), ((8855, 8868), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (8862, 8868), True, 'import numpy as np\n'), ((8903, 8924), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (8921, 8924), True, 'import rom_operator_inference as opinf\n'), ((3540, 3569), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x[:i + 1]'], {}), '(x[:i + 1])\n', (3558, 3569), True, 'import rom_operator_inference as opinf\n'), ((3948, 3977), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['X[:i + 1]'], {}), '(X[:i + 1])\n', (3966, 3977), True, 'import rom_operator_inference as opinf\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 12:48:08 2020
@author: smith
"""
import spacy
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
import os
import multiprocessing
import csv
import re
import pandas as pd
from time import time
from datetime import datetime
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
import logging
import gensim
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
w2v_dir = '/home/smith/Smith_Scripts/NLP_GeneExpression/w2v_model/model071520/'
w2v_model = Word2Vec.load(os.path.join(w2v_dir, 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model'))
modelName = '_w2v071520_'
resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/'
clusters = ['Cluster' + str(x) for x in range(20)]
category = 'CellTypes'
comparison = 'MarkerGenes'
termIndex = pd.read_excel(os.path.join(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison + '_Frequency.xlsx'), index_col=0)
termIndex = termIndex.sort_values(by='Combined Occurances', ascending=False)
enrichIndex = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx', index_col=0)
enrIndex = enrichIndex.iloc[:,::4]
def calcTopSimilarities(cluster, category, min_freq=5, topn=2000, save=False):
resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')
clusterNum=cluster.replace('Cluster', '')
genesDf = pd.read_excel('/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx')
genesList = genesDf[str(clusterNum) + '_n'].tolist()
    genes = []
for gene in genesList:
genes.append(gene.lower())
# words = pd.read_excel(os.path.join(resultDirectory, str(cluster) + '_' + comparison + '_Results/' + category + '_' + cluster + '_Frequency.xlsx'), index_col=0)
# words = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster0_EnrichedFunctions_onlyTest.xlsx', index_col=0)
# wordsRedacted = words.loc[words['Occurances'] > min_freq]['word'].tolist()
words = enrIndex
wordsRedacted = words[cluster + ' term'].tolist()[:-1]
if category == 'CellTypes':
wordsRedacted = termIndex['word'].tolist()[:150]
newWords = []
for item in wordsRedacted:
try:
item = item.replace(' ', '_')
newWords.append(item)
except AttributeError:
pass
cat = pd.DataFrame()
catX = pd.DataFrame()
for gene in genes:
gene = gene.lower()
try:
df = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity', 'similarity'])
df['gene'] = gene
df2 = df.loc[df['entity'].isin(newWords)]
df2 = df2.reset_index(drop=True)
dfX = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity ' + gene, 'similarity ' + gene])
dfX2 = dfX.loc[dfX['entity ' + gene].isin(newWords)]
dfX2 = dfX2.reset_index(drop=True)
cat = pd.concat([cat, df2], axis=0)
cat = cat.reset_index(drop=True)
catX = pd.concat([catX, dfX2], axis=1)
catX = catX.reset_index(drop=True)
except KeyError:
pass
if save:
# cat.to_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
# catX.to_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))
cat.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
catX.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))
return(cat, catX)
def averageSimilarities(cluster, category):
clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
# clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')
if not os.path.exists(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')):
raise FileNotFoundError("Similarities file doesn't exist at " + os.path.join(clusterDirectory, cluster + modelName + '_Similarities_Enriched_' + category + '.xlsx'))
else:
df = pd.read_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
itemList = []
aveList = []
stdList = []
weightList = []
countList = []
geneList = []
for item in df['entity'].unique().tolist():
ave = np.mean(df.loc[df['entity']==item]['similarity'])
std = np.std(df.loc[df['entity']==item]['similarity'])
gene = df.loc[df['entity']==item]['gene'].tolist()
count = len(gene)
weightedAve = df.loc[df['entity']==item].shape[0]*ave
itemList.append(item)
aveList.append(ave)
stdList.append(std)
weightList.append(weightedAve)
countList.append(count)
geneList.append(gene)
df = pd.DataFrame(data=[itemList, aveList, stdList, weightList, countList, geneList]).T
df.columns=['entity', 'ave_similarity', 'stdev', 'weighted_ave', 'count', 'similar_genes']
df = df.sort_values(by='weighted_ave', ascending=False)
df = df.drop_duplicates(subset='entity', keep='first')
df.to_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'))
return(df)
def combineAverageSims(clusters, category, save=True):
clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
bigDf = pd.DataFrame()
for cluster in clusters:
df = pd.read_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'), index_col=0)
df.columns=[cluster + '_entity', cluster + '_average_sim', cluster + '_stdev', cluster + '_weightedAve', cluster + '_count', cluster + '_similarGenes']
bigDf = pd.concat([bigDf, df], axis=1)
if save:
bigDf.to_excel(os.path.join(clusterDirectory, 'Combined_AverageSimilarities' + modelName + category + '.xlsx'))
return(bigDf)
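# Added note (not part of the original script): the intended flow, exercised by
# the calls below, is calcTopSimilarities(...) per cluster, then
# averageSimilarities(...) per cluster, then combineAverageSims(...) to merge
# all clusters into one spreadsheet.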
cat, catX = calcTopSimilarities('Cluster0', 'Functions', save=True)
df = averageSimilarities('Cluster0', 'Functions')
for cluster in clusters:
calcTopSimilarities(cluster, 'CellTypes', min_freq=5, topn=10000, save=True)
for cluster in clusters:
averageSimilarities(cluster, 'CellTypes')
df = combineAverageSims(clusters, 'CellTypes', save=True)
df = averageSimilarities('Cluster5', 'Functions')
###FREQUENCY DISTRIBUTION:
cat = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx')
def tsnescatterplot(model, setName, word, list_names,):
""" Plot in seaborn the results from the t-SNE dimensionality reduction algorithm of the vectors of a query word,
its list of most similar words, and a list of words.
"""
arrays = np.empty((0, 300), dtype='f')
word_labels = [word]
color_list = ['red']
# adds the vector of the query word
arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)
# gets list of most similar words
close_words = model.wv.most_similar([word])
# adds the vector for each of the closest words to the array
try:
for wrd_score in close_words:
wrd_vector = model.wv.__getitem__([wrd_score[0]])
word_labels.append(wrd_score[0])
color_list.append('blue')
arrays = np.append(arrays, wrd_vector, axis=0)
# adds the vector for each of the words from list_names to the array
for wrd in list_names:
wrd_vector = model.wv.__getitem__([wrd])
word_labels.append(wrd)
color_list.append('green')
arrays = np.append(arrays, wrd_vector, axis=0)
except KeyError:
pass
    # Reduces the dimensionality from 300 to 42 dimensions with PCA
reduc = PCA(n_components=42).fit_transform(arrays) ###### CHANGED FROM 50 DURING TUTORIAL
# Finds t-SNE coordinates for 2 dimensions
np.set_printoptions(suppress=True)
Y = TSNE(n_components=2, random_state=0, perplexity=10).fit_transform(reduc)
# Sets everything up to plot
df = pd.DataFrame({'x': [x for x in Y[:, 0]],
'y': [y for y in Y[:, 1]],
'words': word_labels,
'color': color_list})
fig, _ = plt.subplots()
fig.set_size_inches(9, 9)
# Basic plot
p1 = sns.regplot(data=df,
x="x",
y="y",
fit_reg=False,
marker="o",
scatter_kws={'s': 40,
'facecolors': df['color']
}
)
# Adds annotations one by one with a loop
for line in range(0, df.shape[0]):
p1.text(df["x"][line],
df['y'][line],
' ' + df["words"][line].title(),
horizontalalignment='left',
verticalalignment='bottom', size='medium',
color=df['color'][line],
weight='normal'
).set_size(15)
plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
plt.title('t-SNE visualization for {}'.format(word.title()))
plt.savefig(os.path.join(resultDirectory, setName + modelName + word + '_tSNE_42PCs.png'))
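# Note (added): the interactive calls below assume that `setName`, `word` and
# `newWords` are already defined in the session (`newWords` is only built
# inside calcTopSimilarities); they raise NameError otherwise.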
tsnescatterplot(w2v_model, setName, word, newWords)
w2v_model.wv.most_similar(positive=["drug_addiction"], topn=20)
w2v_model.wv.most_similar(positive=["nucleus_accumbens"], topn=20)
w2v_model.wv.most_similar(positive=["vta"], topn=20)
w2v_model.wv.most_similar(positive=["dbi"], topn=20)
w2v_model.wv.most_similar(positive=["enkephalin", "cacng4"], negative=["opioid"], topn=20)
w2v_model.wv.most_similar(positive=["slc17a7", "cacng4"], negative=["glutamatergic_neuron"], topn=20)
###RUN PCA:
# fit a 2d PCA model to the vectors
X = w2v_model[w2v_model.wv.vocab]
pca = PCA(n_components=50)
result = pca.fit_transform(X)
#Plot the result
fig, ax = plt.subplots()
ax.plot(result[:, 0], result[:, 1], 'o')
ax.set_title('Entities')
plt.show()
words = list(w2v_model.wv.vocab.keys())
|
[
"logging.basicConfig",
"numpy.mean",
"seaborn.regplot",
"sklearn.decomposition.PCA",
"numpy.set_printoptions",
"numpy.std",
"os.path.join",
"sklearn.manifold.TSNE",
"seaborn.set_style",
"numpy.append",
"numpy.empty",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((569, 594), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (582, 594), True, 'import seaborn as sns\n'), ((624, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s - %(asctime)s: %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(levelname)s - %(asctime)s: %(message)s',\n datefmt='%H:%M:%S', level=logging.INFO)\n", (643, 738), False, 'import logging\n'), ((1409, 1573), 'pandas.read_excel', 'pd.read_excel', (['"""/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx"""'], {'index_col': '(0)'}), "(\n '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx'\n , index_col=0)\n", (1422, 1573), True, 'import pandas as pd\n'), ((7399, 7553), 'pandas.read_excel', 'pd.read_excel', (['"""/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx"""'], {}), "(\n '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx'\n )\n", (7412, 7553), True, 'import pandas as pd\n'), ((10983, 11003), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(50)'}), '(n_components=50)\n', (10986, 11003), False, 'from sklearn.decomposition import PCA\n'), ((11061, 11075), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11073, 11075), True, 'import matplotlib.pyplot as plt\n'), ((11142, 11152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11150, 11152), True, 'import matplotlib.pyplot as plt\n'), ((843, 935), 'os.path.join', 'os.path.join', (['w2v_dir', '"""w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model"""'], {}), "(w2v_dir,\n 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model')\n", (855, 935), False, 'import os\n'), ((1181, 1306), 'os.path.join', 'os.path.join', (['resultDirectory', "('MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison +\n '_Frequency.xlsx')"], {}), "(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' +\n category + '_' + comparison + '_Frequency.xlsx')\n", (1193, 1306), False, 'import os\n'), ((1821, 1885), 'os.path.join', 'os.path.join', (['resultDirectory', "(cluster + '_MarkerGenes_Results/')"], {}), "(resultDirectory, cluster + '_MarkerGenes_Results/')\n", (1833, 1885), False, 'import os\n'), ((1946, 2125), 'pandas.read_excel', 'pd.read_excel', (['"""/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx"""'], {}), "(\n '/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx'\n )\n", (1959, 2125), True, 'import pandas as pd\n'), ((3037, 3051), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3049, 3051), True, 'import pandas as pd\n'), ((3063, 3077), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3075, 3077), True, 'import pandas as pd\n'), ((6377, 6391), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6389, 6391), True, 'import pandas as pd\n'), ((7802, 7831), 'numpy.empty', 'np.empty', (['(0, 300)'], {'dtype': '"""f"""'}), "((0, 300), dtype='f')\n", (7810, 7831), True, 'import numpy as np\n'), ((8962, 8996), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', 
(8981, 8996), True, 'import numpy as np\n'), ((9130, 9245), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [x for x in Y[:, 0]], 'y': [y for y in Y[:, 1]], 'words': word_labels,\n 'color': color_list}"], {}), "({'x': [x for x in Y[:, 0]], 'y': [y for y in Y[:, 1]], 'words':\n word_labels, 'color': color_list})\n", (9142, 9245), True, 'import pandas as pd\n'), ((9329, 9343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9341, 9343), True, 'import matplotlib.pyplot as plt\n'), ((9405, 9521), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'df', 'x': '"""x"""', 'y': '"""y"""', 'fit_reg': '(False)', 'marker': '"""o"""', 'scatter_kws': "{'s': 40, 'facecolors': df['color']}"}), "(data=df, x='x', y='y', fit_reg=False, marker='o', scatter_kws={\n 's': 40, 'facecolors': df['color']})\n", (9416, 9521), True, 'import seaborn as sns\n'), ((5289, 5340), 'numpy.mean', 'np.mean', (["df.loc[df['entity'] == item]['similarity']"], {}), "(df.loc[df['entity'] == item]['similarity'])\n", (5296, 5340), True, 'import numpy as np\n'), ((5353, 5403), 'numpy.std', 'np.std', (["df.loc[df['entity'] == item]['similarity']"], {}), "(df.loc[df['entity'] == item]['similarity'])\n", (5359, 5403), True, 'import numpy as np\n'), ((5745, 5830), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[itemList, aveList, stdList, weightList, countList, geneList]'}), '(data=[itemList, aveList, stdList, weightList, countList, geneList]\n )\n', (5757, 5830), True, 'import pandas as pd\n'), ((6058, 6168), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_averageSimilarities_Enriched' +\n category + modelName + '.xlsx')\n", (6070, 6168), False, 'import os\n'), ((6761, 6791), 'pandas.concat', 'pd.concat', (['[bigDf, df]'], {'axis': '(1)'}), '([bigDf, df], axis=1)\n', (6770, 6791), True, 'import pandas as pd\n'), ((10325, 10402), 'os.path.join', 'os.path.join', (['resultDirectory', "(setName + modelName + word + '_tSNE_42PCs.png')"], {}), "(resultDirectory, setName + modelName + word + '_tSNE_42PCs.png')\n", (10337, 10402), False, 'import os\n'), ((3666, 3695), 'pandas.concat', 'pd.concat', (['[cat, df2]'], {'axis': '(0)'}), '([cat, df2], axis=0)\n', (3675, 3695), True, 'import pandas as pd\n'), ((3760, 3791), 'pandas.concat', 'pd.concat', (['[catX, dfX2]'], {'axis': '(1)'}), '([catX, dfX2], axis=1)\n', (3769, 3791), True, 'import pandas as pd\n'), ((4169, 4272), 'os.path.join', 'os.path.join', (['resultDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')"], {}), "(resultDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + '.xlsx')\n", (4181, 4272), False, 'import os\n'), ((4292, 4400), 'os.path.join', 'os.path.join', (['resultDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx')"], {}), "(resultDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + 'axis1.xlsx')\n", (4304, 4400), False, 'import os\n'), ((4702, 4806), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + '.xlsx')\n", (4714, 4806), False, 'import os\n'), ((5016, 5120), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_Similarities_Enriched_' +\n category + 
modelName + '.xlsx')\n", (5028, 5120), False, 'import os\n'), ((6456, 6566), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_averageSimilarities_Enriched' +\n category + modelName + '.xlsx')\n", (6468, 6566), False, 'import os\n'), ((6836, 6935), 'os.path.join', 'os.path.join', (['clusterDirectory', "('Combined_AverageSimilarities' + modelName + category + '.xlsx')"], {}), "(clusterDirectory, 'Combined_AverageSimilarities' + modelName +\n category + '.xlsx')\n", (6848, 6935), False, 'import os\n'), ((8368, 8405), 'numpy.append', 'np.append', (['arrays', 'wrd_vector'], {'axis': '(0)'}), '(arrays, wrd_vector, axis=0)\n', (8377, 8405), True, 'import numpy as np\n'), ((8672, 8709), 'numpy.append', 'np.append', (['arrays', 'wrd_vector'], {'axis': '(0)'}), '(arrays, wrd_vector, axis=0)\n', (8681, 8709), True, 'import numpy as np\n'), ((8824, 8844), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(42)'}), '(n_components=42)\n', (8827, 8844), False, 'from sklearn.decomposition import PCA\n'), ((9010, 9061), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)', 'perplexity': '(10)'}), '(n_components=2, random_state=0, perplexity=10)\n', (9014, 9061), False, 'from sklearn.manifold import TSNE\n'), ((4877, 4981), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + modelName + '_Similarities_Enriched_' + category + '.xlsx')"], {}), "(clusterDirectory, cluster + modelName +\n '_Similarities_Enriched_' + category + '.xlsx')\n", (4889, 4981), False, 'import os\n')]
|
#!/usr/bin/env python
# encoding: utf-8
from flask import Flask, request, jsonify
import base64
import numpy as np
from util.args_help import fill_from_args
import os
import logging
from dpr.simple_mmap_dataset import Corpus
from dpr.faiss_index import ANNIndex
logger = logging.getLogger(__name__)
class Options():
def __init__(self):
self.port = 5001
self.corpus_dir = ''
self.model_name = 'facebook/rag-token-nq'
self.rest_dtype = 16
self.local_only = False # only accessible on same machine
self.debug = False
self.log_info = False
self.__required_args__ = ['corpus_dir']
def get_rest_dtype(self):
return np.float32 if self.rest_dtype == 32 else np.float16
def run(opts: Options):
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(format='%(filename)s:%(lineno)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if opts.log_info else logging.WARNING)
app = Flask(__name__)
if not opts.log_info:
log = logging.getLogger('werkzeug')
log.disabled = True
app.logger.disabled = True
app.logger.setLevel(logging.WARNING)
passages = Corpus(os.path.join(opts.corpus_dir))
index = ANNIndex(os.path.join(opts.corpus_dir, "index.faiss"))
dim = index.dim()
print(dim)
@app.route('/config', methods=['GET'])
def get_config():
return jsonify({'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir})
@app.route('/retrieve', methods=['POST'])
def retrieve_docs():
rest_dtype = opts.get_rest_dtype()
query = request.get_json()
# input is three parts:
# the base64 encoded fp16 numpy matrix
# k (the number of records per document)
# return-vectors flag
query_vectors = np.frombuffer(base64.decodebytes(query['query_vectors'].encode('ascii')), dtype=rest_dtype).reshape(-1, dim)
k = query['k']
include_vectors = 'include_vectors' in query and query['include_vectors']
query_vectors = query_vectors.astype(np.float32)
scores, indexes = index.search(query_vectors, k)
docs = [[passages[ndx] for ndx in ndxs] for ndxs in indexes]
if 'pid' in docs[0][0]:
doc_dicts = [{'pid': [dqk['pid'] for dqk in dq],
'title': [dqk['title'] for dqk in dq],
'text': [dqk['text'] for dqk in dq]} for dq in docs]
else:
doc_dicts = [{'title': [dqk['title'] for dqk in dq],
'text': [dqk['text'] for dqk in dq]} for dq in docs]
retval = {'docs': doc_dicts}
if include_vectors:
doc_vectors = np.zeros([query_vectors.shape[0], k, query_vectors.shape[1]], dtype=rest_dtype)
for qi, docs_qi in enumerate(docs):
for ki, doc_qi_ki in enumerate(docs_qi):
doc_vectors[qi, ki] = doc_qi_ki['vector']
retval['doc_vectors'] = base64.b64encode(doc_vectors).decode('ascii')
# print(retval)
# output
# list of docs: len(docs) == query_vectors.shape[0]; len(docs[i].title) == len(docs[i].text) == k
# doc_vectors: query_vectors.shape[0] x k x query_vectors.shape[1]
return jsonify(retval)
app.run(host='127.0.0.1' if opts.local_only else '0.0.0.0', debug=opts.debug, port=opts.port)
if __name__ == '__main__':
opts = Options()
fill_from_args(opts)
run(opts)
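# Added usage sketch (not part of the original service; assumes the `requests`
# package and the default port). A client POSTs base64-encoded, row-major
# query vectors in the dtype advertised by /config:
#
#   import base64, numpy as np, requests
#   cfg = requests.get('http://localhost:5001/config').json()
#   dtype = np.float16 if cfg['dtype'] == 16 else np.float32
#   qv = np.random.rand(2, cfg['dim']).astype(dtype)
#   payload = {'query_vectors': base64.b64encode(qv.tobytes()).decode('ascii'),
#              'k': 5, 'include_vectors': False}
#   docs = requests.post('http://localhost:5001/retrieve', json=payload).json()['docs']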
|
[
"logging.getLogger",
"logging.basicConfig",
"util.args_help.fill_from_args",
"flask.Flask",
"base64.b64encode",
"os.path.join",
"flask.request.get_json",
"numpy.zeros",
"logging.root.removeHandler",
"flask.jsonify"
] |
[((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((865, 1029), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(filename)s:%(lineno)d - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if opts.log_info else logging.WARNING)'}), "(format='%(filename)s:%(lineno)d - %(message)s', datefmt\n ='%m/%d/%Y %H:%M:%S', level=logging.INFO if opts.log_info else logging.\n WARNING)\n", (884, 1029), False, 'import logging\n'), ((1078, 1093), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1083, 1093), False, 'from flask import Flask, request, jsonify\n'), ((3548, 3568), 'util.args_help.fill_from_args', 'fill_from_args', (['opts'], {}), '(opts)\n', (3562, 3568), False, 'from util.args_help import fill_from_args\n'), ((825, 860), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (851, 860), False, 'import logging\n'), ((1134, 1163), 'logging.getLogger', 'logging.getLogger', (['"""werkzeug"""'], {}), "('werkzeug')\n", (1151, 1163), False, 'import logging\n'), ((1294, 1323), 'os.path.join', 'os.path.join', (['opts.corpus_dir'], {}), '(opts.corpus_dir)\n', (1306, 1323), False, 'import os\n'), ((1346, 1390), 'os.path.join', 'os.path.join', (['opts.corpus_dir', '"""index.faiss"""'], {}), "(opts.corpus_dir, 'index.faiss')\n", (1358, 1390), False, 'import os\n'), ((1510, 1584), 'flask.jsonify', 'jsonify', (["{'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir}"], {}), "({'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir})\n", (1517, 1584), False, 'from flask import Flask, request, jsonify\n'), ((1716, 1734), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1732, 1734), False, 'from flask import Flask, request, jsonify\n'), ((3379, 3394), 'flask.jsonify', 'jsonify', (['retval'], {}), '(retval)\n', (3386, 3394), False, 'from flask import Flask, request, jsonify\n'), ((2808, 2887), 'numpy.zeros', 'np.zeros', (['[query_vectors.shape[0], k, query_vectors.shape[1]]'], {'dtype': 'rest_dtype'}), '([query_vectors.shape[0], k, query_vectors.shape[1]], dtype=rest_dtype)\n', (2816, 2887), True, 'import numpy as np\n'), ((3091, 3120), 'base64.b64encode', 'base64.b64encode', (['doc_vectors'], {}), '(doc_vectors)\n', (3107, 3120), False, 'import base64\n')]
|
# Noysim -- Noise simulation tools for Aimsun.
# Copyright (c) 2010-2011 by <NAME>, Ghent University & Griffith University.
#
# Basic geometry functions and classes
import numpy
import pylab
EPSILON = 10e-12 # smallest difference for points/directions
#---------------------------------------------------------------------------------------------------
# Convenience functions
#---------------------------------------------------------------------------------------------------
def parse_coordinates(*args):
""" parse 2D/3D coordinates x,y(,z) in a variety of fashions, and return a 3-element tuple """
n = len(args)
if n == 0:
return (0.0,0.0,0.0)
if n == 1:
try: # try if a Point object is supplied
return args[0].coordinates()
except:
if type(args[0]) in (tuple,list):
# coordinates supplied as a tuple (x,y) or (x,y,z)
if len(args[0]) == 2:
return (args[0][0], args[0][1], 0.0)
if len(args[0]) == 3:
return (args[0][0], args[0][1], args[0][2])
if type(args[0]) is str:
# coordinates supplied as a string '(x,y,z)'
c = args[0].strip('()').split(',')
return (float(c[0]), float(c[1]), float(c[2]))
else:
# coordinates supplied as separate arguments x,y or x,y,z
if n == 2:
return (args[0], args[1], 0.0)
if n == 3:
return (args[0], args[1], args[2])
raise Exception('unable to parse coordinates: ' + str(args))
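# Added usage sketch (not part of the original module): the following calls are
# equivalent and all return (1.0, 2.0, 3.0):
#   parse_coordinates(1.0, 2.0, 3.0)
#   parse_coordinates((1.0, 2.0, 3.0))
#   parse_coordinates('(1.0,2.0,3.0)')
#   parse_coordinates(Point(1.0, 2.0, 3.0))
# A two-element input such as parse_coordinates(1.0, 2.0) fills z with 0.0.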
def asPoint(p):
""" create a point object from 2D/3D coordinates """
if isinstance(p, Point):
return p
else:
return Point(p)
def asDirection(d):
""" create a direction object from a tuple (bearing, gradient) """
if isinstance(d, Direction):
return d
else:
return Direction(bearing = d[0], gradient = d[1])
#---------------------------------------------------------------------------------------------------
# Point class
#---------------------------------------------------------------------------------------------------
class Point(object):
""" basic 3D point class """
def __init__(self, *xyz):
object.__init__(self)
self.x, self.y, self.z = parse_coordinates(*xyz)
def copy(self):
""" return a copy """
return Point(self.x, self.y, self.z)
def coordinates(self):
""" return the coordinates as a tuple (x,y,z) """
return (self.x, self.y, self.z)
def __getitem__(self, key):
""" implement list style access to coordinates: p[0], p[1], p[2] """
return self.coordinates()[key]
def __str__(self):
""" string representation of a point """
return '(%.2f,%.2f,%.2f)' % self.coordinates()
def middle(self, other):
""" return the middle point between self and another point """
return Point((self.x + other.x)/2.0, (self.y + other.y)/2.0, (self.z + other.z)/2.0)
def distanceSquared(self, other):
""" return the squared distance to another point """
return (self.x - other.x)**2 + (self.y - other.y)**2 + (self.z - other.z)**2
def distance(self, other):
""" return the distance to another point """
return numpy.sqrt(self.distanceSquared(other))
def distanceXY(self, other):
""" return the distance to another point, both projected to the xy-plane """
return numpy.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)
def __eq__(self, other):
""" check if points coincide """
if other == None:
return False
return (self.distance(other) < EPSILON)
def __ne__(self, other):
""" check if points do not coincide """
return not self.__eq__(other)
def __cmp__(self, other):
""" compare the coordinates, first x, then y, then z """
if self.x == other.x:
if (self.y == other.y):
return (self.z < other.z)
else:
return (self.y < other.y)
else:
return (self.x < other.x)
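  # Note (added): __cmp__ is a Python 2 protocol and is ignored under Python 3;
  # sorting Point objects on Python 3 would require rich comparison methods
  # such as __lt__ instead.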
def projectXY(self, z = 0.0):
""" return the projection of the point on the xy-plane """
return Point(self.x, self.y, z)
def transform(self, func):
""" perform a coordinate transformation with the given function (x,y,z) to (x',y',z') """
self.x, self.y, self.z = func((self.x, self.y, self.z))
def plot(self, color = 'black', size = 5):
""" plot the point in the xy-plane """
pylab.plot([self.x], [self.y], color = color, linestyle = 'None', marker = '.', markersize = size)
#---------------------------------------------------------------------------------------------------
# Direction class
#---------------------------------------------------------------------------------------------------
class Direction(object):
""" basic geometrical 3D direction class """
def __init__(self, bearing, gradient = 0.0):
object.__init__(self)
# both bearing and gradient are stored in degrees
self.bearing = bearing
self.gradient = gradient
def copy(self):
""" return a copy """
return Direction(self.bearing, self.gradient)
def __getitem__(self, key):
""" implement list style access to bearing and gradient """
return (self.bearing, self.gradient)[key]
def bearingRadians(self):
""" return the bearing (horizontal angle with the x-axis) in radians """
return numpy.radians(self.bearing)
def gradientRadians(self):
""" return the gradient (vertical angle with the xy-plane) in radians """
return numpy.radians(self.gradient)
def __str__(self):
""" return a string representation of the direction """
return '[%.2f,%.2f]' % (self.bearing, self.gradient)
def __eq__(self, other):
""" check if directions coincide """
if other == None:
return False
db = abs(self.bearing - other.bearing)
dg = abs(self.gradient - other.gradient)
return (db <= EPSILON) and (dg <= EPSILON)
def __ne__(self, other):
""" check if directions do not coincide """
return not self.__eq__(other)
def directionFromTo(p1, p2):
""" returns the direction from point 1 to point 2 """
(dx, dy, dz) = (p2.x - p1.x, p2.y - p1.y, p2.z - p1.z)
siz = p1.distance(p2)
return Direction(bearing = numpy.degrees(numpy.arctan2(dy, dx)), gradient = numpy.degrees(numpy.arcsin(dz/siz)))
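# Added note (worked example, not from the original source):
# directionFromTo(Point(0, 0, 0), Point(1, 1, 0)) yields a bearing of 45
# degrees (atan2(dy, dx)) and a gradient of 0 degrees since dz == 0; raising
# the target above the xy-plane gives a positive gradient (asin(dz / distance)).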
#---------------------------------------------------------------------------------------------------
# Test code
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
points = []
points.append(Point(1.2, 3.4))
points.append(Point([5.6, 7.8, 9.0]))
points.append(Point('(7.8, 9.0, 1.2)'))
pylab.figure()
for p in points:
p.plot()
try:
pylab.show()
except:
pass
|
[
"numpy.radians",
"numpy.sqrt",
"pylab.plot",
"numpy.arcsin",
"pylab.figure",
"numpy.arctan2",
"pylab.show"
] |
[((6690, 6704), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (6702, 6704), False, 'import pylab\n'), ((3341, 3402), 'numpy.sqrt', 'numpy.sqrt', (['((self.x - other.x) ** 2 + (self.y - other.y) ** 2)'], {}), '((self.x - other.x) ** 2 + (self.y - other.y) ** 2)\n', (3351, 3402), False, 'import numpy\n'), ((4364, 4458), 'pylab.plot', 'pylab.plot', (['[self.x]', '[self.y]'], {'color': 'color', 'linestyle': '"""None"""', 'marker': '"""."""', 'markersize': 'size'}), "([self.x], [self.y], color=color, linestyle='None', marker='.',\n markersize=size)\n", (4374, 4458), False, 'import pylab\n'), ((5319, 5346), 'numpy.radians', 'numpy.radians', (['self.bearing'], {}), '(self.bearing)\n', (5332, 5346), False, 'import numpy\n'), ((5470, 5498), 'numpy.radians', 'numpy.radians', (['self.gradient'], {}), '(self.gradient)\n', (5483, 5498), False, 'import numpy\n'), ((6754, 6766), 'pylab.show', 'pylab.show', ([], {}), '()\n', (6764, 6766), False, 'import pylab\n'), ((6227, 6248), 'numpy.arctan2', 'numpy.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (6240, 6248), False, 'import numpy\n'), ((6276, 6298), 'numpy.arcsin', 'numpy.arcsin', (['(dz / siz)'], {}), '(dz / siz)\n', (6288, 6298), False, 'import numpy\n')]
|
import itertools
import logging
import netCDF4
import numpy
from .. import core
from ..constants import masked as cfdm_masked
from ..decorators import (
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
from ..functions import abspath
from ..mixin.container import Container
from ..mixin.netcdf import NetCDFHDF5
from . import NumpyArray, abstract
logger = logging.getLogger(__name__)
class Data(Container, NetCDFHDF5, core.Data):
"""An orthogonal multidimensional array with masking and units.
.. versionadded:: (cfdm) 1.7.0
"""
def __init__(
self,
array=None,
units=None,
calendar=None,
fill_value=None,
source=None,
copy=True,
dtype=None,
mask=None,
_use_array=True,
**kwargs,
):
"""**Initialisation**
:Parameters:
array: data_like, optional
The array of values.
{{data_like}}
Ignored if the *source* parameter is set.
*Parameter example:*
``array=[34.6]``
*Parameter example:*
``array=[[1, 2], [3, 4]]``
*Parameter example:*
``array=numpy.ma.arange(10).reshape(2, 1, 5)``
units: `str`, optional
The physical units of the data. Ignored if the *source*
parameter is set.
The units may also be set after initialisation with the
`set_units` method.
*Parameter example:*
``units='km hr-1'``
*Parameter example:*
``units='days since 2018-12-01'``
calendar: `str`, optional
The calendar for reference time units. Ignored if the
*source* parameter is set.
The calendar may also be set after initialisation with the
`set_calendar` method.
*Parameter example:*
``calendar='360_day'``
fill_value: optional
The fill value of the data. By default, or if set to
`None`, the `numpy` fill value appropriate to the array's
data type will be used (see
`numpy.ma.default_fill_value`). Ignored if the *source*
parameter is set.
The fill value may also be set after initialisation with
the `set_fill_value` method.
*Parameter example:*
``fill_value=-999.``
dtype: data-type, optional
The desired data-type for the data. By default the
            data-type will be inferred from the *array* parameter.
The data-type may also be set after initialisation
with the `dtype` attribute.
*Parameter example:*
``dtype=float``
*Parameter example:*
``dtype='float32'``
*Parameter example:*
``dtype=numpy.dtype('i2')``
mask: data_like, optional
Apply this mask to the data given by the *array*
parameter. By default, or if *mask* is `None`, no mask
is applied. May be any data_like object that
broadcasts to *array*. Masking will be carried out
where mask elements evaluate to `True`.
{{data_like}}
            This mask will be applied in addition to any mask already
defined by the *array* parameter.
source: optional
Initialise the array, units, calendar and fill value
from those of *source*.
{{init source}}
copy: `bool`, optional
If False then do not deep copy input parameters prior
to initialisation. By default arguments are deep
copied.
kwargs: ignored
Not used. Present to facilitate subclassing.
"""
if dtype is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = array.astype(dtype)
array = NumpyArray(array)
if mask is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = numpy.ma.array(array, mask=mask)
array = NumpyArray(array)
super().__init__(
array=array,
units=units,
calendar=calendar,
fill_value=fill_value,
source=source,
copy=copy,
_use_array=_use_array,
)
self._initialise_netcdf(source)
def __array__(self, *dtype):
"""The numpy array interface.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
dtype: optional
Typecode or data-type to which the array is cast.
:Returns:
`numpy.ndarray`
An independent numpy array of the data.
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3])
>>> a = numpy.array(d)
>>> print(type(a))
<class 'numpy.ndarray'>
>>> a[0] = -99
>>> d
<{{repr}}{{class}}(3): [1, 2, 3]>
>>> b = numpy.array(d, float)
>>> print(b)
[1. 2. 3.]
"""
array = self.array
if not dtype:
return array
else:
return array.astype(dtype[0], copy=False)
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
try:
shape = self.shape
except AttributeError:
shape = ""
else:
shape = str(shape)
shape = shape.replace(",)", ")")
        return f"<{self.__class__.__name__}{shape}: {self}>"
def __format__(self, format_spec):
"""Interpret format specifiers for size 1 arrays.
**Examples:**
>>> d = {{package}}.{{class}}(9, 'metres')
>>> f"{d}"
'9 metres'
>>> f"{d!s}"
'9 metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(): 9 metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([[9]], 'metres')
>>> f"{d}"
'[[9]] metres'
>>> f"{d!s}"
'[[9]] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(1, 1): [[9]] metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([9, 10], 'metres')
>>> f"{d}"
        '[9, 10] metres'
        >>> f"{d!s}"
        '[9, 10] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(2): [9, 10] metres>'
>>> f"{d:.3f}"
Traceback (most recent call last):
...
ValueError: Can't format Data array of size 2 with format code .3f
"""
if not format_spec:
return super().__format__("")
n = self.size
if n == 1:
return "{x:{f}}".format(x=self.first_element(), f=format_spec)
raise ValueError(
f"Can't format Data array of size {n} with "
f"format code {format_spec}"
)
def __getitem__(self, indices):
"""Return a subspace of the data defined by indices.
d.__getitem__(indices) <==> d[indices]
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__setitem__`, `_parse_indices`
:Returns:
`{{class}}`
The subspace of the data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1].shape
(1, 10, 1)
>>> d[:, 0].shape
(1, 1, 9)
>>> d[..., 6:3:-1, 3:6].shape
(1, 3, 3)
>>> d[0, [2, 9], [4, 8]].shape
(1, 2, 2)
>>> d[0, :, -2].shape
(1, 10, 1)
"""
indices = self._parse_indices(indices)
array = self._get_Array(None)
if array is None:
raise ValueError("No array!!")
array = array[tuple(indices)]
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def __int__(self):
"""Called by the `int` built-in function.
x.__int__() <==> int(x)
"""
if self.size != 1:
raise TypeError(
"only length-1 arrays can be converted to "
f"Python scalars. Got {self}"
)
return int(self.array)
def __iter__(self):
"""Called when an iterator is required.
x.__iter__() <==> iter(x)
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3], 'metres')
>>> for e in d:
... print(repr(e))
...
1
2
3
>>> d = {{package}}.{{class}}([[1, 2], [4, 5]], 'metres')
>>> for e in d:
... print(repr(e))
...
<{{repr}}Data(2): [1, 2] metres>
<{{repr}}Data(2): [4, 5] metres>
>>> d = {{package}}.{{class}}(34, 'metres')
>>> for e in d:
... print(repr(e))
Traceback (most recent call last):
...
TypeError: Iteration over 0-d Data
"""
ndim = self.ndim
if not ndim:
raise TypeError(f"Iteration over 0-d {self.__class__.__name__}")
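        # For 1-d data yield each scalar element in turn; for
        # multi-dimensional data yield successive subspaces along the
        # first axis, with that axis squeezed out.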
if ndim == 1:
i = iter(self.array)
while 1:
try:
yield next(i)
except StopIteration:
return
else:
# ndim > 1
for n in range(self.shape[0]):
out = self[n, ...]
out.squeeze(0, inplace=True)
yield out
def __setitem__(self, indices, value):
"""Assign to data elements defined by indices.
d.__setitem__(indices, x) <==> d[indices]=x
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
**Broadcasting**
The value, or values, being assigned must be broadcastable to the
shape defined by the indices, using the numpy broadcasting rules.
**Missing data**
Data array elements may be set to missing values by assigning them
to `masked`. Missing values may be unmasked by assigning them to
any other value.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__getitem__`, `_parse_indices`
:Returns:
`None`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1] = -10
>>> d[:, 0] = range(9)
>>> d[..., 6:3:-1, 3:6] = numpy.arange(-18, -9).reshape(3, 3)
>>> d[0, [2, 9], [4, 8]] = {{package}}.{{class}}([[-2, -3]])
>>> d[0, :, -2] = {{package}}.masked
"""
indices = self._parse_indices(indices)
array = self.array
if value is cfdm_masked or numpy.ma.isMA(value):
# The data is not masked but the assignment is masking
# elements, so turn the non-masked array into a masked
# one.
array = array.view(numpy.ma.MaskedArray)
self._set_subspace(array, indices, numpy.asanyarray(value))
self._set_Array(array, copy=False)
def __str__(self):
"""Called by the `str` built-in function.
x.__str__() <==> str(x)
"""
units = self.get_units(None)
calendar = self.get_calendar(None)
isreftime = False
if units is not None:
if isinstance(units, str):
isreftime = "since" in units
else:
units = "??"
try:
first = self.first_element()
except Exception:
out = ""
if units and not isreftime:
out += f" {units}"
if calendar:
out += f" {calendar}"
return out
size = self.size
shape = self.shape
ndim = self.ndim
open_brackets = "[" * ndim
close_brackets = "]" * ndim
mask = [False, False, False]
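        # 'mask' records whether the first, middle and last elements are
        # masked, so that masked reference times can be temporarily
        # replaced by 0 before conversion to date-times.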
if size == 1:
if isreftime:
# Convert reference time to date-time
if first is numpy.ma.masked:
first = 0
mask[0] = True
try:
first = type(self)(
numpy.ma.array(first, mask=mask[0]), units, calendar
).datetime_array
except (ValueError, OverflowError):
first = "??"
out = f"{open_brackets}{first}{close_brackets}"
else:
last = self.last_element()
if isreftime:
if last is numpy.ma.masked:
last = 0
mask[-1] = True
# Convert reference times to date-times
try:
first, last = type(self)(
numpy.ma.array(
[first, last], mask=(mask[0], mask[-1])
),
units,
calendar,
).datetime_array
except (ValueError, OverflowError):
first, last = ("??", "??")
if size > 3:
out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
elif shape[-1:] == (3,):
middle = self.second_element()
if isreftime:
# Convert reference time to date-time
if middle is numpy.ma.masked:
middle = 0
mask[1] = True
try:
middle = type(self)(
numpy.ma.array(middle, mask=mask[1]),
units,
calendar,
).datetime_array
except (ValueError, OverflowError):
middle = "??"
out = (
f"{open_brackets}{first}, {middle}, {last}{close_brackets}"
)
elif size == 3:
out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
else:
out = f"{open_brackets}{first}, {last}{close_brackets}"
if isreftime:
if calendar:
out += f" {calendar}"
elif units:
out += f" {units}"
return out
# ----------------------------------------------------------------
# Private methods
# ----------------------------------------------------------------
def _item(self, index):
"""Return an element of the data as a scalar.
It is assumed, but not checked, that the given index selects
exactly one element.
:Parameters:
index:
:Returns:
The selected element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]], 'km')
>>> x = d._item((0, -1))
>>> print(x, type(x))
3 <class 'int'>
>>> x = d._item((0, 1))
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> d._item((slice(None), slice(1, 2)))
masked
"""
array = self[index].array
if not numpy.ma.isMA(array):
return array.item()
mask = array.mask
if mask is numpy.ma.nomask or not mask.item():
return array.item()
return numpy.ma.masked
def _parse_axes(self, axes):
"""Parses the data axes and returns valid non-duplicate axes.
:Parameters:
axes: (sequence of) `int`
The axes of the data.
{{axes int examples}}
:Returns:
`tuple`
**Examples:**
>>> d._parse_axes(1)
(1,)
>>> e._parse_axes([0, 2])
(0, 2)
"""
if axes is None:
return axes
ndim = self.ndim
if isinstance(axes, int):
axes = (axes,)
axes2 = []
for axis in axes:
if 0 <= axis < ndim:
axes2.append(axis)
elif -ndim <= axis < 0:
axes2.append(axis + ndim)
else:
raise ValueError(f"Invalid axis: {axis!r}")
# Check for duplicate axes
n = len(axes2)
if n > len(set(axes2)) >= 1:
raise ValueError(f"Duplicate axis: {axes2}")
return tuple(axes2)
def _set_Array(self, array, copy=True):
"""Set the array.
.. seealso:: `_set_CompressedArray`
:Parameters:
array: `numpy` array_like or `Array`, optional
The array to be inserted.
:Returns:
`None`
**Examples:**
>>> d._set_Array(a)
"""
if not isinstance(array, abstract.Array):
if not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = NumpyArray(array)
super()._set_Array(array, copy=copy)
def _set_CompressedArray(self, array, copy=True):
"""Set the compressed array.
.. versionadded:: (cfdm) 1.7.11
.. seealso:: `_set_Array`
:Parameters:
array: subclass of `CompressedArray`
The compressed array to be inserted.
:Returns:
`None`
**Examples:**
>>> d._set_CompressedArray(a)
"""
self._set_Array(array, copy=copy)
@classmethod
def _set_subspace(cls, array, indices, value):
"""Set a subspace of the data array defined by indices."""
axes_with_list_indices = [
i for i, x in enumerate(indices) if not isinstance(x, slice)
]
if len(axes_with_list_indices) < 2:
# --------------------------------------------------------
# At most one axis has a list-of-integers index so we can
# do a normal numpy assignment
# --------------------------------------------------------
array[tuple(indices)] = value
else:
# --------------------------------------------------------
# At least two axes have list-of-integers indices so we
# can't do a normal numpy assignment
# --------------------------------------------------------
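            # Each list of integers is converted to a list of two-element
            # slices (taking the integers in pairs), e.g. [1, 3, 4, 6]
            # becomes [slice(1, 4, 2), slice(4, 7, 2)]. Assigning over the
            # Cartesian product of these slices then reproduces the
            # orthogonal ("vector subscript") indexing behaviour.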
indices1 = indices[:]
for i, x in enumerate(indices):
if i in axes_with_list_indices:
# This index is a list of integers
y = []
args = [iter(x)] * 2
for start, stop in itertools.zip_longest(*args):
if not stop:
y.append(slice(start, start + 1))
else:
step = stop - start
stop += 1
y.append(slice(start, stop, step))
indices1[i] = y
else:
indices1[i] = (x,)
if numpy.size(value) == 1:
for i in itertools.product(*indices1):
array[i] = value
else:
indices2 = []
ndim_difference = array.ndim - numpy.ndim(value)
for i, n in enumerate(numpy.shape(value)):
if n == 1:
indices2.append((slice(None),))
elif i + ndim_difference in axes_with_list_indices:
y = []
start = 0
while start < n:
stop = start + 2
y.append(slice(start, stop))
start = stop
indices2.append(y)
else:
indices2.append((slice(None),))
for i, j in zip(
itertools.product(*indices1), itertools.product(*indices2)
):
array[i] = value[j]
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
@property
def compressed_array(self):
"""Returns an independent numpy array of the compressed data.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_compressed_axes`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`numpy.ndarray`
An independent numpy array of the compressed data.
**Examples:**
>>> a = d.compressed_array
"""
ca = self._get_Array(None)
if not ca.get_compression_type():
raise ValueError("not compressed: can't get compressed array")
return ca.compressed_array
@property
def datetime_array(self):
"""Returns an independent numpy array of datetimes.
Specifically, returns an independent numpy array containing
the date-time objects corresponding to times since a reference
date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
'standard' (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `array`, `datetime_as_string`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time objects.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> a = d.datetime_array
>>> print(a)
[cftime.DatetimeGregorian(2019, 1, 1, 0, 0, 0, 0)
cftime.DatetimeGregorian(2019, 2, 1, 0, 0, 0, 0)
cftime.DatetimeGregorian(2019, 3, 1, 0, 0, 0, 0)]
>>> print(a[1])
2019-02-01 00:00:00
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> a = d.datetime_array
>>> print(a)
[cftime.Datetime360Day(2019, 1, 2, 0, 0, 0, 0)
cftime.Datetime360Day(2019, 2, 3, 0, 0, 0, 0)
cftime.Datetime360Day(2019, 3, 1, 0, 0, 0, 0)]
>>> print(a[1])
2019-02-03 00:00:00
"""
array = self.array
mask = None
if numpy.ma.isMA(array):
# num2date has issues if the mask is nomask
mask = array.mask
if mask is numpy.ma.nomask or not numpy.ma.is_masked(array):
mask = None
array = array.view(numpy.ndarray)
if mask is not None and not array.ndim:
        # Fix until num2date copes with scalar arrays containing
# missing data
return array
array = netCDF4.num2date(
array,
units=self.get_units(None),
calendar=self.get_calendar("standard"),
only_use_cftime_datetimes=True,
)
if mask is None:
# There is no missing data
array = numpy.array(array, dtype=object)
else:
# There is missing data
array = numpy.ma.masked_where(mask, array)
if not numpy.ndim(array):
array = numpy.ma.masked_all((), dtype=object)
return array
@property
def datetime_as_string(self):
"""Returns an independent numpy array with datetimes as strings.
Specifically, returns an independent numpy array containing
string representations of times since a reference date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
"standard" (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `array`, `datetime_array`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time strings.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> print(d.datetime_as_string)
['2019-01-01 00:00:00' '2019-02-01 00:00:00' '2019-03-01 00:00:00']
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> print(d.datetime_as_string)
['2019-01-02 00:00:00' '2019-02-03 00:00:00' '2019-03-01 00:00:00']
"""
return self.datetime_array.astype(str)
@property
def mask(self):
"""The Boolean missing data mask of the data array.
The Boolean mask has True where the data array has missing data
and False otherwise.
:Returns:
`{{class}}`
The Boolean mask as data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.ma.array(
... [[280.0, -99, -99, -99],
... [281.0, 279.0, 278.0, 279.5]],
... mask=[[0, 1, 1, 1], [0, 0, 0, 0]]
... ))
>>> d
<{{repr}}Data(2, 4): [[280.0, ..., 279.5]]>
>>> print(d.array)
[[280.0 -- -- --]
[281.0 279.0 278.0 279.5]]
>>> d.mask
<{{repr}}Data(2, 4): [[False, ..., False]]>
>>> print(d.mask.array)
[[False True True True]
[False False False False]]
"""
return type(self)(numpy.ma.getmaskarray(self.array))
# ----------------------------------------------------------------
# Methods
# ----------------------------------------------------------------
def any(self):
"""Test whether any data array elements evaluate to True.
Performs a logical or over the data array and returns the
result. Masked values are considered as False during computation.
:Returns:
`bool`
`True` if any data array elements evaluate to True,
otherwise `False`.
**Examples:**
>>> d = {{package}}.{{class}}([[0, 0, 0]])
>>> d.any()
False
>>> d[0, 0] = {{package}}.masked
>>> print(d.array)
[[-- 0 0]]
>>> d.any()
False
>>> d[0, 1] = 3
>>> print(d.array)
[[-- 3 0]]
>>> d.any()
True
>>> d[...] = {{package}}.masked
>>> print(d.array)
[[-- -- --]]
>>> d.any()
False
"""
masked = self.array.any()
if masked is numpy.ma.masked:
masked = False
return masked
@_inplace_enabled(default=False)
def apply_masking(
self,
fill_values=None,
valid_min=None,
valid_max=None,
valid_range=None,
inplace=False,
):
"""Apply masking.
Masking is applied according to the values of the keyword
parameters.
Elements that are already masked remain so.
.. versionadded:: (cfdm) 1.8.2
.. seealso:: `get_fill_value`, `mask`
:Parameters:
fill_values: `bool` or sequence of scalars, optional
Specify values that will be set to missing data. Data
elements exactly equal to any of the values are set to
missing data.
If True then the value returned by the `get_fill_value`
method, if such a value exists, is used.
Zero or more values may be provided in a sequence of
scalars.
*Parameter example:*
Specify a fill value of 999: ``fill_values=[999]``
*Parameter example:*
Specify fill values of 999 and -1.0e30:
``fill_values=[999, -1.0e30]``
*Parameter example:*
Use the fill value already set for the data:
``fill_values=True``
*Parameter example:*
Use no fill values: ``fill_values=False`` or
                  ``fill_values=[]``
valid_min: number, optional
A scalar specifying the minimum valid value. Data elements
strictly less than this number will be set to missing
data.
valid_max: number, optional
A scalar specifying the maximum valid value. Data elements
strictly greater than this number will be set to missing
data.
valid_range: (number, number), optional
A vector of two numbers specifying the minimum and maximum
valid values, equivalent to specifying values for both
*valid_min* and *valid_max* parameters. The *valid_range*
parameter must not be set if either *valid_min* or
*valid_max* is defined.
*Parameter example:*
``valid_range=[-999, 10000]`` is equivalent to setting
``valid_min=-999, valid_max=10000``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with masked values. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(12).reshape(3, 4), 'm')
>>> d[1, 1] = {{package}}.masked
>>> print(d.array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking().array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0, 11]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 --]]
>>> print(d.apply_masking(valid_min=3).array)
[[-- -- -- 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(valid_max=6).array)
[[0 1 2 3]
[4 -- 6 --]
[-- -- -- --]]
>>> print(d.apply_masking(valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 7]
[8 -- -- --]]
>>> d.set_fill_value(7)
>>> print(d.apply_masking(fill_values=True).array)
[[0 1 2 3]
[4 -- 6 --]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=True,
... valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 --]
[8 -- -- --]]
"""
if valid_range is not None:
if valid_min is not None or valid_max is not None:
                raise ValueError(
                    "Can't set the 'valid_range' parameter with either "
                    "the 'valid_min' or 'valid_max' parameters"
                )
try:
if len(valid_range) != 2:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
except TypeError:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
valid_min, valid_max = valid_range
d = _inplace_enabled_define_and_cleanup(self)
if fill_values is None:
fill_values = False
if isinstance(fill_values, bool):
if fill_values:
fill_value = self.get_fill_value(None)
if fill_value is not None:
fill_values = (fill_value,)
else:
fill_values = ()
else:
fill_values = ()
else:
try:
_ = iter(fill_values)
except TypeError:
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
else:
if isinstance(fill_values, str):
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
mask = None
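        # Build the mask cumulatively: elements equal to any fill value,
        # below valid_min or above valid_max are all flagged before the
        # mask is applied in a single step.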
if fill_values:
array = self.array
mask = array == fill_values[0]
for fill_value in fill_values[1:]:
mask |= array == fill_value
if valid_min is not None:
if mask is None:
array = self.array
mask = array < valid_min
else:
mask |= array < valid_min
if valid_max is not None:
if mask is None:
array = self.array
mask = array > valid_max
else:
mask |= array > valid_max
if mask is not None:
array = numpy.ma.where(mask, cfdm_masked, array)
d._set_Array(array, copy=False)
return d
def copy(self, array=True):
"""Return a deep copy.
``d.copy()`` is equivalent to ``copy.deepcopy(d)``.
:Parameters:
array: `bool`, optional
If False then do not copy the array. By default the array
is copied.
:Returns:
`{{class}}`
The deep copy.
**Examples:**
>>> e = d.copy()
>>> e = d.copy(array=False)
"""
return super().copy(array=array)
def creation_commands(
self, name="data", namespace=None, indent=0, string=True
):
"""Return the commands that would create the data object.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
name: `str` or `None`, optional
                Set the variable name of the `Data` object that the commands
create.
{{namespace: `str`, optional}}
{{indent: `int`, optional}}
{{string: `bool`, optional}}
:Returns:
{{returns creation_commands}}
**Examples:**
>>> d = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]],
... units='degrees_east')
>>> print(d.creation_commands())
data = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]], units='degrees_east', dtype='f8')
>>> d = {{package}}.{{class}}(['alpha', 'beta', 'gamma', 'delta'],
... mask = [1, 0, 0, 0])
>>> d.creation_commands(name='d', namespace='', string=False)
["d = Data(['', 'beta', 'gamma', 'delta'], dtype='U5', mask=Data([True, False, False, False], dtype='b1'))"]
"""
namespace0 = namespace
if namespace is None:
namespace = self._package() + "."
elif namespace and not namespace.endswith("."):
namespace += "."
mask = self.mask
if mask.any():
if name == "mask":
raise ValueError(
"When the data is masked, the 'name' parameter "
"can not have the value 'mask'"
)
masked = True
array = self.filled().array.tolist()
else:
masked = False
array = self.array.tolist()
units = self.get_units(None)
if units is None:
units = ""
else:
units = f", units={units!r}"
calendar = self.get_calendar(None)
if calendar is None:
calendar = ""
else:
calendar = f", calendar={calendar!r}"
fill_value = self.get_fill_value(None)
if fill_value is None:
fill_value = ""
else:
fill_value = f", fill_value={fill_value}"
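        # dtype.descr[0][1] is a string such as '<f8'; strip the leading
        # byte-order character to leave just the type code (e.g. 'f8').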
dtype = self.dtype.descr[0][1][1:]
if masked:
mask = mask.creation_commands(
name="mask", namespace=namespace0, indent=0, string=True
)
mask = mask.replace("mask = ", "mask=", 1)
mask = f", {mask}"
else:
mask = ""
if name is None:
name = ""
else:
name = name + " = "
out = []
out.append(
f"{name}{namespace}{self.__class__.__name__}({array}{units}"
f"{calendar}, dtype={dtype!r}{mask}{fill_value})"
)
if string:
indent = " " * indent
out[0] = indent + out[0]
out = ("\n" + indent).join(out)
return out
@_inplace_enabled(default=False)
def filled(self, fill_value=None, inplace=False):
"""Replace masked elements with the fill value.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
fill_value: scalar, optional
                The fill value. By default the fill value returned by
                `get_fill_value` is used, or if this is not set then the
                netCDF default fill value for the data type is used (as
                defined by `netCDF4.default_fillvals`).
{{inplace: `bool`, optional}}
:Returns:
`Data` or `None`
The filled data, or `None` if the operation was in-place.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]])
>>> print(d.filled().array)
[[1 2 3]]
>>> d[0, 0] = {{package}}.masked
>>> print(d.filled().array)
[[-9223372036854775806 2 3]]
>>> d.set_fill_value(-99)
>>> print(d.filled().array)
[[-99 2 3]]
>>> print(d.filled(1e10).array)
[[10000000000 2 3]]
"""
d = _inplace_enabled_define_and_cleanup(self)
if fill_value is None:
fill_value = d.get_fill_value(None)
if fill_value is None:
default_fillvals = netCDF4.default_fillvals
fill_value = default_fillvals.get(d.dtype.str[1:], None)
if fill_value is None and d.dtype.kind in ("SU"):
fill_value = default_fillvals.get("S1", None)
if fill_value is None: # should not be None by this stage
raise ValueError(
"Can't determine fill value for "
f"data type {d.dtype.str!r}"
) # pragma: no cover
array = self.array
if numpy.ma.isMA(array):
array = array.filled(fill_value)
d._set_Array(array, copy=False)
return d
@_inplace_enabled(default=False)
def insert_dimension(self, position=0, inplace=False):
"""Expand the shape of the data array.
Inserts a new size 1 axis, corresponding to a given position in
the data array shape.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `squeeze`, `transpose`
:Parameters:
position: `int`, optional
Specify the position that the new axis will have in the
data array. By default the new axis has position 0, the
slowest varying position. Negative integers counting from
the last position are allowed.
*Parameter example:*
``position=2``
*Parameter example:*
``position=-1``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with expanded axes. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
        >>> d.insert_dimension().shape
        (1, 19, 73, 96)
        >>> d.insert_dimension(position=3).shape
        (19, 73, 96, 1)
        >>> d.insert_dimension(position=-1, inplace=True)
        >>> d.shape
        (19, 73, 96, 1)
"""
d = _inplace_enabled_define_and_cleanup(self)
# Parse position
ndim = d.ndim
if -ndim - 1 <= position < 0:
position += ndim + 1
elif not 0 <= position <= ndim:
raise ValueError(
f"Can't insert dimension: Invalid position: {position!r}"
)
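        # numpy.expand_dims inserts the new size 1 axis at the requested
        # (now non-negative) position.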
array = numpy.expand_dims(self.array, position)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def get_count(self, default=ValueError()):
"""Return the count variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_index`, `get_list`
:Parameters:
default: optional
Return the value of the *default* parameter if a count
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The count variable.
**Examples:**
>>> c = d.get_count()
"""
try:
return self._get_Array().get_count()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no count variable"
)
def get_index(self, default=ValueError()):
"""Return the index variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_count`, `get_list`
:Parameters:
            default: optional
                Return the value of the *default* parameter if an index
                variable has not been set. If set to an `Exception`
                instance then it will be raised instead.
:Returns:
The index variable.
**Examples:**
>>> i = d.get_index()
"""
try:
return self._get_Array().get_index()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no index variable"
)
def get_list(self, default=ValueError()):
"""Return the list variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_count`, `get_index`
:Parameters:
default: optional
Return the value of the *default* parameter if an index
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The list variable.
**Examples:**
>>> l = d.get_list()
"""
try:
return self._get_Array().get_list()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no list variable"
)
def get_compressed_dimension(self, default=ValueError()):
"""Returns the compressed dimension's array position.
That is, returns the position of the compressed dimension
in the compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_axes`,
`get_compression_type`
:Parameters:
default: optional
                Return the value of the *default* parameter if there is no
compressed dimension. If set to an `Exception` instance
then it will be raised instead.
:Returns:
`int`
The position of the compressed dimension in the compressed
array.
**Examples:**
>>> d.get_compressed_dimension()
2
"""
try:
return self._get_Array().get_compressed_dimension()
except (AttributeError, ValueError):
return self._default(
default,
f"{ self.__class__.__name__!r} has no compressed dimension",
)
def _parse_indices(self, indices):
"""Parse indices of the data and return valid indices in a list.
:Parameters:
indices: `tuple` (not a `list`!)
:Returns:
`list`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d._parse_indices((slice(None, None, None), 1, 2))
[slice(None, None, None), slice(1, 2, 1), slice(2, 3, 1)]
>>> d._parse_indices((1,))
[slice(1, 2, 1), slice(None, None, None), slice(None, None, None)]
"""
shape = self.shape
parsed_indices = []
if not isinstance(indices, tuple):
indices = (indices,)
# Initialise the list of parsed indices as the input indices
# with any Ellipsis objects expanded
length = len(indices)
n = len(shape)
ndim = n
for index in indices:
if index is Ellipsis:
m = n - length + 1
parsed_indices.extend([slice(None)] * m)
n -= m
else:
parsed_indices.append(index)
n -= 1
length -= 1
len_parsed_indices = len(parsed_indices)
if ndim and len_parsed_indices > ndim:
raise IndexError(
f"Invalid indices for data with shape {shape}: "
f"{parsed_indices}"
)
if len_parsed_indices < ndim:
parsed_indices.extend([slice(None)] * (ndim - len_parsed_indices))
if not ndim and parsed_indices:
raise IndexError(
"Scalar data can only be indexed with () or Ellipsis"
)
for i, (index, size) in enumerate(zip(parsed_indices, shape)):
if isinstance(index, slice):
continue
if isinstance(index, int):
# E.g. 43 -> slice(43, 44, 1)
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
if getattr(getattr(index, "dtype", None), "kind", None) == "b":
# E.g. index is [True, False, True] -> [0, 2]
#
# Convert Booleans to non-negative integers. We're
# assuming that anything with a dtype attribute also
# has a size attribute.
if index.size != size:
raise IndexError(
"Invalid indices for data "
f"with shape {shape}: {parsed_indices}"
)
index = numpy.where(index)[0]
if not numpy.ndim(index):
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
len_index = len(index)
if len_index == 1:
# E.g. [3] -> slice(3, 4, 1)
index = index[0]
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
# E.g. [1, 3, 4] -> [1, 3, 4]
pass
parsed_indices[i] = index
return parsed_indices
def maximum(self, axes=None):
"""Return the maximum of an array or the maximum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `minimum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the maximum. By default the
maximum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Maximum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.max()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[23]]]]>
>>> print(e.array)
[[[[23]]]]
>>> e = d.max(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[8, ..., 23]]]]>
>>> print(e.array)
[[[[ 8 9 10 11]]
[[20 21 22 23]]]]
>>> e = d.max([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[11, 23]]]]>
>>> print(e.array)
[[[[11]]
[[23]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find maximum of data: {error}")
array = self.array
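        # keepdims=True retains the collapsed axes as size 1 dimensions,
        # so the result still broadcasts against the original data.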
array = numpy.amax(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def minimum(self, axes=None):
"""Return the minimum of an array or minimum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `maximum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the minimum. By default the
minimum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Minimum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.min()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[0]]]]>
>>> print(e.array)
[[[[0]]]]
>>> e = d.min(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[0, ..., 15]]]]>
>>> print(e.array)
[[[[ 0 1 2 3]]
[[12 13 14 15]]]]
>>> e = d.min([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[0, 12]]]]>
>>> print(e.array)
[[[[ 0]]
[[12]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find minimum of data: {error}")
array = self.array
array = numpy.amin(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
@_inplace_enabled(default=False)
def squeeze(self, axes=None, inplace=False):
"""Remove size 1 axes from the data.
By default all size 1 axes are removed, but particular axes may be
selected with the keyword arguments.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
The positions of the size one axes to be removed. By
default all size one axes are removed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The data with removed data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(1, 73, 1, 96)
        >>> d.squeeze().shape
(73, 96)
>>> d.squeeze(0).shape
(73, 1, 96)
>>> d.squeeze([-3, 2]).shape
(73, 96)
>>> d.squeeze(2, inplace=True)
>>> d.shape
(1, 73, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't squeeze data: {error}")
shape = d.shape
if axes is None:
axes = tuple([i for i, n in enumerate(shape) if n == 1])
else:
# Check the squeeze axes
for i in axes:
if shape[i] > 1:
raise ValueError(
"Can't squeeze data: "
f"Can't remove axis of size {shape[i]}"
)
if not axes:
return d
array = self.array
array = numpy.squeeze(array, axes)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def sum(self, axes=None):
"""Return the sum of an array or the sum along axes.
Missing data array elements are omitted from the calculation.
.. seealso:: `max`, `min`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to calculate the sum. By default the
sum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
The sum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.sum()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[276]]]]>
>>> print(e.array)
[[[[276]]]]
>>> e = d.sum(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[12, ..., 57]]]]>
>>> print(e.array)
[[[[12 15 18 21]]
[[48 51 54 57]]]]
>>> e = d.sum([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[66, 210]]]]>
>>> print(e.array)
[[[[ 66]]
[[210]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't sum data: {error}")
array = self.array
array = numpy.sum(array, axis=axes, keepdims=True)
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
@_inplace_enabled(default=False)
def transpose(self, axes=None, inplace=False):
"""Permute the axes of the data array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `squeeze`
:Parameters:
axes: (sequence of) `int`
The new axis order. By default the order is reversed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with permuted data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.transpose().shape
(96, 73, 19)
>>> d.transpose([1, 0, 2]).shape
(73, 19, 96)
>>> d.transpose([-1, 0, 1], inplace=True)
>>> d.shape
(96, 19, 73)
"""
d = _inplace_enabled_define_and_cleanup(self)
ndim = d.ndim
# Parse the axes. By default, reverse the order of the axes.
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't transpose data: {error}")
if axes is None:
if ndim <= 1:
return d
axes = tuple(range(ndim - 1, -1, -1))
elif len(axes) != ndim:
raise ValueError(
f"Can't transpose data: Axes don't match array: {axes}"
)
# Return unchanged if axes are in the same order as the data
if axes == tuple(range(ndim)):
return d
array = self.array
array = numpy.transpose(array, axes=axes)
d._set_Array(array, copy=False)
return d
def get_compressed_axes(self):
"""Returns the dimensions that are compressed in the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`list`
The dimensions of the data that are compressed to a single
dimension in the underlying array. If the data are not
compressed then an empty list is returned.
**Examples:**
>>> d.shape
(2, 3, 4, 5, 6)
>>> d.compressed_array.shape
(2, 14, 6)
>>> d.get_compressed_axes()
[1, 2, 3]
>>> d.get_compression_type()
''
>>> d.get_compressed_axes()
[]
"""
ca = self._get_Array(None)
if ca is None:
return []
return ca.get_compressed_axes()
def get_compression_type(self):
"""Returns the type of compression applied to the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `compression_axes`,
`get_compressed_dimension`
:Returns:
`str`
The compression type. An empty string means that no
compression has been applied.
**Examples:**
>>> d.get_compression_type()
''
>>> d.get_compression_type()
'gathered'
>>> d.get_compression_type()
'ragged contiguous'
"""
ma = self._get_Array(None)
if ma is None:
return ""
return ma.get_compression_type()
@classmethod
def empty(cls, shape, dtype=None, units=None, calendar=None):
"""Create a new data array without initialising the elements.
Note that the mask of the returned empty data is hard.
.. seealso:: `full`, `ones`, `zeros`
:Parameters:
shape: `int` or `tuple` of `int`
The shape of the new array.
dtype: `numpy.dtype` or any object convertible to `numpy.dtype`
The data-type of the new array. By default the
data-type is ``float``.
units: `str` or `Units`
The units for the empty data array.
calendar: `str`, optional
The calendar for reference time units.
:Returns:
`{{class}}`
**Examples:**
>>> d = {{package}}.{{class}}.empty((96, 73))
"""
return cls(
numpy.empty(shape=shape, dtype=dtype),
units=units,
calendar=calendar,
)
@_manage_log_level_via_verbosity
def equals(
self,
other,
rtol=None,
atol=None,
verbose=None,
ignore_data_type=False,
ignore_fill_value=False,
ignore_compression=True,
ignore_type=False,
_check_values=True,
):
"""Whether two data arrays are the same.
Equality is strict by default. This means that for data arrays to
be considered equal:
* the units and calendar must be the same,
..
* the fill value must be the same (see the *ignore_fill_value*
parameter), and
..
* the arrays must have same shape and data type, the same missing
data mask, and be element-wise equal (see the *ignore_data_type*
parameter).
{{equals tolerance}}
Any compression is ignored by default, with only the arrays in
their uncompressed forms being compared. See the
*ignore_compression* parameter.
Any type of object may be tested but, in general, equality is only
        possible with another data object, or a subclass of
one. See the *ignore_type* parameter.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
other:
The object to compare for equality.
{{atol: number, optional}}
{{rtol: number, optional}}
ignore_fill_value: `bool`, optional
If True then the fill value is omitted from the
comparison.
{{ignore_data_type: `bool`, optional}}
{{ignore_compression: `bool`, optional}}
{{ignore_type: `bool`, optional}}
{{verbose: `int` or `str` or `None`, optional}}
:Returns:
`bool`
Whether the two data arrays are equal.
**Examples:**
>>> d.equals(d)
True
>>> d.equals(d.copy())
True
>>> d.equals('not a data array')
False
"""
pp = super()._equals_preprocess(
other, verbose=verbose, ignore_type=ignore_type
)
if pp is True or pp is False:
return pp
other = pp
# Check that each instance has the same shape
if self.shape != other.shape:
logger.info(
f"{self.__class__.__name__}: Different shapes: "
f"{self.shape} != {other.shape}"
) # pragma: no cover
return False
# Check that each instance has the same fill value
if not ignore_fill_value and self.get_fill_value(
None
) != other.get_fill_value(None):
logger.info(
f"{self.__class__.__name__}: Different fill value: "
f"{self.get_fill_value(None)} != {other.get_fill_value(None)}"
) # pragma: no cover
return False
# Check that each instance has the same data type
if not ignore_data_type and self.dtype != other.dtype:
logger.info(
f"{self.__class__.__name__}: Different data types: "
f"{self.dtype} != {other.dtype}"
) # pragma: no cover
return False
# Return now if we have been asked to not check the array
# values
if not _check_values:
return True
# Check that each instance has the same units
for attr in ("units", "calendar"):
x = getattr(self, "get_" + attr)(None)
y = getattr(other, "get_" + attr)(None)
if x != y:
logger.info(
f"{self.__class__.__name__}: Different {attr}: "
f"{x!r} != {y!r}"
) # pragma: no cover
return False
if not ignore_compression:
# --------------------------------------------------------
# Check for equal compression types
# --------------------------------------------------------
compression_type = self.get_compression_type()
if compression_type != other.get_compression_type():
logger.info(
f"{self.__class__.__name__}: Different compression types: "
f"{compression_type} != {other.get_compression_type()}"
) # pragma: no cover
return False
# --------------------------------------------------------
# Check for equal compressed array values
# --------------------------------------------------------
if compression_type:
if not self._equals(
self.compressed_array,
other.compressed_array,
rtol=rtol,
atol=atol,
):
logger.info(
f"{self.__class__.__name__}: Different compressed "
"array values"
) # pragma: no cover
return False
# ------------------------------------------------------------
# Check for equal (uncompressed) array values
# ------------------------------------------------------------
if not self._equals(self.array, other.array, rtol=rtol, atol=atol):
logger.info(
f"{self.__class__.__name__}: Different array values "
f"(atol={atol}, rtol={rtol})"
) # pragma: no cover
return False
# ------------------------------------------------------------
# Still here? Then the two data arrays are equal.
# ------------------------------------------------------------
return True
def get_filenames(self):
"""Return the name of the file containing the data array.
:Returns:
`set`
The file name in normalised, absolute form. If the
                data are in memory then an empty `set` is returned.
**Examples:**
>>> f = {{package}}.example_field(0)
>>> {{package}}.write(f, 'temp_file.nc')
>>> g = {{package}}.read('temp_file.nc')[0]
>>> d = g.data
>>> d.get_filenames()
{'/data/user/temp_file.nc'}
>>> d[...] = -99
>>> d.get_filenames()
set()
"""
source = self.source(None)
if source is None:
return set()
try:
filename = source.get_filename()
except AttributeError:
return set()
else:
return set((abspath(filename),))
def first_element(self):
"""Return the first element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `last_element`, `second_element`
:Returns:
The first element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.first_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.first_element()
>>> print(x, type(x))
1 <class 'int'>
>>> d[0, 0] = {{package}}.masked
>>> y = d.first_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.first_element()
>>> print(x, type(x))
foo <class 'str'>
"""
return self._item((slice(0, 1),) * self.ndim)
@_inplace_enabled(default=False)
def flatten(self, axes=None, inplace=False):
"""Flatten axes of the data.
Any subset of the axes may be flattened.
The shape of the data may change, but the size will not.
The flattening is executed in row-major (C-style) order. For
example, the array ``[[1, 2], [3, 4]]`` would be flattened across
both dimensions to ``[1 2 3 4]``.
.. versionadded:: (cfdm) 1.7.11
.. seealso:: `insert_dimension`, `squeeze`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
Select the axes. By default all axes are flattened. No
axes are flattened if *axes* is an empty sequence.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The flattened data, or `None` if the operation was
in-place.
**Examples**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.flatten()
>>> e
<{{repr}}Data(24): [0, ..., 23]>
>>> print(e.array)
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23]
>>> e = d.flatten([])
>>> e
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> e = d.flatten([1, 3])
>>> e
<{{repr}}Data(1, 8, 3): [[[0, ..., 23]]]>
>>> print(e.array)
[[[ 0 4 8]
[ 1 5 9]
[ 2 6 10]
[ 3 7 11]
[12 16 20]
[13 17 21]
[14 18 22]
[15 19 23]]]
>>> d.flatten([0, -1], inplace=True)
>>> d
<{{repr}}Data(4, 2, 3): [[[0, ..., 23]]]>
>>> print(d.array)
[[[ 0 4 8]
[12 16 20]]
[[ 1 5 9]
[13 17 21]]
[[ 2 6 10]
[14 18 22]]
[[ 3 7 11]
[15 19 23]]]
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't flatten data: {error}")
ndim = d.ndim
if ndim <= 1:
return d
if axes is None:
# By default flatten all axes
axes = tuple(range(ndim))
else:
if len(axes) <= 1:
return d
# Note that it is important that the first axis in the
# list is the left-most flattened axis
axes = sorted(axes)
# Save the shape before we transpose
shape = list(d.shape)
order = [i for i in range(ndim) if i not in axes]
order[axes[0] : axes[0]] = axes
d.transpose(order, inplace=True)
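        # Replace the flattened axes with a single axis whose size is the
        # product of their sizes, then reshape the transposed array.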
new_shape = [n for i, n in enumerate(shape) if i not in axes]
new_shape.insert(axes[0], numpy.prod([shape[i] for i in axes]))
array = d.array.reshape(new_shape)
out = type(self)(
array,
units=d.get_units(None),
calendar=d.get_calendar(None),
fill_value=d.get_fill_value(None),
)
if inplace:
d.__dict__ = out.__dict__
return out
def last_element(self):
"""Return the last element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `second_element`
:Returns:
The last element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.last_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.last_element()
>>> print(x, type(x))
4 <class 'int'>
>>> d[-1, -1] = {{package}}.masked
>>> y = d.last_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.last_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(-1, None),) * self.ndim)
def second_element(self):
"""Return the second element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `last_element`
:Returns:
The second element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.second_element()
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> y = d.second_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.second_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(0, 1),) * (self.ndim - 1) + (slice(1, 2),))
def to_memory(self):
"""Bring data on disk into memory and retain it there.
There is no change to data that is already in memory.
:Returns:
`None`
**Examples:**
>>> f = {{package}}.example_field(4)
>>> f.data
<{{repr}}Data(3, 26, 4): [[[290.0, ..., --]]] K>
>>> f.data.to_memory()
"""
self._set_Array(self.source().to_memory())
@_inplace_enabled(default=False)
def uncompress(self, inplace=False):
"""Uncompress the underlying array.
.. versionadded:: (cfdm) 1.7.3
.. seealso:: `array`, `compressed_array`, `source`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The uncompressed data, or `None` if the operation was
in-place.
**Examples:**
>>> d.get_compression_type()
'ragged contiguous'
>>> d.source()
<RaggedContiguousArray(4, 9): >
        >>> d.uncompress(inplace=True)
>>> d.get_compression_type()
''
>>> d.source()
<NumpyArray(4, 9): >
"""
d = _inplace_enabled_define_and_cleanup(self)
if d.get_compression_type():
d._set_Array(d.array, copy=False)
return d
def unique(self):
"""The unique elements of the data.
        The unique elements are sorted into a one-dimensional array, with
        no missing values.
.. versionadded:: (cfdm) 1.7.0
:Returns:
`{{class}}`
The unique elements.
**Examples:**
>>> d = {{package}}.{{class}}([[4, 2, 1], [1, 2, 3]], 'metre')
>>> d.unique()
<{{repr}}Data(4): [1, ..., 4] metre>
>>> d[1, -1] = {{package}}.masked
>>> d.unique()
<{{repr}}Data(3): [1, 2, 4] metre>
"""
array = self.array
array = numpy.unique(array)
if numpy.ma.is_masked(array):
array = array.compressed()
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
# ----------------------------------------------------------------
# Aliases
# ----------------------------------------------------------------
def max(self, axes=None):
"""Alias for `maximum`."""
return self.maximum(axes=axes)
def min(self, axes=None):
"""Alias for `minimum`."""
return self.minimum(axes=axes)
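
# A minimal usage sketch of the methods defined above (assuming the
# class is available as cfdm.Data; adjust the import to suit):
#
#     import numpy
#     import cfdm
#
#     d = cfdm.Data(numpy.arange(12).reshape(3, 4), units='K',
#                   mask=[[0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0]])
#     print(d.maximum().array)    # maximum over all axes, shape (1, 1)
#     print(d.transpose().shape)  # (4, 3)
#     print(d.flatten().shape)    # (12,)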
|
[
"logging.getLogger",
"numpy.prod",
"numpy.ma.getmaskarray",
"numpy.asanyarray",
"numpy.array",
"numpy.ma.is_masked",
"numpy.where",
"numpy.ma.masked_all",
"itertools.product",
"numpy.ndim",
"numpy.ma.masked_where",
"numpy.empty",
"numpy.ma.where",
"numpy.amin",
"numpy.ma.array",
"numpy.size",
"itertools.zip_longest",
"numpy.squeeze",
"numpy.shape",
"numpy.transpose",
"numpy.ma.isMA",
"numpy.unique",
"numpy.sum",
"numpy.expand_dims",
"numpy.amax"
] |
[((412, 439), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (429, 439), False, 'import logging\n'), ((24164, 24184), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (24177, 24184), False, 'import numpy\n'), ((40415, 40435), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (40428, 40435), False, 'import numpy\n'), ((42343, 42382), 'numpy.expand_dims', 'numpy.expand_dims', (['self.array', 'position'], {}), '(self.array, position)\n', (42360, 42382), False, 'import numpy\n'), ((51195, 51238), 'numpy.amax', 'numpy.amax', (['array'], {'axis': 'axes', 'keepdims': '(True)'}), '(array, axis=axes, keepdims=True)\n', (51205, 51238), False, 'import numpy\n'), ((53131, 53174), 'numpy.amin', 'numpy.amin', (['array'], {'axis': 'axes', 'keepdims': '(True)'}), '(array, axis=axes, keepdims=True)\n', (53141, 53174), False, 'import numpy\n'), ((55270, 55296), 'numpy.squeeze', 'numpy.squeeze', (['array', 'axes'], {}), '(array, axes)\n', (55283, 55296), False, 'import numpy\n'), ((57050, 57092), 'numpy.sum', 'numpy.sum', (['array'], {'axis': 'axes', 'keepdims': '(True)'}), '(array, axis=axes, keepdims=True)\n', (57059, 57092), False, 'import numpy\n'), ((59029, 59062), 'numpy.transpose', 'numpy.transpose', (['array'], {'axes': 'axes'}), '(array, axes=axes)\n', (59044, 59062), False, 'import numpy\n'), ((76655, 76674), 'numpy.unique', 'numpy.unique', (['array'], {}), '(array)\n', (76667, 76674), False, 'import numpy\n'), ((76687, 76712), 'numpy.ma.is_masked', 'numpy.ma.is_masked', (['array'], {}), '(array)\n', (76705, 76712), False, 'import numpy\n'), ((4663, 4695), 'numpy.ma.array', 'numpy.ma.array', (['array'], {'mask': 'mask'}), '(array, mask=mask)\n', (4677, 4695), False, 'import numpy\n'), ((12464, 12484), 'numpy.ma.isMA', 'numpy.ma.isMA', (['value'], {}), '(value)\n', (12477, 12484), False, 'import numpy\n'), ((12736, 12759), 'numpy.asanyarray', 'numpy.asanyarray', (['value'], {}), '(value)\n', (12752, 12759), False, 'import numpy\n'), ((16925, 16945), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (16938, 16945), False, 'import numpy\n'), ((24879, 24911), 'numpy.array', 'numpy.array', (['array'], {'dtype': 'object'}), '(array, dtype=object)\n', (24890, 24911), False, 'import numpy\n'), ((24982, 25016), 'numpy.ma.masked_where', 'numpy.ma.masked_where', (['mask', 'array'], {}), '(mask, array)\n', (25003, 25016), False, 'import numpy\n'), ((27323, 27356), 'numpy.ma.getmaskarray', 'numpy.ma.getmaskarray', (['self.array'], {}), '(self.array)\n', (27344, 27356), False, 'import numpy\n'), ((34903, 34943), 'numpy.ma.where', 'numpy.ma.where', (['mask', 'cfdm_masked', 'array'], {}), '(mask, cfdm_masked, array)\n', (34917, 34943), False, 'import numpy\n'), ((61673, 61710), 'numpy.empty', 'numpy.empty', ([], {'shape': 'shape', 'dtype': 'dtype'}), '(shape=shape, dtype=dtype)\n', (61684, 61710), False, 'import numpy\n'), ((72555, 72591), 'numpy.prod', 'numpy.prod', (['[shape[i] for i in axes]'], {}), '([shape[i] for i in axes])\n', (72565, 72591), False, 'import numpy\n'), ((18596, 18619), 'numpy.asanyarray', 'numpy.asanyarray', (['array'], {}), '(array)\n', (18612, 18619), False, 'import numpy\n'), ((20733, 20750), 'numpy.size', 'numpy.size', (['value'], {}), '(value)\n', (20743, 20750), False, 'import numpy\n'), ((20782, 20810), 'itertools.product', 'itertools.product', (['*indices1'], {}), '(*indices1)\n', (20799, 20810), False, 'import itertools\n'), ((25036, 25053), 'numpy.ndim', 'numpy.ndim', (['array'], {}), '(array)\n', (25046, 
25053), False, 'import numpy\n'), ((25079, 25116), 'numpy.ma.masked_all', 'numpy.ma.masked_all', (['()'], {'dtype': 'object'}), '((), dtype=object)\n', (25098, 25116), False, 'import numpy\n'), ((4320, 4343), 'numpy.asanyarray', 'numpy.asanyarray', (['array'], {}), '(array)\n', (4336, 4343), False, 'import numpy\n'), ((4618, 4641), 'numpy.asanyarray', 'numpy.asanyarray', (['array'], {}), '(array)\n', (4634, 4641), False, 'import numpy\n'), ((20311, 20339), 'itertools.zip_longest', 'itertools.zip_longest', (['*args'], {}), '(*args)\n', (20332, 20339), False, 'import itertools\n'), ((20945, 20962), 'numpy.ndim', 'numpy.ndim', (['value'], {}), '(value)\n', (20955, 20962), False, 'import numpy\n'), ((21001, 21019), 'numpy.shape', 'numpy.shape', (['value'], {}), '(value)\n', (21012, 21019), False, 'import numpy\n'), ((21610, 21638), 'itertools.product', 'itertools.product', (['*indices1'], {}), '(*indices1)\n', (21627, 21638), False, 'import itertools\n'), ((21640, 21668), 'itertools.product', 'itertools.product', (['*indices2'], {}), '(*indices2)\n', (21657, 21668), False, 'import itertools\n'), ((24318, 24343), 'numpy.ma.is_masked', 'numpy.ma.is_masked', (['array'], {}), '(array)\n', (24336, 24343), False, 'import numpy\n'), ((48849, 48866), 'numpy.ndim', 'numpy.ndim', (['index'], {}), '(index)\n', (48859, 48866), False, 'import numpy\n'), ((48803, 48821), 'numpy.where', 'numpy.where', (['index'], {}), '(index)\n', (48814, 48821), False, 'import numpy\n'), ((13943, 13978), 'numpy.ma.array', 'numpy.ma.array', (['first'], {'mask': 'mask[0]'}), '(first, mask=mask[0])\n', (13957, 13978), False, 'import numpy\n'), ((14515, 14570), 'numpy.ma.array', 'numpy.ma.array', (['[first, last]'], {'mask': '(mask[0], mask[-1])'}), '([first, last], mask=(mask[0], mask[-1]))\n', (14529, 14570), False, 'import numpy\n'), ((15325, 15361), 'numpy.ma.array', 'numpy.ma.array', (['middle'], {'mask': 'mask[1]'}), '(middle, mask=mask[1])\n', (15339, 15361), False, 'import numpy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN MPI WITH FLEXIBLE STATISTICS
Created on 2020
@author: <NAME> (Email: <EMAIL>)
"""
import os
import sys
import h5py
import timeit
import random
import calendar
import numpy as np
import pandas as pd
from mpi4py import MPI
from netCDF4 import Dataset
from collections import OrderedDict
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY, MONTHLY
from MODIS_Aggregation import *
if __name__ == '__main__':
	# This is the main program, which uses MPI to parallelise the whole aggregation process
#--------------STEP 1: Read User Inputs and Initial Paramters for Aggregation--------------------
fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,map_lon,map_lat,grid_lon,grid_lat,gap_x,gap_y,filenum, \
grid_data,sts_switch,varnames,intervals_1d,intervals_2d,bin_num1,bin_num2,var_idx,spl_num,sts_name,histnames, \
output_dir,l3name,unit_list,scale_list,offst_list,longname_list,fillvalue_list = read_user_inputs()
total_file_num = len(filenum)
#--------------STEP 2: Start Aggregation------------------------------------------------
# Start counting operation time
start_time = timeit.default_timer()
print("-------- START AGGREGATION --------")
# Initiate MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
random.seed(rank)
# Initiate the number of files for MPI
remain = size-total_file_num%size
files_part1 = np.arange(total_file_num + remain)
tasks_part1 = np.array(np.split(files_part1,size))
files_part2 = np.arange(total_file_num - tasks_part1[rank].size * (size-remain)) + tasks_part1[rank].size * (size-remain)
tasks_part2 = np.array(np.split(files_part2,remain))
if rank < (size-remain):
fileloop = tasks_part1[rank]
else:
fileloop = tasks_part2[rank-(size-remain)]
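	# Illustrative example (not in the original source): with total_file_num = 10 and size = 4,
	# remain = 2, so ranks 0-1 each take 3 files ([0,1,2] and [3,4,5]) from tasks_part1,
	# while ranks 2-3 each take 2 files ([6,7] and [8,9]) from tasks_part2.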
print("Process {} calculating files from {} to {}... (Total: {} / {})".format(rank, fileloop[0],fileloop[-1],fileloop.shape[0],total_file_num))
if rank == 0:
grid_data = run_modis_aggre(fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,grid_lon,grid_lat,gap_x,gap_y,fileloop, \
grid_data,sts_switch,varnames,intervals_1d,intervals_2d,var_idx,spl_num,sts_name,histnames)
for i in range(1,size):
results = comm.recv(source=i, tag=0)
grid_data = addCounter(grid_data, results)
# Compute the mean cloud fraction & Statistics (Include Min & Max & Standard deviation)
# Reference for statstic parameters
# sts_name[0]: min
# sts_name[1]: max
# sts_name[2]: mean / total_value
# sts_name[3]: count
# sts_name[4]: square
# sts_name[5]: histogram
# sts_name[6]: joint histogram
sts_idx = np.array(np.where(sts_switch == True))[0]
print("Index of User-defined Statistics:",sts_idx)
print(grid_data['GRID_Counts'].reshape([grid_lat,grid_lon]))
key_idx = 0
for key in varnames:
for i in sts_idx:
if i == 0:
grid_data[key+'_'+sts_name[0]] = grid_data[key+'_'+sts_name[0]].reshape([grid_lat,grid_lon])
elif i == 1:
grid_data[key+'_'+sts_name[1]] = grid_data[key+'_'+sts_name[1]].reshape([grid_lat,grid_lon])
elif i == 2:
grid_data[key+'_'+sts_name[2]] = grid_data[key+'_'+sts_name[2]] / grid_data[key+'_'+sts_name[3]]
grid_data[key+'_'+sts_name[2]] = grid_data[key+'_'+sts_name[2]].reshape([grid_lat,grid_lon])
elif i == 3:
grid_data[key+'_'+sts_name[3]] = grid_data[key+'_'+sts_name[3]].reshape([grid_lat,grid_lon])
elif i == 4:
grid_data[key+'_'+sts_name[4]] = ((grid_data[key+'_'+sts_name[4]] / grid_data[key+'_'+sts_name[3]].ravel()) - grid_data[key+'_'+sts_name[2]].ravel()**2)**0.5
grid_data[key+'_'+sts_name[4]] = grid_data[key+'_'+sts_name[4]].reshape([grid_lat,grid_lon])
elif i == 5:
grid_data[key+'_'+sts_name[5]] = grid_data[key+'_'+sts_name[5]].reshape([grid_lat,grid_lon,bin_num1[key_idx]])
elif i == 6:
grid_data[key+'_'+sts_name[6]+histnames[key_idx]] = grid_data[key+'_'+sts_name[6]+histnames[key_idx]].reshape([grid_lat,grid_lon,bin_num1[key_idx],bin_num2[key_idx]])
key_idx += 1
end_time = timeit.default_timer()
print ("Operation Time in {:7.2f} seconds".format(end_time - start_time))
#--------------STEP 3: Create HDF5 file to store the result------------------------------
ff=h5py.File(output_dir+l3name+'MPI','w')
PC=ff.create_dataset('lat_bnd',data=map_lat)
PC.attrs['units']='degrees'
PC.attrs['long_name']='Latitude_boundaries'
PC=ff.create_dataset('lon_bnd',data=map_lon)
PC.attrs['units']='degrees'
PC.attrs['long_name']='Longitude_boundaries'
PCentry=ff.create_dataset('GRID_Counts',data=grid_data['GRID_Counts'].reshape([grid_lat,grid_lon]))
PCentry.dims[0].label='lat_bnd'
PCentry.dims[1].label='lon_bnd'
	PCentry.attrs['units']='none'
	PCentry.attrs['long_name']='grid_point_counts'
for i in range(sts_idx.shape[0]):
cnt = 0
for key in grid_data:
if key.find("1km") != -1:
new_name = key.replace("_1km", "")
else:
new_name = key
if (sts_name[sts_idx[i]] in key) == True:
if sts_idx[i] >= 5:
addGridEntry(ff,new_name,unit_list[cnt],longname_list[cnt],fillvalue_list[cnt],scale_list[cnt],offst_list[cnt],grid_data[key],intervals_1d[cnt],intervals_2d[cnt])
else:
addGridEntry(ff,new_name,unit_list[cnt],longname_list[cnt],fillvalue_list[cnt],scale_list[cnt],offst_list[cnt],grid_data[key],intervals_1d[0],intervals_2d[0])
cnt += 1
ff.close()
print(l3name+' Saved!')
print("-------- AGGREGATION COMPLETED --------")
else:
results = run_modis_aggre(fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,grid_lon,grid_lat,gap_x,gap_y,fileloop, \
grid_data,sts_switch,varnames,intervals_1d,intervals_2d,var_idx,spl_num,sts_name,histnames)
		message = "Process {} finished".format(rank)
		print(message)
comm.send(results, dest=0, tag=0)
#---------------------------COMPLETED------------------------------------------------------
|
[
"numpy.where",
"timeit.default_timer",
"random.seed",
"h5py.File",
"numpy.split",
"numpy.arange"
] |
[((1212, 1234), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1232, 1234), False, 'import timeit\n'), ((1372, 1389), 'random.seed', 'random.seed', (['rank'], {}), '(rank)\n', (1383, 1389), False, 'import random\n'), ((1484, 1518), 'numpy.arange', 'np.arange', (['(total_file_num + remain)'], {}), '(total_file_num + remain)\n', (1493, 1518), True, 'import numpy as np\n'), ((1543, 1570), 'numpy.split', 'np.split', (['files_part1', 'size'], {}), '(files_part1, size)\n', (1551, 1570), True, 'import numpy as np\n'), ((1587, 1655), 'numpy.arange', 'np.arange', (['(total_file_num - tasks_part1[rank].size * (size - remain))'], {}), '(total_file_num - tasks_part1[rank].size * (size - remain))\n', (1596, 1655), True, 'import numpy as np\n'), ((1719, 1748), 'numpy.split', 'np.split', (['files_part2', 'remain'], {}), '(files_part2, remain)\n', (1727, 1748), True, 'import numpy as np\n'), ((4099, 4121), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4119, 4121), False, 'import timeit\n'), ((4298, 4341), 'h5py.File', 'h5py.File', (["(output_dir + l3name + 'MPI')", '"""w"""'], {}), "(output_dir + l3name + 'MPI', 'w')\n", (4307, 4341), False, 'import h5py\n'), ((2702, 2730), 'numpy.where', 'np.where', (['(sts_switch == True)'], {}), '(sts_switch == True)\n', (2710, 2730), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import os
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from .modules import WavePool, WaveUnpool, ImagePool, NLayerDiscriminator
from utils.metrics import compute_dice_metric
from utils.losses import DiceLoss
import numpy as np
class WaveEncoder(nn.Module):
"""Wavelet encoder in WCT2, only partial layers used"""
def __init__(self):
super(WaveEncoder, self).__init__()
self.pad = nn.ReflectionPad2d(1)
self.relu = nn.ReLU(inplace=True)
self.conv0 = nn.Conv2d(3, 3, 1, 1, 0)
self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 0)
self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
self.pool1 = WavePool(64)
self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 0)
self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
self.pool2 = WavePool(128)
self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 0)
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
self.pool3 = WavePool(256)
self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 0)
def forward(self, x, skips):
"""Wavelet encoding - only up to level 2
Args:
x (torch.Tensor): input to be encoded
skips (dict): dictionary to contain LH, HL, HH filter responses
Returns:
LL (torch.Tensor): output of LL filters
skips (dict): dictionary containing said filters
"""
# level 1
out = self.conv0(x)
out = self.relu(self.conv1_1(self.pad(out)))
# level 2
out = self.relu(self.conv1_2(self.pad(out)))
skips['conv1_2'] = out
LL, LH, HL, HH = self.pool1(out)
skips['pool1'] = [LH, HL, HH]
return LL, skips
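# Illustrative usage sketch (assumptions: a 1x3x224x224 input, and WavePool halving the spatial
# resolution while keeping the channel count, as in the reference WCT2 implementation):
#   enc = WaveEncoder()
#   LL, skips = enc(torch.randn(1, 3, 224, 224), {})
#   # LL would then have shape (1, 64, 112, 112); skips['pool1'] holds the LH/HL/HH bands.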
class WaveDecoder(nn.Module):
"""Wavelet encoder in WCT2, only partial layers used"""
def __init__(self):
super(WaveDecoder, self).__init__()
multiply_in = 5
self.pad = nn.ReflectionPad2d(1)
self.relu = nn.ReLU(inplace=True)
self.conv4_1 = nn.Conv2d(512, 256, 3, 1, 0)
self.recon_block3 = WaveUnpool(256)
self.conv3_4_2 = nn.Conv2d(256*multiply_in, 256, 3, 1, 0)
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv3_1 = nn.Conv2d(256, 128, 3, 1, 0)
self.recon_block2 = WaveUnpool(128)
self.conv2_2_2 = nn.Conv2d(128*multiply_in, 128, 3, 1, 0)
self.conv2_1 = nn.Conv2d(128, 64, 3, 1, 0)
self.recon_block1 = WaveUnpool(64)
self.conv1_2_2 = nn.Conv2d(64*multiply_in, 64, 3, 1, 0)
self.conv1_1 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x, skips):
"""Decoder - upsample from level 2
Args:
x (torch.Tensor): input to be encoded
skips (dict): dictionary containing LH, HL, HH filter responses
Returns:
out (torch.Tensor): output of wavelet unpooling layer
"""
LH, HL, HH = skips['pool1']
original = skips['conv1_2'] if 'conv1_2' in skips.keys() else None
out = self.recon_block1(x, LH, HL, HH, original)
return out
class WCT2Features(nn.Module):
"""WCT2 transform with fixed input and output channels and handpicked LL filters
"""
def __init__(self, filters=None, model_path_encoder=None, model_path_decoder=None):
super(WCT2Features, self).__init__()
self.encoder = WaveEncoder().cuda()
self.decoder = WaveDecoder().cuda()
self.encoder.load_state_dict(
torch.load(os.path.join(model_path_encoder),
map_location=lambda storage, loc: storage))
self.decoder.load_state_dict(
torch.load(os.path.join(model_path_decoder),
map_location=lambda storage, loc: storage))
self.filters = filters
# self.tanh = nn.Tanh()
# chosen channels
# self.ll_filter_idx = [4,7,11,24,25,27]
# Sparsest CT channels [25, 54,16,22,61,4,8,27,7,3]
# self.ll_filter_idx = [15,2,41,12,39,1,42,23,51,38]
# self.ll_filter_idx = [14 ,15 ,45 ,19 ,39, 1 ,42 ,23 ,51, 38]
def forward(self, x):
"""Get WCT2 LL filters
Args:
x (torch.Tensor): input tensor
Returns:
out (torch.Tensor): output LL filters
"""
skips = {}
out, skips = self.encoder(x, skips)
out = self.decoder(out, skips)
out = out[:,:64,:,:]
if self.filters != None:
out = torch.index_select(out, 1, torch.tensor(self.filters).cuda())
return out
class WCT2GANUNet(nn.Module):
"""WCT2 GAN UNet all in one class"""
def __init__(self, g, seg, n_channels, lr=0.0002):
super(WCT2GANUNet, self).__init__()
# generator
self.g = g.cuda()
# discriminator
self.d = NLayerDiscriminator(input_nc=n_channels).cuda()
# segmentor
self.seg = seg.cuda()
self.lr = lr
# optimisers here
self.g_optim = optim.Adam(self.g.parameters(), lr=self.lr)
self.seg_optim = optim.Adam(self.seg.parameters(), lr=self.lr)
# self.optim = optim.Adam(chain(self.g.parameters(), self.seg.parameters()), lr=self.lr)
self.d_optim = optim.SGD(self.d.parameters(), lr=self.lr, momentum=0.5)
self.criterion_gan = nn.BCELoss()
self.pool = ImagePool()
def criterion_seg(self, prediction, target):
return nn.BCELoss()(prediction, target) + DiceLoss()(prediction, target)
def forward_gen(self, x):
return self.g(x)
def forward_seg(self, x):
out = self.forward_gen(x)
a1, a2, a3, a4, a5 = self.seg.downsample(out)
seg = self.seg.upsample(a1, a2, a3, a4, a5)
return seg
def get_target(self, pred, is_true=True):
"""Return target tensor with similar shape to pred"""
if is_true == True and np.random.random() > 0.65:
return torch.ones(pred.size(), requires_grad=False).cuda()
return torch.zeros(pred.size(), requires_grad=False).cuda()
# # occasionally give wrong labels
# if is_true == True and np.random.random() + 0.3 > 0.5:
# # use soft label for true [0.7, 1.2]
# return (1.2 - 0.7) * torch.rand(pred.size(), requires_grad=False).cuda() + 0.7
# # use soft label [0, 0.1] for false
# return 0.1 * torch.rand(pred.size(), requires_grad=False).cuda()
def set_requires_grad(self, net, requires_grad=False):
for param in net.parameters():
param.requires_grad=requires_grad
def step(self, x_s, x_t, y_s):
# GAN loss - update discriminator and generator here
# GAN loss - max log(D(x)) + log(1 - D(G(x)))
# update d only
self.d_optim.zero_grad()
out_x_s = self.forward_gen(x_s)
out_x_t = self.forward_gen(x_t)
x_s_real = self.d(out_x_s)
target_real = self.get_target(x_s_real)
loss_real = self.criterion_gan(x_s_real, target_real)
loss_real.backward()
# get generated feature maps from pool / replay for stability
x_s_fake_map = (self.pool.query(out_x_t)).detach()
x_s_fake = self.d(x_s_fake_map)
target_fake = self.get_target(x_s_fake, is_true=False)
loss_fake = self.criterion_gan(x_s_fake, target_fake)
loss_fake.backward()
self.d_optim.step()
# update g - max D(G(X))
self.g_optim.zero_grad()
x_s_fake = self.d(x_s_fake_map)
target_real = self.get_target(x_s_real)
loss_g = self.criterion_gan(x_s_fake, target_real)
loss_g.backward()
self.g_optim.step()
# Segmentation loss
self.set_requires_grad(self.g, requires_grad=False)
# self.g_optim.zero_grad()
self.seg_optim.zero_grad()
out_seg = self.forward_seg(x_s)
seg_loss = self.criterion_seg(out_seg, y_s)
seg_loss.backward()
# self.g_optim.step()
self.seg_optim.step()
# calculate dice score for current batch
dice_score = compute_dice_metric(torch.round(out_seg), y_s).item()
# backward pass
return seg_loss.item(), (loss_real + loss_fake).item(), dice_score
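    # Illustrative training-loop sketch (variable names are assumptions, not from the source):
    #   seg_loss, d_loss, dice = model.step(x_source, x_target, y_source)
    # where x_source / x_target are image batches from the two domains and y_source the source masks.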
def save(self, path):
print('saving model...')
if os.path.isdir(path) == False:
os.makedirs(path)
torch.save(self.g.state_dict(), os.path.join(path,'g.pth'))
torch.save(self.d.state_dict(), os.path.join(path,'d.pth'))
torch.save(self.seg.state_dict(), os.path.join(path,'seg.pth'))
print('saving done!')
|
[
"torch.nn.ReLU",
"os.makedirs",
"numpy.random.random",
"torch.nn.ReflectionPad2d",
"os.path.join",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.BCELoss",
"os.path.isdir",
"torch.round",
"utils.losses.DiceLoss"
] |
[((479, 500), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (497, 500), True, 'import torch.nn as nn\n'), ((521, 542), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (528, 542), True, 'import torch.nn as nn\n'), ((565, 589), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)', '(1)', '(1)', '(0)'], {}), '(3, 3, 1, 1, 0)\n', (574, 589), True, 'import torch.nn as nn\n'), ((613, 638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)', '(1)', '(0)'], {}), '(3, 64, 3, 1, 0)\n', (622, 638), True, 'import torch.nn as nn\n'), ((662, 688), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(0)'], {}), '(64, 64, 3, 1, 0)\n', (671, 688), True, 'import torch.nn as nn\n'), ((747, 774), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(1)', '(0)'], {}), '(64, 128, 3, 1, 0)\n', (756, 774), True, 'import torch.nn as nn\n'), ((798, 826), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(0)'], {}), '(128, 128, 3, 1, 0)\n', (807, 826), True, 'import torch.nn as nn\n'), ((886, 914), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)', '(1)', '(0)'], {}), '(128, 256, 3, 1, 0)\n', (895, 914), True, 'import torch.nn as nn\n'), ((938, 966), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (947, 966), True, 'import torch.nn as nn\n'), ((990, 1018), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (999, 1018), True, 'import torch.nn as nn\n'), ((1042, 1070), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (1051, 1070), True, 'import torch.nn as nn\n'), ((1138, 1166), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)', '(1)', '(0)'], {}), '(256, 512, 3, 1, 0)\n', (1147, 1166), True, 'import torch.nn as nn\n'), ((2084, 2105), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (2102, 2105), True, 'import torch.nn as nn\n'), ((2126, 2147), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2133, 2147), True, 'import torch.nn as nn\n'), ((2171, 2199), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)', '(3)', '(1)', '(0)'], {}), '(512, 256, 3, 1, 0)\n', (2180, 2199), True, 'import torch.nn as nn\n'), ((2279, 2321), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * multiply_in)', '(256)', '(3)', '(1)', '(0)'], {}), '(256 * multiply_in, 256, 3, 1, 0)\n', (2288, 2321), True, 'import torch.nn as nn\n'), ((2343, 2371), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (2352, 2371), True, 'import torch.nn as nn\n'), ((2395, 2423), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (2404, 2423), True, 'import torch.nn as nn\n'), ((2447, 2475), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)', '(1)', '(0)'], {}), '(256, 128, 3, 1, 0)\n', (2456, 2475), True, 'import torch.nn as nn\n'), ((2555, 2597), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * multiply_in)', '(128)', '(3)', '(1)', '(0)'], {}), '(128 * multiply_in, 128, 3, 1, 0)\n', (2564, 2597), True, 'import torch.nn as nn\n'), ((2619, 2646), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)', '(1)', '(0)'], {}), '(128, 64, 3, 1, 0)\n', (2628, 2646), True, 'import torch.nn as nn\n'), ((2716, 2756), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * multiply_in)', '(64)', '(3)', '(1)', '(0)'], {}), '(64 * multiply_in, 64, 3, 1, 0)\n', (2725, 2756), True, 
'import torch.nn as nn\n'), ((2778, 2803), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)', '(3)', '(1)', '(0)'], {}), '(64, 3, 3, 1, 0)\n', (2787, 2803), True, 'import torch.nn as nn\n'), ((5668, 5680), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5678, 5680), True, 'import torch.nn as nn\n'), ((8858, 8877), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8871, 8877), False, 'import os\n'), ((8900, 8917), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (8911, 8917), False, 'import os\n'), ((8958, 8985), 'os.path.join', 'os.path.join', (['path', '"""g.pth"""'], {}), "(path, 'g.pth')\n", (8970, 8985), False, 'import os\n'), ((9026, 9053), 'os.path.join', 'os.path.join', (['path', '"""d.pth"""'], {}), "(path, 'd.pth')\n", (9038, 9053), False, 'import os\n'), ((9096, 9125), 'os.path.join', 'os.path.join', (['path', '"""seg.pth"""'], {}), "(path, 'seg.pth')\n", (9108, 9125), False, 'import os\n'), ((3753, 3785), 'os.path.join', 'os.path.join', (['model_path_encoder'], {}), '(model_path_encoder)\n', (3765, 3785), False, 'import os\n'), ((3915, 3947), 'os.path.join', 'os.path.join', (['model_path_decoder'], {}), '(model_path_decoder)\n', (3927, 3947), False, 'import os\n'), ((5787, 5799), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5797, 5799), True, 'import torch.nn as nn\n'), ((5822, 5832), 'utils.losses.DiceLoss', 'DiceLoss', ([], {}), '()\n', (5830, 5832), False, 'from utils.losses import DiceLoss\n'), ((6290, 6308), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6306, 6308), True, 'import numpy as np\n'), ((8636, 8656), 'torch.round', 'torch.round', (['out_seg'], {}), '(out_seg)\n', (8647, 8656), False, 'import torch\n'), ((4799, 4825), 'torch.tensor', 'torch.tensor', (['self.filters'], {}), '(self.filters)\n', (4811, 4825), False, 'import torch\n')]
|
import kfserving
from typing import List, Union
import numpy as np
class Predictor(): # pylint:disable=too-few-public-methods
def __init__(self, clf: kfserving.KFModel):
self.clf = clf
def predict_fn(self, arr: Union[np.ndarray, List]) -> np.ndarray:
instances = []
for req_data in arr:
if isinstance(req_data, np.ndarray):
instances.append(req_data.tolist())
else:
instances.append(req_data)
resp = self.clf.predict({"instances": instances})
return np.array(resp["predictions"])
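# Illustrative usage sketch (model name and input values are hypothetical):
#   clf = kfserving.KFModel("my-model")
#   predictor = Predictor(clf)
#   preds = predictor.predict_fn(np.array([[5.1, 3.5, 1.4, 0.2]]))
#   # `preds` is a numpy array built from the "predictions" field of the KFServing response.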
|
[
"numpy.array"
] |
[((557, 586), 'numpy.array', 'np.array', (["resp['predictions']"], {}), "(resp['predictions'])\n", (565, 586), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Emperor Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "0.9.3-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from unittest import TestCase, main
from tempfile import mkstemp
from os import close
import numpy as np
import numpy.testing as npt
from emperor.parse import parse_coords
class ParseTests(TestCase):
def test_parse_coords_ordination_results(self):
"""parse_coords should handle skbio's OrdinationResults file"""
coords = ordination_results_file.splitlines()
obs = parse_coords(coords)
exp = (['A', 'B', 'C'],
np.array([[.11, .09, .23], [.03, .07, -.26], [.12, .06, -.32]]),
np.array([4.94, 1.79, 1.50]),
np.array([14.3, 5.2, 4.3]))
# test the header and the values apart from each other
self.assertEqual(obs[0], exp[0])
npt.assert_almost_equal(obs[1], exp[1])
npt.assert_almost_equal(obs[2], exp[2])
npt.assert_almost_equal(obs[3], exp[3])
def test_parse_coords_qiime(self):
"""parse_coords should handle old qiime PCoA coords format"""
coords = qiime_pcoa_file.splitlines()
obs = parse_coords(coords)
exp = (['A', 'B', 'C'],
np.array([[.11, .09, .23], [.03, .07, -.26], [.12, .06, -.32]]),
np.array([4.94, 1.79, 1.50]),
np.array([14.3, 5.2, 4.3]))
# test the header and the values apart from each other
self.assertEqual(obs[0], exp[0])
npt.assert_almost_equal(obs[1], exp[1])
npt.assert_almost_equal(obs[2], exp[2])
npt.assert_almost_equal(obs[3], exp[3])
def test_parse_coords_qiime_file(self):
"""parse_coords should handle old qiime PCoA coords file"""
fd, fp = mkstemp()
close(fd)
with open(fp, 'w') as f:
f.write(qiime_pcoa_file)
with open(fp, 'U') as f:
obs = parse_coords(f)
exp = (['A', 'B', 'C'],
np.array([[.11, .09, .23], [.03, .07, -.26], [.12, .06, -.32]]),
np.array([4.94, 1.79, 1.50]),
np.array([14.3, 5.2, 4.3]))
# test the header and the values apart from each other
self.assertEqual(obs[0], exp[0])
npt.assert_almost_equal(obs[1], exp[1])
npt.assert_almost_equal(obs[2], exp[2])
npt.assert_almost_equal(obs[3], exp[3])
ordination_results_file = """Eigvals\t3
4.94\t1.79\t1.50
Proportion explained\t3
14.3\t5.2\t4.3
Species\t0\t0
Site\t3\t3
A\t.11\t.09\t.23
B\t.03\t.07\t-.26
C\t.12\t.06\t-.32
Biplot\t0\t0
Site constraints\t0\t0"""
qiime_pcoa_file = """pc vector number\t1\t2\t3
A\t0.11\t0.09\t0.23
B\t0.03\t0.07\t-0.26
C\t0.12\t0.06\t-0.32
eigvals\t4.94\t1.79\t1.50
% variation explained\t14.3\t5.2\t4.3
"""
if __name__ == '__main__':
main()
|
[
"os.close",
"numpy.testing.assert_almost_equal",
"numpy.array",
"unittest.main",
"emperor.parse.parse_coords",
"tempfile.mkstemp"
] |
[((2964, 2970), 'unittest.main', 'main', ([], {}), '()\n', (2968, 2970), False, 'from unittest import TestCase, main\n'), ((677, 697), 'emperor.parse.parse_coords', 'parse_coords', (['coords'], {}), '(coords)\n', (689, 697), False, 'from emperor.parse import parse_coords\n'), ((1010, 1049), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[1]', 'exp[1]'], {}), '(obs[1], exp[1])\n', (1033, 1049), True, 'import numpy.testing as npt\n'), ((1058, 1097), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[2]', 'exp[2]'], {}), '(obs[2], exp[2])\n', (1081, 1097), True, 'import numpy.testing as npt\n'), ((1106, 1145), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[3]', 'exp[3]'], {}), '(obs[3], exp[3])\n', (1129, 1145), True, 'import numpy.testing as npt\n'), ((1316, 1336), 'emperor.parse.parse_coords', 'parse_coords', (['coords'], {}), '(coords)\n', (1328, 1336), False, 'from emperor.parse import parse_coords\n'), ((1649, 1688), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[1]', 'exp[1]'], {}), '(obs[1], exp[1])\n', (1672, 1688), True, 'import numpy.testing as npt\n'), ((1697, 1736), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[2]', 'exp[2]'], {}), '(obs[2], exp[2])\n', (1720, 1736), True, 'import numpy.testing as npt\n'), ((1745, 1784), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[3]', 'exp[3]'], {}), '(obs[3], exp[3])\n', (1768, 1784), True, 'import numpy.testing as npt\n'), ((1915, 1924), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (1922, 1924), False, 'from tempfile import mkstemp\n'), ((1933, 1942), 'os.close', 'close', (['fd'], {}), '(fd)\n', (1938, 1942), False, 'from os import close\n'), ((2395, 2434), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[1]', 'exp[1]'], {}), '(obs[1], exp[1])\n', (2418, 2434), True, 'import numpy.testing as npt\n'), ((2443, 2482), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[2]', 'exp[2]'], {}), '(obs[2], exp[2])\n', (2466, 2482), True, 'import numpy.testing as npt\n'), ((2491, 2530), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[3]', 'exp[3]'], {}), '(obs[3], exp[3])\n', (2514, 2530), True, 'import numpy.testing as npt\n'), ((745, 817), 'numpy.array', 'np.array', (['[[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]]'], {}), '([[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]])\n', (753, 817), True, 'import numpy as np\n'), ((825, 852), 'numpy.array', 'np.array', (['[4.94, 1.79, 1.5]'], {}), '([4.94, 1.79, 1.5])\n', (833, 852), True, 'import numpy as np\n'), ((870, 896), 'numpy.array', 'np.array', (['[14.3, 5.2, 4.3]'], {}), '([14.3, 5.2, 4.3])\n', (878, 896), True, 'import numpy as np\n'), ((1384, 1456), 'numpy.array', 'np.array', (['[[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]]'], {}), '([[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]])\n', (1392, 1456), True, 'import numpy as np\n'), ((1464, 1491), 'numpy.array', 'np.array', (['[4.94, 1.79, 1.5]'], {}), '([4.94, 1.79, 1.5])\n', (1472, 1491), True, 'import numpy as np\n'), ((1509, 1535), 'numpy.array', 'np.array', (['[14.3, 5.2, 4.3]'], {}), '([14.3, 5.2, 4.3])\n', (1517, 1535), True, 'import numpy as np\n'), ((2066, 2081), 'emperor.parse.parse_coords', 'parse_coords', (['f'], {}), '(f)\n', (2078, 2081), False, 'from emperor.parse import parse_coords\n'), ((2130, 2202), 'numpy.array', 'np.array', (['[[0.11, 0.09, 0.23], [0.03, 0.07, 
-0.26], [0.12, 0.06, -0.32]]'], {}), '([[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]])\n', (2138, 2202), True, 'import numpy as np\n'), ((2210, 2237), 'numpy.array', 'np.array', (['[4.94, 1.79, 1.5]'], {}), '([4.94, 1.79, 1.5])\n', (2218, 2237), True, 'import numpy as np\n'), ((2255, 2281), 'numpy.array', 'np.array', (['[14.3, 5.2, 4.3]'], {}), '([14.3, 5.2, 4.3])\n', (2263, 2281), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import imutils
from collections import defaultdict
# mouse callback function
def define_points(target_img):
corners = []
refPt = []
def draw_circle(event,x,y,flags,param):
global refPt
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(param,(x,y),5,(255,0,0),-1)
refPt = [x,y]
print(type(refPt))
corners.append(refPt)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle, target_img)
while(1):
cv2.imshow('image',target_img)
k = cv2.waitKey(20) & 0xFF
# corners.append(refPt)
if k == 27:
break
cv2.destroyAllWindows()
print (corners)
new_corners = np.array(corners)
return new_corners
def order_points(pts):
# initialzie a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
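# Worked example (illustrative): for pts = np.array([[10, 10], [200, 20], [190, 210], [15, 200]])
# the corner sums are (20, 220, 400, 215) and the diffs (y - x) are (0, -180, 20, 185), so
# order_points returns the points ordered as top-left [10, 10], top-right [200, 20],
# bottom-right [190, 210], bottom-left [15, 200].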
def segment_by_angle_kmeans(lines,k=2, **kwargs):
"""Groups lines based on angle with k-means.
Uses k-means on the coordinates of the angle on the unit circle
to segment `k` angles inside `lines`.
"""
# Define criteria = (type, max_iter, epsilon)
default_criteria_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
criteria = kwargs.get('criteria', (default_criteria_type, 10, 1.0))
flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)
attempts = kwargs.get('attempts', 10)
# returns angles in [0, pi] in radians
angles = np.array([line[0][1] for line in lines])
# multiply the angles by two and find coordinates of that angle
pts = np.array([[np.cos(2*angle), np.sin(2*angle)]
for angle in angles], dtype=np.float32)
# run kmeans on the coords
labels, centers = cv2.kmeans(pts, k, None, criteria, attempts, flags)[1:]
labels = labels.reshape(-1) # transpose to row vec
# segment lines based on their kmeans label
segmented = defaultdict(list)
for i, line in zip(range(len(lines)), lines):
segmented[labels[i]].append(line)
segmented = list(segmented.values())
return segmented
def intersection(line1, line2):
"""Finds the intersection of two lines given in Hesse normal form.
Returns closest integer pixel locations.
See https://stackoverflow.com/a/383527/5087436
"""
rho1, theta1 = line1[0]
rho2, theta2 = line2[0]
A = np.array([
[np.cos(theta1), np.sin(theta1)],
[np.cos(theta2), np.sin(theta2)]
])
b = np.array([[rho1], [rho2]])
x0, y0 = np.linalg.solve(A, b)
x0, y0 = int(np.round(x0)), int(np.round(y0))
return [[x0, y0]]
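# Worked example (illustrative): line1 = [[5, 0]] (the vertical line x = 5) and
# line2 = [[3, np.pi / 2]] (the horizontal line y = 3) give intersection(line1, line2) == [[5, 3]].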
def segmented_intersections(lines):
"""Finds the intersections between groups of lines."""
intersections = []
for i, group in enumerate(lines[:-1]):
for next_group in lines[i+1:]:
for line1 in group:
for line2 in next_group:
intersections.append(intersection(line1, line2))
return intersections
def isEqual(l1, l2):
    # Check whether two line segments (x1, y1, x2, y2) are roughly parallel and close to each other.
    length1 = np.sqrt((l1[2] - l1[0])*(l1[2] - l1[0]) + (l1[3] - l1[1])*(l1[3] - l1[1]))
    length2 = np.sqrt((l2[2] - l2[0])*(l2[2] - l2[0]) + (l2[3] - l2[1])*(l2[3] - l2[1]))
    product = (l1[2] - l1[0])*(l2[2] - l2[0]) + (l1[3] - l1[1])*(l2[3] - l2[1])
    if abs(product / (length1 * length2)) < np.cos(np.pi / 30):
        return False
    mx1 = (l1[0] + l1[2]) * 0.5
    mx2 = (l2[0] + l2[2]) * 0.5
    my1 = (l1[1] + l1[3]) * 0.5
    my2 = (l2[1] + l2[3]) * 0.5
    dist = np.sqrt((mx1 - mx2)*(mx1 - mx2) + (my1 - my2)*(my1 - my2))
    if dist > max(length1, length2) * 0.5:
        return False
    return True
def birdseye_correction(img = "angled.jpg"):
img = cv2.imread(img,0)
resized = imutils.resize(img, height = 1000)
copy = resized.copy()
rect = order_points(define_points(copy))
print (rect)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0]-bl[0])**2)+((br[1]-bl[1])**2))
widthB = np.sqrt(((tr[0]-tl[0])**2)+((tr[1]-tl[1])**2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0]-br[0])**2)+((tr[1]-br[1])**2))
heightB = np.sqrt(((tl[0]-bl[0])**2)+((tl[1]-bl[1])**2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([[0, 0], \
[maxWidth - 1, 0], \
[maxWidth - 1, maxHeight - 1], \
[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(resized, M, (maxWidth, maxHeight))
cv2.imshow("warped", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()
# gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
blurred_img = cv2.GaussianBlur(warped,(3,3),0)
binary = cv2.adaptiveThreshold(blurred_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,31,2)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(binary,cv2.MORPH_OPEN,kernel, iterations = 2)
# Apply edge detection method on the image
edges = cv2.Canny(warped,50,150,apertureSize = 3)
#
cv2.imshow("edges", edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
# This returns an array of r and theta values
lines = cv2.HoughLines(edges,1,np.pi/180, 140)
# The below for loop runs till r and theta values
# are in the range of the 2d array
for line in lines:
for r,theta in line:
# Stores the value of cos(theta) in a
a = np.cos(theta)
# Stores the value of sin(theta) in b
b = np.sin(theta)
# x0 stores the value rcos(theta)
x0 = a*r
# y0 stores the value rsin(theta)
y0 = b*r
# x1 stores the rounded off value of (rcos(theta)-1000sin(theta))
x1 = int(x0 + 1000*(-b))
# y1 stores the rounded off value of (rsin(theta)+1000cos(theta))
y1 = int(y0 + 1000*(a))
# x2 stores the rounded off value of (rcos(theta)+1000sin(theta))
x2 = int(x0 - 1000*(-b))
# y2 stores the rounded off value of (rsin(theta)-1000cos(theta))
y2 = int(y0 - 1000*(a))
# cv2.line draws a line in img from the point(x1,y1) to (x2,y2).
            # (0, 0, 255) denotes the colour of the line to be drawn.
            # In this case, it is red.
cv2.line(warped,(x1,y1), (x2,y2), (0,0,255),2)
# labels = []
# num_lines = partition(lines, labels, isEqual)
# define criteria, number of clusters(K) and apply kmeans()
# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 54, 1.0)
# K = 54
# ret,label,center=cv2.kmeans(lines,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
#
# # Now convert back into uint8, and make original image
# center = np.uint8(center)
# res = center[label.flatten()]
# print(res.shape, img.shape)
# # res2 = res.reshape((img.shape))
# cv2.imshow('res',res)
# res2 = cv2.resize(res, warped.shape);
# cv2.imshow('img', img)
# cv2.imshow('res2',res2)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#
cv2.imwrite("unclustered_lines.jpg", warped)
#
cv2.imshow("lines", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()
# segmented = segment_by_angle_kmeans(lines)
# intersections = segmented_intersections(segmented)
# print(intersections)
# draw the intersection points
# intersectsimg = img.copy()
# for cx, cy in zip(intersections):
# cx = np.round(cx).astype(int)
# cy = np.round(cy).astype(int)
# color = np.random.randint(0,255,3).tolist() # random colors
# cv2.circle(intersectsimg, (cx, cy), radius=2, color=color, thickness=-1) # -1: filled circle
#
#
# cv2.imshow("intersections", intersectionimg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def main():
birdseye_correction()
if __name__ == "__main__":
main()
|
[
"numpy.sqrt",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.HoughLines",
"cv2.destroyAllWindows",
"numpy.sin",
"cv2.setMouseCallback",
"cv2.line",
"numpy.diff",
"numpy.argmin",
"cv2.waitKey",
"numpy.round",
"numpy.ones",
"cv2.getPerspectiveTransform",
"cv2.kmeans",
"numpy.argmax",
"cv2.morphologyEx",
"cv2.circle",
"numpy.cos",
"cv2.GaussianBlur",
"cv2.imread",
"cv2.namedWindow",
"cv2.Canny",
"cv2.imwrite",
"numpy.linalg.solve",
"imutils.resize",
"numpy.zeros",
"cv2.adaptiveThreshold",
"collections.defaultdict"
] |
[((429, 453), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (444, 453), False, 'import cv2\n'), ((458, 512), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle', 'target_img'], {}), "('image', draw_circle, target_img)\n", (478, 512), False, 'import cv2\n'), ((674, 697), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (695, 697), False, 'import cv2\n'), ((736, 753), 'numpy.array', 'np.array', (['corners'], {}), '(corners)\n', (744, 753), True, 'import numpy as np\n'), ((1031, 1064), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (1039, 1064), True, 'import numpy as np\n'), ((1437, 1457), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (1444, 1457), True, 'import numpy as np\n'), ((2151, 2191), 'numpy.array', 'np.array', (['[line[0][1] for line in lines]'], {}), '([line[0][1] for line in lines])\n', (2159, 2191), True, 'import numpy as np\n'), ((2606, 2623), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2617, 2623), False, 'from collections import defaultdict\n'), ((3160, 3186), 'numpy.array', 'np.array', (['[[rho1], [rho2]]'], {}), '([[rho1], [rho2]])\n', (3168, 3186), True, 'import numpy as np\n'), ((3200, 3221), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (3215, 3221), True, 'import numpy as np\n'), ((4367, 4385), 'cv2.imread', 'cv2.imread', (['img', '(0)'], {}), '(img, 0)\n', (4377, 4385), False, 'import cv2\n'), ((4399, 4431), 'imutils.resize', 'imutils.resize', (['img'], {'height': '(1000)'}), '(img, height=1000)\n', (4413, 4431), False, 'import imutils\n'), ((4739, 4791), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (4746, 4791), True, 'import numpy as np\n'), ((4799, 4851), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (4806, 4851), True, 'import numpy as np\n'), ((5095, 5147), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (5102, 5147), True, 'import numpy as np\n'), ((5156, 5208), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (5163, 5208), True, 'import numpy as np\n'), ((5262, 5372), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (5270, 5372), True, 'import numpy as np\n'), ((5462, 5500), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['rect', 'dst'], {}), '(rect, dst)\n', (5489, 5500), False, 'import cv2\n'), ((5514, 5568), 'cv2.warpPerspective', 'cv2.warpPerspective', (['resized', 'M', '(maxWidth, maxHeight)'], {}), '(resized, M, (maxWidth, maxHeight))\n', (5533, 5568), False, 'import cv2\n'), ((5574, 5602), 'cv2.imshow', 'cv2.imshow', (['"""warped"""', 'warped'], {}), "('warped', warped)\n", (5584, 5602), False, 'import cv2\n'), ((5607, 5621), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5618, 5621), False, 'import cv2\n'), ((5626, 5649), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5647, 5649), False, 'import cv2\n'), ((5724, 5759), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['warped', '(3, 
3)', '(0)'], {}), '(warped, (3, 3), 0)\n', (5740, 5759), False, 'import cv2\n'), ((5770, 5872), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['blurred_img', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(31)', '(2)'], {}), '(blurred_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2\n .THRESH_BINARY, 31, 2)\n', (5791, 5872), False, 'import cv2\n'), ((5914, 5939), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (5921, 5939), True, 'import numpy as np\n'), ((5952, 6014), 'cv2.morphologyEx', 'cv2.morphologyEx', (['binary', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(2)'}), '(binary, cv2.MORPH_OPEN, kernel, iterations=2)\n', (5968, 6014), False, 'import cv2\n'), ((6074, 6116), 'cv2.Canny', 'cv2.Canny', (['warped', '(50)', '(150)'], {'apertureSize': '(3)'}), '(warped, 50, 150, apertureSize=3)\n', (6083, 6116), False, 'import cv2\n'), ((6126, 6152), 'cv2.imshow', 'cv2.imshow', (['"""edges"""', 'edges'], {}), "('edges', edges)\n", (6136, 6152), False, 'import cv2\n'), ((6157, 6171), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6168, 6171), False, 'import cv2\n'), ((6176, 6199), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6197, 6199), False, 'import cv2\n'), ((6264, 6306), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(140)'], {}), '(edges, 1, np.pi / 180, 140)\n', (6278, 6306), False, 'import cv2\n'), ((8153, 8197), 'cv2.imwrite', 'cv2.imwrite', (['"""unclustered_lines.jpg"""', 'warped'], {}), "('unclustered_lines.jpg', warped)\n", (8164, 8197), False, 'import cv2\n'), ((8208, 8235), 'cv2.imshow', 'cv2.imshow', (['"""lines"""', 'warped'], {}), "('lines', warped)\n", (8218, 8235), False, 'import cv2\n'), ((8240, 8254), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8251, 8254), False, 'import cv2\n'), ((8259, 8282), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8280, 8282), False, 'import cv2\n'), ((534, 565), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'target_img'], {}), "('image', target_img)\n", (544, 565), False, 'import cv2\n'), ((1216, 1228), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (1225, 1228), True, 'import numpy as np\n'), ((1245, 1257), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (1254, 1257), True, 'import numpy as np\n'), ((1475, 1490), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1484, 1490), True, 'import numpy as np\n'), ((1507, 1522), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (1516, 1522), True, 'import numpy as np\n'), ((2429, 2480), 'cv2.kmeans', 'cv2.kmeans', (['pts', 'k', 'None', 'criteria', 'attempts', 'flags'], {}), '(pts, k, None, criteria, attempts, flags)\n', (2439, 2480), False, 'import cv2\n'), ((294, 339), 'cv2.circle', 'cv2.circle', (['param', '(x, y)', '(5)', '(255, 0, 0)', '(-1)'], {}), '(param, (x, y), 5, (255, 0, 0), -1)\n', (304, 339), False, 'import cv2\n'), ((577, 592), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (588, 592), False, 'import cv2\n'), ((3239, 3251), 'numpy.round', 'np.round', (['x0'], {}), '(x0)\n', (3247, 3251), True, 'import numpy as np\n'), ((3258, 3270), 'numpy.round', 'np.round', (['y0'], {}), '(y0)\n', (3266, 3270), True, 'import numpy as np\n'), ((6515, 6528), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6521, 6528), True, 'import numpy as np\n'), ((6595, 6608), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6601, 6608), True, 'import numpy as np\n'), ((7391, 7443), 'cv2.line', 'cv2.line', (['warped', '(x1, y1)', '(x2, y2)', 
'(0, 0, 255)', '(2)'], {}), '(warped, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (7399, 7443), False, 'import cv2\n'), ((2281, 2298), 'numpy.cos', 'np.cos', (['(2 * angle)'], {}), '(2 * angle)\n', (2287, 2298), True, 'import numpy as np\n'), ((2298, 2315), 'numpy.sin', 'np.sin', (['(2 * angle)'], {}), '(2 * angle)\n', (2304, 2315), True, 'import numpy as np\n'), ((3071, 3085), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (3077, 3085), True, 'import numpy as np\n'), ((3087, 3101), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (3093, 3101), True, 'import numpy as np\n'), ((3113, 3127), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3119, 3127), True, 'import numpy as np\n'), ((3129, 3143), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (3135, 3143), True, 'import numpy as np\n')]
|
import logging as log
import numpy as np
import h5py
import humblerl as hrl
from humblerl import Callback, Interpreter
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal
from torch.utils.data import Dataset
from common_utils import get_model_path_if_exists
from third_party.torchtrainer import TorchTrainer, evaluate
class MDNInterpreter(Interpreter, Callback):
"""Performs state preprocessing with VAE module and concatenates it with hidden state of MDN module.
Args:
vae_model (keras.Model): Keras VAE encoder.
mdn_model (torch.nn.Module): PyTorch MDN-RNN memory.
latent_dim (int): Latent space dimensionality.
Note:
        In order to work, this Interpreter must also be passed as a callback to 'hrl.loop(...)'!
"""
def __init__(self, vae_model, mdn_model, latent_dim):
self.vae_model = vae_model
self.mdn_model = mdn_model
self.latent_dim = latent_dim
def __call__(self, state, reward=0.):
return self.process_state(state), reward
def process_state(self, state):
        # NOTE: [0][0] selects the latent-space mean (mu) of the first sample in the batch
latent = self.vae_model.predict(state[np.newaxis, :])[0][0]
memory = self.mdn_model.hidden[0].cpu().detach().numpy()
        # NOTE: See HRL `ply`: `on_step_taken`, which updates the hidden state, is called AFTER
        # the Interpreter has preprocessed next_state, so next_state carries an out-dated hidden state!
        # What saves us is that `state` in the next `ply` call will have it updated, so
        # Transition.state holds an up-to-date latent and hidden state, and everywhere else it is
        # exactly that which is used, not next_state.
return np.concatenate((latent, memory.flatten()))
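    # Illustrative note (the sizes below are assumptions, not from the source): with latent_dim = 32
    # and a single-layer LSTM of 256 hidden units, process_state returns a feature vector of
    # 32 + 1 * 256 = 288 values (latent mean concatenated with the flattened hidden state).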
def on_episode_start(self, episode, train_mode):
self.mdn_model.init_hidden(1)
def on_step_taken(self, step, transition, info):
state = torch.from_numpy(transition.state[:self.latent_dim]).view(1, 1, -1)
action = torch.from_numpy(np.array([transition.action])).view(1, 1, -1)
if torch.cuda.is_available():
state = state.cuda()
action = action.cuda()
with torch.no_grad(), evaluate(self.mdn_model) as net:
net(state, action)
class MDNDataset(Dataset):
"""Dataset of sequential data to train MDN-RNN.
Args:
dataset_path (string): Path to HDF5 dataset file.
sequence_len (int): Desired output sequence len.
terminal_prob (float): Probability of sampling sequence that finishes with
terminal state. (Default: 0.5)
Note:
        Arrays should have the same size along the first dimension, and their type should match
        the desired Tensor type.
"""
def __init__(self, dataset_path, sequence_len, terminal_prob=0.5, dataset_fraction=1.):
assert 0 < terminal_prob and terminal_prob <= 1.0, "0 < terminal_prob <= 1.0"
assert 0 < dataset_fraction and dataset_fraction <= 1.0, "0 < dataset_fraction <= 1.0"
self.dataset = h5py.File(dataset_path, "r")
self.sequence_len = sequence_len
self.terminal_prob = terminal_prob
self.dataset_fraction = dataset_fraction
self.latent_dim = self.dataset.attrs["LATENT_DIM"]
self.action_dim = self.dataset.attrs["ACTION_DIM"]
def __getitem__(self, idx):
"""Get sequence at random starting position of given sequence length from episode `idx`."""
offset = 1
t_start, t_end = self.dataset['episodes'][idx:idx + 2]
episode_length = t_end - t_start
if self.sequence_len <= episode_length - offset:
sequence_len = self.sequence_len
else:
sequence_len = episode_length - offset
log.warning(
"Episode %d is too short to form full sequence, data will be zero-padded.", idx)
# Sample where to start sequence of length `self.sequence_len` in episode `idx`
# '- offset' because "next states" are offset by 'offset'
if np.random.rand() < self.terminal_prob:
# Take sequence ending with terminal state
start = t_start + episode_length - sequence_len - offset
else:
# NOTE: np.random.randint takes EXCLUSIVE upper bound of range to sample from
start = t_start + np.random.randint(max(1, episode_length - sequence_len - offset))
states_ = torch.from_numpy(self.dataset['states'][start:start + sequence_len + offset])
actions_ = torch.from_numpy(self.dataset['actions'][start:start + sequence_len])
states = torch.zeros(self.sequence_len, self.latent_dim, dtype=states_.dtype)
next_states = torch.zeros(self.sequence_len, self.latent_dim, dtype=states_.dtype)
actions = torch.zeros(self.sequence_len, self.action_dim, dtype=actions_.dtype)
# Sample latent states (this is done to prevent overfitting MDN-RNN to a specific 'z'.)
mu = states_[:, 0]
sigma = torch.exp(states_[:, 1] / 2)
latent = Normal(loc=mu, scale=sigma)
z_samples = latent.sample()
states[:sequence_len] = z_samples[:-offset]
next_states[:sequence_len] = z_samples[offset:]
actions[:sequence_len] = actions_
return [states, actions], [next_states]
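        # Note (interpretation, not from the original source): `states_[:, 0]` and `states_[:, 1]` are
        # read above as the VAE's per-timestep mean and log-variance (sigma = exp(logvar / 2)), so a
        # fresh z ~ N(mu, sigma) is drawn every time the sequence is fetched.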
def __len__(self):
return int(self.dataset.attrs["N_GAMES"] * self.dataset_fraction)
def close(self):
self.dataset.close()
class MDN(nn.Module):
def __init__(self, hidden_units, latent_dim, action_space, temperature, n_gaussians, num_layers=1):
super(MDN, self).__init__()
self.hidden_units = hidden_units
self.latent_dim = latent_dim
self.temperature = temperature
self.n_gaussians = n_gaussians
self.num_layers = num_layers
self.embedding = nn.Embedding.from_pretrained(torch.eye(action_space.num)) \
if isinstance(action_space, hrl.environments.Discrete) else None
self.lstm = nn.LSTM(input_size=(latent_dim + action_space.num),
hidden_size=hidden_units,
num_layers=num_layers,
batch_first=True)
self.pi = nn.Linear(hidden_units, n_gaussians * latent_dim)
self.mu = nn.Linear(hidden_units, n_gaussians * latent_dim)
self.logsigma = nn.Linear(hidden_units, n_gaussians * latent_dim)
# NOTE: This is here only for backward compatibility with trained checkpoint
self.reward = nn.Linear(hidden_units, 1)
def forward(self, latent, action, hidden=None):
self.lstm.flatten_parameters()
sequence_len = latent.size(1)
if self.embedding:
# Use one-hot representation for discrete actions
x = torch.cat((latent, self.embedding(action).squeeze(dim=2)), dim=2)
else:
# Pass raw action vector for continuous actions
x = torch.cat((latent, action.float()), dim=2)
h, self.hidden = self.lstm(x, hidden if hidden else self.hidden)
pi = self.pi(h).view(-1, sequence_len, self.n_gaussians, self.latent_dim) / self.temperature
pi = torch.softmax(pi, dim=2)
logsigma = self.logsigma(h).view(-1, sequence_len, self.n_gaussians, self.latent_dim)
sigma = torch.exp(logsigma)
mu = self.mu(h).view(-1, sequence_len, self.n_gaussians, self.latent_dim)
return mu, sigma, pi
def sample(self, latent, action, hidden=None):
"""Sample (simulate) next state from Mixture Density Network a.k.a. Gaussian Mixture Model.
Args:
latent (torch.Tensor): Latent vectors to start from.
Shape of tensor: batch x sequence x latent dim.
action (torch.Tensor): Actions to simulate.
Shape of tensor: batch x sequence x action dim.
hidden (tuple): Memory module (torch.nn.LSTM) hidden state.
Return:
numpy.ndarray: Latent vector of next state.
Shape of array: batch x sequence x latent dim.
Note:
You can find next hidden state in this module `hidden` member.
"""
# Simulate transition
with torch.no_grad(), evaluate(self) as net:
mu, sigma, pi = net(latent, action, hidden)
# Transform tensors to numpy arrays and move "gaussians mixture" dim to the end
# NOTE: Arrays will have shape (batch x sequence x latent dim. x num. gaussians)
mu = np.transpose(mu.cpu().detach().numpy(), axes=[0, 1, 3, 2])
sigma = np.transpose(sigma.cpu().detach().numpy(), axes=[0, 1, 3, 2])
pi = np.transpose(pi.cpu().detach().numpy(), axes=[0, 1, 3, 2])
# Sample parameters of Gaussian distribution(s) from mixture
c = pi.cumsum(axis=-1)
u = np.random.rand(*c.shape[:-1], 1)
choices = np.expand_dims((u < c).argmax(axis=-1), axis=-1)
# Sample latent vector from Gaussian distribution with mean and std. dev. from above
mean = np.take_along_axis(mu, choices, axis=-1)
stddev = np.take_along_axis(sigma, choices, axis=-1)
samples = mean + stddev * np.random.randn(*mean.shape)
return np.squeeze(samples, axis=-1)
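    # Illustrative usage sketch (all sizes and names below are assumptions): for a discrete action space,
    #   mdn = MDN(hidden_units=256, latent_dim=32, action_space=action_space, temperature=1.0, n_gaussians=5)
    #   mdn.init_hidden(batch_size=1)
    #   z_next = mdn.sample(torch.zeros(1, 1, 32), torch.zeros(1, 1, 1, dtype=torch.long))
    #   # z_next is a numpy array of shape (1, 1, 32); self.hidden now holds the advanced LSTM state.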
def simulate(self, latent, actions):
"""Simulate environment trajectory.
Args:
latent (torch.Tensor): Latent vector with state(s) to start from.
Shape of tensor: batch x 1 (sequence dim.) x latent dim.
actions (torch.Tensor): Tensor with actions to take in simulated trajectory.
Shape of tensor: batch x sequence x action dim.
Return:
np.ndarray: Array of latent vectors of simulated trajectory.
Shape of array: batch x sequence x latent dim.
Note:
You can find next hidden state in this module `hidden` member.
"""
states = []
for a in range(actions.shape[1]):
# NOTE: We use np.newaxis to preserve shape of tensor.
states.append(self.sample(latent, actions[:, a, np.newaxis, :]))
# NOTE: This is a bit arbitrary to set it to float32 which happens to be type of torch
# tensors. It can blow up further in code if we'll choose to change tensors types.
latent = torch.from_numpy(states[-1]).float().to(next(self.parameters()).device)
# NOTE: Squeeze former sequence dim. (which is 1 because we inferred next latent state
# action by action) and reorder batch dim. and list sequence dim. to finally get:
# batch x len(states) (sequence dim.) x latent dim.
return np.transpose(np.squeeze(np.array(states), axis=2), axes=[1, 0, 2])
def init_hidden(self, batch_size):
device = next(self.parameters()).device
self.hidden = (
torch.zeros(self.num_layers, batch_size, self.hidden_units, device=device),
torch.zeros(self.num_layers, batch_size, self.hidden_units, device=device)
)
def build_rnn_model(rnn_params, latent_dim, action_space, model_path=None):
"""Builds MDN-RNN memory module, which model time dependencies.
Args:
rnn_params (dict): MDN-RNN parameters from .json config.
latent_dim (int): Latent space dimensionality.
action_space (hrl.environments.ActionSpace): Action space, discrete or continuous.
model_path (str): Path to VAE ckpt. Taken from .json config if `None` (Default: None)
Returns:
TorchTrainer: Compiled MDN-RNN model wrapped in TorchTrainer, ready for training.
"""
use_cuda = torch.cuda.is_available()
def mdn_loss_function(pred, target):
"""Mixed Density Network loss function, see:
https://mikedusenberry.com/mixture-density-networks"""
mu, sigma, pi = pred
sequence_len = mu.size(1)
latent_dim = mu.size(3)
target = target.view(-1, sequence_len, 1, latent_dim)
loss = Normal(loc=mu, scale=sigma)
loss = torch.exp(loss.log_prob(target)) # TODO: Is this stable?! Check that.
loss = torch.sum(loss * pi, dim=2)
loss = -torch.log(loss + 1e-9)
return torch.mean(loss)
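    # Suggested answer to the TODO above (an assumption, not part of the original code): a more
    # numerically stable formulation would stay in log-space, e.g.
    #   log_prob = Normal(loc=mu, scale=sigma).log_prob(target)
    #   loss = -torch.logsumexp(torch.log(pi + 1e-9) + log_prob, dim=2)
    # followed by torch.mean(loss), avoiding the exponentiation of per-component log-probabilities.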
mdn = TorchTrainer(MDN(rnn_params['hidden_units'], latent_dim, action_space,
rnn_params['temperature'], rnn_params['n_gaussians']),
device_name='cuda' if use_cuda else 'cpu')
mdn.compile(
optimizer=optim.Adam(mdn.model.parameters(), lr=rnn_params['learning_rate']),
loss=mdn_loss_function
)
model_path = get_model_path_if_exists(
path=model_path, default_path=rnn_params['ckpt_path'], model_name="MDN-RNN")
if model_path is not None:
mdn.load_ckpt(model_path)
log.info("Loaded MDN-RNN model weights from: %s", model_path)
return mdn
|
[
"numpy.random.rand",
"torch.from_numpy",
"torch.exp",
"torch.softmax",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"numpy.take_along_axis",
"logging.info",
"torch.nn.LSTM",
"torch.mean",
"torch.eye",
"torch.distributions.Normal",
"logging.warning",
"h5py.File",
"numpy.squeeze",
"numpy.random.randn",
"torch.log",
"third_party.torchtrainer.evaluate",
"torch.nn.Linear",
"torch.no_grad",
"common_utils.get_model_path_if_exists",
"torch.zeros"
] |
[((11699, 11724), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11722, 11724), False, 'import torch\n'), ((12676, 12782), 'common_utils.get_model_path_if_exists', 'get_model_path_if_exists', ([], {'path': 'model_path', 'default_path': "rnn_params['ckpt_path']", 'model_name': '"""MDN-RNN"""'}), "(path=model_path, default_path=rnn_params[\n 'ckpt_path'], model_name='MDN-RNN')\n", (12700, 12782), False, 'from common_utils import get_model_path_if_exists\n'), ((2157, 2182), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2180, 2182), False, 'import torch\n'), ((3126, 3154), 'h5py.File', 'h5py.File', (['dataset_path', '"""r"""'], {}), "(dataset_path, 'r')\n", (3135, 3154), False, 'import h5py\n'), ((4501, 4578), 'torch.from_numpy', 'torch.from_numpy', (["self.dataset['states'][start:start + sequence_len + offset]"], {}), "(self.dataset['states'][start:start + sequence_len + offset])\n", (4517, 4578), False, 'import torch\n'), ((4598, 4667), 'torch.from_numpy', 'torch.from_numpy', (["self.dataset['actions'][start:start + sequence_len]"], {}), "(self.dataset['actions'][start:start + sequence_len])\n", (4614, 4667), False, 'import torch\n'), ((4686, 4754), 'torch.zeros', 'torch.zeros', (['self.sequence_len', 'self.latent_dim'], {'dtype': 'states_.dtype'}), '(self.sequence_len, self.latent_dim, dtype=states_.dtype)\n', (4697, 4754), False, 'import torch\n'), ((4777, 4845), 'torch.zeros', 'torch.zeros', (['self.sequence_len', 'self.latent_dim'], {'dtype': 'states_.dtype'}), '(self.sequence_len, self.latent_dim, dtype=states_.dtype)\n', (4788, 4845), False, 'import torch\n'), ((4864, 4933), 'torch.zeros', 'torch.zeros', (['self.sequence_len', 'self.action_dim'], {'dtype': 'actions_.dtype'}), '(self.sequence_len, self.action_dim, dtype=actions_.dtype)\n', (4875, 4933), False, 'import torch\n'), ((5074, 5102), 'torch.exp', 'torch.exp', (['(states_[:, 1] / 2)'], {}), '(states_[:, 1] / 2)\n', (5083, 5102), False, 'import torch\n'), ((5120, 5147), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'mu', 'scale': 'sigma'}), '(loc=mu, scale=sigma)\n', (5126, 5147), False, 'from torch.distributions import Normal\n'), ((6074, 6194), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(latent_dim + action_space.num)', 'hidden_size': 'hidden_units', 'num_layers': 'num_layers', 'batch_first': '(True)'}), '(input_size=latent_dim + action_space.num, hidden_size=hidden_units,\n num_layers=num_layers, batch_first=True)\n', (6081, 6194), True, 'import torch.nn as nn\n'), ((6295, 6344), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(n_gaussians * latent_dim)'], {}), '(hidden_units, n_gaussians * latent_dim)\n', (6304, 6344), True, 'import torch.nn as nn\n'), ((6363, 6412), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(n_gaussians * latent_dim)'], {}), '(hidden_units, n_gaussians * latent_dim)\n', (6372, 6412), True, 'import torch.nn as nn\n'), ((6437, 6486), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(n_gaussians * latent_dim)'], {}), '(hidden_units, n_gaussians * latent_dim)\n', (6446, 6486), True, 'import torch.nn as nn\n'), ((6594, 6620), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(1)'], {}), '(hidden_units, 1)\n', (6603, 6620), True, 'import torch.nn as nn\n'), ((7244, 7268), 'torch.softmax', 'torch.softmax', (['pi'], {'dim': '(2)'}), '(pi, dim=2)\n', (7257, 7268), False, 'import torch\n'), ((7380, 7399), 'torch.exp', 'torch.exp', (['logsigma'], {}), '(logsigma)\n', (7389, 7399), False, 'import torch\n'), ((8892, 8924), 
'numpy.random.rand', 'np.random.rand', (['*c.shape[:-1]', '(1)'], {}), '(*c.shape[:-1], 1)\n', (8906, 8924), True, 'import numpy as np\n'), ((9101, 9141), 'numpy.take_along_axis', 'np.take_along_axis', (['mu', 'choices'], {'axis': '(-1)'}), '(mu, choices, axis=-1)\n', (9119, 9141), True, 'import numpy as np\n'), ((9159, 9202), 'numpy.take_along_axis', 'np.take_along_axis', (['sigma', 'choices'], {'axis': '(-1)'}), '(sigma, choices, axis=-1)\n', (9177, 9202), True, 'import numpy as np\n'), ((9282, 9310), 'numpy.squeeze', 'np.squeeze', (['samples'], {'axis': '(-1)'}), '(samples, axis=-1)\n', (9292, 9310), True, 'import numpy as np\n'), ((12058, 12085), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'mu', 'scale': 'sigma'}), '(loc=mu, scale=sigma)\n', (12064, 12085), False, 'from torch.distributions import Normal\n'), ((12187, 12214), 'torch.sum', 'torch.sum', (['(loss * pi)'], {'dim': '(2)'}), '(loss * pi, dim=2)\n', (12196, 12214), False, 'import torch\n'), ((12270, 12286), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (12280, 12286), False, 'import torch\n'), ((12861, 12922), 'logging.info', 'log.info', (['"""Loaded MDN-RNN model weights from: %s"""', 'model_path'], {}), "('Loaded MDN-RNN model weights from: %s', model_path)\n", (12869, 12922), True, 'import logging as log\n'), ((2265, 2280), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2278, 2280), False, 'import torch\n'), ((2282, 2306), 'third_party.torchtrainer.evaluate', 'evaluate', (['self.mdn_model'], {}), '(self.mdn_model)\n', (2290, 2306), False, 'from third_party.torchtrainer import TorchTrainer, evaluate\n'), ((3843, 3944), 'logging.warning', 'log.warning', (['"""Episode %d is too short to form full sequence, data will be zero-padded."""', 'idx'], {}), "(\n 'Episode %d is too short to form full sequence, data will be zero-padded.',\n idx)\n", (3854, 3944), True, 'import logging as log\n'), ((4119, 4135), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4133, 4135), True, 'import numpy as np\n'), ((8283, 8298), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8296, 8298), False, 'import torch\n'), ((8300, 8314), 'third_party.torchtrainer.evaluate', 'evaluate', (['self'], {}), '(self)\n', (8308, 8314), False, 'from third_party.torchtrainer import TorchTrainer, evaluate\n'), ((10936, 11010), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'batch_size', 'self.hidden_units'], {'device': 'device'}), '(self.num_layers, batch_size, self.hidden_units, device=device)\n', (10947, 11010), False, 'import torch\n'), ((11024, 11098), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'batch_size', 'self.hidden_units'], {'device': 'device'}), '(self.num_layers, batch_size, self.hidden_units, device=device)\n', (11035, 11098), False, 'import torch\n'), ((12231, 12254), 'torch.log', 'torch.log', (['(loss + 1e-09)'], {}), '(loss + 1e-09)\n', (12240, 12254), False, 'import torch\n'), ((1998, 2050), 'torch.from_numpy', 'torch.from_numpy', (['transition.state[:self.latent_dim]'], {}), '(transition.state[:self.latent_dim])\n', (2014, 2050), False, 'import torch\n'), ((5946, 5973), 'torch.eye', 'torch.eye', (['action_space.num'], {}), '(action_space.num)\n', (5955, 5973), False, 'import torch\n'), ((9237, 9265), 'numpy.random.randn', 'np.random.randn', (['*mean.shape'], {}), '(*mean.shape)\n', (9252, 9265), True, 'import numpy as np\n'), ((10768, 10784), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (10776, 10784), True, 'import numpy as np\n'), ((2100, 2129), 'numpy.array', 'np.array', 
(['[transition.action]'], {}), '([transition.action])\n', (2108, 2129), True, 'import numpy as np\n'), ((10399, 10427), 'torch.from_numpy', 'torch.from_numpy', (['states[-1]'], {}), '(states[-1])\n', (10415, 10427), False, 'import torch\n')]
|
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rcParams
params = {
# 'text.latex.preamble': ['\\usepackage{gensymb}'],
# 'text.usetex': True,
'font.family': 'Helvetica',
'lines.solid_capstyle':'butt',
'lines.markeredgewidth': 1,
}
rcParams.update(params)
sns.set_context("paper", font_scale=1.6, rc={"lines.linewidth": 2})
sns.set_style('white')
sns.set_palette("cividis")
dir_path = os.path.dirname(os.path.realpath(__file__))
def main():
sens = pd.read_csv('model_sensitivities.csv',header=0,index_col=0) # ,low_memory=False)
occu = pd.read_csv('sc_occurrence_data.csv',header=0,index_col=0)
    occu.index = [val if val[6] != '_' else val[:6] + val[7:] for val in occu.index.values]  # reconcile the two naming conventions used for model ids
occu['cluster'] = [sens.loc[((sens['model_id']==i) & (sens['experiment']=='classification')),'cluster'].values[0] for i in occu.index.values]
clust_list = ['Dominated Cluster','Overfit Cluster','Parsimonious Cluster',]
occu = occu[[True if i in [1,2,3] else False for i in occu['cluster'].values]]
occu['Cluster'] = [clust_list[i-1] for i in occu['cluster']]
occu = occu.drop(['training_error', 'complexity', 'test_error','cluster'],axis=1)
occu[occu.columns[:-1]] = occu[occu.columns[:-1]] > 0
occu = occu.groupby(['Cluster']).sum()
inpu = occu[occu.columns[:-9]].stack()
inputs = pd.DataFrame()
inputs['Cluster'] = inpu.index.get_level_values(0)
inputs['Input'] = inpu.index.get_level_values(1)
inputs['category'] = [categorize(i) for i in inputs['Input'].values]
inputs['Category'] = [translate(i) for i in inputs['category'].values]
inputs['Lag'] = [lag(i) for i in inputs['Input'].values]
inputs['Neighbor'] = [neighbor(i) for i in inputs['Input'].values]
inputs['Occurrence'] = inpu.values
func = occu[occu.columns[-9:]].stack()
functions = pd.DataFrame()
functions['Cluster'] = func.index.get_level_values(0)
functions['Function'] = [func_trans(i) for i in func.index.get_level_values(1)]
functions['Occurrence'] = func.values
plots = {'category':'Category',
'neighborhood':'Neighborhood',
'lag':'Lag'
} # three in total
orders = {'category':['land_tree','land_non','water_pump','water_deliv','econ_tree','econ_non'],
'neighborhood':['home','neighbor_1','neighbor_2','neighbor_3','neighbor_4','neighbor_5'],
'lag':['present','lag1','lag2','lag3','lag4','lag5','lag6'],
'function':['Addition','Subtraction','Multiplication','Division','Negative','Sine','Cosine','Less Than','If-Then-Else'],
'Category':['Tree Acreage','Non-Tree Acreage','Tree Prices/Values','Non-Tree Prices/Values','Water Deliveries','Water Pumping'],
'Cluster':['Parsimonious Cluster','Dominated Cluster','Overfit Cluster'],
# 'color':['midnightblue','Red']
}
colors = ['midnightblue','Red','Blue'] #,'c','m','y','b']
fig, axes = plt.subplots(1,2,figsize=(8,6))
g2 = sns.boxplot(x='Occurrence',
y='Category',
order=orders['Category'],
hue='Cluster',
hue_order=orders['Cluster'],
data=inputs,
whis='range',
dodge=True,
# width=0.8,
linewidth=2,
palette=colors,
ax=axes[0],
)
g1 = sns.scatterplot(x='Occurrence',
y='Function',
marker='o',
palette=colors,
s=100,
alpha=0.9,
hue='Cluster',
hue_order=orders['Cluster'],
data=functions,
ax=axes[1]
)
adjust_box_widths(fig, 0.8)
for i,artist in enumerate(axes[0].artists):
# Set the linecolor on the artist to the facecolor, and set the facecolor to None
col = artist.get_facecolor()
artist.set_edgecolor(col)
artist.set_facecolor('None')
# Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
# Loop over them here, and use the same colour as above
for j in range(i*6,i*6+6):
line = axes[0].lines[j]
line.set_color(col)
line.set_mfc(col)
line.set_mec(col)
line.set_solid_capstyle('butt')
        axes[0].lines[i*6+4].set_ydata(axes[0].lines[i*6+2].get_ydata())
axes[0].set_xscale('log')
axes[0].legend_.remove()
axes[1].legend_.remove()
axes[1].legend(frameon=False,markerscale=2,bbox_to_anchor=(1, 1),ncol=4,bbox_transform=plt.gcf().transFigure)
axes[1].yaxis.set_label_position("right")
axes[1].yaxis.tick_right()
axes[0].text(x= 0.95,y=0.8,s='(A)',ha='right',va='top',transform=axes[0].transAxes)
axes[1].text(x= 0.05,y=0.8,s='(B)',ha='left',va='top',transform=axes[1].transAxes)
# for patch in axes[0].artists:
# r, g, b, a = patch.get_facecolor()
# patch.set_facecolor((r, g, b, .9))
# plt.tight_layout()
plt.subplots_adjust(wspace=0.05)
fig.savefig('plot_occurrence.pdf',format='pdf',bbox_inches='tight',dpi=600,transparent=True)
from matplotlib.patches import PathPatch
def adjust_box_widths(g, fac):
"""
    Adjust the widths of a seaborn-generated boxplot.
"""
# iterating through Axes instances
for ax in g.axes:
# iterating through axes artists:
for i,c in enumerate(ax.get_children()):
# searching for PathPatches
if isinstance(c, PathPatch):
# getting current width of box:
p = c.get_path()
verts = p.vertices
# print(verts)
verts_sub = verts[:-1]
xmin = np.min(verts_sub[:, 1])
xmax = np.max(verts_sub[:, 1])
xmid = 0.5*(xmin+xmax)
xhalf = 0.5*(xmax - xmin)
# setting new width of box
xmin_new = xmid-fac*xhalf
xmax_new = xmid+fac*xhalf
verts_sub[verts_sub[:, 1] == xmin, 1] = xmin_new
verts_sub[verts_sub[:, 1] == xmax, 1] = xmax_new
# setting new width of median line
for l in ax.lines:
if np.all(l.get_xdata() == [xmin, xmax]):
l.set_xdata([xmin_new, xmax_new])
def categorize(term):
trees = ['ALMOND','ALMONDHULLS','APRICOT','NECTARINES','PISTACHIO','PLUMS','WALNUT']
if ('ppu' in term) or ('value' in term):
if any(tree in term for tree in trees):
category = 'econ_tree'
else:
category = 'econ_non'
elif 'Pump' in term or 'Deliv' in term:
if 'Pump' in term:
category = 'water_pump'
else:
category = 'water_deliv'
elif 'tree' in term:
category = 'land_tree'
elif 'non_' in term:
category = 'land_non'
else:
category = 'none'
return category
def lag(term):
lags = ['lag1','lag2','lag3','lag4','lag5','lag6']
if any(lag_ in term for lag_ in lags):
lag = lags[np.argmax([lag_ in term for lag_ in lags])]
else:
lag = 'present'
return lag
def neighbor(term):
if 'neighbor' in term:
neighbor = term[:10]
else:
neighbor = 'home'
return neighbor
def func_trans(term):
if term == 'lt':
return 'Less Than'
elif term == 'ite':
return 'If-Then-Else'
elif term == 'vadd':
return 'Addition'
elif term == 'vsub':
return 'Subtraction'
elif term == 'vmul':
return 'Multiplication'
elif term == 'vdiv':
return 'Division'
elif term == 'vneg':
return 'Negative'
elif term == 'vsin':
return 'Sine'
elif term == 'vcos':
return 'Cosine'
def translate(item):
if item == 'land_tree':
return 'Tree Acreage'
if item == 'land_non':
return 'Non-Tree Acreage'
if item == 'water_pump':
return 'Water Pumping'
if item == 'water_deliv':
return 'Water Deliveries'
if item == 'econ_tree':
return 'Tree Prices/Values'
if item == 'econ_non':
return 'Non-Tree Prices/Values'
if item == 'home':
return 'Current Plot Data'
if item == 'neighbor_1':
return 'Neighbor 1 Data'
if item == 'neighbor_2':
return 'Neighbor 2 Data'
if item == 'neighbor_3':
return 'Neighbor 3 Data'
if item == 'neighbor_4':
return 'Neighbor 4 Data'
if item == 'neighbor_5':
return 'Neighbor 5 Data'
if item == 'present':
return 'Present Data'
if item == 'lag1':
return "Previous Year's Data"
if item == 'lag2':
return 'Two Years Previous'
if item == 'lag3':
return 'Three Years Previous'
if item == 'lag4':
return 'Four Years Previous'
if item == 'lag5':
return 'Five Years Previous'
if item == 'lag6':
return 'Six Years Previous'
if __name__ == "__main__":
main()
|
[
"seaborn.set_palette",
"matplotlib.rcParams.update",
"pandas.read_csv",
"matplotlib.pyplot.gcf",
"seaborn.set_context",
"numpy.argmax",
"numpy.max",
"seaborn.set_style",
"os.path.realpath",
"seaborn.boxplot",
"seaborn.scatterplot",
"numpy.min",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust"
] |
[((365, 388), 'matplotlib.rcParams.update', 'rcParams.update', (['params'], {}), '(params)\n', (380, 388), False, 'from matplotlib import rcParams\n'), ((390, 457), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'font_scale': '(1.6)', 'rc': "{'lines.linewidth': 2}"}), "('paper', font_scale=1.6, rc={'lines.linewidth': 2})\n", (405, 457), True, 'import seaborn as sns\n'), ((458, 480), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (471, 480), True, 'import seaborn as sns\n'), ((481, 507), 'seaborn.set_palette', 'sns.set_palette', (['"""cividis"""'], {}), "('cividis')\n", (496, 507), True, 'import seaborn as sns\n'), ((536, 562), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (552, 562), False, 'import os\n'), ((585, 646), 'pandas.read_csv', 'pd.read_csv', (['"""model_sensitivities.csv"""'], {'header': '(0)', 'index_col': '(0)'}), "('model_sensitivities.csv', header=0, index_col=0)\n", (596, 646), True, 'import pandas as pd\n'), ((674, 734), 'pandas.read_csv', 'pd.read_csv', (['"""sc_occurrence_data.csv"""'], {'header': '(0)', 'index_col': '(0)'}), "('sc_occurrence_data.csv', header=0, index_col=0)\n", (685, 734), True, 'import pandas as pd\n'), ((1455, 1469), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1467, 1469), True, 'import pandas as pd\n'), ((1930, 1944), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1942, 1944), True, 'import pandas as pd\n'), ((2918, 2952), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 6)'}), '(1, 2, figsize=(8, 6))\n', (2930, 2952), True, 'import matplotlib.pyplot as plt\n'), ((2958, 3158), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Occurrence"""', 'y': '"""Category"""', 'order': "orders['Category']", 'hue': '"""Cluster"""', 'hue_order': "orders['Cluster']", 'data': 'inputs', 'whis': '"""range"""', 'dodge': '(True)', 'linewidth': '(2)', 'palette': 'colors', 'ax': 'axes[0]'}), "(x='Occurrence', y='Category', order=orders['Category'], hue=\n 'Cluster', hue_order=orders['Cluster'], data=inputs, whis='range',\n dodge=True, linewidth=2, palette=colors, ax=axes[0])\n", (2969, 3158), True, 'import seaborn as sns\n'), ((3236, 3409), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""Occurrence"""', 'y': '"""Function"""', 'marker': '"""o"""', 'palette': 'colors', 's': '(100)', 'alpha': '(0.9)', 'hue': '"""Cluster"""', 'hue_order': "orders['Cluster']", 'data': 'functions', 'ax': 'axes[1]'}), "(x='Occurrence', y='Function', marker='o', palette=colors, s\n =100, alpha=0.9, hue='Cluster', hue_order=orders['Cluster'], data=\n functions, ax=axes[1])\n", (3251, 3409), True, 'import seaborn as sns\n'), ((4664, 4696), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.05)'}), '(wspace=0.05)\n', (4683, 4696), True, 'import matplotlib.pyplot as plt\n'), ((6360, 6404), 'numpy.argmax', 'np.argmax', (['[(lag_ in term) for lag_ in lags]'], {}), '([(lag_ in term) for lag_ in lags])\n', (6369, 6404), True, 'import numpy as np\n'), ((4258, 4267), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4265, 4267), True, 'import matplotlib.pyplot as plt\n'), ((5261, 5284), 'numpy.min', 'np.min', (['verts_sub[:, 1]'], {}), '(verts_sub[:, 1])\n', (5267, 5284), True, 'import numpy as np\n'), ((5296, 5319), 'numpy.max', 'np.max', (['verts_sub[:, 1]'], {}), '(verts_sub[:, 1])\n', (5302, 5319), True, 'import numpy as np\n')]
|
import numpy as np
def split_ids(args, ids, folds=10):
if args.dataset == 'COLORS-3':
assert folds == 1, 'this dataset has train, val and test splits'
train_ids = [np.arange(500)]
val_ids = [np.arange(500, 3000)]
test_ids = [np.arange(3000, 10500)]
elif args.dataset == 'TRIANGLES':
assert folds == 1, 'this dataset has train, val and test splits'
train_ids = [np.arange(30000)]
val_ids = [np.arange(30000, 35000)]
test_ids = [np.arange(35000, 45000)]
else:
n = len(ids)
stride = int(np.ceil(n / float(folds)))
test_ids = [ids[i: i + stride] for i in range(0, n, stride)]
assert np.all(
np.unique(np.concatenate(test_ids)) == sorted(ids)), 'some graphs are missing in the test sets'
assert len(test_ids) == folds, 'invalid test sets'
train_ids = []
for fold in range(folds):
train_ids.append(np.array([e for e in ids if e not in test_ids[fold]]))
assert len(train_ids[fold]) + len(test_ids[fold]) == len(
np.unique(list(train_ids[fold]) + list(test_ids[fold]))) == n, 'invalid splits'
return train_ids, test_ids
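# Hedged usage sketch (added for illustration, not part of the original file): for a
# generic dataset `ids` is just a shuffled array of graph indices, and the function
# returns `folds` disjoint test sets whose union covers every graph. The names below
# (`seed`, `n_graphs`) are hypothetical.
#   rnd = np.random.RandomState(seed)
#   train_ids, test_ids = split_ids(args, rnd.permutation(n_graphs), folds=10)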
|
[
"numpy.concatenate",
"numpy.array",
"numpy.arange"
] |
[((186, 200), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (195, 200), True, 'import numpy as np\n'), ((221, 241), 'numpy.arange', 'np.arange', (['(500)', '(3000)'], {}), '(500, 3000)\n', (230, 241), True, 'import numpy as np\n'), ((263, 285), 'numpy.arange', 'np.arange', (['(3000)', '(10500)'], {}), '(3000, 10500)\n', (272, 285), True, 'import numpy as np\n'), ((419, 435), 'numpy.arange', 'np.arange', (['(30000)'], {}), '(30000)\n', (428, 435), True, 'import numpy as np\n'), ((456, 479), 'numpy.arange', 'np.arange', (['(30000)', '(35000)'], {}), '(30000, 35000)\n', (465, 479), True, 'import numpy as np\n'), ((501, 524), 'numpy.arange', 'np.arange', (['(35000)', '(45000)'], {}), '(35000, 45000)\n', (510, 524), True, 'import numpy as np\n'), ((950, 1003), 'numpy.array', 'np.array', (['[e for e in ids if e not in test_ids[fold]]'], {}), '([e for e in ids if e not in test_ids[fold]])\n', (958, 1003), True, 'import numpy as np\n'), ((719, 743), 'numpy.concatenate', 'np.concatenate', (['test_ids'], {}), '(test_ids)\n', (733, 743), True, 'import numpy as np\n')]
|
# coding: utf-8
# - We are creating a very simple machine learning model.
# - Using dataset: tic-tac-toe.data.txt with user-defined columns.
# - We are treating this problem as a supervised learning problem.
# In[74]:
# This is a rough sketch of the processing that happened in my brain while creating the program.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
# In[52]:
# Loading data
data = pd.read_csv("../tic-tac-toe.data.txt", sep = ",")
data_copy = pd.read_csv("../tic-tac-toe.data.txt", sep = ",")
# Setting cols.
data.columns = ["first_row_left", "first_row_middle", "first_row_right", "center_row_left", "center_row_middle", "center_row_right", "bottom_row_left", "bottom_row_middle", "bottom_row_right", "is_win"]
data_copy.columns = ["first_row_left", "first_row_middle", "first_row_right", "center_row_left", "center_row_middle", "center_row_right", "bottom_row_left", "bottom_row_middle", "bottom_row_right", "is_win"]
# In[53]:
# Viewing data
data.head()
# In[54]:
# As we can see the different move options, we perform label encoding.
mapping_for_moves = {'x':1, "o":0} # 'b' (blank) is left as NaN here; we impute the column mean later.
mapping_for_wins = {"positive":1, "negative":0} # Positive is win, negative is lose
data.is_win = data.is_win.map(mapping_for_wins)
data_copy.is_win = data_copy.is_win.map(mapping_for_wins)
data = data.drop(columns=["is_win"], axis=1)
# In[55]:
data.head()
# In[56]:
for i in data.columns: # Applying map to all the columns except is_win.
data[i] = data[i].map(mapping_for_moves)
# In[57]:
data.head() # Viewing data
# In[58]:
# Extracting features and labels
features = data.values
labels = data_copy.is_win.values
# In[63]:
# Filling missing values aka "b"
features = (Imputer().fit_transform(features))
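# Hedged illustration (added for clarity, not part of the original notebook): the move
# mapping sends blank squares ('b') to NaN, which is exactly what Imputer (mean strategy
# by default) fills in column-wise above.
example_moves = pd.Series(['x', 'o', 'b']).map(mapping_for_moves)
example_moves  # -> 1.0, 0.0, NaN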
# In[48]:
len(features)
# In[49]:
len(labels)
# In[65]:
# Changing type to int
features = features.astype(np.int)
labels = labels.astype(np.int)
# In[66]:
features
# In[67]:
labels
# - Preprocessing is done.
# In[68]:
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, random_state=3, shuffle=True)
# In[73]:
data.corr()
# - Clearly it is a classification problem, we can use DecisionTree or SVC
# In[84]:
# Trying different classifiers.
clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
d_tree_score = clf.score(features_test, labels_test) # Good result!
# In[78]:
clf2 = SVC() # Clearly the data is non-linear.
clf2.fit(features_train, labels_train)
clf2.score(features_test, labels_test) # Not good!
# In[85]:
clf3 = KNeighborsClassifier(n_neighbors=1)
clf3.fit(features_train, labels_train)
k_score = clf3.score(features_test, labels_test)
# In[86]:
d_tree_score > k_score
# In[87]:
predictions = clf3.predict(features_test)
# In[89]:
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test, predictions)
# In[90]:
cm
# In[91]:
np.where(labels_test!=predictions)
# In[95]:
d_tree_score
# In[94]:
k_score
# In[97]:
from sklearn.metrics import classification_report
c = classification_report(labels_test, predictions)
# In[98]:
c
# In[115]:
from sklearn.ensemble import RandomForestClassifier
r = RandomForestClassifier(n_estimators=100) # With 100 decision tree
r.fit(features_train, labels_train)
r_forest = r.score(features_test, labels_test)
p = r.predict(features_test)
np.where(labels_test != p) # Only one misclassified
# In[116]:
cm = confusion_matrix(labels_test, p)
# In[117]:
cm
|
[
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.where",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.Imputer",
"sklearn.svm.SVC"
] |
[((662, 709), 'pandas.read_csv', 'pd.read_csv', (['"""../tic-tac-toe.data.txt"""'], {'sep': '""","""'}), "('../tic-tac-toe.data.txt', sep=',')\n", (673, 709), True, 'import pandas as pd\n'), ((724, 771), 'pandas.read_csv', 'pd.read_csv', (['"""../tic-tac-toe.data.txt"""'], {'sep': '""","""'}), "('../tic-tac-toe.data.txt', sep=',')\n", (735, 771), True, 'import pandas as pd\n'), ((2335, 2399), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'random_state': '(3)', 'shuffle': '(True)'}), '(features, labels, random_state=3, shuffle=True)\n', (2351, 2399), False, 'from sklearn.model_selection import train_test_split\n'), ((2554, 2578), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2576, 2578), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2706, 2711), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (2709, 2711), False, 'from sklearn.svm import SVC\n'), ((2857, 2892), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (2877, 2892), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3138, 3180), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (3154, 3180), False, 'from sklearn.metrics import confusion_matrix\n'), ((3212, 3248), 'numpy.where', 'np.where', (['(labels_test != predictions)'], {}), '(labels_test != predictions)\n', (3220, 3248), True, 'import numpy as np\n'), ((3364, 3411), 'sklearn.metrics.classification_report', 'classification_report', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (3385, 3411), False, 'from sklearn.metrics import classification_report\n'), ((3499, 3539), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (3521, 3539), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3677, 3715), 'numpy.where', 'np.where', (['(labels_test != features_test)'], {}), '(labels_test != features_test)\n', (3685, 3715), True, 'import numpy as np\n'), ((3760, 3792), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'p'], {}), '(labels_test, p)\n', (3776, 3792), False, 'from sklearn.metrics import confusion_matrix\n'), ((1997, 2006), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {}), '()\n', (2004, 2006), False, 'from sklearn.preprocessing import Imputer\n')]
|
"""
Functions to manipulate data from PostgreSQL database includes a
parallelise dataframe that runs a function on a pandas data frame
in parallel, as well as a loop_chunks function. This reads a chunk
from the database performs an operation and uploads to a new
table in the database.
"""
import numpy as np
import pandas as pd
import time
from multiprocessing import Pool, cpu_count
from utils import db_connect
def parallelise_dataframe(df, func, num_cores=None):
'''
    Apply a function to a pandas data frame in parallel. If num_cores
    is not specified, use the number of available cores minus one.
Arguments:
df (dataframe to manipulate)
func (function to apply)
num_cores (number of cores to parallelise)
Returns:
The data frame processed by function in parallel.
'''
    if num_cores is None:
num_cores = cpu_count() - 1
df_split = np.array_split(df, num_cores)
pool = Pool(num_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
return df
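# Hedged usage sketch (added for illustration, not part of the original module): any
# function that takes and returns a DataFrame can be dropped in as `func`; the column
# names below are hypothetical.
#   def add_ratio(df):
#       df['ratio'] = df['numerator'] / df['denominator']
#       return df
#   df = parallelise_dataframe(df, add_ratio, num_cores=4)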
def loop_chunks(table_read, chunk_function, output_schema, output_table,
size_chunk=1000000, parallel=True):
'''
    Apply a function to a PostgreSQL table chunk by chunk. Each chunk is
    read from the database, processed (in parallel or on a single core),
    then uploaded to a new table in the database.
Arguments:
table_read (a PSQL query that alchemy uses to read the table)
chunk_function (the function to apply to that chunk)
output_schema (schema for table output)
output_table (table name to output data into, will create if not exists)
size_chunk (the number of rows to process in 1 chunk)
parallel (use the parallelise_dataframe function on chunk)
'''
conn_input, conn_output = db_connect.alchemy_input_output_open()
start = round(time.time())
j = 0
for chunk in pd.read_sql_query(table_read, conn_input, chunksize=size_chunk):
if parallel==True:
chunk = parallelise_dataframe(chunk, chunk_function)
else:
chunk = chunk_function(chunk)
chunk.to_sql(output_table, conn_output, schema=output_schema,
if_exists='append', index=False)
j+=1
print('{} seconds: completed {} rows'.format(
(round(time.time()) - start), j*size_chunk))
db_connect.alchemy_input_output_close(conn_input, conn_output)
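# Hedged usage sketch (added for illustration, not part of the original module): a
# typical call reads one table, applies the chunk function, and appends the result to a
# new table. The query, schema and table names below are hypothetical.
#   loop_chunks("SELECT * FROM raw.readings", add_ratio,
#               output_schema='derived', output_table='readings_with_ratio',
#               size_chunk=500000, parallel=True)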
|
[
"pandas.read_sql_query",
"utils.db_connect.alchemy_input_output_close",
"utils.db_connect.alchemy_input_output_open",
"multiprocessing.cpu_count",
"numpy.array_split",
"multiprocessing.Pool",
"time.time"
] |
[((901, 930), 'numpy.array_split', 'np.array_split', (['df', 'num_cores'], {}), '(df, num_cores)\n', (915, 930), True, 'import numpy as np\n'), ((942, 957), 'multiprocessing.Pool', 'Pool', (['num_cores'], {}), '(num_cores)\n', (946, 957), False, 'from multiprocessing import Pool, cpu_count\n'), ((1764, 1802), 'utils.db_connect.alchemy_input_output_open', 'db_connect.alchemy_input_output_open', ([], {}), '()\n', (1800, 1802), False, 'from utils import db_connect\n'), ((1861, 1924), 'pandas.read_sql_query', 'pd.read_sql_query', (['table_read', 'conn_input'], {'chunksize': 'size_chunk'}), '(table_read, conn_input, chunksize=size_chunk)\n', (1878, 1924), True, 'import pandas as pd\n'), ((2327, 2389), 'utils.db_connect.alchemy_input_output_close', 'db_connect.alchemy_input_output_close', (['conn_input', 'conn_output'], {}), '(conn_input, conn_output)\n', (2364, 2389), False, 'from utils import db_connect\n'), ((1821, 1832), 'time.time', 'time.time', ([], {}), '()\n', (1830, 1832), False, 'import time\n'), ((870, 881), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (879, 881), False, 'from multiprocessing import Pool, cpu_count\n'), ((2284, 2295), 'time.time', 'time.time', ([], {}), '()\n', (2293, 2295), False, 'import time\n')]
|
import os
import numpy as np
import urllib
from absl import flags
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
flags.DEFINE_float(
"learning_rate", default=0.001, help="Initial learning rate.")
flags.DEFINE_integer(
"epochs", default=100, help="Number of training steps to run.")
flags.DEFINE_string(
"activation",
default="selu",
help="Activation function for all hidden layers.")
flags.DEFINE_integer(
"batch_size",
default=32,
help="Batch size.")
flags.DEFINE_string(
"data_dir",
default="/tmp/mnist",
help="Directory where data is stored (if using real data).")
flags.DEFINE_string(
"model_dir",
default="/tmp/critic/",
help="Directory to put the model's fit.")
flags.DEFINE_integer(
"viz_steps", default=500, help="Frequency at which to save visualizations.")
flags.DEFINE_bool(
"delete_existing",
default=False,
help="If true, deletes existing `model_dir` directory.")
FLAGS = flags.FLAGS
def non_square_det(x, reltol=1e-6):
"""
Idea taken from https://www.quora.com/How-do-we-calculate-the-determinant-of-a-non-square-matrix
# for n != m
A = tf.random_normal([n, m])
det(A) := sqrt(det(A.A^T))
Args:
x (tf.tensor): shape in [..., a, b]
Returns:
[..., ]
"""
# squared_mat = tf.matmul(x, x, transpose_b=True)
# return tf.sqrt(tf.linalg.det(squared_mat))
s = tf.svd(x, compute_uv=False)
# atol = tf.reduce_max(s) * reltol
# s = tf.diag(tf.where(tf.greater(atol, tf.abs(s)), tf.ones_like(s), s))
return tf.reduce_prod(s)
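def _non_square_det_sanity_check():
    # Hedged numerical sketch (added for illustration, not part of the original file):
    # for a wide matrix A (n <= m) the pseudo-determinant above equals sqrt(det(A.A^T)),
    # i.e. the product of A's singular values. numpy is used so the check runs without
    # building a TF graph.
    a = np.random.randn(3, 5)
    s = np.linalg.svd(a, compute_uv=False)
    return np.isclose(np.prod(s), np.sqrt(np.linalg.det(a @ a.T)))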
def pinv(A, reltol=1e-6):
"""
Args:
A (tf.tensor): the matrix to be inverted shape=[n, m]
Returns:
        inverse (tf.tensor): the Moore-Penrose pseudoinverse of A, s.t. pinv(A).A ~= I
            (exact when A has full column rank). shape=[m, n]
"""
s, u, v = tf.svd(A)
atol = tf.reduce_max(s) * reltol
s_inv = tf.diag(tf.where(tf.greater(tf.abs(s), atol), 1.0/s, tf.zeros_like(s)))
# s_inv = tf.diag(1./s)
return tf.matmul(v, tf.matmul(s_inv, u, transpose_b=True))
class Dense(tfb.Bijector):
"""
Want a hierarchical flow.
Map some low dim distribution to a manifold in a higher dimensional space.
For more info on bijectors see tfb.Bijector, I simply cloned the general
structure.
"""
def __init__(self, n_inputs, n_outputs, validate_args=False, name=''):
"""
Args:
n_inputs (int): the number of features (last dim)
n_outputs (int): the target num of feautres
"""
super(self.__class__, self).__init__(
validate_args=validate_args,
is_constant_jacobian=True,
forward_min_event_ndims=1,
name=name)
self.n_inputs = n_inputs
self.n_outputs = n_outputs
with tf.variable_scope('dense'+name):
self.weights = tf.get_variable(name='weights',
shape=[n_inputs, n_outputs],
dtype=tf.float32,
# initializer=tf.initializers.orthogonal()
)
self.bias = tf.get_variable(name='bias',
shape=[n_outputs],
dtype=tf.float32,
initializer=tf.initializers.zeros()
)
@property
def _is_injective(self):
return True
    def _forward_event_shape_tensor(self, input_shape):
        # the forward map sends events with n_inputs features to events with n_outputs features
        return tf.constant([self.n_outputs], dtype=tf.int32)
    def _inverse_event_shape_tensor(self, output_shape):
        return tf.constant([self.n_inputs], dtype=tf.int32)
def _forward(self, x):
return tf.matmul(x, self.weights) + self.bias
def _inverse(self, y):
weights_inv = pinv(self.weights)
return tf.matmul(y - self.bias, weights_inv)
def _forward_log_det_jacobian(self, x):
return tf.log(non_square_det(self.weights))
def _inverse_log_det_jacobian(self, y):
return tf.log(non_square_det(pinv(self.weights)))
def make_mixture(latent_size, mixture_components):
"""Creates a mixture of Gaussians distribution.
Args:
latent_size: The dimensionality of the latent representation.
mixture_components: Number of elements of the mixture.
Returns:
random_prior: A `tf.distributions.Distribution` instance
representing the distribution over encodings in the absence of any
evidence.
"""
if mixture_components == 1:
# See the module docstring for why we don't learn the parameters here.
return tfd.MultivariateNormalDiag(
loc=tf.zeros([latent_size]),
scale_identity_multiplier=1.0)
loc = tf.get_variable(name="loc", shape=[mixture_components, latent_size])
raw_scale_diag = tf.get_variable(
name="raw_scale_diag", shape=[mixture_components, latent_size])
mixture_logits = tf.get_variable(
name="mixture_logits", shape=[mixture_components])
return tfd.MixtureSameFamily(
components_distribution=tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=tf.nn.softplus(raw_scale_diag)),
mixture_distribution=tfd.Categorical(logits=mixture_logits),
name="prior")
def model_fn(features, labels, mode, params, config):
"""
Builds the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
x = features['x']
global_step = tf.train.get_or_create_global_step()
with tf.contrib.summary.record_summaries_every_n_global_steps(100, global_step=global_step):
# construct a multilayer parameterised bijector
n_hidden = 8
width = 32
n_outputs = 784
fn = tfb.Chain([
Dense(width, n_outputs, name='3'),
# tfb.Softplus(),
# Dense(width, width, name='2'),
# tfb.Softplus(),
# Dense(width, width, name='1'),
Dense(n_hidden, width, name='0')
])
# use the bijector to map a simple distribution into our a density model
dist = make_mixture(n_hidden, 10)
# logits = tf.get_variable(
# name="logits", shape=[n_outputs])
# dist = tfd.RelaxedOneHotCategorical(logits=logits, temperature=1.0)
# density = tfd.RelaxedBernoulli(logits=logits, temperature=100.0)
density = tfd.TransformedDistribution(distribution=dist, bijector=fn)
# maximise the likelihood of the data
p = density.prob(x)
loss = tf.reduce_mean(1-p) # - 0.1*density.entropy()
# reg = -density.entropy()
# tf.summary.scalar('entropy', reg)
# generate some samples to visualise
# HACK to get samples to work I had to comment out line 411 of transformed_distribution.py
samples = density.sample(3)
tf.summary.image('samples', tf.reshape(samples, [3, 28, 28, 1]))
# mu = density.mean()
# tf.summary.image('mean', tf.reshape(mu, [1, 28, 28, 1]))
opt = tf.train.AdamOptimizer(0.0001)
gnvs = opt.compute_gradients(loss)
gnvs = [(tf.clip_by_norm(g, 10.0) if g is not None else tf.zeros_like(v), v) for g, v in gnvs]
train_step = opt.apply_gradients(gnvs, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_step,
eval_metric_ops={"eval_loss": tf.metrics.mean(loss)}
)
def main(_):
params = FLAGS.flag_values_dict()
params["activation"] = getattr(tf.nn, params["activation"])
if FLAGS.delete_existing and tf.gfile.Exists(FLAGS.model_dir):
tf.logging.warn("Deleting old log directory at {}".format(FLAGS.model_dir))
tf.gfile.DeleteRecursively(FLAGS.model_dir)
tf.gfile.MakeDirs(FLAGS.model_dir)
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images # Returns np.array
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images # Returns np.array
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=FLAGS.batch_size,
num_epochs=1,
shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
batch_size=FLAGS.batch_size,
num_epochs=1,
shuffle=False)
estimator = tf.estimator.Estimator(
model_fn,
params=params,
config=tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.viz_steps,
),
)
for _ in range(FLAGS.epochs):
estimator.train(train_input_fn, steps=FLAGS.viz_steps)
eval_results = estimator.evaluate(eval_input_fn)
print("Evaluation_results:\n\t%s\n" % eval_results)
if __name__ == "__main__":
tf.app.run()
|
[
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.estimator.inputs.numpy_input_fn",
"tensorflow.metrics.mean",
"tensorflow.nn.softplus",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_mean",
"absl.flags.DEFINE_float",
"tensorflow.app.run",
"tensorflow.gfile.Exists",
"tensorflow.gfile.DeleteRecursively",
"numpy.asarray",
"tensorflow.matmul",
"tensorflow.zeros_like",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.get_or_create_global_step",
"tensorflow.zeros",
"tensorflow.variable_scope",
"tensorflow.clip_by_norm",
"tensorflow.initializers.zeros",
"tensorflow.reduce_max",
"tensorflow.reshape",
"absl.flags.DEFINE_string",
"tensorflow.reduce_prod",
"tensorflow.estimator.RunConfig",
"absl.flags.DEFINE_bool",
"tensorflow.contrib.summary.record_summaries_every_n_global_steps",
"absl.flags.DEFINE_integer",
"tensorflow.svd",
"tensorflow.abs"
] |
[((174, 260), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.001)', 'help': '"""Initial learning rate."""'}), "('learning_rate', default=0.001, help=\n 'Initial learning rate.')\n", (192, 260), False, 'from absl import flags\n'), ((261, 350), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""epochs"""'], {'default': '(100)', 'help': '"""Number of training steps to run."""'}), "('epochs', default=100, help=\n 'Number of training steps to run.')\n", (281, 350), False, 'from absl import flags\n'), ((351, 456), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""activation"""'], {'default': '"""selu"""', 'help': '"""Activation function for all hidden layers."""'}), "('activation', default='selu', help=\n 'Activation function for all hidden layers.')\n", (370, 456), False, 'from absl import flags\n'), ((465, 531), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""'], {'default': '(32)', 'help': '"""Batch size."""'}), "('batch_size', default=32, help='Batch size.')\n", (485, 531), False, 'from absl import flags\n'), ((545, 664), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_dir"""'], {'default': '"""/tmp/mnist"""', 'help': '"""Directory where data is stored (if using real data)."""'}), "('data_dir', default='/tmp/mnist', help=\n 'Directory where data is stored (if using real data).')\n", (564, 664), False, 'from absl import flags\n'), ((673, 776), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_dir"""'], {'default': '"""/tmp/critic/"""', 'help': '"""Directory to put the model\'s fit."""'}), '(\'model_dir\', default=\'/tmp/critic/\', help=\n "Directory to put the model\'s fit.")\n', (692, 776), False, 'from absl import flags\n'), ((785, 887), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""viz_steps"""'], {'default': '(500)', 'help': '"""Frequency at which to save visualizations."""'}), "('viz_steps', default=500, help=\n 'Frequency at which to save visualizations.')\n", (805, 887), False, 'from absl import flags\n'), ((888, 1001), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""delete_existing"""'], {'default': '(False)', 'help': '"""If true, deletes existing `model_dir` directory."""'}), "('delete_existing', default=False, help=\n 'If true, deletes existing `model_dir` directory.')\n", (905, 1001), False, 'from absl import flags\n'), ((1464, 1491), 'tensorflow.svd', 'tf.svd', (['x'], {'compute_uv': '(False)'}), '(x, compute_uv=False)\n', (1470, 1491), True, 'import tensorflow as tf\n'), ((1621, 1638), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['s'], {}), '(s)\n', (1635, 1638), True, 'import tensorflow as tf\n'), ((1858, 1867), 'tensorflow.svd', 'tf.svd', (['A'], {}), '(A)\n', (1864, 1867), True, 'import tensorflow as tf\n'), ((4781, 4849), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""loc"""', 'shape': '[mixture_components, latent_size]'}), "(name='loc', shape=[mixture_components, latent_size])\n", (4796, 4849), True, 'import tensorflow as tf\n'), ((4869, 4948), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""raw_scale_diag"""', 'shape': '[mixture_components, latent_size]'}), "(name='raw_scale_diag', shape=[mixture_components, latent_size])\n", (4884, 4948), True, 'import tensorflow as tf\n'), ((4975, 5041), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""mixture_logits"""', 'shape': '[mixture_components]'}), "(name='mixture_logits', shape=[mixture_components])\n", (4990, 5041), True, 'import tensorflow as tf\n'), 
((5816, 5852), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (5850, 5852), True, 'import tensorflow as tf\n'), ((8113, 8147), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (8130, 8147), True, 'import tensorflow as tf\n'), ((8161, 8208), 'tensorflow.contrib.learn.datasets.load_dataset', 'tf.contrib.learn.datasets.load_dataset', (['"""mnist"""'], {}), "('mnist')\n", (8199, 8208), True, 'import tensorflow as tf\n'), ((8284, 8330), 'numpy.asarray', 'np.asarray', (['mnist.train.labels'], {'dtype': 'np.int32'}), '(mnist.train.labels, dtype=np.int32)\n', (8294, 8330), True, 'import numpy as np\n'), ((8403, 8448), 'numpy.asarray', 'np.asarray', (['mnist.test.labels'], {'dtype': 'np.int32'}), '(mnist.test.labels, dtype=np.int32)\n', (8413, 8448), True, 'import numpy as np\n'), ((8471, 8603), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': 'FLAGS.batch_size', 'num_epochs': '(1)', 'shuffle': '(True)'}), "(x={'x': train_data}, y=train_labels,\n batch_size=FLAGS.batch_size, num_epochs=1, shuffle=True)\n", (8505, 8603), True, 'import tensorflow as tf\n'), ((8672, 8803), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'batch_size': 'FLAGS.batch_size', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=eval_labels,\n batch_size=FLAGS.batch_size, num_epochs=1, shuffle=False)\n", (8706, 8803), True, 'import tensorflow as tf\n'), ((9306, 9318), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (9316, 9318), True, 'import tensorflow as tf\n'), ((1880, 1896), 'tensorflow.reduce_max', 'tf.reduce_max', (['s'], {}), '(s)\n', (1893, 1896), True, 'import tensorflow as tf\n'), ((2043, 2080), 'tensorflow.matmul', 'tf.matmul', (['s_inv', 'u'], {'transpose_b': '(True)'}), '(s_inv, u, transpose_b=True)\n', (2052, 2080), True, 'import tensorflow as tf\n'), ((3604, 3639), 'tensorflow.shape', 'tf.shape', (['[shape[0], self.n_inputs]'], {}), '([shape[0], self.n_inputs])\n', (3612, 3639), True, 'import tensorflow as tf\n'), ((3707, 3743), 'tensorflow.shape', 'tf.shape', (['[shape[0], self.n_outputs]'], {}), '([shape[0], self.n_outputs])\n', (3715, 3743), True, 'import tensorflow as tf\n'), ((3910, 3947), 'tensorflow.matmul', 'tf.matmul', (['(y - self.bias)', 'weights_inv'], {}), '(y - self.bias, weights_inv)\n', (3919, 3947), True, 'import tensorflow as tf\n'), ((5862, 5953), 'tensorflow.contrib.summary.record_summaries_every_n_global_steps', 'tf.contrib.summary.record_summaries_every_n_global_steps', (['(100)'], {'global_step': 'global_step'}), '(100, global_step=\n global_step)\n', (5918, 5953), True, 'import tensorflow as tf\n'), ((6882, 6903), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(1 - p)'], {}), '(1 - p)\n', (6896, 6903), True, 'import tensorflow as tf\n'), ((7374, 7404), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (7396, 7404), True, 'import tensorflow as tf\n'), ((7939, 7971), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (7954, 7971), True, 'import tensorflow as tf\n'), ((8065, 8108), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (8091, 8108), True, 'import tensorflow as tf\n'), ((1971, 1987), 'tensorflow.zeros_like', 
'tf.zeros_like', (['s'], {}), '(s)\n', (1984, 1987), True, 'import tensorflow as tf\n'), ((2830, 2863), 'tensorflow.variable_scope', 'tf.variable_scope', (["('dense' + name)"], {}), "('dense' + name)\n", (2847, 2863), True, 'import tensorflow as tf\n'), ((2890, 2968), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""weights"""', 'shape': '[n_inputs, n_outputs]', 'dtype': 'tf.float32'}), "(name='weights', shape=[n_inputs, n_outputs], dtype=tf.float32)\n", (2905, 2968), True, 'import tensorflow as tf\n'), ((3787, 3813), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weights'], {}), '(x, self.weights)\n', (3796, 3813), True, 'import tensorflow as tf\n'), ((7224, 7259), 'tensorflow.reshape', 'tf.reshape', (['samples', '[3, 28, 28, 1]'], {}), '(samples, [3, 28, 28, 1])\n', (7234, 7259), True, 'import tensorflow as tf\n'), ((8933, 9027), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': 'FLAGS.model_dir', 'save_checkpoints_steps': 'FLAGS.viz_steps'}), '(model_dir=FLAGS.model_dir, save_checkpoints_steps=\n FLAGS.viz_steps)\n', (8955, 9027), True, 'import tensorflow as tf\n'), ((1946, 1955), 'tensorflow.abs', 'tf.abs', (['s'], {}), '(s)\n', (1952, 1955), True, 'import tensorflow as tf\n'), ((4708, 4731), 'tensorflow.zeros', 'tf.zeros', (['[latent_size]'], {}), '([latent_size])\n', (4716, 4731), True, 'import tensorflow as tf\n'), ((7760, 7781), 'tensorflow.metrics.mean', 'tf.metrics.mean', (['loss'], {}), '(loss)\n', (7775, 7781), True, 'import tensorflow as tf\n'), ((3408, 3431), 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {}), '()\n', (3429, 3431), True, 'import tensorflow as tf\n'), ((5180, 5210), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['raw_scale_diag'], {}), '(raw_scale_diag)\n', (5194, 5210), True, 'import tensorflow as tf\n'), ((7465, 7489), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['g', '(10.0)'], {}), '(g, 10.0)\n', (7480, 7489), True, 'import tensorflow as tf\n'), ((7512, 7528), 'tensorflow.zeros_like', 'tf.zeros_like', (['v'], {}), '(v)\n', (7525, 7528), True, 'import tensorflow as tf\n')]
|
import enum
import os
import shutil
import argparse
import torch
from torch import nn
from torchvision.utils import save_image, make_grid
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import utils.utils as utils
from utils.constants import *
class GenerationMode(enum.Enum):
SINGLE_IMAGE = 0,
INTERPOLATION = 1,
VECTOR_ARITHMETIC = 2
def postprocess_generated_img(generated_img_tensor):
assert isinstance(generated_img_tensor, torch.Tensor), f'Expected PyTorch tensor but got {type(generated_img_tensor)}.'
# Move the tensor from GPU to CPU, convert to numpy array, extract 0th batch, move the image channel
# from 0th to 2nd position (CHW -> HWC)
generated_img = np.moveaxis(generated_img_tensor.to('cpu').numpy()[0], 0, 2)
# If grayscale image repeat 3 times to get RGB image (for generators trained on MNIST)
if generated_img.shape[2] == 1:
generated_img = np.repeat(generated_img, 3, axis=2)
# Imagery is in the range [-1, 1] (generator has tanh as the output activation) move it into [0, 1] range
generated_img -= np.min(generated_img)
generated_img /= np.max(generated_img)
return generated_img
def generate_from_random_latent_vector(generator, cgan_digit=None):
with torch.no_grad():
latent_vector = utils.get_gaussian_latent_batch(1, next(generator.parameters()).device)
if cgan_digit is None:
generated_img = postprocess_generated_img(generator(latent_vector))
else: # condition and generate the digit specified by cgan_digit
ref_label = torch.tensor([cgan_digit], dtype=torch.int64)
ref_label_one_hot_encoding = torch.nn.functional.one_hot(ref_label, MNIST_NUM_CLASSES).type(torch.FloatTensor).to(next(generator.parameters()).device)
generated_img = postprocess_generated_img(generator(latent_vector, ref_label_one_hot_encoding))
return generated_img, latent_vector.to('cpu').numpy()[0]
def generate_from_specified_numpy_latent_vector(generator, latent_vector):
assert isinstance(latent_vector, np.ndarray), f'Expected latent vector to be numpy array but got {type(latent_vector)}.'
with torch.no_grad():
latent_vector_tensor = torch.unsqueeze(torch.tensor(latent_vector, device=next(generator.parameters()).device), dim=0)
return postprocess_generated_img(generator(latent_vector_tensor))
def linear_interpolation(t, p0, p1):
return p0 + t * (p1 - p0)
def spherical_interpolation(t, p0, p1):
""" Spherical interpolation (slerp) formula: https://en.wikipedia.org/wiki/Slerp
Found inspiration here: https://github.com/soumith/ganhacks
but I didn't get any improvement using it compared to linear interpolation.
Args:
t (float): has [0, 1] range
p0 (numpy array): First n-dimensional vector
p1 (numpy array): Second n-dimensional vector
Result:
Returns spherically interpolated vector.
"""
if t <= 0:
return p0
elif t >= 1:
return p1
elif np.allclose(p0, p1):
return p0
# Convert p0 and p1 to unit vectors and find the angle between them (omega)
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
sin_omega = np.sin(omega) # syntactic sugar
return np.sin((1.0 - t) * omega) / sin_omega * p0 + np.sin(t * omega) / sin_omega * p1
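def _slerp_midpoint_demo():
    # Hedged sketch (added for illustration, not part of the original script): unlike the
    # linear midpoint (norm 1/sqrt(2) here), the spherical midpoint of two orthogonal unit
    # vectors stays on the unit sphere, which is why slerp is sometimes preferred for
    # Gaussian latent vectors.
    p0, p1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
    mid = spherical_interpolation(0.5, p0, p1)
    return np.linalg.norm(mid)  # ~= 1.0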
def display_vector_arithmetic_results(imgs_to_display):
fig = plt.figure(figsize=(6, 6))
title_fontsize = 'x-small'
num_display_imgs = 7
titles = ['happy women', 'happy woman (avg)', 'neutral women', 'neutral woman (avg)', 'neutral men', 'neutral man (avg)', 'result - happy man']
ax = np.zeros(num_display_imgs, dtype=object)
assert len(imgs_to_display) == num_display_imgs, f'Expected {num_display_imgs} got {len(imgs_to_display)} images.'
gs = fig.add_gridspec(5, 4, left=0.02, right=0.98, wspace=0.05, hspace=0.3)
ax[0] = fig.add_subplot(gs[0, :3])
ax[1] = fig.add_subplot(gs[0, 3])
ax[2] = fig.add_subplot(gs[1, :3])
ax[3] = fig.add_subplot(gs[1, 3])
ax[4] = fig.add_subplot(gs[2, :3])
ax[5] = fig.add_subplot(gs[2, 3])
ax[6] = fig.add_subplot(gs[3:, 1:3])
for i in range(num_display_imgs):
ax[i].imshow(cv.resize(imgs_to_display[i], (0, 0), fx=3, fy=3, interpolation=cv.INTER_NEAREST))
ax[i].set_title(titles[i], fontsize=title_fontsize)
ax[i].tick_params(which='both', bottom=False, left=False, labelleft=False, labelbottom=False)
plt.show()
def generate_new_images(model_name, cgan_digit=None, generation_mode=True, slerp=True, a=None, b=None, should_display=True):
""" Generate imagery using pre-trained generator (using vanilla_generator_000000.pth by default)
Args:
model_name (str): model name you want to use (default lookup location is BINARIES_PATH).
cgan_digit (int): if specified generate that exact digit.
generation_mode (enum): generate a single image from a random vector, interpolate between the 2 chosen latent
vectors, or perform arithmetic over latent vectors (note: not every mode is supported for every model type)
slerp (bool): if True use spherical interpolation otherwise use linear interpolation.
a, b (numpy arrays): latent vectors, if set to None you'll be prompted to choose images you like,
and use corresponding latent vectors instead.
should_display (bool): Display the generated images before saving them.
"""
model_path = os.path.join(BINARIES_PATH, model_name)
assert os.path.exists(model_path), f'Could not find the model {model_path}. You first need to train your generator.'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Prepare the correct (vanilla, cGAN, DCGAN, ...) model, load the weights and put the model into evaluation mode
model_state = torch.load(model_path)
gan_type = model_state["gan_type"]
print(f'Found {gan_type} GAN!')
_, generator = utils.get_gan(device, gan_type)
generator.load_state_dict(model_state["state_dict"], strict=True)
generator.eval()
# Generate a single image, save it and potentially display it
if generation_mode == GenerationMode.SINGLE_IMAGE:
generated_imgs_path = os.path.join(DATA_DIR_PATH, 'generated_imagery')
os.makedirs(generated_imgs_path, exist_ok=True)
generated_img, _ = generate_from_random_latent_vector(generator, cgan_digit if gan_type == GANType.CGAN.name else None)
utils.save_and_maybe_display_image(generated_imgs_path, generated_img, should_display=should_display)
# Pick 2 images you like between which you'd like to interpolate (by typing 'y' into console)
elif generation_mode == GenerationMode.INTERPOLATION:
        assert gan_type == GANType.VANILLA.name or gan_type == GANType.DCGAN.name, f'Got {gan_type} but only VANILLA/DCGAN are supported for the interpolation mode.'
interpolation_name = "spherical" if slerp else "linear"
interpolation_fn = spherical_interpolation if slerp else linear_interpolation
grid_interpolated_imgs_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery') # combined results dir
decomposed_interpolated_imgs_path = os.path.join(grid_interpolated_imgs_path, f'tmp_{gan_type}_{interpolation_name}_dump') # dump separate results
if os.path.exists(decomposed_interpolated_imgs_path):
shutil.rmtree(decomposed_interpolated_imgs_path)
os.makedirs(grid_interpolated_imgs_path, exist_ok=True)
os.makedirs(decomposed_interpolated_imgs_path, exist_ok=True)
latent_vector_a, latent_vector_b = [None, None]
# If a and b were not specified loop until the user picked the 2 images he/she likes.
found_good_vectors_flag = False
if a is None or b is None:
while not found_good_vectors_flag:
generated_img, latent_vector = generate_from_random_latent_vector(generator)
plt.imshow(generated_img); plt.title('Do you like this image?'); plt.show()
user_input = input("Do you like this generated image? [y for yes]:")
if user_input == 'y':
if latent_vector_a is None:
latent_vector_a = latent_vector
print('Saved the first latent vector.')
elif latent_vector_b is None:
latent_vector_b = latent_vector
print('Saved the second latent vector.')
found_good_vectors_flag = True
else:
print('Well lets generate a new one!')
continue
else:
print('Skipping latent vectors selection section and using cached ones.')
latent_vector_a, latent_vector_b = [a, b]
# Cache latent vectors
if a is None or b is None:
np.save(os.path.join(grid_interpolated_imgs_path, 'a.npy'), latent_vector_a)
np.save(os.path.join(grid_interpolated_imgs_path, 'b.npy'), latent_vector_b)
print(f'Lets do some {interpolation_name} interpolation!')
interpolation_resolution = 47 # number of images between the vectors a and b
num_interpolated_imgs = interpolation_resolution + 2 # + 2 so that we include a and b
generated_imgs = []
for i in range(num_interpolated_imgs):
t = i / (num_interpolated_imgs - 1) # goes from 0. to 1.
current_latent_vector = interpolation_fn(t, latent_vector_a, latent_vector_b)
generated_img = generate_from_specified_numpy_latent_vector(generator, current_latent_vector)
print(f'Generated image [{i+1}/{num_interpolated_imgs}].')
utils.save_and_maybe_display_image(decomposed_interpolated_imgs_path, generated_img, should_display=should_display)
# Move from channel last to channel first (CHW->HWC), PyTorch's save_image function expects BCHW format
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0)))
interpolated_block_img = torch.stack(generated_imgs)
interpolated_block_img = nn.Upsample(scale_factor=2.5, mode='nearest')(interpolated_block_img)
save_image(interpolated_block_img, os.path.join(grid_interpolated_imgs_path, utils.get_available_file_name(grid_interpolated_imgs_path)), nrow=int(np.sqrt(num_interpolated_imgs)))
elif generation_mode == GenerationMode.VECTOR_ARITHMETIC:
assert gan_type == GANType.DCGAN.name, f'Got {gan_type} but only DCGAN is supported for arithmetic mode.'
# Generate num_options face images and create a grid image from them
num_options = 100
generated_imgs = []
latent_vectors = []
padding = 2
for i in range(num_options):
generated_img, latent_vector = generate_from_random_latent_vector(generator)
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0))) # make_grid expects CHW format
latent_vectors.append(latent_vector)
stacked_tensor_imgs = torch.stack(generated_imgs)
final_tensor_img = make_grid(stacked_tensor_imgs, nrow=int(np.sqrt(num_options)), padding=padding)
display_img = np.moveaxis(final_tensor_img.numpy(), 0, 2)
# For storing latent vectors
num_of_vectors_per_category = 3
happy_woman_latent_vectors = []
neutral_woman_latent_vectors = []
neutral_man_latent_vectors = []
# Make it easy - by clicking on the plot you pick the image.
def onclick(event):
if event.dblclick:
pass
else: # single click
if event.button == 1: # left click
x_coord = event.xdata
y_coord = event.ydata
column = int(x_coord / (64 + padding))
row = int(y_coord / (64 + padding))
# Store latent vector corresponding to the image that the user clicked on.
if len(happy_woman_latent_vectors) < num_of_vectors_per_category:
happy_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(happy_woman_latent_vectors)}. happy woman.')
elif len(neutral_woman_latent_vectors) < num_of_vectors_per_category:
neutral_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_woman_latent_vectors)}. neutral woman.')
elif len(neutral_man_latent_vectors) < num_of_vectors_per_category:
neutral_man_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_man_latent_vectors)}. neutral man.')
else:
plt.close()
plt.figure(figsize=(10, 10))
plt.imshow(display_img)
        # This is just an example; you could also pick, say, 3 neutral women with sunglasses instead.
plt.title('Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)')
cid = plt.gcf().canvas.mpl_connect('button_press_event', onclick)
plt.show()
plt.gcf().canvas.mpl_disconnect(cid)
print('Done choosing images.')
# Calculate the average latent vector for every category (happy woman, neutral woman, neutral man)
happy_woman_avg_latent_vector = np.mean(np.array(happy_woman_latent_vectors), axis=0)
neutral_woman_avg_latent_vector = np.mean(np.array(neutral_woman_latent_vectors), axis=0)
neutral_man_avg_latent_vector = np.mean(np.array(neutral_man_latent_vectors), axis=0)
# By subtracting neutral woman from the happy woman we capture the "vector of smiling". Adding that vector
# to a neutral man we get a happy man's latent vector! Our latent space has amazingly beautiful structure!
happy_man_latent_vector = neutral_man_avg_latent_vector + (happy_woman_avg_latent_vector - neutral_woman_avg_latent_vector)
# Generate images from these latent vectors
happy_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in happy_woman_latent_vectors])
neutral_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_woman_latent_vectors])
neutral_men_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_man_latent_vectors])
happy_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, happy_woman_avg_latent_vector)
neutral_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_woman_avg_latent_vector)
neutral_man_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_man_avg_latent_vector)
happy_man_img = generate_from_specified_numpy_latent_vector(generator, happy_man_latent_vector)
display_vector_arithmetic_results([happy_women_imgs, happy_woman_avg_img, neutral_women_imgs, neutral_woman_avg_img, neutral_men_imgs, neutral_man_avg_img, happy_man_img])
else:
        raise Exception(f'Generation mode {generation_mode} not yet supported.')
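# Illustrative sketch (not part of the original pipeline): the latent arithmetic used in the
# VECTOR_ARITHMETIC branch above, isolated as a pure numpy operation. The argument names are
# hypothetical; each is assumed to be an averaged latent vector of shape (latent_dim,).
def latent_arithmetic_sketch(neutral_man_avg, happy_woman_avg, neutral_woman_avg):
    # (happy woman - neutral woman) approximates a "smile" direction in latent space;
    # adding that direction to the neutral man's average vector yields a "happy man" vector.
    smile_direction = happy_woman_avg - neutral_woman_avg
    return neutral_man_avg + smile_direction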
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="Pre-trained generator model name", default=r'VANILLA_000000.pth')
parser.add_argument("--cgan_digit", type=int, help="Used only for cGAN - generate specified digit", default=3)
parser.add_argument("--generation_mode", type=bool, help="Pick between 3 generation modes", default=GenerationMode.SINGLE_IMAGE)
parser.add_argument("--slerp", type=bool, help="Should use spherical interpolation (default No)", default=False)
parser.add_argument("--should_display", type=bool, help="Display intermediate results", default=True)
args = parser.parse_args()
# The first time you start generation in the interpolation mode it will cache a and b
    # which you'll choose the first time you run it.
a_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'a.npy')
b_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'b.npy')
latent_vector_a = np.load(a_path) if os.path.exists(a_path) else None
latent_vector_b = np.load(b_path) if os.path.exists(b_path) else None
generate_new_images(
args.model_name,
args.cgan_digit,
generation_mode=args.generation_mode,
slerp=args.slerp,
a=latent_vector_a,
b=latent_vector_b,
should_display=args.should_display)
|
[
"numpy.sqrt",
"utils.utils.save_and_maybe_display_image",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"numpy.sin",
"numpy.moveaxis",
"utils.utils.get_available_file_name",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.repeat",
"argparse.ArgumentParser",
"numpy.max",
"utils.utils.get_gan",
"matplotlib.pyplot.close",
"numpy.min",
"numpy.allclose",
"matplotlib.pyplot.gcf",
"torch.nn.functional.one_hot",
"torch.nn.Upsample",
"matplotlib.pyplot.title",
"cv2.resize",
"matplotlib.pyplot.show",
"os.makedirs",
"torch.load",
"torch.stack",
"os.path.join",
"torch.tensor",
"matplotlib.pyplot.figure",
"numpy.zeros",
"shutil.rmtree",
"torch.no_grad",
"numpy.load"
] |
[((1093, 1114), 'numpy.min', 'np.min', (['generated_img'], {}), '(generated_img)\n', (1099, 1114), True, 'import numpy as np\n'), ((1136, 1157), 'numpy.max', 'np.max', (['generated_img'], {}), '(generated_img)\n', (1142, 1157), True, 'import numpy as np\n'), ((3254, 3267), 'numpy.sin', 'np.sin', (['omega'], {}), '(omega)\n', (3260, 3267), True, 'import numpy as np\n'), ((3446, 3472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3456, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3686, 3726), 'numpy.zeros', 'np.zeros', (['num_display_imgs'], {'dtype': 'object'}), '(num_display_imgs, dtype=object)\n', (3694, 3726), True, 'import numpy as np\n'), ((4509, 4519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4517, 4519), True, 'import matplotlib.pyplot as plt\n'), ((5520, 5559), 'os.path.join', 'os.path.join', (['BINARIES_PATH', 'model_name'], {}), '(BINARIES_PATH, model_name)\n', (5532, 5559), False, 'import os\n'), ((5571, 5597), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (5585, 5597), False, 'import os\n'), ((5892, 5914), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (5902, 5914), False, 'import torch\n'), ((6009, 6040), 'utils.utils.get_gan', 'utils.get_gan', (['device', 'gan_type'], {}), '(device, gan_type)\n', (6022, 6040), True, 'import utils.utils as utils\n'), ((15467, 15492), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15490, 15492), False, 'import argparse\n'), ((16278, 16338), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""interpolated_imagery"""', '"""a.npy"""'], {}), "(DATA_DIR_PATH, 'interpolated_imagery', 'a.npy')\n", (16290, 16338), False, 'import os\n'), ((16352, 16412), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""interpolated_imagery"""', '"""b.npy"""'], {}), "(DATA_DIR_PATH, 'interpolated_imagery', 'b.npy')\n", (16364, 16412), False, 'import os\n'), ((924, 959), 'numpy.repeat', 'np.repeat', (['generated_img', '(3)'], {'axis': '(2)'}), '(generated_img, 3, axis=2)\n', (933, 959), True, 'import numpy as np\n'), ((1263, 1278), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1276, 1278), False, 'import torch\n'), ((2177, 2192), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2190, 2192), False, 'import torch\n'), ((6284, 6332), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""generated_imagery"""'], {}), "(DATA_DIR_PATH, 'generated_imagery')\n", (6296, 6332), False, 'import os\n'), ((6341, 6388), 'os.makedirs', 'os.makedirs', (['generated_imgs_path'], {'exist_ok': '(True)'}), '(generated_imgs_path, exist_ok=True)\n', (6352, 6388), False, 'import os\n'), ((6526, 6631), 'utils.utils.save_and_maybe_display_image', 'utils.save_and_maybe_display_image', (['generated_imgs_path', 'generated_img'], {'should_display': 'should_display'}), '(generated_imgs_path, generated_img,\n should_display=should_display)\n', (6560, 6631), True, 'import utils.utils as utils\n'), ((16454, 16476), 'os.path.exists', 'os.path.exists', (['a_path'], {}), '(a_path)\n', (16468, 16476), False, 'import os\n'), ((16435, 16450), 'numpy.load', 'np.load', (['a_path'], {}), '(a_path)\n', (16442, 16450), True, 'import numpy as np\n'), ((16528, 16550), 'os.path.exists', 'os.path.exists', (['b_path'], {}), '(b_path)\n', (16542, 16550), False, 'import os\n'), ((16509, 16524), 'numpy.load', 'np.load', (['b_path'], {}), '(b_path)\n', (16516, 16524), True, 'import numpy as np\n'), ((1586, 1631), 'torch.tensor', 'torch.tensor', 
(['[cgan_digit]'], {'dtype': 'torch.int64'}), '([cgan_digit], dtype=torch.int64)\n', (1598, 1631), False, 'import torch\n'), ((3038, 3057), 'numpy.allclose', 'np.allclose', (['p0', 'p1'], {}), '(p0, p1)\n', (3049, 3057), True, 'import numpy as np\n'), ((4259, 4345), 'cv2.resize', 'cv.resize', (['imgs_to_display[i]', '(0, 0)'], {'fx': '(3)', 'fy': '(3)', 'interpolation': 'cv.INTER_NEAREST'}), '(imgs_to_display[i], (0, 0), fx=3, fy=3, interpolation=cv.\n INTER_NEAREST)\n', (4268, 4345), True, 'import cv2 as cv\n'), ((5718, 5743), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5741, 5743), False, 'import torch\n'), ((7140, 7191), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""interpolated_imagery"""'], {}), "(DATA_DIR_PATH, 'interpolated_imagery')\n", (7152, 7191), False, 'import os\n'), ((7260, 7350), 'os.path.join', 'os.path.join', (['grid_interpolated_imgs_path', 'f"""tmp_{gan_type}_{interpolation_name}_dump"""'], {}), "(grid_interpolated_imgs_path,\n f'tmp_{gan_type}_{interpolation_name}_dump')\n", (7272, 7350), False, 'import os\n'), ((7383, 7432), 'os.path.exists', 'os.path.exists', (['decomposed_interpolated_imgs_path'], {}), '(decomposed_interpolated_imgs_path)\n', (7397, 7432), False, 'import os\n'), ((7503, 7558), 'os.makedirs', 'os.makedirs', (['grid_interpolated_imgs_path'], {'exist_ok': '(True)'}), '(grid_interpolated_imgs_path, exist_ok=True)\n', (7514, 7558), False, 'import os\n'), ((7567, 7628), 'os.makedirs', 'os.makedirs', (['decomposed_interpolated_imgs_path'], {'exist_ok': '(True)'}), '(decomposed_interpolated_imgs_path, exist_ok=True)\n', (7578, 7628), False, 'import os\n'), ((10138, 10165), 'torch.stack', 'torch.stack', (['generated_imgs'], {}), '(generated_imgs)\n', (10149, 10165), False, 'import torch\n'), ((3192, 3210), 'numpy.linalg.norm', 'np.linalg.norm', (['p0'], {}), '(p0)\n', (3206, 3210), True, 'import numpy as np\n'), ((3217, 3235), 'numpy.linalg.norm', 'np.linalg.norm', (['p1'], {}), '(p1)\n', (3231, 3235), True, 'import numpy as np\n'), ((3298, 3323), 'numpy.sin', 'np.sin', (['((1.0 - t) * omega)'], {}), '((1.0 - t) * omega)\n', (3304, 3323), True, 'import numpy as np\n'), ((3343, 3360), 'numpy.sin', 'np.sin', (['(t * omega)'], {}), '(t * omega)\n', (3349, 3360), True, 'import numpy as np\n'), ((7446, 7494), 'shutil.rmtree', 'shutil.rmtree', (['decomposed_interpolated_imgs_path'], {}), '(decomposed_interpolated_imgs_path)\n', (7459, 7494), False, 'import shutil\n'), ((9789, 9908), 'utils.utils.save_and_maybe_display_image', 'utils.save_and_maybe_display_image', (['decomposed_interpolated_imgs_path', 'generated_img'], {'should_display': 'should_display'}), '(decomposed_interpolated_imgs_path,\n generated_img, should_display=should_display)\n', (9823, 9908), True, 'import utils.utils as utils\n'), ((10199, 10244), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2.5)', 'mode': '"""nearest"""'}), "(scale_factor=2.5, mode='nearest')\n", (10210, 10244), False, 'from torch import nn\n'), ((11133, 11160), 'torch.stack', 'torch.stack', (['generated_imgs'], {}), '(generated_imgs)\n', (11144, 11160), False, 'import torch\n'), ((13054, 13082), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13064, 13082), True, 'import matplotlib.pyplot as plt\n'), ((13091, 13114), 'matplotlib.pyplot.imshow', 'plt.imshow', (['display_img'], {}), '(display_img)\n', (13101, 13114), True, 'import matplotlib.pyplot as plt\n'), ((13222, 13332), 'matplotlib.pyplot.title', 'plt.title', (['"""Click 
on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)"""'], {}), '(\n """Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)"""\n )\n', (13231, 13332), True, 'import matplotlib.pyplot as plt\n'), ((13402, 13412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13410, 13412), True, 'import matplotlib.pyplot as plt\n'), ((8012, 8037), 'matplotlib.pyplot.imshow', 'plt.imshow', (['generated_img'], {}), '(generated_img)\n', (8022, 8037), True, 'import matplotlib.pyplot as plt\n'), ((8039, 8075), 'matplotlib.pyplot.title', 'plt.title', (['"""Do you like this image?"""'], {}), "('Do you like this image?')\n", (8048, 8075), True, 'import matplotlib.pyplot as plt\n'), ((8077, 8087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8085, 8087), True, 'import matplotlib.pyplot as plt\n'), ((8956, 9006), 'os.path.join', 'os.path.join', (['grid_interpolated_imgs_path', '"""a.npy"""'], {}), "(grid_interpolated_imgs_path, 'a.npy')\n", (8968, 9006), False, 'import os\n'), ((9045, 9095), 'os.path.join', 'os.path.join', (['grid_interpolated_imgs_path', '"""b.npy"""'], {}), "(grid_interpolated_imgs_path, 'b.npy')\n", (9057, 9095), False, 'import os\n'), ((10354, 10412), 'utils.utils.get_available_file_name', 'utils.get_available_file_name', (['grid_interpolated_imgs_path'], {}), '(grid_interpolated_imgs_path)\n', (10383, 10412), True, 'import utils.utils as utils\n'), ((13653, 13689), 'numpy.array', 'np.array', (['happy_woman_latent_vectors'], {}), '(happy_woman_latent_vectors)\n', (13661, 13689), True, 'import numpy as np\n'), ((13749, 13787), 'numpy.array', 'np.array', (['neutral_woman_latent_vectors'], {}), '(neutral_woman_latent_vectors)\n', (13757, 13787), True, 'import numpy as np\n'), ((13845, 13881), 'numpy.array', 'np.array', (['neutral_man_latent_vectors'], {}), '(neutral_man_latent_vectors)\n', (13853, 13881), True, 'import numpy as np\n'), ((10069, 10101), 'numpy.moveaxis', 'np.moveaxis', (['generated_img', '(2)', '(0)'], {}), '(generated_img, 2, 0)\n', (10080, 10101), True, 'import numpy as np\n'), ((10424, 10454), 'numpy.sqrt', 'np.sqrt', (['num_interpolated_imgs'], {}), '(num_interpolated_imgs)\n', (10431, 10454), True, 'import numpy as np\n'), ((1673, 1730), 'torch.nn.functional.one_hot', 'torch.nn.functional.one_hot', (['ref_label', 'MNIST_NUM_CLASSES'], {}), '(ref_label, MNIST_NUM_CLASSES)\n', (1700, 1730), False, 'import torch\n'), ((10987, 11019), 'numpy.moveaxis', 'np.moveaxis', (['generated_img', '(2)', '(0)'], {}), '(generated_img, 2, 0)\n', (10998, 11019), True, 'import numpy as np\n'), ((11228, 11248), 'numpy.sqrt', 'np.sqrt', (['num_options'], {}), '(num_options)\n', (11235, 11248), True, 'import numpy as np\n'), ((13334, 13343), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13341, 13343), True, 'import matplotlib.pyplot as plt\n'), ((13421, 13430), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13428, 13430), True, 'import matplotlib.pyplot as plt\n'), ((13033, 13044), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13042, 13044), True, 'import matplotlib.pyplot as plt\n')]
|
"""a module solely for finding how add_a_list and add_tuple_list compare.
It's effectively the empirical proof for how LongIntTable.add() chooses
the fastest method with its get_fastest_method() function."""
from __future__ import print_function
from math import log10
import time
import random
from os import getcwd
from itertools import cycle
import matplotlib.pyplot as plt
import numpy as np
from dicetables.additiveevents import AdditiveEvents
WELCOME_TXT = 'hi'
def input_py_2_and_3(question):
try:
return raw_input(question)
except NameError:
return input(question)
def generate_tuple_list_with_increasing_number_of_events(first_event, start_length, event_occurrences,
len_increase_step=1):
"""
:param first_event:
:param start_length:
:param event_occurrences:
:param len_increase_step: =1
:return: generator(next)
"""
tuple_list_of_events = [(first_event, event_occurrences)]
for add_to_first_event in range(1, start_length):
tuple_list_of_events.append((first_event + add_to_first_event, event_occurrences))
while True:
yield tuple_list_of_events
highest_event = tuple_list_of_events[-1][0]
new_tuples = [(highest_event + 1 + step, event_occurrences) for step in range(len_increase_step)]
tuple_list_of_events += new_tuples
def generate_tuple_list_with_increasing_occurrences(first_event, start_length, increment, exponential_increase=True):
"""
:param first_event:
:param start_length:
:param increment:
:param exponential_increase: =True
:return: generator(next)
"""
tuple_list_of_events = [(event, 1) for event in range(first_event, first_event + start_length)]
growth = 0.0
while True:
yield tuple_list_of_events
growth += increment
if exponential_increase:
tuple_list_of_events = [(event, int(2 ** growth)) for
event in range(first_event, first_event + start_length)]
else:
tuple_list_of_events = [(event, int(growth)) for
event in range(first_event, first_event + start_length)]
def generate_tuple_list_with_increasing_gaps(first_event, start_length, event_occurrences=1, gaps_per_iteration=1,
randomize=True):
"""
:param first_event:
:param start_length:
:param event_occurrences: =1
:param gaps_per_iteration: =1
:param randomize: =True
:return: generator
"""
tuple_list_of_events = [(first_event + index, event_occurrences) for index in range(start_length)]
while sum([event[1] for event in tuple_list_of_events]) > 2 * event_occurrences:
yield tuple_list_of_events
for _ in range(gaps_per_iteration):
if randomize:
start_search_index = random.randrange(1, start_length - 1)
else:
start_search_index = len(tuple_list_of_events) - 2
only_occurrences = [event[1] for event in tuple_list_of_events]
while not only_occurrences[start_search_index:-1].count(event_occurrences) and start_search_index:
start_search_index -= 1
index_to_make_zero = only_occurrences[start_search_index:].index(event_occurrences) + start_search_index
event_value = tuple_list_of_events[index_to_make_zero][0]
tuple_list_of_events[index_to_make_zero] = (event_value, 0)
def get_generator(variable_name, first_event, start_length,
growth_increment=1.,
event_occurrences=1,
len_increase_step=1,
gaps_per_iteration=1,
randomize=True,
exponential_increase=True):
"""
:param variable_name: 'list_length', 'event_occurrences', 'increasing_gaps'
:param first_event:
:param start_length:
:param growth_increment: =1.0
:param event_occurrences: =1
:param len_increase_step: =1
:param gaps_per_iteration: =1
:param randomize: True
:param exponential_increase: =True
:return:
"""
if variable_name == 'list_length':
return generate_tuple_list_with_increasing_number_of_events(first_event, start_length,
event_occurrences, len_increase_step)
if variable_name == 'event_occurrences':
return generate_tuple_list_with_increasing_occurrences(first_event, start_length,
growth_increment, exponential_increase)
if variable_name == 'increasing_gaps':
return generate_tuple_list_with_increasing_gaps(first_event, start_length,
event_occurrences, gaps_per_iteration, randomize)
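# Illustrative use of get_generator (hedged example, values are arbitrary): with
# variable_name='list_length' each next() call yields a tuple list that grows by
# len_increase_step events, e.g.
#   gen = get_generator('list_length', first_event=0, start_length=3, event_occurrences=1)
#   next(gen)  # [(0, 1), (1, 1), (2, 1)]
#   next(gen)  # [(0, 1), (1, 1), (2, 1), (3, 1)]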
def one_time_trial(combine_times, events_tuples, input_dict_size=1, use_exponential_occurrences=True):
"""
:param combine_times:
:param events_tuples:
:param input_dict_size: =1
:param use_exponential_occurrences: =True
:return: (list_len, # occurrences, log10(# occurrences), range/events, start dict size)\n
, control time, IndexedValues time
"""
if events_tuples[0][1] < 10**100:
print('one_time_trial prepped list [{} .. {}]'.format(events_tuples[0], events_tuples[-1]))
input_dict = get_input_dict(input_dict_size, use_exponential_occurrences)
events_tuples = [pair for pair in events_tuples if pair[1]]
control_time, indexed_values_time = get_control_and_indexed_values_times(combine_times, events_tuples, input_dict)
list_length = float(len(events_tuples))
event_occurrences = float(events_tuples[0][1])
event_occurrences_exponent = log10(events_tuples[0][1])
events_range_vs_events = (max(events_tuples)[0] - min(events_tuples)[0] + 1) / float(list_length)
start_dict_size = float(input_dict_size)
y_axis_variables = (list_length, event_occurrences, event_occurrences_exponent, events_range_vs_events,
start_dict_size)
return y_axis_variables, control_time, indexed_values_time
def get_input_dict(input_dict_size, use_exponential_occurrences):
if use_exponential_occurrences:
input_dict = dict([(event, 1 + 2 ** (event % 1000)) for event in range(input_dict_size)])
else:
input_dict = dict([(event, 1 + event % 1000) for event in range(input_dict_size)])
return input_dict
def get_control_and_indexed_values_times(combine_times, events_tuples, input_dict):
control_events_action = get_control_action(input_dict, events_tuples)
events_for_indexed_values = AdditiveEvents(input_dict)
events_to_add = AdditiveEvents(dict(events_tuples))
indexed_values_start = time.clock()
events_for_indexed_values.combine_by_indexed_values(events_to_add, combine_times)
indexed_values_time = time.clock() - indexed_values_start
control_start = time.clock()
control_events_action(events_to_add, combine_times)
control_time = time.clock() - control_start
return control_time, indexed_values_time
def get_control_action(input_dict, events_tuples):
control_events = AdditiveEvents(input_dict)
control_method_str = get_control_method_str(events_tuples)
control_method_dict = {'tuple_list': control_events.combine_by_dictionary,
'flattened_list': control_events.combine_by_flattened_list}
control_events_action = control_method_dict[control_method_str]
return control_events_action
def get_control_method_str(prepped_list):
if prepped_list[0][1] == 1:
return 'flattened_list'
else:
return 'tuple_list'
def time_trial_vary_start_dict(events_tuple_list, input_dict_start_size=1000, input_dict_downward_step=5,
number_of_adds=1, use_exponential_occurrences=True):
"""
:param events_tuple_list:
:param input_dict_start_size: =1000
:param input_dict_downward_step: =5
:param number_of_adds: =1
    :param use_exponential_occurrences: =True
:return:
"""
adds_per_trial = number_of_adds
variable_name = 'start_dict_size'
variable_values = []
control_times = []
indexed_values_times = []
    print('please wait for the count down to reach zero')
input_dict_size = input_dict_start_size
while input_dict_size > 0:
print('adds {}'.format(adds_per_trial))
y_axis, control_time, indexed_values_time = one_time_trial(
adds_per_trial,
events_tuple_list,
input_dict_size=input_dict_size,
use_exponential_occurrences=use_exponential_occurrences
)
input_dict_size -= input_dict_downward_step
variable = y_axis[4]
print('results: variable: {:.2}, control: {:.3e}, IndexedValues: {:.3e}'.format(variable,
control_time,
indexed_values_time))
print('count down: {}\n'.format(input_dict_size))
variable_values.append(variable)
control_times.append(control_time)
indexed_values_times.append(indexed_values_time)
return variable_values, variable_name, control_times, indexed_values_times
def time_trial(generator, variable_name, adds_per_trial=1, automatic_adds_per_trial=False, input_dict_size=1,
number_of_data_pts=100):
"""
:param generator:
:param variable_name: 'list_length', 'event_occurrences_linear', 'event_occurrences', 'increasing_gaps'
:param adds_per_trial: =1
:param automatic_adds_per_trial: =False
:param input_dict_size: =1
:param number_of_data_pts: =100
:return: variable_values, variable_name, control_times, indexed_values_times
"""
tuple_list_length_times_add_times = 2200
variable_values = []
control_times = []
indexed_values_times = []
count = number_of_data_pts
print('please wait for the count-up/down to reach zero')
while count > 0:
try:
tuple_list_for_trial = next(generator)
except StopIteration:
break
if automatic_adds_per_trial:
adds_per_trial = int(max(1, tuple_list_length_times_add_times / len(tuple_list_for_trial)))
print('adds {}'.format(adds_per_trial))
y_axis, control_time, indexed_values_time = one_time_trial(adds_per_trial, tuple_list_for_trial,
input_dict_size=input_dict_size)
variable_order = ['list_length', 'event_occurrences_linear', 'event_occurrences', 'increasing_gaps']
index = variable_order.index(variable_name)
variable = y_axis[index]
print('results: variable: {:.2}, control: {:.3e}, IndexedValues: {:.3e}'.format(variable,
control_time,
indexed_values_time))
print('count down: {}\n'.format(count))
count -= 1
variable_values.append(variable)
control_times.append(control_time)
indexed_values_times.append(indexed_values_time)
return variable_values, variable_name, control_times, indexed_values_times
def plot_trial_with_ratio(variable_values, variable_name, control_times, iv_times, title='none', figure=1,
style='bo-', label='', base_line=False):
"""
:param variable_values:
    :param variable_name: 'list_length', 'event_occurrences', 'event_occurrences_linear', 'increasing_gaps', 'start_dict_size'
:param control_times:
:param iv_times:
:param title:
:param figure:
:param style: ='bo-'
:param label: =''
:param base_line: =False
:return:
"""
plt.ion()
# use_figure = plt.figure(figure)
# use_figure.clf()
speed_ratios = []
equality_line = [1.0] * len(control_times)
for index, numerator in enumerate(control_times):
speed_ratios.append(numerator / iv_times[index])
plt.plot(variable_values, speed_ratios, style, label=label)
if base_line:
plt.plot(variable_values, equality_line, 'g-', label='equal speed')
plt.ylabel('speed of indexed values over speed of control')
x_labels = {'list_length': 'size of tuple list',
'event_occurrences': '10 ** exponent event occurrences',
'event_occurrences_linear': 'event occurrences',
'increasing_gaps': 'ratio of events range to non-zero events',
'start_dict_size': 'number of events in starting dictionary'}
plt.xlabel(x_labels[variable_name])
plt.legend()
plt.title(title)
plt.pause(0.01)
def plot_trial_two_lines(variable_values, variable_name, control_times, iv_times, title='none', figure=1):
"""
:param variable_values:
    :param variable_name:'list_length', 'event_occurrences', 'increasing_gaps', 'start_dict_size'
:param control_times:
:param iv_times:
:param title:
:param figure:
:return:
"""
plt.ion()
use_figure = plt.figure(figure)
use_figure.clf()
plt.plot(variable_values, control_times, 'bo-', label='control')
plt.plot(variable_values, iv_times, 'r*-', label='IndexedValues')
plt.ylabel('time')
x_labels = {'list_length': 'size of tuple list',
'event_occurrences': '10 ** exponent event occurrences',
'increasing_gaps': 'ratio of events range to non-zero events',
'start_dict_size': 'number of events in starting dictionary'}
plt.xlabel(x_labels[variable_name])
plt.legend()
intersection, control_fit, iv_fit = get_poly_fit_and_intersection(variable_values, control_times, iv_times)
title += '\nintersection = {}'.format(intersection)
plt.title(title)
plt.plot(variable_values, control_fit, 'c-')
plt.plot(variable_values, iv_fit, 'c-')
plt.pause(0.01)
return intersection
def get_poly_fit_and_intersection(variable_values, control_times, iv_times):
control_slope, control_constant = np.polyfit(variable_values, control_times, 1)
iv_slope, iv_constant = np.polyfit(variable_values, iv_times, 1)
intersection = (control_constant - iv_constant) / (iv_slope - control_slope)
control_poly_fit_values = [(control_slope * x + control_constant) for x in variable_values]
iv_poly_fit_values = [(iv_slope * x + iv_constant) for x in variable_values]
return intersection, control_poly_fit_values, iv_poly_fit_values
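# Quick sanity check of the intersection formula above with made-up numbers (not from a trial):
# a control fit y = 2x + 10 and an IndexedValues fit y = 5x + 1 give
# intersection = (10 - 1) / (5 - 2) = 3, the x value where the two fitted lines cross.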
def get_welcome():
"""return welcome_message.txt"""
try:
welcome_file_name = getcwd() + '\\' + 'welcome_message.txt'
welcome_file = open(welcome_file_name, 'r')
welcome_message = welcome_file.read()
except IOError:
welcome_message = 'took a guess where "welcome_' \
'message.txt" was, and I was wrong.'
return welcome_message
def get_int(question):
"""makes sure user input is an int. quit if "q" """
while True:
answer = input_py_2_and_3(question + '\n>>> ')
if answer == 'q':
raise SystemExit
try:
output = int(answer)
return output
except ValueError:
print('must be int OR "q" to quit')
continue
def get_answer(question, min_val, max_val):
question = '{} between {} and {}'.format(question, min_val, max_val)
raw_val = get_int(question)
return min(max_val, (max(min_val, raw_val)))
def get_plot_style_generator():
pt_style = cycle(['o', '<', '>', 'v', 's', 'p', '*',
'+', 'x', 'D', 'd'])
colors = cycle(['b', 'y', 'r', 'c', 'm', 'k', 'g'])
while True:
yield '{}{}-'.format(next(colors), next(pt_style))
def do_trials_vary_start_dict(add_list_len=10, occurrences_are_many=False, use_exponential_occurrences=True,
adds_list=(1, 2, 5)):
"""
:param add_list_len: =10
:param occurrences_are_many: =False
    :param use_exponential_occurrences: =True
:param adds_list: =(1, 2, 5)
:return:
"""
style_generator = get_plot_style_generator()
if occurrences_are_many:
occurrences = 10
else:
occurrences = 1
list_for_vary_start_dict = get_generator('list_length', 0, add_list_len, event_occurrences=occurrences)
tuple_list_for_time_trial = next(list_for_vary_start_dict)
for add_variable in adds_list:
title = 'vary size of start dict. number of adds = {}\n'.format(add_variable)
title += 'input occurrences = {}. input list length = {}'.format(occurrences, add_list_len)
results = time_trial_vary_start_dict(tuple_list_for_time_trial, input_dict_start_size=1000,
input_dict_downward_step=10, number_of_adds=add_variable,
use_exponential_occurrences=use_exponential_occurrences)
do_base_line = False
if add_variable == adds_list[-1]:
do_base_line = True
plot_trial_with_ratio(*results, figure=1, title=title, label='add: {}'.format(add_variable),
style=next(style_generator), base_line=do_base_line)
def do_trials_vary_event_occurrences(add_list_len=10, start_dict_size=1, adds_list=(1, 2, 5), exponential_growth=True):
"""
:param add_list_len: =10
:param start_dict_size: =1
:param adds_list: =(1, 2, 5)
:param exponential_growth: =True
:return:
"""
style_generator = get_plot_style_generator()
for add_variable in adds_list:
if exponential_growth:
increment = 0.2
time_trial_variable = 'event_occurrences'
else:
increment = 1
time_trial_variable = 'event_occurrences_linear'
event_occurrences_generator = get_generator('event_occurrences', 0, add_list_len, growth_increment=increment,
exponential_increase=exponential_growth)
results = time_trial(event_occurrences_generator, time_trial_variable, adds_per_trial=add_variable,
input_dict_size=start_dict_size, number_of_data_pts=100)
title = 'increasing event occurrences.\n'
title += 'starting dict size={}. input list length = {}'.format(start_dict_size, add_list_len)
do_base_line = False
if add_variable == adds_list[-1]:
do_base_line = True
plot_trial_with_ratio(*results, figure=1, title=title, label='add: {}'.format(add_variable),
style=next(style_generator), base_line=do_base_line)
def do_trials_vary_list_length(start_dict_size=1, occurrences_are_many=False, adds_list=(1, 2, 5)):
"""
:param start_dict_size: =1
:param occurrences_are_many: =False
    :param adds_list: =(1, 2, 5)
:return:
"""
style_generator = get_plot_style_generator()
if occurrences_are_many:
occurrences = 10
else:
occurrences = 1
for add_variable in adds_list:
list_length_generator = get_generator('list_length', 0, 2, event_occurrences=occurrences, len_increase_step=1)
results = time_trial(list_length_generator, 'list_length', adds_per_trial=add_variable,
input_dict_size=start_dict_size, number_of_data_pts=100)
title = 'increasing list length.\n'
title += 'starting dict size={}. input list occurrences = {}'.format(start_dict_size, occurrences)
do_base_line = False
if add_variable == adds_list[-1]:
do_base_line = True
plot_trial_with_ratio(*results, figure=1, title=title, label='add: {}'.format(add_variable),
style=next(style_generator), base_line=do_base_line)
def do_trials_vary_gaps_in_list(add_list_len=100, start_dict_size=1, occurrences_are_many=False, randomize_gaps=True,
adds_list=(1, 2, 5)):
"""
:param add_list_len: =100
:param start_dict_size: =1
:param occurrences_are_many: =False
:param randomize_gaps: =True
:param adds_list: =(1, 2, 5)
:return:
"""
style_generator = get_plot_style_generator()
if occurrences_are_many:
occurrences = 10
else:
occurrences = 1
gaps_per_iteration = max(1, add_list_len // 100)
for add_variable in adds_list:
increasing_gaps_generator = get_generator('increasing_gaps', 0, add_list_len, event_occurrences=occurrences,
gaps_per_iteration=gaps_per_iteration, randomize=randomize_gaps)
results = time_trial(increasing_gaps_generator, 'increasing_gaps', adds_per_trial=add_variable,
input_dict_size=start_dict_size, number_of_data_pts=100)
title = 'making many gaps in list.\n'
title += 'starting dict size={}. input list length: {}, occurrences: {}'.format(start_dict_size,
add_list_len,
occurrences)
do_base_line = False
if add_variable == adds_list[-1]:
do_base_line = True
plot_trial_with_ratio(*results, figure=1, title=title, label='add: {}'.format(add_variable),
style=next(style_generator), base_line=do_base_line)
def graphing_ui():
"""a UI to demonstrate add speeds"""
print(WELCOME_TXT)
"""
'list_length', 'event_occurrences', 'increasing_gaps', 'dict_size'
"""
plt_figure = 1
while True:
plt.figure(plt_figure)
plt_figure += 1
plt.ion()
variable_choice = get_answer('enter "1" for varying input events\' length\n' +
'enter "2" for varying input events\' # of occurrences\n' +
'enter "3" for varying input events\' gaps in values\n' +
'enter "4" for varying the size of the start dictionary',
1, 4)
variable_dict = {1: 'list_length',
2: 'event_occurrences',
3: 'increasing_gaps',
4: 'dict_size'}
action_dict = {1: do_trials_vary_list_length,
2: do_trials_vary_event_occurrences,
3: do_trials_vary_gaps_in_list,
4: do_trials_vary_start_dict}
variable = variable_dict[variable_choice]
action = action_dict[variable_choice]
print('chose {}'.format(variable))
input_variables = get_kwargs(variable)
action(**input_variables)
plt.pause(0.1)
def get_kwargs(request):
default_adds_list = [1, 2, 3, 4, 5]
keys = ['start_dict_size', 'add_list_len', 'occurrences_are_many', 'exponential_growth']
questions = ['what size for starting dictionary?',
'how large a list to add?',
'should the list have many occurrences? 1=True, 0=False',
'should the occurrences increase exponentially? 1=True, 0=False'
]
min_max = [(1, 2000), (2, 500), (0, 1), (0, 1), (0, 1)]
if request == 'dict_size':
min_max[1] = (2, 100)
request_and_indices = {'list_length': (0, 2),
'event_occurrences': (0, 1, 3),
'increasing_gaps': (0, 1, 2),
'dict_size': (1, 2)}
output_kwargs = {}
for index in request_and_indices[request]:
output_kwargs[keys[index]] = get_answer(questions[index], *min_max[index])
if min_max[index] == (0, 1):
output_kwargs[keys[index]] = bool(output_kwargs[keys[index]])
if request != 'list_length':
adds_list = get_adds_list(output_kwargs)
output_kwargs['adds_list'] = adds_list
else:
output_kwargs['adds_list'] = default_adds_list
return output_kwargs
def get_adds_list(dictionary):
start_size = dictionary.get('start_dict_size', 1000)
add_list_size = dictionary['add_list_len']
complete_add_list = [1, 2, 3, 4, 5, 10, 50, 100, 500]
max_adds = 5
if start_size <= 100:
max_list_size_for_add = [(3, 500), (6, 100), (9, 50), (20, 10), (10000, 5)]
for pair in max_list_size_for_add:
if add_list_size <= pair[0]:
max_adds = pair[1]
break
else:
max_list_size_for_add = [(4, 50), (9, 10), (10000, 5)]
for pair in max_list_size_for_add:
if add_list_size <= pair[0]:
max_adds = pair[1]
break
adds_list_end = complete_add_list.index(max_adds)
return complete_add_list[: adds_list_end + 1]
def get_tuple_list(size, many_occurrences=False, step=1):
if many_occurrences:
occur = 10
else:
occur = 1
return [(event, occur) for event in range(0, size, step)]
def get_indexed_advantage_ratio(start_dict_size, adds, tuple_list_sizes, many_occurrences):
events_tuples = get_tuple_list(tuple_list_sizes, many_occurrences)
input_dict = get_input_dict(start_dict_size, True)
control_time, indexed_values_time = get_control_and_indexed_values_times(adds, events_tuples, input_dict)
return control_time / indexed_values_time
def get_data_list(many_occurrences):
titles = ('ADDS', 'DICT SIZE', 'LIST SIZE', 'OCCUR MANY', 'RESULT')
adds = [1, 2, 3, 4, 5, 10, 20, 50, 100, 500, 1000, 2000]
start_dict_sizes = [1, 10, 50, 100, 200, 500, 1000, 2000, 5000]
tuple_list_sizes = [2, 3, 4, 6, 8, 10, 20, 50, 100]
all_data = [titles]
for add_time in adds:
print(add_time)
for start_size in start_dict_sizes:
for tuple_size in tuple_list_sizes:
if add_time * tuple_size <= 4000:
datum = get_indexed_advantage_ratio(start_size, add_time, tuple_size, many_occurrences)
data_line = (float(add_time), float(start_size), float(tuple_size), float(many_occurrences), datum)
all_data.append(data_line)
return all_data
def data_grouper(data_list, index_priority=(0, 1, 2, 3, 4)):
new_list = []
for data in data_list:
new_data = []
for index in index_priority:
new_data.append(data[index])
new_list.append(tuple(new_data))
new_labels = new_list[0]
the_rest = sorted(new_list[1:])
return [new_labels] + the_rest
def get_result_str(data_list):
labels = data_list[0]
result_index = labels.index('RESULT')
bool_index = labels.index('OCCUR MANY')
star_the_result = 1.0
number_of_labels = len(labels)
middle_just = '10'
template = '\n' + ('{:^' + middle_just + '}|') * number_of_labels
    template = template.rstrip('|')
table_descriptor = template.format(*labels)
line_len = len(table_descriptor)
table_descriptor = add_sep_line(table_descriptor, line_len, '*')
table_descriptor = '\n' + line_len * '=' + table_descriptor
first_element = -1
second_element = -1
output_str = ''
for line in data_list[1:]:
new_first_element = int(line[0])
new_second_element = int(line[1])
if new_first_element != first_element:
output_str += table_descriptor
if new_second_element != second_element:
output_str = add_sep_line(output_str, line_len, '-')
first_element = new_first_element
second_element = new_second_element
line_strings = []
for index, element in enumerate(line):
if index == result_index:
to_add = '{:.3f}'.format(element)
elif index == bool_index:
to_add = str(bool(element))
else:
to_add = str(int(element))
line_strings.append(to_add)
output_str += template.format(*line_strings)
result = float(line[result_index])
if result > star_the_result:
output_str += ' *** '
return output_str
def add_sep_line(input_str, line_length, separator):
return input_str + '\n' + line_length * separator
def save_data_pts(data_flat, data_bumpy):
flat_save = np.array(data_flat)
np.save('save_flat_data', flat_save)
bumpy_save = np.array(data_bumpy)
np.save('save_bumpy_data', bumpy_save)
def load_data_pts(full_file_name):
np_array = np.load(full_file_name)
output = []
for data_tuple in np_array.tolist():
try:
output.append(tuple([float(number) for number in data_tuple]))
except ValueError:
output.append(tuple(data_tuple))
return output
def get_saved_data():
data_points_flat = load_data_pts('save_flat_data.npy')
data_points_bumpy = load_data_pts('save_bumpy_data.npy')
return data_points_flat, data_points_bumpy
def data_points_ui():
try:
get_new_data = input_py_2_and_3('generate new data pts (will take some minutes)? type "y" for yes.\n>>> ')
if get_new_data == 'y':
raise IOError
data_points_flat, data_points_bumpy = get_saved_data()
except IOError:
print('generating data points. this will take a few minutes')
data_points_flat = get_data_list(False)
data_points_bumpy = get_data_list(True)
save_data_pts(data_points_flat, data_points_bumpy)
labels_dict = dict(enumerate(data_points_flat[0]))
intro = """
here are the values whose order you may change
{}
at the prompt put in a new 5-digit string showing how you want the data ordered
so "01234" will order the data by ('ADDS', 'DICT SIZE', 'LIST SIZE', 'OCCUR MANY', 'RESULT')
"21034" will order the data by ('LIST SIZE', 'DICT SIZE', 'ADDS', 'OCCUR MANY', 'RESULT')
when prompted, enter the base name for the file.
"test" would create 3 files.
"test_flat.txt", "test_many.txt", "test_combined.txt". they will be text files showing the data
grouped accordingly. flat show adding events that occurred once and many shows events that occurred 10 times.
the result column shows how many times faster the index_values method is and so any time
indexed values is faster, it is starred.
"""
print(intro.format(str(labels_dict).replace(',', '\n')))
while True:
print(str(labels_dict).replace(',', '\n'))
new_order = input_py_2_and_3('new order or "q" quits >>> ')
if new_order == 'q':
break
change_list = []
for digit in new_order:
change_list.append(int(digit))
result_to_print_flat = data_grouper(data_points_flat, change_list)
result_to_print_bumpy = data_grouper(data_points_bumpy, change_list)
flat = get_result_str(result_to_print_flat)
many = get_result_str(result_to_print_bumpy)
name = input_py_2_and_3('file base name >>> ')
with open(name + '_flat.txt', 'w') as file:
file.write(flat)
with open(name + '_many.txt', 'w') as file:
file.write(many)
with open(name + '_combined.txt', 'w') as file:
file.write(get_side_by_side_data(flat, many))
def get_side_by_side_data(left_answer, right_answer):
left_lines = left_answer.split('\n')
right_lines = right_answer.split('\n')
left_just_line_len = 64
joined_lines = []
for index, line in enumerate(left_lines):
new_line = '{:<{}}{}'.format(line, left_just_line_len, right_lines[index])
joined_lines.append(new_line)
joined_answer = '\n'.join(joined_lines)
return joined_answer
if __name__ == '__main__':
graphing_ui()
# data_points_ui()
|
[
"itertools.cycle",
"time.clock",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"random.randrange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.getcwd",
"numpy.array",
"matplotlib.pyplot.figure",
"dicetables.additiveevents.AdditiveEvents",
"numpy.save",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"math.log10",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.legend",
"numpy.load"
] |
[((5825, 5851), 'math.log10', 'log10', (['events_tuples[0][1]'], {}), '(events_tuples[0][1])\n', (5830, 5851), False, 'from math import log10\n'), ((6730, 6756), 'dicetables.additiveevents.AdditiveEvents', 'AdditiveEvents', (['input_dict'], {}), '(input_dict)\n', (6744, 6756), False, 'from dicetables.additiveevents import AdditiveEvents\n'), ((6840, 6852), 'time.clock', 'time.clock', ([], {}), '()\n', (6850, 6852), False, 'import time\n'), ((7021, 7033), 'time.clock', 'time.clock', ([], {}), '()\n', (7031, 7033), False, 'import time\n'), ((7257, 7283), 'dicetables.additiveevents.AdditiveEvents', 'AdditiveEvents', (['input_dict'], {}), '(input_dict)\n', (7271, 7283), False, 'from dicetables.additiveevents import AdditiveEvents\n'), ((11995, 12004), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (12002, 12004), True, 'import matplotlib.pyplot as plt\n'), ((12251, 12310), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'speed_ratios', 'style'], {'label': 'label'}), '(variable_values, speed_ratios, style, label=label)\n', (12259, 12310), True, 'import matplotlib.pyplot as plt\n'), ((12409, 12468), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""speed of indexed values over speed of control"""'], {}), "('speed of indexed values over speed of control')\n", (12419, 12468), True, 'import matplotlib.pyplot as plt\n'), ((12821, 12856), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_labels[variable_name]'], {}), '(x_labels[variable_name])\n', (12831, 12856), True, 'import matplotlib.pyplot as plt\n'), ((12862, 12874), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12872, 12874), True, 'import matplotlib.pyplot as plt\n'), ((12879, 12895), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (12888, 12895), True, 'import matplotlib.pyplot as plt\n'), ((12900, 12915), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (12909, 12915), True, 'import matplotlib.pyplot as plt\n'), ((13263, 13272), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (13270, 13272), True, 'import matplotlib.pyplot as plt\n'), ((13291, 13309), 'matplotlib.pyplot.figure', 'plt.figure', (['figure'], {}), '(figure)\n', (13301, 13309), True, 'import matplotlib.pyplot as plt\n'), ((13335, 13399), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'control_times', '"""bo-"""'], {'label': '"""control"""'}), "(variable_values, control_times, 'bo-', label='control')\n", (13343, 13399), True, 'import matplotlib.pyplot as plt\n'), ((13404, 13469), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'iv_times', '"""r*-"""'], {'label': '"""IndexedValues"""'}), "(variable_values, iv_times, 'r*-', label='IndexedValues')\n", (13412, 13469), True, 'import matplotlib.pyplot as plt\n'), ((13474, 13492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""time"""'], {}), "('time')\n", (13484, 13492), True, 'import matplotlib.pyplot as plt\n'), ((13780, 13815), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_labels[variable_name]'], {}), '(x_labels[variable_name])\n', (13790, 13815), True, 'import matplotlib.pyplot as plt\n'), ((13821, 13833), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13831, 13833), True, 'import matplotlib.pyplot as plt\n'), ((14006, 14022), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (14015, 14022), True, 'import matplotlib.pyplot as plt\n'), ((14027, 14071), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'control_fit', '"""c-"""'], {}), "(variable_values, control_fit, 'c-')\n", (14035, 
14071), True, 'import matplotlib.pyplot as plt\n'), ((14076, 14115), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'iv_fit', '"""c-"""'], {}), "(variable_values, iv_fit, 'c-')\n", (14084, 14115), True, 'import matplotlib.pyplot as plt\n'), ((14120, 14135), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (14129, 14135), True, 'import matplotlib.pyplot as plt\n'), ((14277, 14322), 'numpy.polyfit', 'np.polyfit', (['variable_values', 'control_times', '(1)'], {}), '(variable_values, control_times, 1)\n', (14287, 14322), True, 'import numpy as np\n'), ((14351, 14391), 'numpy.polyfit', 'np.polyfit', (['variable_values', 'iv_times', '(1)'], {}), '(variable_values, iv_times, 1)\n', (14361, 14391), True, 'import numpy as np\n'), ((15745, 15807), 'itertools.cycle', 'cycle', (["['o', '<', '>', 'v', 's', 'p', '*', '+', 'x', 'D', 'd']"], {}), "(['o', '<', '>', 'v', 's', 'p', '*', '+', 'x', 'D', 'd'])\n", (15750, 15807), False, 'from itertools import cycle\n'), ((15843, 15885), 'itertools.cycle', 'cycle', (["['b', 'y', 'r', 'c', 'm', 'k', 'g']"], {}), "(['b', 'y', 'r', 'c', 'm', 'k', 'g'])\n", (15848, 15885), False, 'from itertools import cycle\n'), ((28495, 28514), 'numpy.array', 'np.array', (['data_flat'], {}), '(data_flat)\n', (28503, 28514), True, 'import numpy as np\n'), ((28519, 28555), 'numpy.save', 'np.save', (['"""save_flat_data"""', 'flat_save'], {}), "('save_flat_data', flat_save)\n", (28526, 28555), True, 'import numpy as np\n'), ((28574, 28594), 'numpy.array', 'np.array', (['data_bumpy'], {}), '(data_bumpy)\n', (28582, 28594), True, 'import numpy as np\n'), ((28599, 28637), 'numpy.save', 'np.save', (['"""save_bumpy_data"""', 'bumpy_save'], {}), "('save_bumpy_data', bumpy_save)\n", (28606, 28637), True, 'import numpy as np\n'), ((28690, 28713), 'numpy.load', 'np.load', (['full_file_name'], {}), '(full_file_name)\n', (28697, 28713), True, 'import numpy as np\n'), ((6965, 6977), 'time.clock', 'time.clock', ([], {}), '()\n', (6975, 6977), False, 'import time\n'), ((7109, 7121), 'time.clock', 'time.clock', ([], {}), '()\n', (7119, 7121), False, 'import time\n'), ((12337, 12404), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'equality_line', '"""g-"""'], {'label': '"""equal speed"""'}), "(variable_values, equality_line, 'g-', label='equal speed')\n", (12345, 12404), True, 'import matplotlib.pyplot as plt\n'), ((21874, 21896), 'matplotlib.pyplot.figure', 'plt.figure', (['plt_figure'], {}), '(plt_figure)\n', (21884, 21896), True, 'import matplotlib.pyplot as plt\n'), ((21929, 21938), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (21936, 21938), True, 'import matplotlib.pyplot as plt\n'), ((22988, 23002), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (22997, 23002), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2966), 'random.randrange', 'random.randrange', (['(1)', '(start_length - 1)'], {}), '(1, start_length - 1)\n', (2945, 2966), False, 'import random\n'), ((14814, 14822), 'os.getcwd', 'getcwd', ([], {}), '()\n', (14820, 14822), False, 'from os import getcwd\n')]
|
# coding: utf-8
# In[20]:
import numpy as np
import pydensecrf.densecrf as dcrf
import os
import cv2
import random
from tqdm import tqdm
# In[21]:
from skimage.color import gray2rgb
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, accuracy_score
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
#from osgeo import gdal
get_ipython().run_line_magic('matplotlib', 'inline')
# In[22]:
# Color maps for direction map
COLOR_LR = [0,128,128]
COLOR_UD = [128,0,128]
COLOR_DIAG = [255,215,0]
COLOR_ADIAG = [1,255,255]
INF = 10000
# In[23]:
MAX = 0
SUM = 1
VEC = 0
MAT = 1
# In[24]:
def dir_to_features(dir_map):
"""Converts direction color map to feature used for crf kernel. The
feature is obtained by computing the intersections of the x, y axis and the
line determined by the position of one point and its direction. (More details in
the report)
Parameters
____________
dir_map: numpy.array
Direction map that maps each pixel to a direction in
[left_right, up_down, diagonal, anti-diagonal], each direction
is represented by a color.
"""
(h, w, c) = dir_map.shape
feature_map = np.zeros((h,w,2))
for i in range(h):
for j in range(w):
dir_color = dir_map[i,j]
            if dir_color[0] == COLOR_LR[0]: # dir = lr
                feature_map[i,j] = np.array([INF,i])
            if dir_color[0] == COLOR_UD[0]: # dir = ud
                feature_map[i,j] = np.array([j,INF])
            if dir_color[0] == COLOR_DIAG[0]: # dir = diag
                feature_map[i,j] = np.array([j-i,i-j])
            if dir_color[0] == COLOR_ADIAG[0]: # dir = adiag
                feature_map[i,j] = np.array([i+j, i+j])
return feature_map
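# Worked example of the feature above (added for clarity, not in the original notebook):
# treating the column j as x and the row i as y, a left-right pixel lies on the horizontal
# line y = i, which is parallel to the x-axis, so its x-intercept is taken as INF and the
# feature is [INF, i]. A diagonal pixel lies on y = x + (i - j), whose x- and y-intercepts
# are j - i and i - j, matching the [j-i, i-j] pair stored above; the anti-diagonal line
# y = -x + (i + j) hits both axes at i + j, giving [i+j, i+j].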
# In[25]:
def gen_dir_map(img):
"""Generate direction map from a rgb img
Parameters
____________
img: numpy.array
Rgb img with width = height
"""
window_size = 101
half_size = int((window_size-1)/2)
sigma_1 = 2
sigma_2 = 40
(h, w, c) = img.shape
assert h==w, "h and w are not equal"
dir_map = np.zeros((h,w))
pos_mat = np.zeros((h,w,2))
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
index_mask_lr = np.zeros((window_size, window_size)).astype("bool")
index_mask_lr[half_size,:]=True
index_mask_ud = np.zeros((window_size, window_size)).astype("bool")
index_mask_ud[:,half_size]=True
index_mask_diag = np.identity(window_size).astype("bool")
index_mask_adiag = np.fliplr(np.identity(window_size)).astype("bool")
mask_list = [index_mask_lr, index_mask_ud, index_mask_diag, index_mask_adiag]
for i in range(h):
for j in range(w):
img_nbr = padded_img[i:i+window_size,j:j+window_size]
pos_nbr = padded_pos[i:i+window_size,j:j+window_size]
img_nbr = img_nbr - img[i,j,:]
pos_nbr = pos_nbr - np.array([i,j])
dir_intensity = np.zeros(4)
for dir_index, index_mask in enumerate(mask_list):
img_nbr_dir = img_nbr[index_mask]
pos_nbr_dir = pos_nbr[index_mask]
img_nbr_dir = np.sum(img_nbr_dir**2, axis=1)/(2*sigma_1**2)
pos_nbr_dir = np.sum(pos_nbr_dir**2, axis=1)/(2*sigma_2**2)
k = np.exp(-img_nbr_dir-pos_nbr_dir)
dir_intensity[dir_index]=np.sum(k)
dir_map[i,j]=np.argmax(dir_intensity)+1
return dir_map
# In[26]:
def visualize_dir_map(img, dir_map, save_file=False,
filename=None, vis_path=None, dir_path=None):
"""Visualize a direction map
Parameters
____________
img: numpy.array
Rgb img
dir_map: numpy.array
        Corresponding direction map
...
"""
h = img.shape[0]
w = img.shape[1]
vis_dir = np.zeros(img.shape)
vis_dir[dir_map==1] = np.array(COLOR_LR)
vis_dir[dir_map==2] = np.array(COLOR_UD)
vis_dir[dir_map==3] = np.array(COLOR_DIAG)
vis_dir[dir_map==4] = np.array(COLOR_ADIAG)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1); plt.imshow(img); plt.title('Original Image (blurred)'); plt.axis('off');
plt.subplot(1,2,2); plt.imshow(dir_map); plt.title('Direction map'); plt.axis('off');
if save_file:
plt.savefig(os.path.join(vis_path, filename),dpi=300)
plt.close()
cv2.imwrite(os.path.join(dir_path, filename), vis_dir)
# In[27]:
def gen_dir_map_and_visualize(image_path= './images/',
vis_path='./vis_dir_blur_/',
dir_path='./dir_map_/',
process_all=True):
"""Generate direction color map for images in image_path
Parameters
____________
image_path: string
Image path
vis_path: string
Path to save visualization results
dir_path: string
Path to save direction map
process_all: Bool
False to generate a single visualization result without save. True to
generate and save visualizaiton results for all images.
"""
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if not os.path.exists(vis_path):
os.mkdir(vis_path)
if process_all:
for file in tqdm(os.listdir(image_path)):
img = cv2.imread(os.path.join(image_path, file))
img = cv2.GaussianBlur(img,(5,5),0)
dir_map = gen_dir_map(img)
visualize_dir_map(img, dir_map, filename=file, save_file=True,
vis_path=vis_path, dir_path=dir_path)
else:
img = cv2.imread('./images/satImage_001.png')
img = cv2.GaussianBlur(img,(5,5),0)
dir_map = gen_dir_map(img)
visualize_dir_map(img, dir_map, save_file=False)
# In[28]:
def crf_with_dir_kernel(original_img, dir_feature, prob,
iter_num, compat_smooth, compat_appearance, compat_struct,
w_smooth, w_appearance, w_struct,
sigma_smooth, sigma_app_color, sigma_app_pos,
sigma_struct_pos, sigma_struct_feat):
"""CRF with a Gaussian smoothing kernel, an appearance kernel and a structural kernel
"""
(h,w) = prob.shape
y = np.zeros((h,w,2))
y[:,:,1] = prob
y[:,:,0] = 1-y[:,:,1]
annotated_image=y.transpose((2, 0, 1))
#Gives no of class labels in the annotated image
n_labels = 2
#Setting up the CRF model
d = dcrf.DenseCRF2D(original_img.shape[1], original_img.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_softmax(annotated_image)
unary = np.ascontiguousarray(U)
d.setUnaryEnergy(unary)
compat_smooth = compat_smooth * w_smooth
compat_appearance = compat_appearance * w_appearance
compat_struct = compat_struct * w_struct
# Smooth kernel
d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32),
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# Appearance kernel
d.addPairwiseBilateral(sxy=(sigma_app_pos, sigma_app_pos),
srgb=(sigma_app_color, sigma_app_color, sigma_app_color),
                           rgbim=original_img,
compat=compat_appearance.astype(np.float32),
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# Structural kernel
pairwise_energy = create_pairwise_bilateral(sdims=(sigma_struct_pos,sigma_struct_pos),
schan=(sigma_struct_feat,sigma_struct_feat),
img=dir_feature, chdim=2)
d.addPairwiseEnergy(pairwise_energy, compat=compat_struct.astype(np.float32))
Q = d.inference(iter_num)
proba = np.array(Q)
return proba[1].reshape((dir_feature.shape[0], dir_feature.shape[1]))
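# Hedged end-to-end sketch (not in the original notebook): build the direction feature from an
# image and feed it to crf_with_dir_kernel above. The kernel weights, sigmas and compatibility
# matrices below are placeholders for illustration, not tuned values.
def refine_with_struct_crf(img, prob):
    labels = gen_dir_map(img)                      # per-pixel direction labels in {1, 2, 3, 4}
    dir_color = np.zeros(img.shape)
    dir_color[labels == 1] = np.array(COLOR_LR)
    dir_color[labels == 2] = np.array(COLOR_UD)
    dir_color[labels == 3] = np.array(COLOR_DIAG)
    dir_color[labels == 4] = np.array(COLOR_ADIAG)
    dir_feature = dir_to_features(dir_color)
    return crf_with_dir_kernel(img, dir_feature, prob, iter_num=4,
                               compat_smooth=np.eye(2), compat_appearance=np.eye(2),
                               compat_struct=np.eye(2),
                               w_smooth=3.0, w_appearance=2.0, w_struct=1.0,
                               sigma_smooth=3, sigma_app_color=2, sigma_app_pos=20,
                               sigma_struct_pos=20, sigma_struct_feat=10)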
# In[29]:
def crf(original_image, prob,
iter_num=4, compat_smooth = np.array([[-0.4946432, 1.27117338],[0.59452892, 0.23182234]]),
compat_appearance = np.array([[-0.30571318, 0.83015124],[1.3217825, -0.13046645]]),
w_smooth=3.7946478055761963, w_appearance=1.8458537690881878,
sigma_smooth=8.575103751642672, sigma_color=2.0738539891571977, sigma_color_pos=20):
"""Basic CRF with a Gaussian smoothing kernel and an appearance kernel
"""
(h,w) = prob.shape
y = np.zeros((h,w,2))
y[:,:,1] = prob
y[:,:,0] = 1-y[:,:,1]
annotated_image=y.transpose((2, 0, 1))
#Gives no of class labels in the annotated image
n_labels = 2
#print("No of labels in the Image are ")
#print(n_labels)
#Setting up the CRF model
d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_softmax(annotated_image)
unary = np.ascontiguousarray(U)
d.setUnaryEnergy(unary)
compat_smooth=compat_smooth*w_smooth
compat_appearance=compat_appearance*w_appearance
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32), kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This adds the color-dependent term, i.e. features are (x,y,r,g,b).
d.addPairwiseBilateral(sxy=(sigma_color_pos, sigma_color_pos), srgb=(sigma_color, sigma_color, sigma_color), rgbim=original_image,
compat=compat_appearance.astype(np.float32),
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
Q = d.inference(iter_num)
proba = np.array(Q)
return proba[1].reshape((original_image.shape[0], original_image.shape[1]))
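# Hedged usage sketch (not in the original notebook): refine a soft foreground-probability map
# with the basic CRF above and threshold it to a binary mask. `img` is assumed to be an HxWx3
# uint8 image and `prob` an HxW float map from an upstream model; 0.5 is an arbitrary cut-off.
def refine_with_crf(img, prob, threshold=0.5):
    refined = crf(img, prob)  # smoothing + appearance kernels with the default parameters above
    return (refined > threshold).astype(np.uint8)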
# In[30]:
def crf_smooth(original_image, prob, use_2d = True, iter_num=1, w=4.921522279119057, sigma_sm=4.325251720130304):
"""CRF with only a smoothing kernel
"""
    (h, w_img) = prob.shape
    y = np.zeros((h, w_img, 2))
y[:,:,1] = prob
y[:,:,0] = 1-y[:,:,1]
annotated_image=y.transpose((2, 0, 1))
#Gives no of class labels in the annotated image
n_labels = 2
#Setting up the CRF model
if use_2d :
d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_softmax(annotated_image)
unary = np.ascontiguousarray(U)
d.setUnaryEnergy(unary)
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(sigma_sm, sigma_sm), compat=w, kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
Q = d.inference(iter_num)
proba = np.array(Q)
return proba[1].reshape((original_image.shape[0], original_image.shape[1]))
# In[31]:
def propagate_max_mat(img, prob):
"""Probability propagation (max) in 4 directions via matrix multiplication
"""
prob_out = prob.copy()
prop_size = 51
half_size = int((prop_size-1)/2)
prop_num = 3
sigma_1 = 5
sigma_2 = 42
(h, w) = prob.shape
pos_mat = np.zeros((h,w,2))
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
index_mask = np.zeros((prop_size, prop_size)).astype("bool")
for i in range(prop_size):
index_mask[i,half_size]=1
index_mask[half_size,i]=1
index_mask[i,i]=1
index_mask[prop_size-1-i,i]=1
for iteration in range(prop_num):
padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size)))
# propagate prob (maximum)
for i in range(h):
for j in range(w):
if prob_out[i,j]<0.01:
continue
img_nbr = padded_img[i:i+prop_size,j:j+prop_size]
pos_nbr = padded_pos[i:i+prop_size,j:j+prop_size]
img_nbr = img_nbr - img[i,j,:]
pos_nbr = pos_nbr - np.array([i,j])
img_nbr[~index_mask]=0
pos_nbr[~index_mask]=0
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)*prob_out[i,j]
k = k*index_mask
padded_prob[i:i+prop_size,j:j+prop_size] = np.maximum(padded_prob[i:i+prop_size,j:j+prop_size], k)
prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
return prob_out
# In[32]:
def propagate_max_vec(img, prob, prop_size=11,
prop_num=16, sigma_1=1.039316347691348, sigma_2=40):
"""
    vec means propagation is only done along the x and y axes
    max means probabilities are propagated with a max (winner-takes-all) update
Args:
prop_size: neighborhood size
prop_num: number of iteration/propagation
sigma_1: variance of color
sigma_2: variance of distance
"""
prob_out = prob.copy()
half_size = int((prop_size-1)/2)
(h, w, c) = img.shape
pos_mat = np.zeros((h,w,2)) # position matrix
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
for iteration in range(prop_num):
padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size)))
padded_prob_fix = padded_prob.copy()
# propagate prob (maximum)
assert h==w, "h and w are not equal"
for i in range(h):
# prop along y for row i
img_nbr = padded_img[i:i+prop_size,:]
pos_nbr = padded_pos[i:i+prop_size,:]
img_nbr = img_nbr - padded_img[i+half_size,:,:]
pos_nbr = pos_nbr - padded_pos[i+half_size,:,:]
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)*padded_prob_fix[i+half_size,:]
padded_prob[i:i+prop_size,:] = np.maximum(padded_prob[i:i+prop_size,:], k)
# prop along x for col i
img_nbr = padded_img[:,i:i+prop_size]
pos_nbr = padded_pos[:,i:i+prop_size]
img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c))
pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2))
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)*padded_prob_fix[:,i+half_size].reshape((-1,1))
padded_prob[:,i:i+prop_size] = np.maximum(padded_prob[:,i:i+prop_size], k)
prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
return prob_out
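# --- Illustrative usage sketch (added for clarity; not part of the original notebook) ---
# Seeds one confident pixel on a bright stripe and lets propagate_max_vec spread it
# along rows/columns of similar colour. The arrays below are synthetic placeholders;
# real inputs are an (h, w, 3) image and an (h, w) probability map.
_demo_img = np.zeros((32, 32, 3))
_demo_img[16, :, :] = 1.0   # bright horizontal stripe
_demo_prob = np.zeros((32, 32))
_demo_prob[16, 16] = 1.0    # single confident detection on the stripe
_demo_out = propagate_max_vec(_demo_img, _demo_prob, prop_size=11, prop_num=4)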
# In[33]:
def propagate_sum_vec(img, prob, prop_size=11, prop_num=1, sigma_1=1.5319569104856783, sigma_2=80):
"""
    vec means propagation is only done along the x and y axes
    sum means probabilities are propagated with an additive scheme (total probability is conserved)
Args:
prop_size: neighborhood size
prop_num: number of iteration/propagation
sigma_1: variance of color
sigma_2: variance of distance
"""
# print(np.sum(prob))
prob_out = prob.copy()
half_size = int((prop_size-1)/2)
(h, w, c) = img.shape
pos_mat = np.zeros((h,w,2)) # position matrix
for i in range(h):
for j in range(w):
pos_mat[i,j,0]=i
pos_mat[i,j,1]=j
padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
padded_prob = np.pad(prob, ((half_size, half_size), (half_size, half_size)))
for iteration in range(prop_num):
padded_prob_fix = padded_prob.copy()
padded_prob = np.pad(np.zeros((h,w)), ((half_size, half_size), (half_size, half_size)))
# propagate prob (sum)
assert h==w, "h and w are not equal"
# compute the degree mat
deg_mat = np.zeros((h+2*half_size,w+2*half_size))
for i in range(h):
# prop along y for row i
img_nbr = padded_img[i:i+prop_size,:]
pos_nbr = padded_pos[i:i+prop_size,:]
img_nbr = img_nbr - padded_img[i+half_size,:,:]
pos_nbr = pos_nbr - padded_pos[i+half_size,:,:]
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)
deg_mat[i+half_size,:] = deg_mat[i+half_size,:]+np.sum(k,axis=0)
# prop along x for col i
img_nbr = padded_img[:,i:i+prop_size]
pos_nbr = padded_pos[:,i:i+prop_size]
img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c))
pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2))
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr)
deg_mat[:,i+half_size] = deg_mat[:,i+half_size]+np.sum(k,axis=1)
for i in range(h):
# prop along y for row i
img_nbr = padded_img[i:i+prop_size,:]
pos_nbr = padded_pos[i:i+prop_size,:]
img_nbr = img_nbr - padded_img[i+half_size,:,:]
pos_nbr = pos_nbr - padded_pos[i+half_size,:,:]
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr) # similarity matrix
            k = k/deg_mat[i+half_size,:] # divided by degree
prop_prob = k * padded_prob_fix[i+half_size,:]
padded_prob[i:i+prop_size,:] = padded_prob[i:i+prop_size,:] + prop_prob
# prop along x for col i
img_nbr = padded_img[:,i:i+prop_size]
pos_nbr = padded_pos[:,i:i+prop_size]
img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c))
pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2))
img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
k = np.exp(-img_nbr-pos_nbr) # similarity matrix
            k = k/deg_mat[:,i+half_size].reshape((-1,1)) # divided by degree
prop_prob = k * padded_prob_fix[:,i+half_size].reshape((-1,1))
padded_prob[:,i:i+prop_size] = padded_prob[:,i:i+prop_size]+ prop_prob
# padded_prob = padded_prob + 0.5 * padded_prob_fix # lazy propagation
prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
# print(np.sum(prob_out))
prob_out[prob_out>1]=1
return prob_out
# In[34]:
def prob_to_patch(im):
"""Convert pixel level probability prediction to patch version
"""
patch_list = []
patch_size = 16
for j in range(0, im.shape[1], patch_size):
for i in range(0, im.shape[0], patch_size):
patch = im[i:i + patch_size, j:j + patch_size]
df = np.mean(patch)
patch_list.append(df)
return np.array(patch_list)
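# --- Illustrative end-to-end sketch (added for clarity; not part of the original notebook) ---
# Refine a pixel-wise probability map with the CRF defined above, then pool it into
# 16x16 patch scores. The image and probabilities are random placeholders; the CRF
# expects a C-contiguous uint8 RGB image and a float probability map in [0, 1].
if __name__ == '__main__':
    _rng = np.random.default_rng(0)
    _img = _rng.integers(0, 256, size=(400, 400, 3), dtype=np.uint8)
    _prob = _rng.random((400, 400))
    _refined = crf(_img, _prob, iter_num=4)   # pixel-level CRF refinement
    _patches = prob_to_patch(_refined)        # mean probability per 16x16 patch
    print(_patches.shape)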
|
[
"numpy.ascontiguousarray",
"numpy.array",
"pydensecrf.utils.unary_from_softmax",
"matplotlib.pyplot.imshow",
"pydensecrf.densecrf.DenseCRF2D",
"os.path.exists",
"os.listdir",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.exp",
"os.mkdir",
"matplotlib.pyplot.axis",
"numpy.maximum",
"numpy.identity",
"numpy.argmax",
"pydensecrf.utils.create_pairwise_bilateral",
"matplotlib.pyplot.title",
"cv2.GaussianBlur",
"cv2.imread",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.pad",
"matplotlib.pyplot.subplot"
] |
[((1290, 1309), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (1298, 1309), True, 'import numpy as np\n'), ((2223, 2239), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (2231, 2239), True, 'import numpy as np\n'), ((2253, 2272), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (2261, 2272), True, 'import numpy as np\n'), ((2409, 2482), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (2415, 2482), True, 'import numpy as np\n'), ((2499, 2568), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (2505, 2568), True, 'import numpy as np\n'), ((4187, 4206), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (4195, 4206), True, 'import numpy as np\n'), ((4233, 4251), 'numpy.array', 'np.array', (['COLOR_LR'], {}), '(COLOR_LR)\n', (4241, 4251), True, 'import numpy as np\n'), ((4278, 4296), 'numpy.array', 'np.array', (['COLOR_UD'], {}), '(COLOR_UD)\n', (4286, 4296), True, 'import numpy as np\n'), ((4323, 4343), 'numpy.array', 'np.array', (['COLOR_DIAG'], {}), '(COLOR_DIAG)\n', (4331, 4343), True, 'import numpy as np\n'), ((4370, 4391), 'numpy.array', 'np.array', (['COLOR_ADIAG'], {}), '(COLOR_ADIAG)\n', (4378, 4391), True, 'import numpy as np\n'), ((4396, 4423), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4406, 4423), True, 'import matplotlib.pyplot as plt\n'), ((4427, 4447), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4438, 4447), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4462), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4457, 4462), True, 'import matplotlib.pyplot as plt\n'), ((4464, 4501), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image (blurred)"""'], {}), "('Original Image (blurred)')\n", (4473, 4501), True, 'import matplotlib.pyplot as plt\n'), ((4503, 4518), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4511, 4518), True, 'import matplotlib.pyplot as plt\n'), ((4524, 4544), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4535, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4544, 4563), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dir_map'], {}), '(dir_map)\n', (4554, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4565, 4591), 'matplotlib.pyplot.title', 'plt.title', (['"""Direction map"""'], {}), "('Direction map')\n", (4574, 4591), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4608), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4601, 4608), True, 'import matplotlib.pyplot as plt\n'), ((6606, 6625), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (6614, 6625), True, 'import numpy as np\n'), ((6822, 6893), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['original_img.shape[1]', 'original_img.shape[0]', 'n_labels'], {}), '(original_img.shape[1], original_img.shape[0], n_labels)\n', (6837, 6893), True, 'import pydensecrf.densecrf as dcrf\n'), ((6969, 7004), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['annotated_image'], {}), '(annotated_image)\n', (6987, 7004), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((7017, 
7040), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['U'], {}), '(U)\n', (7037, 7040), True, 'import numpy as np\n'), ((7926, 8072), 'pydensecrf.utils.create_pairwise_bilateral', 'create_pairwise_bilateral', ([], {'sdims': '(sigma_struct_pos, sigma_struct_pos)', 'schan': '(sigma_struct_feat, sigma_struct_feat)', 'img': 'dir_feature', 'chdim': '(2)'}), '(sdims=(sigma_struct_pos, sigma_struct_pos), schan\n =(sigma_struct_feat, sigma_struct_feat), img=dir_feature, chdim=2)\n', (7951, 8072), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((8293, 8304), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (8301, 8304), True, 'import numpy as np\n'), ((8459, 8521), 'numpy.array', 'np.array', (['[[-0.4946432, 1.27117338], [0.59452892, 0.23182234]]'], {}), '([[-0.4946432, 1.27117338], [0.59452892, 0.23182234]])\n', (8467, 8521), True, 'import numpy as np\n'), ((8552, 8615), 'numpy.array', 'np.array', (['[[-0.30571318, 0.83015124], [1.3217825, -0.13046645]]'], {}), '([[-0.30571318, 0.83015124], [1.3217825, -0.13046645]])\n', (8560, 8615), True, 'import numpy as np\n'), ((8901, 8920), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (8909, 8920), True, 'import numpy as np\n'), ((9201, 9276), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['original_image.shape[1]', 'original_image.shape[0]', 'n_labels'], {}), '(original_image.shape[1], original_image.shape[0], n_labels)\n', (9216, 9276), True, 'import pydensecrf.densecrf as dcrf\n'), ((9352, 9387), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['annotated_image'], {}), '(annotated_image)\n', (9370, 9387), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((9400, 9423), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['U'], {}), '(U)\n', (9420, 9423), True, 'import numpy as np\n'), ((10258, 10269), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (10266, 10269), True, 'import numpy as np\n'), ((10562, 10581), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (10570, 10581), True, 'import numpy as np\n'), ((11349, 11360), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (11357, 11360), True, 'import numpy as np\n'), ((11752, 11771), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (11760, 11771), True, 'import numpy as np\n'), ((11908, 11981), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (11914, 11981), True, 'import numpy as np\n'), ((11998, 12067), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (12004, 12067), True, 'import numpy as np\n'), ((13882, 13901), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (13890, 13901), True, 'import numpy as np\n'), ((14063, 14136), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (14069, 14136), True, 'import numpy as np\n'), ((14153, 14222), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (14159, 14222), True, 'import numpy as np\n'), ((16379, 16398), 'numpy.zeros', 
'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (16387, 16398), True, 'import numpy as np\n'), ((16560, 16633), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (16566, 16633), True, 'import numpy as np\n'), ((16650, 16719), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (16656, 16719), True, 'import numpy as np\n'), ((16737, 16799), 'numpy.pad', 'np.pad', (['prob', '((half_size, half_size), (half_size, half_size))'], {}), '(prob, ((half_size, half_size), (half_size, half_size)))\n', (16743, 16799), True, 'import numpy as np\n'), ((20330, 20350), 'numpy.array', 'np.array', (['patch_list'], {}), '(patch_list)\n', (20338, 20350), True, 'import numpy as np\n'), ((4698, 4709), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4707, 4709), True, 'import matplotlib.pyplot as plt\n'), ((5448, 5472), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (5462, 5472), False, 'import os\n'), ((5482, 5500), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (5490, 5500), False, 'import os\n'), ((5512, 5536), 'os.path.exists', 'os.path.exists', (['vis_path'], {}), '(vis_path)\n', (5526, 5536), False, 'import os\n'), ((5546, 5564), 'os.mkdir', 'os.mkdir', (['vis_path'], {}), '(vis_path)\n', (5554, 5564), False, 'import os\n'), ((5951, 5990), 'cv2.imread', 'cv2.imread', (['"""./images/satImage_001.png"""'], {}), "('./images/satImage_001.png')\n", (5961, 5990), False, 'import cv2\n'), ((6005, 6037), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (6021, 6037), False, 'import cv2\n'), ((10802, 10877), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['original_image.shape[1]', 'original_image.shape[0]', 'n_labels'], {}), '(original_image.shape[1], original_image.shape[0], n_labels)\n', (10817, 10877), True, 'import pydensecrf.densecrf as dcrf\n'), ((10961, 10996), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['annotated_image'], {}), '(annotated_image)\n', (10979, 10996), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((11013, 11036), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['U'], {}), '(U)\n', (11033, 11036), True, 'import numpy as np\n'), ((12369, 12435), 'numpy.pad', 'np.pad', (['prob_out', '((half_size, half_size), (half_size, half_size))'], {}), '(prob_out, ((half_size, half_size), (half_size, half_size)))\n', (12375, 12435), True, 'import numpy as np\n'), ((14291, 14357), 'numpy.pad', 'np.pad', (['prob_out', '((half_size, half_size), (half_size, half_size))'], {}), '(prob_out, ((half_size, half_size), (half_size, half_size)))\n', (14297, 14357), True, 'import numpy as np\n'), ((17106, 17154), 'numpy.zeros', 'np.zeros', (['(h + 2 * half_size, w + 2 * half_size)'], {}), '((h + 2 * half_size, w + 2 * half_size))\n', (17114, 17154), True, 'import numpy as np\n'), ((2593, 2629), 'numpy.zeros', 'np.zeros', (['(window_size, window_size)'], {}), '((window_size, window_size))\n', (2601, 2629), True, 'import numpy as np\n'), ((2701, 2737), 'numpy.zeros', 'np.zeros', (['(window_size, window_size)'], {}), '((window_size, window_size))\n', (2709, 2737), True, 'import numpy as np\n'), ((2811, 2835), 'numpy.identity', 'np.identity', (['window_size'], {}), 
'(window_size)\n', (2822, 2835), True, 'import numpy as np\n'), ((3308, 3319), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3316, 3319), True, 'import numpy as np\n'), ((4648, 4680), 'os.path.join', 'os.path.join', (['vis_path', 'filename'], {}), '(vis_path, filename)\n', (4660, 4680), False, 'import os\n'), ((4730, 4762), 'os.path.join', 'os.path.join', (['dir_path', 'filename'], {}), '(dir_path, filename)\n', (4742, 4762), False, 'import os\n'), ((5610, 5632), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (5620, 5632), False, 'import os\n'), ((5714, 5746), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (5730, 5746), False, 'import cv2\n'), ((12089, 12121), 'numpy.zeros', 'np.zeros', (['(prop_size, prop_size)'], {}), '((prop_size, prop_size))\n', (12097, 12121), True, 'import numpy as np\n'), ((15010, 15056), 'numpy.maximum', 'np.maximum', (['padded_prob[i:i + prop_size, :]', 'k'], {}), '(padded_prob[i:i + prop_size, :], k)\n', (15020, 15056), True, 'import numpy as np\n'), ((15653, 15699), 'numpy.maximum', 'np.maximum', (['padded_prob[:, i:i + prop_size]', 'k'], {}), '(padded_prob[:, i:i + prop_size], k)\n', (15663, 15699), True, 'import numpy as np\n'), ((16912, 16928), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (16920, 16928), True, 'import numpy as np\n'), ((17574, 17600), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (17580, 17600), True, 'import numpy as np\n'), ((18160, 18186), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (18166, 18186), True, 'import numpy as np\n'), ((18691, 18717), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (18697, 18717), True, 'import numpy as np\n'), ((19411, 19437), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (19417, 19437), True, 'import numpy as np\n'), ((20270, 20284), 'numpy.mean', 'np.mean', (['patch'], {}), '(patch)\n', (20277, 20284), True, 'import numpy as np\n'), ((1485, 1503), 'numpy.array', 'np.array', (['[INF, i]'], {}), '([INF, i])\n', (1493, 1503), True, 'import numpy as np\n'), ((1593, 1611), 'numpy.array', 'np.array', (['[j, INF]'], {}), '([j, INF])\n', (1601, 1611), True, 'import numpy as np\n'), ((1705, 1729), 'numpy.array', 'np.array', (['[j - i, i - j]'], {}), '([j - i, i - j])\n', (1713, 1729), True, 'import numpy as np\n'), ((1821, 1845), 'numpy.array', 'np.array', (['[i + j, i + j]'], {}), '([i + j, i + j])\n', (1829, 1845), True, 'import numpy as np\n'), ((2884, 2908), 'numpy.identity', 'np.identity', (['window_size'], {}), '(window_size)\n', (2895, 2908), True, 'import numpy as np\n'), ((3264, 3280), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (3272, 3280), True, 'import numpy as np\n'), ((3655, 3689), 'numpy.exp', 'np.exp', (['(-img_nbr_dir - pos_nbr_dir)'], {}), '(-img_nbr_dir - pos_nbr_dir)\n', (3661, 3689), True, 'import numpy as np\n'), ((3729, 3738), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (3735, 3738), True, 'import numpy as np\n'), ((3764, 3788), 'numpy.argmax', 'np.argmax', (['dir_intensity'], {}), '(dir_intensity)\n', (3773, 3788), True, 'import numpy as np\n'), ((5664, 5694), 'os.path.join', 'os.path.join', (['image_path', 'file'], {}), '(image_path, file)\n', (5676, 5694), False, 'import os\n'), ((13193, 13253), 'numpy.maximum', 'np.maximum', (['padded_prob[i:i + prop_size, j:j + prop_size]', 'k'], {}), '(padded_prob[i:i + prop_size, j:j + prop_size], k)\n', (13203, 
13253), True, 'import numpy as np\n'), ((14789, 14817), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (14795, 14817), True, 'import numpy as np\n'), ((14853, 14881), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (14859, 14881), True, 'import numpy as np\n'), ((14911, 14937), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (14917, 14937), True, 'import numpy as np\n'), ((15416, 15444), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (15422, 15444), True, 'import numpy as np\n'), ((15480, 15508), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (15486, 15508), True, 'import numpy as np\n'), ((15538, 15564), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (15544, 15564), True, 'import numpy as np\n'), ((17452, 17480), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (17458, 17480), True, 'import numpy as np\n'), ((17516, 17544), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (17522, 17544), True, 'import numpy as np\n'), ((17659, 17676), 'numpy.sum', 'np.sum', (['k'], {'axis': '(0)'}), '(k, axis=0)\n', (17665, 17676), True, 'import numpy as np\n'), ((18038, 18066), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (18044, 18066), True, 'import numpy as np\n'), ((18102, 18130), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (18108, 18130), True, 'import numpy as np\n'), ((18245, 18262), 'numpy.sum', 'np.sum', (['k'], {'axis': '(1)'}), '(k, axis=1)\n', (18251, 18262), True, 'import numpy as np\n'), ((18569, 18597), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (18575, 18597), True, 'import numpy as np\n'), ((18633, 18661), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (18639, 18661), True, 'import numpy as np\n'), ((19289, 19317), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (19295, 19317), True, 'import numpy as np\n'), ((19353, 19381), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (19359, 19381), True, 'import numpy as np\n'), ((3513, 3545), 'numpy.sum', 'np.sum', (['(img_nbr_dir ** 2)'], {'axis': '(1)'}), '(img_nbr_dir ** 2, axis=1)\n', (3519, 3545), True, 'import numpy as np\n'), ((3589, 3621), 'numpy.sum', 'np.sum', (['(pos_nbr_dir ** 2)'], {'axis': '(1)'}), '(pos_nbr_dir ** 2, axis=1)\n', (3595, 3621), True, 'import numpy as np\n'), ((12812, 12828), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (12820, 12828), True, 'import numpy as np\n'), ((12932, 12960), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (12938, 12960), True, 'import numpy as np\n'), ((13000, 13028), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (13006, 13028), True, 'import numpy as np\n'), ((13062, 13088), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (13068, 13088), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import os
from pyburst.grids import grid_analyser, grid_strings, grid_tools
# resolution tests
y_factors = {'dt': 3600,
'fluence': 1e39,
'peak': 1e38,
}
y_labels = {'dt': '$\Delta t$',
'rate': 'Burst rate',
'fluence': '$E_b$',
'peak': '$L_{peak}$',
'length': 'Burst length',
}
y_units = {'dt': 'hr',
'rate': 'day$^{-1}$',
           'fluence': '$10^{39}$ erg',
           'peak': '$10^{38}$ erg s$^{-1}$',
'length': 's',
}
reference_params = {'accmass': 1e16,
'accdepth': 1e20}
other_param = {'accmass': 'accdepth',
'accdepth': 'accmass'}
x_bounds = {'accmass': [1e15, 1e17],
'accdepth': [1e19, 1e21]}
colors = {True: 'C1',
False: 'C0'}
# TODO add save plot, iterate over params
def save_all_plots(sources, ref_source, grid_version,
params=('x', 'z', 'mass', 'accrate'), **kwargs):
kgrids = get_multigrids(sources, grid_version=grid_version)
source = get_not(sources, ref_source)
unique_all = kgrids[source].unique_params
unique_subset = {}
for p in params:
unique_subset[p] = unique_all[p]
params_full = grid_tools.enumerate_params(unique_subset)
n = len(params_full[params[0]])
for i in range(n):
params_sub = {}
for p in params:
params_sub[p] = params_full[p][i]
plot(params=params_sub, sources=sources, ref_source=ref_source,
kgrids=kgrids, save=True, display=False, title=False, **kwargs)
def plot(params, sources, ref_source, grid_version,
bprops=('rate', 'fluence', 'peak', 'length'), figsize=(9, 10), shaded=False,
display=True, save=False, kgrids=None, title=True, show_nbursts=True):
"""Plot burst properties for given resolution parameter
    Parameters
----------
params : dict
ref_source : str
source from which the reference model comes
sources: set(str)
list of source(s) to get models from
kgrids : {source: Kgrid}
dict of grid_analyser.Kgrid objects for each source
bprops : [str]
figsize : [int, int]
shaded : bool
shade between y_values of reference model
"""
check_params(params)
n = len(bprops)
fig, ax = plt.subplots(n, 2, sharex=False, figsize=figsize)
if kgrids is None:
kgrids = get_multigrids(sources, grid_version=grid_version)
for i, res_param in enumerate(reference_params):
ref_value = reference_params[res_param]
other_res_param = other_param[res_param]
full_params = dict(params)
full_params[other_res_param] = reference_params[other_res_param]
sub_summ, sub_params = get_subgrids(kgrids, params=full_params)
for j, bprop in enumerate(bprops):
u_bprop = f'u_{bprop}'
y_label = f'{y_labels[bprop]} ({y_units[bprop]})'
y_factor = y_factors.get(bprop, 1)
set_axes(ax[j, i], xscale='log',
ylabel=y_label if i == 0 else '',
xlabel=res_param if j == n-1 else '',
yticks=True if i == 0 else False)
for source in sources:
ref = source == ref_source
x = sub_params[source][res_param]
y = sub_summ[source][bprop] / y_factor
yerr = sub_summ[source][u_bprop] / y_factor
if show_nbursts:
n_bursts = sub_summ[source]['n_used']
for k in range(len(n_bursts)):
x_offset = 1.15
nb = n_bursts.iloc[k]
ax[j, i].text(x.iloc[k] * x_offset, y.iloc[k], f'{nb:.0f}',
verticalalignment='center')
if shaded and ref:
idx = np.where(x == ref_value)[0]
y_ref = y.iloc[idx]
yerr_ref = yerr.iloc[idx]
ax[j, i].fill_between(x_bounds[res_param],
np.full(2, y_ref + yerr_ref),
np.full(2, y_ref - yerr_ref), color='0.85')
ax[j, i].errorbar(x=x, y=y, yerr=yerr, ls='none',
marker='o', capsize=3, color=colors[ref])
if title:
ax[0, 0].set_title(params, fontsize=11)
plt.tight_layout()
if save:
source = get_not(sources, ref_source)
precisions = {'z': 4, 'x': 2, 'qb': 3, 'mass': 1, 'accrate': 2}
fixed_str = ''
for p, v in params.items():
precision = precisions.get(p, 3)
fixed_str += f'_{p}={v:.{precision}f}'
filename = f'resolution_{source}{fixed_str}.png'
path = os.path.join(grid_strings.plots_path(source), 'resolution')
filepath = os.path.join(path, filename)
print(f'Saving {filepath}')
plt.savefig(filepath)
plt.close(fig)
else:
plt.show(block=False)
def get_not(array, var):
"""Returns value in length-2 'array' that is not 'var'
"""
copy = list(array)
copy.remove(var)
return copy[0]
def get_multigrids(sources, grid_version):
kgrids = {}
for source in sources:
kgrids[source] = grid_analyser.Kgrid(source, grid_version=grid_version)
return kgrids
def get_subgrids(kgrids, params):
"""Returns subkgrids of multiple given sources
"""
sub_params = {}
sub_summ = {}
for source in kgrids:
sub_params[source] = kgrids[source].get_params(params=params)
sub_summ[source] = kgrids[source].get_summ(params=params)
return sub_summ, sub_params
def set_axes(ax, title='', xlabel='', ylabel='', yscale='linear', xscale='linear',
fontsize=14, yticks=True, xticks=True):
if not yticks:
ax.axes.tick_params(axis='both', left='off', labelleft='off')
if not xticks:
ax.axes.tick_params(axis='both', bottom='off', labelbottom='off')
ax.set_title(title, fontsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
def check_params(params, must_specify=('x', 'z', 'accrate', 'mass')):
for param in must_specify:
if param not in params:
raise ValueError(f'{param} not specified in params')
|
[
"matplotlib.pyplot.savefig",
"pyburst.grids.grid_tools.enumerate_params",
"numpy.where",
"os.path.join",
"matplotlib.pyplot.close",
"pyburst.grids.grid_strings.plots_path",
"matplotlib.pyplot.tight_layout",
"pyburst.grids.grid_analyser.Kgrid",
"numpy.full",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((1303, 1345), 'pyburst.grids.grid_tools.enumerate_params', 'grid_tools.enumerate_params', (['unique_subset'], {}), '(unique_subset)\n', (1330, 1345), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((2387, 2436), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', '(2)'], {'sharex': '(False)', 'figsize': 'figsize'}), '(n, 2, sharex=False, figsize=figsize)\n', (2399, 2436), True, 'import matplotlib.pyplot as plt\n'), ((4491, 4509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4507, 4509), True, 'import matplotlib.pyplot as plt\n'), ((4949, 4977), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (4961, 4977), False, 'import os\n'), ((5022, 5043), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (5033, 5043), True, 'import matplotlib.pyplot as plt\n'), ((5052, 5066), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5061, 5066), True, 'import matplotlib.pyplot as plt\n'), ((5085, 5106), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5093, 5106), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5430), 'pyburst.grids.grid_analyser.Kgrid', 'grid_analyser.Kgrid', (['source'], {'grid_version': 'grid_version'}), '(source, grid_version=grid_version)\n', (5395, 5430), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((4883, 4914), 'pyburst.grids.grid_strings.plots_path', 'grid_strings.plots_path', (['source'], {}), '(source)\n', (4906, 4914), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((3947, 3971), 'numpy.where', 'np.where', (['(x == ref_value)'], {}), '(x == ref_value)\n', (3955, 3971), True, 'import numpy as np\n'), ((4166, 4194), 'numpy.full', 'np.full', (['(2)', '(y_ref + yerr_ref)'], {}), '(2, y_ref + yerr_ref)\n', (4173, 4194), True, 'import numpy as np\n'), ((4238, 4266), 'numpy.full', 'np.full', (['(2)', '(y_ref - yerr_ref)'], {}), '(2, y_ref - yerr_ref)\n', (4245, 4266), True, 'import numpy as np\n')]
|
# Copyright (c) 2020-present, Assistive Robotics Lab
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from transformers.training_utils import fit
from transformers.transformers import (
InferenceTransformerEncoder,
InferenceTransformer
)
from common.data_utils import load_dataloader
from common.logging import logger
from common.losses import QuatDistance
import torch
from torch import nn, optim
import numpy as np
import argparse
torch.manual_seed(42)
np.random.seed(42)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
def parse_args():
"""Parse arguments for module.
Returns:
argparse.Namespace: contains accessible arguments passed in to module
"""
parser = argparse.ArgumentParser()
parser.add_argument("--task",
help=("task for neural network to train on; "
"either prediction or conversion"))
parser.add_argument("--data-path",
help=("path to h5 files containing data "
"(must contain training.h5 and validation.h5)"))
parser.add_argument("--representation",
help=("will normalize if quaternions, will use expmap "
"to quat validation loss if expmap"),
default="quaternion")
parser.add_argument("--full-transformer",
help=("will use Transformer with both encoder and "
"decoder if true, will only use encoder "
"if false"),
default=False,
action="store_true")
parser.add_argument("--model-file-path",
help="path to model file for saving it after training")
parser.add_argument("--batch-size",
help="batch size for training", default=32)
parser.add_argument("--learning-rate",
help="initial learning rate for training",
default=0.001)
parser.add_argument("--beta-one",
help="beta1 for adam optimizer (momentum)",
default=0.9)
parser.add_argument("--beta-two",
help="beta2 for adam optimizer", default=0.999)
parser.add_argument("--seq-length",
help=("sequence length for model, will be divided "
"by downsample if downsample is provided"),
default=20)
parser.add_argument("--downsample",
help=("reduce sampling frequency of recorded data; "
"default sampling frequency is 240 Hz"),
default=1)
parser.add_argument("--in-out-ratio",
help=("ratio of input/output; "
"seq_length / downsample = input length = 10, "
"output length = input length / in_out_ratio"),
default=1)
parser.add_argument("--stride",
help=("stride used when reading data in "
"for running prediction tasks"),
default=3)
parser.add_argument("--num-epochs",
help="number of epochs for training", default=1)
parser.add_argument("--num-heads",
help="number of heads in Transformer")
parser.add_argument("--dim-feedforward",
help=("number of dimensions in feedforward layer "
"in Transformer"))
parser.add_argument("--dropout",
help="dropout percentage in Transformer")
parser.add_argument("--num-layers",
help="number of layers in Transformer")
args = parser.parse_args()
if args.data_path is None:
parser.print_help()
return args
if __name__ == "__main__":
args = parse_args()
for arg in vars(args):
logger.info(f"{arg} - {getattr(args, arg)}")
logger.info("Starting Transformer training...")
logger.info(f"Device count: {torch.cuda.device_count()}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"Training on {device}...")
seq_length = int(args.seq_length)//int(args.downsample)
assert seq_length % int(args.in_out_ratio) == 0
lr = float(args.learning_rate)
normalize = True
train_dataloader, norm_data = load_dataloader(args, "training", normalize)
val_dataloader, _ = load_dataloader(args, "validation", normalize,
norm_data=norm_data)
encoder_feature_size = train_dataloader.dataset[0][0].shape[1]
decoder_feature_size = train_dataloader.dataset[0][1].shape[1]
num_heads = int(args.num_heads)
dim_feedforward = int(args.dim_feedforward)
dropout = float(args.dropout)
num_layers = int(args.num_layers)
quaternions = (args.representation == "quaternions")
if args.full_transformer:
model = InferenceTransformer(decoder_feature_size, num_heads,
dim_feedforward, dropout,
num_layers, quaternions=quaternions)
else:
model = InferenceTransformerEncoder(encoder_feature_size, num_heads,
dim_feedforward, dropout,
num_layers, decoder_feature_size,
quaternions=quaternions)
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model = model.to(device).double()
epochs = int(args.num_epochs)
beta1 = float(args.beta_one)
beta2 = float(args.beta_two)
optimizer = optim.AdamW(model.parameters(),
lr=lr,
betas=(beta1, beta2),
weight_decay=0.03)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[1, 3],
gamma=0.1)
dataloaders = (train_dataloader, val_dataloader)
training_criterion = nn.L1Loss()
validation_criteria = [nn.L1Loss(), QuatDistance()]
logger.info(f"Model for training: {model}")
logger.info(f"Number of parameters: {num_params}")
logger.info(f"Optimizer for training: {optimizer}")
logger.info(f"Criterion for training: {training_criterion}")
fit(model, optimizer, scheduler, epochs, dataloaders, training_criterion,
validation_criteria, device, args.model_file_path,
full_transformer=args.full_transformer)
logger.info("Completed Training...")
logger.info("\n")
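# --- Illustrative sketch (added for clarity; not part of the original script) ---
# Example invocation (flags are the ones defined in parse_args above; the script
# name and values are placeholders):
#   python train_transformer.py --task conversion --data-path ./h5_data \
#       --num-heads 8 --dim-feedforward 512 --dropout 0.1 --num-layers 4
# Standalone model construction mirroring the positional call above, with
# placeholder feature sizes (normally taken from the dataloaders):
def _example_encoder_model():
    encoder_feature_size, decoder_feature_size = 42, 28
    return InferenceTransformerEncoder(encoder_feature_size, 8, 512, 0.1, 4,
                                       decoder_feature_size, quaternions=True)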
|
[
"torch.manual_seed",
"torch.optim.lr_scheduler.MultiStepLR",
"argparse.ArgumentParser",
"common.logging.logger.info",
"torch.nn.L1Loss",
"torch.cuda.device_count",
"transformers.training_utils.fit",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"common.losses.QuatDistance",
"numpy.random.seed",
"transformers.transformers.InferenceTransformerEncoder",
"transformers.transformers.InferenceTransformer",
"common.data_utils.load_dataloader"
] |
[((541, 562), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (558, 562), False, 'import torch\n'), ((563, 581), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (577, 581), True, 'import numpy as np\n'), ((833, 858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (856, 858), False, 'import argparse\n'), ((4185, 4232), 'common.logging.logger.info', 'logger.info', (['"""Starting Transformer training..."""'], {}), "('Starting Transformer training...')\n", (4196, 4232), False, 'from common.logging import logger\n'), ((4375, 4414), 'common.logging.logger.info', 'logger.info', (['f"""Training on {device}..."""'], {}), "(f'Training on {device}...')\n", (4386, 4414), False, 'from common.logging import logger\n'), ((4620, 4664), 'common.data_utils.load_dataloader', 'load_dataloader', (['args', '"""training"""', 'normalize'], {}), "(args, 'training', normalize)\n", (4635, 4664), False, 'from common.data_utils import load_dataloader\n'), ((4689, 4756), 'common.data_utils.load_dataloader', 'load_dataloader', (['args', '"""validation"""', 'normalize'], {'norm_data': 'norm_data'}), "(args, 'validation', normalize, norm_data=norm_data)\n", (4704, 4756), False, 'from common.data_utils import load_dataloader\n'), ((6183, 6254), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[1, 3]', 'gamma': '(0.1)'}), '(optimizer, milestones=[1, 3], gamma=0.1)\n', (6213, 6254), False, 'from torch import nn, optim\n'), ((6428, 6439), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6437, 6439), False, 'from torch import nn, optim\n'), ((6501, 6544), 'common.logging.logger.info', 'logger.info', (['f"""Model for training: {model}"""'], {}), "(f'Model for training: {model}')\n", (6512, 6544), False, 'from common.logging import logger\n'), ((6549, 6599), 'common.logging.logger.info', 'logger.info', (['f"""Number of parameters: {num_params}"""'], {}), "(f'Number of parameters: {num_params}')\n", (6560, 6599), False, 'from common.logging import logger\n'), ((6604, 6655), 'common.logging.logger.info', 'logger.info', (['f"""Optimizer for training: {optimizer}"""'], {}), "(f'Optimizer for training: {optimizer}')\n", (6615, 6655), False, 'from common.logging import logger\n'), ((6660, 6720), 'common.logging.logger.info', 'logger.info', (['f"""Criterion for training: {training_criterion}"""'], {}), "(f'Criterion for training: {training_criterion}')\n", (6671, 6720), False, 'from common.logging import logger\n'), ((6726, 6899), 'transformers.training_utils.fit', 'fit', (['model', 'optimizer', 'scheduler', 'epochs', 'dataloaders', 'training_criterion', 'validation_criteria', 'device', 'args.model_file_path'], {'full_transformer': 'args.full_transformer'}), '(model, optimizer, scheduler, epochs, dataloaders, training_criterion,\n validation_criteria, device, args.model_file_path, full_transformer=\n args.full_transformer)\n', (6729, 6899), False, 'from transformers.training_utils import fit\n'), ((6912, 6948), 'common.logging.logger.info', 'logger.info', (['"""Completed Training..."""'], {}), "('Completed Training...')\n", (6923, 6948), False, 'from common.logging import logger\n'), ((6953, 6970), 'common.logging.logger.info', 'logger.info', (['"""\n"""'], {}), "('\\n')\n", (6964, 6970), False, 'from common.logging import logger\n'), ((5193, 5313), 'transformers.transformers.InferenceTransformer', 'InferenceTransformer', (['decoder_feature_size', 'num_heads', 'dim_feedforward', 'dropout', 'num_layers'], 
{'quaternions': 'quaternions'}), '(decoder_feature_size, num_heads, dim_feedforward,\n dropout, num_layers, quaternions=quaternions)\n', (5213, 5313), False, 'from transformers.transformers import InferenceTransformerEncoder, InferenceTransformer\n'), ((5410, 5564), 'transformers.transformers.InferenceTransformerEncoder', 'InferenceTransformerEncoder', (['encoder_feature_size', 'num_heads', 'dim_feedforward', 'dropout', 'num_layers', 'decoder_feature_size'], {'quaternions': 'quaternions'}), '(encoder_feature_size, num_heads,\n dim_feedforward, dropout, num_layers, decoder_feature_size, quaternions\n =quaternions)\n', (5437, 5564), False, 'from transformers.transformers import InferenceTransformerEncoder, InferenceTransformer\n'), ((5776, 5801), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5799, 5801), False, 'import torch\n'), ((5823, 5845), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (5838, 5845), False, 'from torch import nn, optim\n'), ((6467, 6478), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6476, 6478), False, 'from torch import nn, optim\n'), ((6480, 6494), 'common.losses.QuatDistance', 'QuatDistance', ([], {}), '()\n', (6492, 6494), False, 'from common.losses import QuatDistance\n'), ((4333, 4358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4356, 4358), False, 'import torch\n'), ((4267, 4292), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4290, 4292), False, 'import torch\n')]
|
#!/usr/bin/env nemesis
"""
This script creates a spatial database for the initial stress and state
variables for a Maxwell plane strain material.
"""
sim = "gravity_vardensity"
materials = ["crust","mantle"]
import numpy
import h5py
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs._configure()
cs.setSpaceDim(2)
# Basis functions for quad4 cell evaluated at quadrature points. Use
# to compute coordinate of quadrature points in each cell from
# coordinates of vertices. Note the order must correspond to the order
# of the data at the quadrature points in the output.
qpts = numpy.array([[ 0.62200847, 0.16666667, 0.0446582, 0.16666667],
[ 0.16666667, 0.62200847, 0.16666667, 0.0446582 ],
[ 0.16666667, 0.0446582, 0.16666667, 0.62200847],
[ 0.0446582, 0.16666667, 0.62200847, 0.16666667]], dtype=numpy.float64)
def calcQuadCoords(vertices, cells, qpts):
"""Compute coordinates of quadrature points."""
nqpts = qpts.shape[0]
ncells = cells.shape[0]
spaceDim = vertices.shape[1]
quadCoords = numpy.zeros((ncells, nqpts, spaceDim), dtype=numpy.float64)
cellCoords = vertices[cells,:]
for iDim in range(spaceDim):
quadCoords[:,:,iDim] = numpy.dot(cellCoords[:,:,iDim], qpts.transpose())
quadCoords = quadCoords.reshape((ncells*nqpts, spaceDim))
return quadCoords
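# --- Illustrative check (added for clarity; not part of the original script) ---
# calcQuadCoords on a single unit quad: the four quadrature points should land at
# the 2x2 Gauss positions, roughly 0.211 and 0.789 in each coordinate.
_demo_verts = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]], dtype=numpy.float64)
_demo_cells = numpy.array([[0, 1, 2, 3]], dtype=numpy.int64)
_demo_qcoords = calcQuadCoords(_demo_verts, _demo_cells, qpts)  # shape (4, 2)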
for material in materials:
filenameH5 = "output/%s-%s.h5" % (sim, material)
filenameDB = "%s_statevars-%s.spatialdb" % (sim, material)
# Open HDF5 file and get coordinates, cells, and stress.
h5 = h5py.File(filenameH5, "r")
vertices = h5['geometry/vertices'][:]
tindex = -1
    cells = numpy.array(h5['topology/cells'][:], dtype=numpy.int64)
stress = h5['cell_fields/stress'][tindex,:,:]
if "mantle" in material:
vstrain = h5['cell_fields/viscous_strain'][tindex,:,:]
h5.close()
# Compute coordinates of quadrature points.
quadCoords = calcQuadCoords(vertices, cells, qpts)
nqpts = qpts.shape[0]
ncells = cells.shape[0]
    nvalues = stress.shape[1]//nqpts
# Check to make sure output included all quadrature points (CellFilterAvg was not used).
if stress.shape[1] == 3:
raise ValueError("Found %d stress values for each cell. Expected 12 stress values (stress_xx, stress_yy, and stress_xy at 4 quadrature points) for each cell. Turn off CellFilterAvg in pylithapp.cfg." % stress.shape[1])
if stress.shape[1] != nqpts*3:
raise ValueError("Found %d stress values for each cell. Expected 12 stress values (stress_xx, stress_yy, and stress_xy at 4 quadrature points) for each cell. Did you turn off CellFilterAvg in pylithapp.cfg?" % stress.shape[1])
stress = stress.reshape((ncells*nqpts, nvalues))
# Create writer for spatial database file
writer = SimpleIOAscii()
writer.inventory.filename = filenameDB
writer._configure()
values = [{'name': "stress-xx",
'units': "Pa",
'data': stress[:,0]},
{'name': "stress-yy",
'units': "Pa",
'data': stress[:,1]},
{'name': "stress-xy",
'units': "Pa",
'data': stress[:,2]},
]
if "mantle" in material:
        nvalues = vstrain.shape[1]//nqpts
vstrain = vstrain.reshape((ncells*nqpts, nvalues))
stressZZ = 0.5*(stress[:,0]+stress[:,1])
zeros = numpy.zeros(stressZZ.shape)
values += [{'name': "stress-zz-initial",
'units': "Pa",
'data': stressZZ},
{'name': "total-strain-xx",
'units': "None",
'data': zeros},
{'name': "total-strain-yy",
'units': "None",
'data': zeros},
{'name': "total-strain-xy",
'units': "None",
'data': zeros},
{'name': "viscous-strain-xx",
'units': "None",
'data': vstrain[:,0]},
{'name': "viscous-strain-yy",
'units': "None",
'data': vstrain[:,1]},
{'name': "viscous-strain-zz",
'units': "None",
'data': vstrain[:,2]},
{'name': "viscous-strain-xy",
'units': "None",
'data': vstrain[:,3]},
]
writer.write({'points': quadCoords,
'coordsys': cs,
'data_dim': 2,
'values': values})
# End of file
|
[
"spatialdata.spatialdb.SimpleIOAscii.SimpleIOAscii",
"h5py.File",
"numpy.array",
"numpy.zeros",
"spatialdata.geocoords.CSCart.CSCart"
] |
[((351, 359), 'spatialdata.geocoords.CSCart.CSCart', 'CSCart', ([], {}), '()\n', (357, 359), False, 'from spatialdata.geocoords.CSCart import CSCart\n'), ((659, 903), 'numpy.array', 'numpy.array', (['[[0.62200847, 0.16666667, 0.0446582, 0.16666667], [0.16666667, 0.62200847, \n 0.16666667, 0.0446582], [0.16666667, 0.0446582, 0.16666667, 0.62200847],\n [0.0446582, 0.16666667, 0.62200847, 0.16666667]]'], {'dtype': 'numpy.float64'}), '([[0.62200847, 0.16666667, 0.0446582, 0.16666667], [0.16666667, \n 0.62200847, 0.16666667, 0.0446582], [0.16666667, 0.0446582, 0.16666667,\n 0.62200847], [0.0446582, 0.16666667, 0.62200847, 0.16666667]], dtype=\n numpy.float64)\n', (670, 903), False, 'import numpy\n'), ((1162, 1221), 'numpy.zeros', 'numpy.zeros', (['(ncells, nqpts, spaceDim)'], {'dtype': 'numpy.float64'}), '((ncells, nqpts, spaceDim), dtype=numpy.float64)\n', (1173, 1221), False, 'import numpy\n'), ((1652, 1678), 'h5py.File', 'h5py.File', (['filenameH5', '"""r"""'], {}), "(filenameH5, 'r')\n", (1661, 1678), False, 'import h5py\n'), ((1743, 1796), 'numpy.array', 'numpy.array', (["h5['topology/cells'][:]"], {'dtype': 'numpy.int'}), "(h5['topology/cells'][:], dtype=numpy.int)\n", (1754, 1796), False, 'import numpy\n'), ((2850, 2865), 'spatialdata.spatialdb.SimpleIOAscii.SimpleIOAscii', 'SimpleIOAscii', ([], {}), '()\n', (2863, 2865), False, 'from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii\n'), ((3414, 3441), 'numpy.zeros', 'numpy.zeros', (['stressZZ.shape'], {}), '(stressZZ.shape)\n', (3425, 3441), False, 'import numpy\n')]
|
import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class MetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
        :param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
(3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
super(MetamodelRegression).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
|
[
"numpy.abs",
"collections.namedtuple",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.expand_dims",
"inspect.isclass",
"sklearn.ensemble.GradientBoostingRegressor"
] |
[((6132, 6145), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (6142, 6145), True, 'import numpy as np\n'), ((6158, 6171), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (6168, 6171), True, 'import numpy as np\n'), ((7884, 7916), 'numpy.hstack', 'np.hstack', (['[X, y_hat_meta_prime]'], {}), '([X, y_hat_meta_prime])\n', (7893, 7916), True, 'import numpy as np\n'), ((8715, 8742), 'numpy.hstack', 'np.hstack', (['[X, y_hat_prime]'], {}), '([X, y_hat_prime])\n', (8724, 8742), True, 'import numpy as np\n'), ((8812, 8863), 'collections.namedtuple', 'namedtuple', (['"""res"""', "['y_mean', 'y_lower', 'y_upper']"], {}), "('res', ['y_mean', 'y_lower', 'y_upper'])\n", (8822, 8863), False, 'from collections import namedtuple\n'), ((1030, 1065), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {}), '(**config)\n', (1055, 1065), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((1801, 1823), 'inspect.isclass', 'inspect.isclass', (['model'], {}), '(model)\n', (1816, 1823), False, 'import inspect\n'), ((6296, 6405), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'shuffle': 'randomize_samples', 'test_size': 'meta_fraction', 'random_state': 'self.random_seed'}), '(X, y, shuffle=randomize_samples, test_size=meta_fraction,\n random_state=self.random_seed)\n', (6312, 6405), False, 'from sklearn.model_selection import train_test_split\n'), ((7322, 7349), 'numpy.abs', 'np.abs', (['(y_hat_meta - y_meta)'], {}), '(y_hat_meta - y_meta)\n', (7328, 7349), True, 'import numpy as np\n'), ((7803, 7828), 'numpy.expand_dims', 'np.expand_dims', (['y_hat', '(-1)'], {}), '(y_hat, -1)\n', (7817, 7828), True, 'import numpy as np\n'), ((8634, 8659), 'numpy.expand_dims', 'np.expand_dims', (['y_hat', '(-1)'], {}), '(y_hat, -1)\n', (8648, 8659), True, 'import numpy as np\n')]
|
import unittest
from collections import defaultdict
import numpy as np
import pandas as pd
from ife.io.io import ImageReader
class TestMomentFeatures(unittest.TestCase):
def test_moment_output_type(self) -> None:
features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
moment = features.moment()
self.assertIs(np.ndarray, type(moment))
moment = features.moment(output_type="")
self.assertIs(np.ndarray, type(moment))
moment = features.moment(output_type="one_col")
self.assertIs(np.ndarray, type(moment))
self.assertEqual(np.zeros(15).shape, moment.shape) # type: ignore
moment = features.moment(output_type="dict")
self.assertIs(defaultdict, type(moment))
moment = features.moment(output_type="pandas")
self.assertIs(pd.DataFrame, type(moment))
def test_colourfulness_output_type(self) -> None:
features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
moment = features.colourfulness()
self.assertIs(np.float64, type(moment))
moment = features.colourfulness(output_type="")
self.assertIs(np.float64, type(moment))
moment = features.colourfulness(output_type="one_col")
self.assertIs(np.float64, type(moment))
moment = features.colourfulness(output_type="dict")
self.assertIs(dict, type(moment))
moment = features.colourfulness(output_type="pandas")
self.assertIs(pd.DataFrame, type(moment))
|
[
"ife.io.io.ImageReader.read_from_single_file",
"numpy.zeros"
] |
[((240, 299), 'ife.io.io.ImageReader.read_from_single_file', 'ImageReader.read_from_single_file', (['"""ife/data/small_rgb.jpg"""'], {}), "('ife/data/small_rgb.jpg')\n", (273, 299), False, 'from ife.io.io import ImageReader\n'), ((945, 1004), 'ife.io.io.ImageReader.read_from_single_file', 'ImageReader.read_from_single_file', (['"""ife/data/small_rgb.jpg"""'], {}), "('ife/data/small_rgb.jpg')\n", (978, 1004), False, 'from ife.io.io import ImageReader\n'), ((612, 624), 'numpy.zeros', 'np.zeros', (['(15)'], {}), '(15)\n', (620, 624), True, 'import numpy as np\n')]
|
import numpy as np
from math import pi
import torch
from pykeops.torch import LazyTensor
from plyfile import PlyData, PlyElement
from helper import *
import torch.nn as nn
import torch.nn.functional as F
# from matplotlib import pyplot as plt
from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids, from_matrix
from math import pi, sqrt
# Input-Output for tests =======================================================
import os
from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData
def save_vtk(
fname, xyz, triangles=None, values=None, vectors=None, triangle_values=None
):
"""Saves a point cloud or triangle mesh as a .vtk file.
Files can be opened with Paraview or displayed using the PyVista library.
Args:
fname (string): filename.
xyz (Tensor): (N,3) point cloud or vertices.
triangles (integer Tensor, optional): (T,3) mesh connectivity. Defaults to None.
values (Tensor, optional): (N,D) values, supported by the vertices. Defaults to None.
vectors (Tensor, optional): (N,3) vectors, supported by the vertices. Defaults to None.
triangle_values (Tensor, optional): (T,D) values, supported by the triangles. Defaults to None.
"""
# Encode the points/vertices as a VTK structure:
if triangles is None: # Point cloud
structure = PolyData(points=numpy(xyz), vertices=np.arange(len(xyz)))
else: # Surface mesh
structure = PolyData(points=numpy(xyz), polygons=numpy(triangles))
data = [structure]
pointdata, celldata = [], []
# Point values - one channel per column of the `values` array:
if values is not None:
values = numpy(values)
if len(values.shape) == 1:
values = values[:, None]
features = values.T
pointdata += [
Scalars(f, name=f"features_{i:02d}") for i, f in enumerate(features)
]
# Point vectors - one vector per point:
if vectors is not None:
pointdata += [Vectors(numpy(vectors), name="vectors")]
# Store in the VTK object:
if pointdata != []:
pointdata = PointData(*pointdata)
data.append(pointdata)
# Triangle values - one channel per column of the `triangle_values` array:
if triangle_values is not None:
triangle_values = numpy(triangle_values)
if len(triangle_values.shape) == 1:
triangle_values = triangle_values[:, None]
features = triangle_values.T
celldata += [
Scalars(f, name=f"features_{i:02d}") for i, f in enumerate(features)
]
celldata = CellData(*celldata)
data.append(celldata)
# Write to hard drive:
vtk = VtkData(*data)
os.makedirs(os.path.dirname(fname), exist_ok=True)
vtk.tofile(fname)
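# Hedged usage sketch (added; the file path and tensors below are illustrative only):
#
#   xyz = torch.randn(1000, 3)                       # random point cloud
#   save_vtk("out/cloud.vtk", xyz, values=torch.rand(1000, 2))
#
# The written file can then be opened in Paraview or loaded with PyVista.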
# On-the-fly generation of the surfaces ========================================
def subsample(x, batch=None, scale=1.0):
"""Subsamples the point cloud using a grid (cubic) clustering scheme.
The function returns one average sample per cell, as described in Fig. 3.e)
of the paper.
Args:
x (Tensor): (N,3) point cloud.
batch (integer Tensor, optional): (N,) batch vector, as in PyTorch_geometric.
Defaults to None.
scale (float, optional): side length of the cubic grid cells. Defaults to 1 (Angstrom).
Returns:
(M,3): sub-sampled point cloud, with M <= N.
"""
if batch is None: # Single protein case:
if True: # Use a fast scatter_add_ implementation
labels = grid_cluster(x, scale).long()
C = labels.max() + 1
# We append a "1" to the input vectors, in order to
# compute both the numerator and denominator of the "average"
# fraction in one pass through the data.
x_1 = torch.cat((x, torch.ones_like(x[:, :1])), dim=1)
D = x_1.shape[1]
points = torch.zeros_like(x_1[:C])
points.scatter_add_(0, labels[:, None].repeat(1, D), x_1)
return (points[:, :-1] / points[:, -1:]).contiguous()
else: # Older implementation;
points = scatter(points * weights[:, None], labels, dim=0)
weights = scatter(weights, labels, dim=0)
points = points / weights[:, None]
else: # We process proteins using a for loop.
# This is probably sub-optimal, but I don't really know
        # how to do it more elegantly (this type of computation is
# not super well supported by PyTorch).
batch_size = torch.max(batch).item() + 1 # Typically, =32
points, batches = [], []
for b in range(batch_size):
p = subsample(x[batch == b], scale=scale)
points.append(p)
batches.append(b * torch.ones_like(batch[: len(p)]))
return torch.cat(points, dim=0), torch.cat(batches, dim=0)
def soft_distances(x, y, batch_x, batch_y, smoothness=0.01, atomtypes=None):
"""Computes a soft distance function to the atom centers of a protein.
Implements Eq. (1) of the paper in a fast and numerically stable way.
Args:
x (Tensor): (N,3) atom centers.
y (Tensor): (M,3) sampling locations.
batch_x (integer Tensor): (N,) batch vector for x, as in PyTorch_geometric.
batch_y (integer Tensor): (M,) batch vector for y, as in PyTorch_geometric.
smoothness (float, optional): atom radii if atom types are not provided. Defaults to .01.
atomtypes (integer Tensor, optional): (N,6) one-hot encoding of the atom chemical types. Defaults to None.
Returns:
Tensor: (M,) values of the soft distance function on the points `y`.
"""
# Build the (N, M, 1) symbolic matrix of squared distances:
x_i = LazyTensor(x[:, None, :]) # (N, 1, 3) atoms
y_j = LazyTensor(y[None, :, :]) # (1, M, 3) sampling points
D_ij = ((x_i - y_j) ** 2).sum(-1) # (N, M, 1) squared distances
# Use a block-diagonal sparsity mask to support heterogeneous batch processing:
D_ij.ranges = diagonal_ranges(batch_x, batch_y)
if atomtypes is not None:
# Turn the one-hot encoding "atomtypes" into a vector of diameters "smoothness_i":
# (N, 6) -> (N, 1, 1) (There are 6 atom types)
atomic_radii = torch.FloatTensor(
[170, 110, 152, 155, 180, 190], device=x.device
)
atomic_radii = atomic_radii / atomic_radii.min()
atomtype_radii = atomtypes * atomic_radii[None, :] # n_atoms, n_atomtypes
# smoothness = atomtypes @ atomic_radii # (N, 6) @ (6,) = (N,)
smoothness = torch.sum(
smoothness * atomtype_radii, dim=1, keepdim=False
) # n_atoms, 1
smoothness_i = LazyTensor(smoothness[:, None, None])
# Compute an estimation of the mean smoothness in a neighborhood
# of each sampling point:
# density = (-D_ij.sqrt()).exp().sum(0).view(-1) # (M,) local density of atoms
# smooth = (smoothness_i * (-D_ij.sqrt()).exp()).sum(0).view(-1) # (M,)
# mean_smoothness = smooth / density # (M,)
# soft_dists = -mean_smoothness * (
# (-D_ij.sqrt() / smoothness_i).logsumexp(dim=0)
# ).view(-1)
mean_smoothness = (-D_ij.sqrt()).exp().sum(0)
mean_smoothness_j = LazyTensor(mean_smoothness[None, :, :])
mean_smoothness = (
smoothness_i * (-D_ij.sqrt()).exp() / mean_smoothness_j
) # n_atoms, n_points, 1
mean_smoothness = mean_smoothness.sum(0).view(-1)
soft_dists = -mean_smoothness * (
(-D_ij.sqrt() / smoothness_i).logsumexp(dim=0)
).view(-1)
else:
soft_dists = -smoothness * ((-D_ij.sqrt() / smoothness).logsumexp(dim=0)).view(
-1
)
return soft_dists
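# Note (added): with a scalar radius, the value returned above is the smooth
# minimum distance
#     SDF(y) = -sigma * log( sum_k exp( -||y - a_k|| / sigma ) ),
# i.e. Eq. (1) evaluated through a numerically stable logsumexp reduction;
# the atom-typed branch simply replaces sigma by a locally averaged radius.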
def atoms_to_points_normals(
atoms,
batch,
distance=1.05,
smoothness=0.5,
resolution=1.0,
nits=4,
atomtypes=None,
sup_sampling=20,
variance=0.1,
):
"""Turns a collection of atoms into an oriented point cloud.
Sampling algorithm for protein surfaces, described in Fig. 3 of the paper.
Args:
atoms (Tensor): (N,3) coordinates of the atom centers `a_k`.
batch (integer Tensor): (N,) batch vector, as in PyTorch_geometric.
distance (float, optional): value of the level set to sample from
the smooth distance function. Defaults to 1.05.
smoothness (float, optional): radii of the atoms, if atom types are
not provided. Defaults to 0.5.
resolution (float, optional): side length of the cubic cells in
the final sub-sampling pass. Defaults to 1.0.
        nits (int, optional): number of iterations. Defaults to 4.
atomtypes (Tensor, optional): (N,6) one-hot encoding of the atom
chemical types. Defaults to None.
Returns:
(Tensor): (M,3) coordinates for the surface points `x_i`.
(Tensor): (M,3) unit normals `n_i`.
(integer Tensor): (M,) batch vector, as in PyTorch_geometric.
"""
# a) Parameters for the soft distance function and its level set:
T = distance
N, D = atoms.shape
B = sup_sampling # Sup-sampling ratio
# Batch vectors:
batch_atoms = batch
batch_z = batch[:, None].repeat(1, B).view(N * B)
# b) Draw N*B points at random in the neighborhood of our atoms
z = atoms[:, None, :] + 10 * T * torch.randn(N, B, D).type_as(atoms)
z = z.view(-1, D) # (N*B, D)
# We don't want to backprop through a full network here!
atoms = atoms.detach().contiguous()
z = z.detach().contiguous()
    # N.B.: Test mode disables the autograd engine: we must switch it on explicitly.
with torch.enable_grad():
if z.is_leaf:
z.requires_grad = True
# c) Iterative loop: gradient descent along the potential
# ".5 * (dist - T)^2" with respect to the positions z of our samples
for it in range(nits):
dists = soft_distances(
atoms,
z,
batch_atoms,
batch_z,
smoothness=smoothness,
atomtypes=atomtypes,
)
Loss = ((dists - T) ** 2).sum()
g = torch.autograd.grad(Loss, z)[0]
z.data -= 0.5 * g
# d) Only keep the points which are reasonably close to the level set:
dists = soft_distances(
atoms, z, batch_atoms, batch_z, smoothness=smoothness, atomtypes=atomtypes
)
margin = (dists - T).abs()
mask = margin < variance * T
# d') And remove the points that are trapped *inside* the protein:
zz = z.detach()
zz.requires_grad = True
for it in range(nits):
dists = soft_distances(
atoms,
zz,
batch_atoms,
batch_z,
smoothness=smoothness,
atomtypes=atomtypes,
)
Loss = (1.0 * dists).sum()
g = torch.autograd.grad(Loss, zz)[0]
normals = F.normalize(g, p=2, dim=-1) # (N, 3)
zz = zz + 1.0 * T * normals
dists = soft_distances(
atoms, zz, batch_atoms, batch_z, smoothness=smoothness, atomtypes=atomtypes
)
mask = mask & (dists > 1.5 * T)
z = z[mask].contiguous().detach()
batch_z = batch_z[mask].contiguous().detach()
# e) Subsample the point cloud:
points, batch_points = subsample(z, batch_z, scale=resolution)
# f) Compute the normals on this smaller point cloud:
p = points.detach()
p.requires_grad = True
dists = soft_distances(
atoms,
p,
batch_atoms,
batch_points,
smoothness=smoothness,
atomtypes=atomtypes,
)
Loss = (1.0 * dists).sum()
g = torch.autograd.grad(Loss, p)[0]
normals = F.normalize(g, p=2, dim=-1) # (N, 3)
points = points - 0.5 * normals
return points.detach(), normals.detach(), batch_points.detach()
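# Hedged usage sketch (added; shapes and values are illustrative stand-ins,
# not real protein data):
#
#   atoms = torch.randn(500, 3)
#   batch = torch.zeros(500, dtype=torch.long)
#   xyz, normals, batch_xyz = atoms_to_points_normals(atoms, batch, resolution=1.0)
#   # xyz: (M, 3) surface samples, normals: (M, 3) unit vectors, batch_xyz: (M,)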
# Surface mesh -> Normals ======================================================
def mesh_normals_areas(vertices, triangles=None, scale=[1.0], batch=None, normals=None):
"""Returns a smooth field of normals, possibly at different scales.
points, triangles or normals, scale(s) -> normals
(N, 3), (3, T) or (N,3), (S,) -> (N, 3) or (N, S, 3)
Simply put - if `triangles` are provided:
1. Normals are first computed for every triangle using simple 3D geometry
and are weighted according to surface area.
2. The normal at any given vertex is then computed as the weighted average
of the normals of all triangles in a neighborhood specified
by Gaussian windows whose radii are given in the list of "scales".
If `normals` are provided instead, we simply smooth the discrete vector
field using Gaussian windows whose radii are given in the list of "scales".
If more than one scale is provided, normal fields are computed in parallel
and returned in a single 3D tensor.
Args:
vertices (Tensor): (N,3) coordinates of mesh vertices or 3D points.
triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
scale (list of floats, optional): (S,) radii of the Gaussian smoothing windows. Defaults to [1.].
batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
normals (Tensor, optional): (N,3) raw normals vectors on the vertices. Defaults to None.
Returns:
(Tensor): (N,3) or (N,S,3) point normals.
(Tensor): (N,) point areas, if triangles were provided.
"""
# Single- or Multi-scale mode:
if hasattr(scale, "__len__"):
scales, single_scale = scale, False
else:
scales, single_scale = [scale], True
scales = torch.Tensor(scales).type_as(vertices) # (S,)
# Compute the "raw" field of normals:
if triangles is not None:
# Vertices of all triangles in the mesh:
A = vertices[triangles[0, :]] # (N, 3)
B = vertices[triangles[1, :]] # (N, 3)
C = vertices[triangles[2, :]] # (N, 3)
# Triangle centers and normals (length = surface area):
centers = (A + B + C) / 3 # (N, 3)
V = (B - A).cross(C - A) # (N, 3)
# Vertice areas:
S = (V ** 2).sum(-1).sqrt() / 6 # (N,) 1/3 of a triangle area
areas = torch.zeros(len(vertices)).type_as(vertices) # (N,)
areas.scatter_add_(0, triangles[0, :], S) # Aggregate from "A's"
areas.scatter_add_(0, triangles[1, :], S) # Aggregate from "B's"
areas.scatter_add_(0, triangles[2, :], S) # Aggregate from "C's"
else: # Use "normals" instead
areas = None
V = normals
centers = vertices
# Normal of a vertex = average of all normals in a ball of size "scale":
x_i = LazyTensor(vertices[:, None, :]) # (N, 1, 3)
y_j = LazyTensor(centers[None, :, :]) # (1, M, 3)
v_j = LazyTensor(V[None, :, :]) # (1, M, 3)
s = LazyTensor(scales[None, None, :]) # (1, 1, S)
D_ij = ((x_i - y_j) ** 2).sum(-1) # (N, M, 1)
K_ij = (-D_ij / (2 * s ** 2)).exp() # (N, M, S)
# Support for heterogeneous batch processing:
if batch is not None:
batch_vertices = batch
batch_centers = batch[triangles[0, :]] if triangles is not None else batch
K_ij.ranges = diagonal_ranges(batch_vertices, batch_centers)
if single_scale:
U = (K_ij * v_j).sum(dim=1) # (N, 3)
else:
U = (K_ij.tensorprod(v_j)).sum(dim=1) # (N, S*3)
U = U.view(-1, len(scales), 3) # (N, S, 3)
normals = F.normalize(U, p=2, dim=-1) # (N, 3) or (N, S, 3)
return normals, areas
# Compute tangent planes and curvatures ========================================
def tangent_vectors(normals):
"""Returns a pair of vector fields u and v to complete the orthonormal basis [n,u,v].
normals -> uv
(N, 3) or (N, S, 3) -> (N, 2, 3) or (N, S, 2, 3)
This routine assumes that the 3D "normal" vectors are normalized.
It is based on the 2017 paper from Pixar, "Building an orthonormal basis, revisited".
Args:
normals (Tensor): (N,3) or (N,S,3) normals `n_i`, i.e. unit-norm 3D vectors.
Returns:
(Tensor): (N,2,3) or (N,S,2,3) unit vectors `u_i` and `v_i` to complete
the tangent coordinate systems `[n_i,u_i,v_i].
"""
x, y, z = normals[..., 0], normals[..., 1], normals[..., 2]
s = (2 * (z >= 0)) - 1.0 # = z.sign(), but =1. if z=0.
a = -1 / (s + z)
b = x * y * a
uv = torch.stack((1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y), dim=-1)
uv = uv.view(uv.shape[:-1] + (2, 3))
return uv
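# Sanity check (added, illustrative): the returned u_i, v_i are orthogonal to
# the unit normals they complete, e.g.
#
#   n = F.normalize(torch.randn(10, 3), p=2, dim=-1)
#   uv = tangent_vectors(n)                           # (10, 2, 3)
#   assert ((uv * n[:, None, :]).sum(-1).abs() < 1e-5).all()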
def curvatures(
vertices, triangles=None, scales=[1.0], batch=None, normals=None, reg=0.01
):
"""Returns a collection of mean (H) and Gauss (K) curvatures at different scales.
points, faces, scales -> (H_1, K_1, ..., H_S, K_S)
(N, 3), (3, N), (S,) -> (N, S*2)
We rely on a very simple linear regression method, for all vertices:
1. Estimate normals and surface areas.
2. Compute a local tangent frame.
3. In a pseudo-geodesic Gaussian neighborhood at scale s,
compute the two (2, 2) covariance matrices PPt and PQt
between the displacement vectors "P = x_i - x_j" and
the normals "Q = n_i - n_j", projected on the local tangent plane.
4. Up to the sign, the shape operator S at scale s is then approximated
as "S = (reg**2 * I_2 + PPt)^-1 @ PQt".
5. The mean and Gauss curvatures are the trace and determinant of
this (2, 2) matrix.
As of today, this implementation does not weigh points by surface areas:
this could make a sizeable difference if protein surfaces were not
sub-sampled to ensure uniform sampling density.
For convergence analysis, see for instance
"Efficient curvature estimation for oriented point clouds",
Cao, Li, Sun, Assadi, Zhang, 2019.
Args:
vertices (Tensor): (N,3) coordinates of the points or mesh vertices.
triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
scales (list of floats, optional): list of (S,) smoothing scales. Defaults to [1.].
batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
normals (Tensor, optional): (N,3) field of "raw" unit normals. Defaults to None.
reg (float, optional): small amount of Tikhonov/ridge regularization
in the estimation of the shape operator. Defaults to .01.
Returns:
(Tensor): (N, S*2) tensor of mean and Gauss curvatures computed for
every point at the required scales.
"""
# Number of points, number of scales:
N, S = vertices.shape[0], len(scales)
ranges = diagonal_ranges(batch)
# Compute the normals at different scales + vertice areas:
normals_s, _ = mesh_normals_areas(
vertices, triangles=triangles, normals=normals, scale=scales, batch=batch
) # (N, S, 3), (N,)
# Local tangent bases:
uv_s = tangent_vectors(normals_s) # (N, S, 2, 3)
features = []
for s, scale in enumerate(scales):
# Extract the relevant descriptors at the current scale:
normals = normals_s[:, s, :].contiguous() # (N, 3)
uv = uv_s[:, s, :, :].contiguous() # (N, 2, 3)
# Encode as symbolic tensors:
# Points:
x_i = LazyTensor(vertices.view(N, 1, 3))
x_j = LazyTensor(vertices.view(1, N, 3))
# Normals:
n_i = LazyTensor(normals.view(N, 1, 3))
n_j = LazyTensor(normals.view(1, N, 3))
# Tangent bases:
uv_i = LazyTensor(uv.view(N, 1, 6))
# Pseudo-geodesic squared distance:
d2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2) # (N, N, 1)
# Gaussian window:
window_ij = (-d2_ij / (2 * (scale ** 2))).exp() # (N, N, 1)
# Project on the tangent plane:
P_ij = uv_i.matvecmult(x_j - x_i) # (N, N, 2)
Q_ij = uv_i.matvecmult(n_j - n_i) # (N, N, 2)
# Concatenate:
PQ_ij = P_ij.concat(Q_ij) # (N, N, 2+2)
# Covariances, with a scale-dependent weight:
PPt_PQt_ij = P_ij.tensorprod(PQ_ij) # (N, N, 2*(2+2))
PPt_PQt_ij = window_ij * PPt_PQt_ij # (N, N, 2*(2+2))
# Reduction - with batch support:
PPt_PQt_ij.ranges = ranges
PPt_PQt = PPt_PQt_ij.sum(1) # (N, 2*(2+2))
# Reshape to get the two covariance matrices:
PPt_PQt = PPt_PQt.view(N, 2, 2, 2)
PPt, PQt = PPt_PQt[:, :, 0, :], PPt_PQt[:, :, 1, :] # (N, 2, 2), (N, 2, 2)
# Add a small ridge regression:
PPt[:, 0, 0] += reg
PPt[:, 1, 1] += reg
# (minus) Shape operator, i.e. the differential of the Gauss map:
# = (PPt^-1 @ PQt) : simple estimation through linear regression
S = torch.solve(PQt, PPt).solution
a, b, c, d = S[:, 0, 0], S[:, 0, 1], S[:, 1, 0], S[:, 1, 1] # (N,)
# Normalization
mean_curvature = a + d
gauss_curvature = a * d - b * c
features += [mean_curvature.clamp(-1, 1), gauss_curvature.clamp(-1, 1)]
features = torch.stack(features, dim=-1)
return features
# Fast tangent convolution layer ===============================================
class ContiguousBackward(torch.autograd.Function):
"""
Function to ensure contiguous gradient in backward pass. To be applied after PyKeOps reduction.
N.B.: This workaround fixes a bug that will be fixed in ulterior KeOp releases.
"""
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output.contiguous()
class dMaSIFConv(nn.Module):
def __init__(
self, in_channels=1, out_channels=1, radius=1.0, hidden_units=None, cheap=False
):
"""Creates the KeOps convolution layer.
I = in_channels is the dimension of the input features
O = out_channels is the dimension of the output features
H = hidden_units is the dimension of the intermediate representation
radius is the size of the pseudo-geodesic Gaussian window w_ij = W(d_ij)
This affordable layer implements an elementary "convolution" operator
on a cloud of N points (x_i) in dimension 3 that we decompose in three steps:
1. Apply the MLP "net_in" on the input features "f_i". (N, I) -> (N, H)
2. Compute H interaction terms in parallel with:
f_i = sum_j [ w_ij * conv(P_ij) * f_j ]
In the equation above:
- w_ij is a pseudo-geodesic window with a set radius.
- P_ij is a vector of dimension 3, equal to "x_j-x_i"
in the local oriented basis at x_i.
- "conv" is an MLP from R^3 to R^H:
- with 1 linear layer if "cheap" is True;
- with 2 linear layers and C=8 intermediate "cuts" otherwise.
- "*" is coordinate-wise product.
- f_j is the vector of transformed features.
3. Apply the MLP "net_out" on the output features. (N, H) -> (N, O)
A more general layer would have implemented conv(P_ij) as a full
(H, H) matrix instead of a mere (H,) vector... At a much higher
computational cost. The reasoning behind the code below is that
a given time budget is better spent on using a larger architecture
and more channels than on a very complex convolution operator.
Interactions between channels happen at steps 1. and 3.,
whereas the (costly) point-to-point interaction step 2.
lets the network aggregate information in spatial neighborhoods.
Args:
            in_channels (int, optional): number of input features per point. Defaults to 1.
out_channels (int, optional): number of output features per point. Defaults to 1.
radius (float, optional): deviation of the Gaussian window on the
quasi-geodesic distance `d_ij`. Defaults to 1..
hidden_units (int, optional): number of hidden features per point.
Defaults to out_channels.
cheap (bool, optional): shall we use a 1-layer deep Filter,
instead of a 2-layer deep MLP? Defaults to False.
"""
super(dMaSIFConv, self).__init__()
self.Input = in_channels
self.Output = out_channels
self.Radius = radius
self.Hidden = self.Output if hidden_units is None else hidden_units
self.Cuts = 8 # Number of hidden units for the 3D MLP Filter.
self.cheap = cheap
# For performance reasons, we cut our "hidden" vectors
# in n_heads "independent heads" of dimension 8.
self.heads_dim = 8 # 4 is probably too small; 16 is certainly too big
# We accept "Hidden" dimensions of size 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, ...
if self.Hidden < self.heads_dim:
self.heads_dim = self.Hidden
if self.Hidden % self.heads_dim != 0:
raise ValueError(f"The dimension of the hidden units ({self.Hidden})"\
                + f" should be a multiple of the heads dimension ({self.heads_dim}).")
else:
self.n_heads = self.Hidden // self.heads_dim
# Transformation of the input features:
self.net_in = nn.Sequential(
nn.Linear(self.Input, self.Hidden), # (H, I) + (H,)
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(self.Hidden, self.Hidden), # (H, H) + (H,)
# nn.LayerNorm(self.Hidden),#nn.BatchNorm1d(self.Hidden),
nn.LeakyReLU(negative_slope=0.2),
) # (H,)
self.norm_in = nn.GroupNorm(4, self.Hidden)
# self.norm_in = nn.LayerNorm(self.Hidden)
# self.norm_in = nn.Identity()
# 3D convolution filters, encoded as an MLP:
if cheap:
self.conv = nn.Sequential(
nn.Linear(3, self.Hidden), nn.ReLU() # (H, 3) + (H,)
) # KeOps does not support well LeakyReLu
else:
self.conv = nn.Sequential(
nn.Linear(3, self.Cuts), # (C, 3) + (C,)
nn.ReLU(), # KeOps does not support well LeakyReLu
nn.Linear(self.Cuts, self.Hidden),
) # (H, C) + (H,)
# Transformation of the output features:
self.net_out = nn.Sequential(
nn.Linear(self.Hidden, self.Output), # (O, H) + (O,)
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(self.Output, self.Output), # (O, O) + (O,)
# nn.LayerNorm(self.Output),#nn.BatchNorm1d(self.Output),
nn.LeakyReLU(negative_slope=0.2),
) # (O,)
self.norm_out = nn.GroupNorm(4, self.Output)
# self.norm_out = nn.LayerNorm(self.Output)
# self.norm_out = nn.Identity()
# Custom initialization for the MLP convolution filters:
# we get interesting piecewise affine cuts on a normalized neighborhood.
with torch.no_grad():
nn.init.normal_(self.conv[0].weight)
nn.init.uniform_(self.conv[0].bias)
self.conv[0].bias *= 0.8 * (self.conv[0].weight ** 2).sum(-1).sqrt()
if not cheap:
nn.init.uniform_(
self.conv[2].weight,
a=-1 / np.sqrt(self.Cuts),
b=1 / np.sqrt(self.Cuts),
)
nn.init.normal_(self.conv[2].bias)
self.conv[2].bias *= 0.5 * (self.conv[2].weight ** 2).sum(-1).sqrt()
def forward(self, points, nuv, features, ranges=None):
"""Performs a quasi-geodesic interaction step.
points, local basis, in features -> out features
(N, 3), (N, 3, 3), (N, I) -> (N, O)
This layer computes the interaction step of Eq. (7) in the paper,
in-between the application of two MLP networks independently on all
feature vectors.
Args:
points (Tensor): (N,3) point coordinates `x_i`.
nuv (Tensor): (N,3,3) local coordinate systems `[n_i,u_i,v_i]`.
features (Tensor): (N,I) input feature vectors `f_i`.
ranges (6-uple of integer Tensors, optional): low-level format
to support batch processing, as described in the KeOps documentation.
In practice, this will be built by a higher-level object
to encode the relevant "batch vectors" in a way that is convenient
for the KeOps CUDA engine. Defaults to None.
Returns:
(Tensor): (N,O) output feature vectors `f'_i`.
"""
# 1. Transform the input features: -------------------------------------
features = self.net_in(features) # (N, I) -> (N, H)
features = features.transpose(1, 0)[None, :, :] # (1,H,N)
features = self.norm_in(features)
features = features[0].transpose(1, 0).contiguous() # (1, H, N) -> (N, H)
# 2. Compute the local "shape contexts": -------------------------------
# 2.a Normalize the kernel radius:
points = points / (sqrt(2.0) * self.Radius) # (N, 3)
# 2.b Encode the variables as KeOps LazyTensors
# Vertices:
x_i = LazyTensor(points[:, None, :]) # (N, 1, 3)
x_j = LazyTensor(points[None, :, :]) # (1, N, 3)
# WARNING - Here, we assume that the normals are fixed:
normals = (
nuv[:, 0, :].contiguous().detach()
) # (N, 3) - remove the .detach() if needed
# Local bases:
nuv_i = LazyTensor(nuv.view(-1, 1, 9)) # (N, 1, 9)
# Normals:
n_i = nuv_i[:3] # (N, 1, 3)
n_j = LazyTensor(normals[None, :, :]) # (1, N, 3)
# To avoid register spilling when using large embeddings, we perform our KeOps reduction
# over the vector of length "self.Hidden = self.n_heads * self.heads_dim"
# as self.n_heads reduction over vectors of length self.heads_dim (= "Hd" in the comments).
head_out_features = []
for head in range(self.n_heads):
# Extract a slice of width Hd from the feature array
head_start = head * self.heads_dim
head_end = head_start + self.heads_dim
head_features = features[:, head_start:head_end].contiguous() # (N, H) -> (N, Hd)
# Features:
f_j = LazyTensor(head_features[None, :, :]) # (1, N, Hd)
# Convolution parameters:
if self.cheap:
# Extract a slice of Hd lines: (H, 3) -> (Hd, 3)
A = self.conv[0].weight[head_start:head_end, :].contiguous()
# Extract a slice of Hd coefficients: (H,) -> (Hd,)
B = self.conv[0].bias[head_start:head_end].contiguous()
AB = torch.cat((A, B[:, None]), dim=1) # (Hd, 4)
ab = LazyTensor(AB.view(1, 1, -1)) # (1, 1, Hd*4)
else:
A_1, B_1 = self.conv[0].weight, self.conv[0].bias # (C, 3), (C,)
# Extract a slice of Hd lines: (H, C) -> (Hd, C)
A_2 = self.conv[2].weight[head_start:head_end, :].contiguous()
# Extract a slice of Hd coefficients: (H,) -> (Hd,)
B_2 = self.conv[2].bias[head_start:head_end].contiguous()
a_1 = LazyTensor(A_1.view(1, 1, -1)) # (1, 1, C*3)
b_1 = LazyTensor(B_1.view(1, 1, -1)) # (1, 1, C)
a_2 = LazyTensor(A_2.view(1, 1, -1)) # (1, 1, Hd*C)
b_2 = LazyTensor(B_2.view(1, 1, -1)) # (1, 1, Hd)
# 2.c Pseudo-geodesic window:
# Pseudo-geodesic squared distance:
d2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2) # (N, N, 1)
# Gaussian window:
window_ij = (-d2_ij).exp() # (N, N, 1)
# 2.d Local MLP:
# Local coordinates:
X_ij = nuv_i.matvecmult(x_j - x_i) # (N, N, 9) "@" (N, N, 3) = (N, N, 3)
# MLP:
if self.cheap:
X_ij = ab.matvecmult(
X_ij.concat(LazyTensor(1))
) # (N, N, Hd*4) @ (N, N, 3+1) = (N, N, Hd)
X_ij = X_ij.relu() # (N, N, Hd)
else:
X_ij = a_1.matvecmult(X_ij) + b_1 # (N, N, C)
X_ij = X_ij.relu() # (N, N, C)
X_ij = a_2.matvecmult(X_ij) + b_2 # (N, N, Hd)
X_ij = X_ij.relu()
# 2.e Actual computation:
F_ij = window_ij * X_ij * f_j # (N, N, Hd)
F_ij.ranges = ranges # Support for batches and/or block-sparsity
head_out_features.append(ContiguousBackward().apply(F_ij.sum(dim=1))) # (N, Hd)
# Concatenate the result of our n_heads "attention heads":
features = torch.cat(head_out_features, dim=1) # n_heads * (N, Hd) -> (N, H)
# 3. Transform the output features: ------------------------------------
features = self.net_out(features) # (N, H) -> (N, O)
features = features.transpose(1, 0)[None, :, :] # (1,O,N)
features = self.norm_out(features)
features = features[0].transpose(1, 0).contiguous()
return features
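# Hedged usage sketch (added; the tensors below are random stand-ins, and the
# hyper-parameters are illustrative choices, not values from the paper):
#
#   points = torch.randn(128, 3)
#   normals = F.normalize(torch.randn(128, 3), p=2, dim=-1)
#   nuv = torch.cat((normals[:, None, :], tangent_vectors(normals)), dim=1)  # (128, 3, 3)
#   conv = dMaSIFConv(in_channels=16, out_channels=16, radius=9.0, hidden_units=16)
#   out = conv(points, nuv, torch.randn(128, 16))     # (128, 16)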
|
[
"torch.nn.ReLU",
"numpy.sqrt",
"torch.max",
"math.sqrt",
"torch.sum",
"torch.nn.GroupNorm",
"pyvtk.PointData",
"pyvtk.VtkData",
"pyvtk.CellData",
"torch.zeros_like",
"torch.nn.init.uniform_",
"pykeops.torch.LazyTensor",
"torch.randn",
"torch.ones_like",
"torch.nn.LeakyReLU",
"torch.Tensor",
"torch.solve",
"torch.nn.functional.normalize",
"os.path.dirname",
"torch.autograd.grad",
"torch.cat",
"torch.nn.init.normal_",
"pykeops.torch.cluster.grid_cluster",
"torch.enable_grad",
"pyvtk.Scalars",
"torch.stack",
"torch.nn.Linear",
"torch.no_grad",
"torch.FloatTensor"
] |
[((2722, 2736), 'pyvtk.VtkData', 'VtkData', (['*data'], {}), '(*data)\n', (2729, 2736), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((5780, 5805), 'pykeops.torch.LazyTensor', 'LazyTensor', (['x[:, None, :]'], {}), '(x[:, None, :])\n', (5790, 5805), False, 'from pykeops.torch import LazyTensor\n'), ((5835, 5860), 'pykeops.torch.LazyTensor', 'LazyTensor', (['y[None, :, :]'], {}), '(y[None, :, :])\n', (5845, 5860), False, 'from pykeops.torch import LazyTensor\n'), ((15017, 15049), 'pykeops.torch.LazyTensor', 'LazyTensor', (['vertices[:, None, :]'], {}), '(vertices[:, None, :])\n', (15027, 15049), False, 'from pykeops.torch import LazyTensor\n'), ((15073, 15104), 'pykeops.torch.LazyTensor', 'LazyTensor', (['centers[None, :, :]'], {}), '(centers[None, :, :])\n', (15083, 15104), False, 'from pykeops.torch import LazyTensor\n'), ((15128, 15153), 'pykeops.torch.LazyTensor', 'LazyTensor', (['V[None, :, :]'], {}), '(V[None, :, :])\n', (15138, 15153), False, 'from pykeops.torch import LazyTensor\n'), ((15175, 15208), 'pykeops.torch.LazyTensor', 'LazyTensor', (['scales[None, None, :]'], {}), '(scales[None, None, :])\n', (15185, 15208), False, 'from pykeops.torch import LazyTensor\n'), ((15791, 15818), 'torch.nn.functional.normalize', 'F.normalize', (['U'], {'p': '(2)', 'dim': '(-1)'}), '(U, p=2, dim=-1)\n', (15802, 15818), True, 'import torch.nn.functional as F\n'), ((16763, 16840), 'torch.stack', 'torch.stack', (['(1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y)'], {'dim': '(-1)'}), '((1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y), dim=-1)\n', (16774, 16840), False, 'import torch\n'), ((21434, 21463), 'torch.stack', 'torch.stack', (['features'], {'dim': '(-1)'}), '(features, dim=-1)\n', (21445, 21463), False, 'import torch\n'), ((2146, 2167), 'pyvtk.PointData', 'PointData', (['*pointdata'], {}), '(*pointdata)\n', (2155, 2167), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((2633, 2652), 'pyvtk.CellData', 'CellData', (['*celldata'], {}), '(*celldata)\n', (2641, 2652), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((2753, 2775), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (2768, 2775), False, 'import os\n'), ((4848, 4872), 'torch.cat', 'torch.cat', (['points'], {'dim': '(0)'}), '(points, dim=0)\n', (4857, 4872), False, 'import torch\n'), ((4874, 4899), 'torch.cat', 'torch.cat', (['batches'], {'dim': '(0)'}), '(batches, dim=0)\n', (4883, 4899), False, 'import torch\n'), ((6298, 6364), 'torch.FloatTensor', 'torch.FloatTensor', (['[170, 110, 152, 155, 180, 190]'], {'device': 'x.device'}), '([170, 110, 152, 155, 180, 190], device=x.device)\n', (6315, 6364), False, 'import torch\n'), ((6620, 6680), 'torch.sum', 'torch.sum', (['(smoothness * atomtype_radii)'], {'dim': '(1)', 'keepdim': '(False)'}), '(smoothness * atomtype_radii, dim=1, keepdim=False)\n', (6629, 6680), False, 'import torch\n'), ((6740, 6777), 'pykeops.torch.LazyTensor', 'LazyTensor', (['smoothness[:, None, None]'], {}), '(smoothness[:, None, None])\n', (6750, 6777), False, 'from pykeops.torch import LazyTensor\n'), ((7316, 7355), 'pykeops.torch.LazyTensor', 'LazyTensor', (['mean_smoothness[None, :, :]'], {}), '(mean_smoothness[None, :, :])\n', (7326, 7355), False, 'from pykeops.torch import LazyTensor\n'), ((9730, 9749), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (9747, 9749), False, 'import torch\n'), ((11976, 12003), 
'torch.nn.functional.normalize', 'F.normalize', (['g'], {'p': '(2)', 'dim': '(-1)'}), '(g, p=2, dim=-1)\n', (11987, 12003), True, 'import torch.nn.functional as F\n'), ((26007, 26035), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(4)', 'self.Hidden'], {}), '(4, self.Hidden)\n', (26019, 26035), True, 'import torch.nn as nn\n'), ((27049, 27077), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(4)', 'self.Output'], {}), '(4, self.Output)\n', (27061, 27077), True, 'import torch.nn as nn\n'), ((29580, 29610), 'pykeops.torch.LazyTensor', 'LazyTensor', (['points[:, None, :]'], {}), '(points[:, None, :])\n', (29590, 29610), False, 'from pykeops.torch import LazyTensor\n'), ((29638, 29668), 'pykeops.torch.LazyTensor', 'LazyTensor', (['points[None, :, :]'], {}), '(points[None, :, :])\n', (29648, 29668), False, 'from pykeops.torch import LazyTensor\n'), ((30022, 30053), 'pykeops.torch.LazyTensor', 'LazyTensor', (['normals[None, :, :]'], {}), '(normals[None, :, :])\n', (30032, 30053), False, 'from pykeops.torch import LazyTensor\n'), ((33146, 33181), 'torch.cat', 'torch.cat', (['head_out_features'], {'dim': '(1)'}), '(head_out_features, dim=1)\n', (33155, 33181), False, 'import torch\n'), ((1855, 1891), 'pyvtk.Scalars', 'Scalars', (['f'], {'name': 'f"""features_{i:02d}"""'}), "(f, name=f'features_{i:02d}')\n", (1862, 1891), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((2534, 2570), 'pyvtk.Scalars', 'Scalars', (['f'], {'name': 'f"""features_{i:02d}"""'}), "(f, name=f'features_{i:02d}')\n", (2541, 2570), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((3950, 3975), 'torch.zeros_like', 'torch.zeros_like', (['x_1[:C]'], {}), '(x_1[:C])\n', (3966, 3975), False, 'import torch\n'), ((11104, 11131), 'torch.nn.functional.normalize', 'F.normalize', (['g'], {'p': '(2)', 'dim': '(-1)'}), '(g, p=2, dim=-1)\n', (11115, 11131), True, 'import torch.nn.functional as F\n'), ((11926, 11954), 'torch.autograd.grad', 'torch.autograd.grad', (['Loss', 'p'], {}), '(Loss, p)\n', (11945, 11954), False, 'import torch\n'), ((13972, 13992), 'torch.Tensor', 'torch.Tensor', (['scales'], {}), '(scales)\n', (13984, 13992), False, 'import torch\n'), ((21135, 21156), 'torch.solve', 'torch.solve', (['PQt', 'PPt'], {}), '(PQt, PPt)\n', (21146, 21156), False, 'import torch\n'), ((25684, 25718), 'torch.nn.Linear', 'nn.Linear', (['self.Input', 'self.Hidden'], {}), '(self.Input, self.Hidden)\n', (25693, 25718), True, 'import torch.nn as nn\n'), ((25749, 25781), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (25761, 25781), True, 'import torch.nn as nn\n'), ((25795, 25830), 'torch.nn.Linear', 'nn.Linear', (['self.Hidden', 'self.Hidden'], {}), '(self.Hidden, self.Hidden)\n', (25804, 25830), True, 'import torch.nn as nn\n'), ((25931, 25963), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (25943, 25963), True, 'import torch.nn as nn\n'), ((26723, 26758), 'torch.nn.Linear', 'nn.Linear', (['self.Hidden', 'self.Output'], {}), '(self.Hidden, self.Output)\n', (26732, 26758), True, 'import torch.nn as nn\n'), ((26789, 26821), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (26801, 26821), True, 'import torch.nn as nn\n'), ((26835, 26870), 'torch.nn.Linear', 'nn.Linear', (['self.Output', 'self.Output'], {}), '(self.Output, self.Output)\n', (26844, 26870), True, 'import torch.nn as nn\n'), ((26971, 27003), 
'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (26983, 27003), True, 'import torch.nn as nn\n'), ((27330, 27345), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27343, 27345), False, 'import torch\n'), ((27359, 27395), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv[0].weight'], {}), '(self.conv[0].weight)\n', (27374, 27395), True, 'import torch.nn as nn\n'), ((27408, 27443), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.conv[0].bias'], {}), '(self.conv[0].bias)\n', (27424, 27443), True, 'import torch.nn as nn\n'), ((30721, 30758), 'pykeops.torch.LazyTensor', 'LazyTensor', (['head_features[None, :, :]'], {}), '(head_features[None, :, :])\n', (30731, 30758), False, 'from pykeops.torch import LazyTensor\n'), ((10265, 10293), 'torch.autograd.grad', 'torch.autograd.grad', (['Loss', 'z'], {}), '(Loss, z)\n', (10284, 10293), False, 'import torch\n'), ((11049, 11078), 'torch.autograd.grad', 'torch.autograd.grad', (['Loss', 'zz'], {}), '(Loss, zz)\n', (11068, 11078), False, 'import torch\n'), ((26253, 26278), 'torch.nn.Linear', 'nn.Linear', (['(3)', 'self.Hidden'], {}), '(3, self.Hidden)\n', (26262, 26278), True, 'import torch.nn as nn\n'), ((26280, 26289), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (26287, 26289), True, 'import torch.nn as nn\n'), ((26431, 26454), 'torch.nn.Linear', 'nn.Linear', (['(3)', 'self.Cuts'], {}), '(3, self.Cuts)\n', (26440, 26454), True, 'import torch.nn as nn\n'), ((26489, 26498), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (26496, 26498), True, 'import torch.nn as nn\n'), ((26557, 26590), 'torch.nn.Linear', 'nn.Linear', (['self.Cuts', 'self.Hidden'], {}), '(self.Cuts, self.Hidden)\n', (26566, 26590), True, 'import torch.nn as nn\n'), ((27754, 27788), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv[2].bias'], {}), '(self.conv[2].bias)\n', (27769, 27788), True, 'import torch.nn as nn\n'), ((29453, 29462), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (29457, 29462), False, 'from math import pi, sqrt\n'), ((31145, 31178), 'torch.cat', 'torch.cat', (['(A, B[:, None])'], {'dim': '(1)'}), '((A, B[:, None]), dim=1)\n', (31154, 31178), False, 'import torch\n'), ((3577, 3599), 'pykeops.torch.cluster.grid_cluster', 'grid_cluster', (['x', 'scale'], {}), '(x, scale)\n', (3589, 3599), False, 'from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids, from_matrix\n'), ((3865, 3890), 'torch.ones_like', 'torch.ones_like', (['x[:, :1]'], {}), '(x[:, :1])\n', (3880, 3890), False, 'import torch\n'), ((4573, 4589), 'torch.max', 'torch.max', (['batch'], {}), '(batch)\n', (4582, 4589), False, 'import torch\n'), ((9430, 9450), 'torch.randn', 'torch.randn', (['N', 'B', 'D'], {}), '(N, B, D)\n', (9441, 9450), False, 'import torch\n'), ((32439, 32452), 'pykeops.torch.LazyTensor', 'LazyTensor', (['(1)'], {}), '(1)\n', (32449, 32452), False, 'from pykeops.torch import LazyTensor\n'), ((27654, 27672), 'numpy.sqrt', 'np.sqrt', (['self.Cuts'], {}), '(self.Cuts)\n', (27661, 27672), True, 'import numpy as np\n'), ((27700, 27718), 'numpy.sqrt', 'np.sqrt', (['self.Cuts'], {}), '(self.Cuts)\n', (27707, 27718), True, 'import numpy as np\n')]
|
from random import randint, seed
import numpy as np
from os import path, mkdir
from maze_utils import generate_grid
seed_number = 69
training_folder = "training"
testing_folder = "testing"
tot_elem_training = 100  # number of matrices to generate for training
tot_elem_testing = 20  # number of matrices to generate for testing
max_w = 10  # maximum width
max_h = 10  # maximum height
min_w = 3  # minimum width
min_h = 3  # minimum height
def generate_dataset():
"""
    Generate the training and testing datasets by creating random matrices
    of maximum size 10x10, minimum size 3x3, each with at least 1 wall.
:return:
"""
    # set the seed
np.random.seed(seed_number)
seed(seed_number)
generate_training(tot_elem_training)
generate_testing(tot_elem_testing)
def generate_testing(dim: int):
"""
    Generate the testing dataset.
    If the folder does not exist, it is created and populated with random matrices.
    :param dim: number of matrices to create
:return:
"""
    # create the folder if it does not exist
if not path.exists(testing_folder):
mkdir(testing_folder)
for elem in range(dim):
file_name = f"{testing_folder}/matrice_{elem}"
        # random choice of w, h and walls
w = randint(min_w, max_w)
h = randint(min_h, max_h)
walls = randint(1, int(w * h / 2) - 1)
grid = generate_grid(w, h, walls=walls)
np.savetxt(file_name, grid, delimiter=" ", fmt='%i')
def generate_training(dim: int):
"""
    Generate the training dataset.
    If the folder does not exist, it is created and populated with random matrices.
    :param dim: number of matrices to create
:return:
"""
    # create the folder if it does not exist
if not path.exists(training_folder):
mkdir(training_folder)
for elem in range(dim):
        file_name = f"{training_folder}/matrice_{elem}"
        # random choice of w, h and walls
w = randint(min_w, max_w)
h = randint(min_h, max_h)
walls = randint(1, int(w * h / 2) - 1)
grid = generate_grid(w, h, walls=walls)
np.savetxt(file_name, grid, delimiter=" ", fmt='%i')
if __name__ == "__main__":
generate_dataset()
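# Hedged usage note (added): each saved file is a whitespace-separated integer
# grid, so a single matrix can be read back with, e.g.:
#
#   grid = np.loadtxt("training/matrice_0", dtype=int)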
|
[
"os.path.exists",
"maze_utils.generate_grid",
"random.seed",
"os.mkdir",
"numpy.random.seed",
"numpy.savetxt",
"random.randint"
] |
[((707, 734), 'numpy.random.seed', 'np.random.seed', (['seed_number'], {}), '(seed_number)\n', (721, 734), True, 'import numpy as np\n'), ((739, 756), 'random.seed', 'seed', (['seed_number'], {}), '(seed_number)\n', (743, 756), False, 'from random import randint, seed\n'), ((1101, 1128), 'os.path.exists', 'path.exists', (['testing_folder'], {}), '(testing_folder)\n', (1112, 1128), False, 'from os import path, mkdir\n'), ((1138, 1159), 'os.mkdir', 'mkdir', (['testing_folder'], {}), '(testing_folder)\n', (1143, 1159), False, 'from os import path, mkdir\n'), ((1297, 1318), 'random.randint', 'randint', (['min_w', 'max_w'], {}), '(min_w, max_w)\n', (1304, 1318), False, 'from random import randint, seed\n'), ((1331, 1352), 'random.randint', 'randint', (['min_h', 'max_h'], {}), '(min_h, max_h)\n', (1338, 1352), False, 'from random import randint, seed\n'), ((1416, 1448), 'maze_utils.generate_grid', 'generate_grid', (['w', 'h'], {'walls': 'walls'}), '(w, h, walls=walls)\n', (1429, 1448), False, 'from maze_utils import generate_grid\n'), ((1458, 1510), 'numpy.savetxt', 'np.savetxt', (['file_name', 'grid'], {'delimiter': '""" """', 'fmt': '"""%i"""'}), "(file_name, grid, delimiter=' ', fmt='%i')\n", (1468, 1510), True, 'import numpy as np\n'), ((1777, 1805), 'os.path.exists', 'path.exists', (['training_folder'], {}), '(training_folder)\n', (1788, 1805), False, 'from os import path, mkdir\n'), ((1815, 1837), 'os.mkdir', 'mkdir', (['training_folder'], {}), '(training_folder)\n', (1820, 1837), False, 'from os import path, mkdir\n'), ((1977, 1998), 'random.randint', 'randint', (['min_w', 'max_w'], {}), '(min_w, max_w)\n', (1984, 1998), False, 'from random import randint, seed\n'), ((2011, 2032), 'random.randint', 'randint', (['min_h', 'max_h'], {}), '(min_h, max_h)\n', (2018, 2032), False, 'from random import randint, seed\n'), ((2096, 2128), 'maze_utils.generate_grid', 'generate_grid', (['w', 'h'], {'walls': 'walls'}), '(w, h, walls=walls)\n', (2109, 2128), False, 'from maze_utils import generate_grid\n'), ((2138, 2190), 'numpy.savetxt', 'np.savetxt', (['file_name', 'grid'], {'delimiter': '""" """', 'fmt': '"""%i"""'}), "(file_name, grid, delimiter=' ', fmt='%i')\n", (2148, 2190), True, 'import numpy as np\n')]
|
# coding: utf-8
# $ \newcommand{\cat}[2][\phantom{i}]{\ket{C^{#2}_{#1\alpha}}} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\bra}[1]{\langle#1|} $
# $ \newcommand{\braket}[2]{\langle#1|#2\rangle} $
# $\newcommand{\au}{\hat{a}^\dagger}$
# $\newcommand{\ad}{\hat{a}}$
# $\newcommand{\bu}{\hat{b}^\dagger}$
# $\newcommand{\bd}{\hat{b}}$
# # Cat Code Preparation with Optimal Control
# <sup><NAME></sup>
#
# ## Goal
# Obtain a set of pulses which will encode the quantum information of a qubit with "cat codes" (and vice versa).
#
# <sub><NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, ‘Extending the lifetime of a quantum bit with error correction in superconducting circuits’, Nature; London, vol. 536, no. 7617, pp. 441–445, Aug. 2016.</sub>
# # Outline
# * Why cat codes?
# * Optimal control (GRAPE)
# * Using optimal control to generate cat codes
# * My work so far
# # Why use cat codes for error correction?
# The cat code is comprised of the logical basis:
# 
# <p style="text-align: center;">Notation: $ \ket{0}_L = \cat{\pm},\,\, \ket{1}_L = \cat[i]{\pm} $ </p>
# $ \ket{\psi} = c_0 \ket{C_\alpha^\pm} + c_1 \ket{C_{i\alpha}^\pm} $
# 
# ## Crash course in Optimal control (GRAPE)
# 
# We (usually) optimise for fidelity $\newcommand{\tr}[0]{\operatorname{tr}} f_{PSU} = \tfrac{1}{d} \big| \tr \{X_{targ}^{\dagger} X(T)\} \big| $
# # Optimal control for cat codes
# Jaynes-Cummings (dispersive)
# $$ \hat{H} = \omega_s\au\ad \,+ (\omega_a - \chi_{sa}\au\ad)\bu\bd $$
# $$-\, \frac{K_s}{2}\au{}^2\ad{}^2 \,-\, \frac{K_a}{2}\bu{}^2\bd{}^2 $$
# $$+\, \underbrace{\epsilon_a(t)\bu + \epsilon_a^*(t)\bd}_{\text{Qubit drive}} \,+\, \underbrace{\epsilon_s(t)\au + \epsilon_s^*(t)\ad}_{\text{Res drive}} $$
#
# $$ \bu\bd = \ket{e}\bra{e} = \sigma_-\sigma_+ $$
# 
# * Use optimisation to find the pulse envelope which will realise the unitary $ \hat{U}_t \underbrace{(c_0\ket{g} + c_1\ket{e})}_{\text{ancilla}}\underbrace{\ket{0}}_{\text{res}} = \underbrace{\ket{g}}_{\text{ancilla}} \underbrace{(c_0\cat{+} + c_1\cat[i]{+})}_{\text{res}} $
# * Practically this means we want to optimise for $K$ state transfers at the same time $ F_{oc} = \frac{1}{K^2} | \sum_k^K \braket{\psi_k(T)}{\psi_k^{\text{tar}}} |^2 $ where we encode many points on the Bloch sphere in the cat code basis.
# In[7]:
from numpy import sqrt
π = 3.1415926
ω_r = 8.3056 * 2 * π # resonator frequency
ω_q = 6.2815 * 2 * π # qubit frequency
K_q = -2*π*297e-3 # Kerr qubit 200-300 MHz
K_r = 2*π*4.5e-6 # Kerr res 1-10 Khz
ω_ef = ω_q + K_q
ω_gf = ω_q + K_q/2
χ = 25e-3 * 2 * π # parameter in the dispersive hamiltonian
Δ = abs(ω_r - ω_q) # detuning
g = sqrt(Δ * χ) # coupling strength that is consistent with chi
print(g)
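# Hedged sketch (added; not from the original notebook): the drift part of the
# dispersive Hamiltonian written above could be assembled with QuTiP roughly as
# follows. The truncation dimensions N_res and N_q are illustrative choices.
#
#   import qutip as qt
#   N_res, N_q = 20, 2
#   a = qt.tensor(qt.destroy(N_res), qt.qeye(N_q))    # resonator (storage) mode
#   b = qt.tensor(qt.qeye(N_res), qt.destroy(N_q))    # ancilla/qubit mode
#   H0 = (ω_r * a.dag() * a
#         + (ω_q - χ * a.dag() * a) * b.dag() * b
#         - K_r / 2 * a.dag() ** 2 * a ** 2
#         - K_q / 2 * b.dag() ** 2 * b ** 2)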
# 
# 
# 
# ### My work so far
# * Use the pulse optimisation tool in `QuTiP` (quantum simulation toolbox in Python), or other framework
# * Project status - more difficult than expected
# * Even for the simple things, e.g. bit flip pulse, there are problems with convergence and numerical errors
# * Custom constraints on the pulses aren't implemented yet (nor general optimization goals) in QuTiP
# * I will try `Krotov`, another python toolbox which uses the Krotov method instead of GRAPE
# * Goal of the thesis is to realise this method and then eventually evaluate possible extensions:
# * Other bosonic codes besides "2 lobe"-cat codes
# * Optimise the coefficients of Fock states (theoretical curiosity)
# ## Thank you for listening! Any questions?
|
[
"numpy.sqrt"
] |
[((2879, 2890), 'numpy.sqrt', 'sqrt', (['(Δ * χ)'], {}), '(Δ * χ)\n', (2883, 2890), False, 'from numpy import sqrt\n')]
|
import numpy as np
class NumpyDynamic:
def __init__(self, dtype, array_size=(100,)):
self.data = np.zeros(array_size, dtype)
self.array_size = list(array_size)
self.size = 0
def add(self, x):
if self.size == self.array_size[0]:
self.array_size[0] *= 2
newdata = np.zeros(self.array_size, self.data.dtype)
newdata[:self.size] = self.data
self.data = newdata
self.data[self.size] = x
self.size += 1
def finalize(self):
return self.data[:self.size]
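# Hedged usage sketch (added for illustration):
#
#   acc = NumpyDynamic(np.float64)
#   for x in range(250):
#       acc.add(x)                   # the backing array doubles when it fills up
#   values = acc.finalize()          # view of the first 250 entries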
|
[
"numpy.zeros"
] |
[((112, 139), 'numpy.zeros', 'np.zeros', (['array_size', 'dtype'], {}), '(array_size, dtype)\n', (120, 139), True, 'import numpy as np\n'), ((330, 372), 'numpy.zeros', 'np.zeros', (['self.array_size', 'self.data.dtype'], {}), '(self.array_size, self.data.dtype)\n', (338, 372), True, 'import numpy as np\n')]
|
import numpy as np
from abc import ABCMeta, abstractmethod
class Node(object):
"""Represents state in MCTS search tree.
Args:
state (object): The environment state corresponding to this node in the search tree.
Note:
Node object is immutable. Node is left without exit edges (empty dict) when it's terminal.
"""
def __init__(self, state):
self._state = state
self._edges = None
@property
def state(self):
"""object: The environment state corresponding to this node in the search tree."""
return self._state
@property
def edges(self):
"""list of Edges: Mapping from this node's possible actions to corresponding edges."""
return self._edges
def expand(self, edges):
"""Initialize Node object with edges.
Args:
edges (dict of Edges): Mapping from this node's possible actions to corresponding edges.
"""
self._edges = edges
def select_edge(self, c=1.):
"""Choose next action (edge) according to UCB formula.
Args:
c (float): The parameter c >= 0 controls the trade-off between choosing lucrative nodes
(low c) and exploring nodes with low visit counts (high c). (Default: 1)
Returns:
int: Action chosen with UCB formula.
            Edge: Edge which represents the proper action chosen with UCB formula.
            or
            None: If it is a terminal node and has no exit edges.
"""
assert self.edges is not None, "This node hasn't been expanded yet!"
if len(self.edges) == 0:
return None
state_visits = 0
scores = {}
# Initialize every edge's score to its Q-value and count current state visits
for action, edge in self.edges.items():
state_visits += edge.num_visits
scores[(action, edge)] = edge.qvalue
# Add exploration term to every edge's score
for action, edge in self.edges.items():
scores[(action, edge)] += c * edge.prior * \
np.sqrt(state_visits) / (1 + edge.num_visits)
# Choose next action and edge with highest score
action_edge = max(scores, key=scores.get)
return action_edge
class Edge(object):
"""Represents state-actions pair in MCTS search tree.
Args:
prior (float): Action probability from prior policy. (Default: 1.)
"""
def __init__(self, prior=1.):
self._prior = prior
self._next_node = None
self._reward = 0
self._qvalue = 0
self._num_visits = 0
def expand(self, next_node, reward):
"""Explore this edge.
Args:
next_node (Node): Node that this edge points to.
reward (float): Reward of transition represented by this edge.
"""
self._next_node = next_node
self._reward = reward
def update(self, return_t):
"""Update edge with data from child.
Args:
return_t (float): (Un)discounted return from timestep 't' (this edge).
"""
self._num_visits += 1
        # This is the formula for iteratively calculating a running average
        # NOTE: You can check that the first arbitrary value is forgotten after the first update
self._qvalue += (return_t - self._qvalue) / self.num_visits
@property
def next_node(self):
"""next_node (Node): Node that this edge points to."""
return self._next_node
@property
def reward(self):
"""float: Reward of transition represented by this edge."""
return self._reward
@property
def qvalue(self):
"""float: Quality value of this edge state-action pair."""
return self._qvalue
@property
def prior(self):
"""float: Action probability from prior policy."""
return self._prior
@property
def num_visits(self):
"""int: Number of times this state-action pair was visited."""
return self._num_visits
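# Note (added): Node.select_edge scores each edge with the PUCT-style rule
#     score(a) = Q(s, a) + c * P(s, a) * sqrt(N(s)) / (1 + N(s, a)),
# where N(s) is the summed visit count over this node's edges. A minimal,
# illustrative expansion of a root node (the `priors` mapping is assumed):
#
#   root = Node(initial_state)
#   root.expand({a: Edge(prior=p) for a, p in priors.items()})
#   action, edge = root.select_edge(c=1.0)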
|
[
"numpy.sqrt"
] |
[((2113, 2134), 'numpy.sqrt', 'np.sqrt', (['state_visits'], {}), '(state_visits)\n', (2120, 2134), True, 'import numpy as np\n')]
|
import numpy as np
import time
import pytest
import jax.numpy as jnp
import jax.config as config
import torch
import tensorflow as tf
from tensornetwork.linalg import linalg
from tensornetwork import backends
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
#pylint: disable=no-member
config.update("jax_enable_x64", True)
np_real = [np.float32, np.float16, np.float64]
np_float = np_real + [np.complex64, np.complex128]
np_int = [np.int8, np.int16, np.int32, np.int64]
np_uint = [np.uint8, np.uint16, np.uint32, np.uint64]
np_dtypes = {"real": np_real, "float": np_float,
"rand": np_float,
"int": np_int + np_uint,
"all": np_real+ np_int + np_uint + [None, ]}
tf_real = [tf.float32, tf.float16, tf.float64]
tf_float = tf_real + [tf.complex64, tf.complex128]
tf_int = [tf.int8, tf.int16, tf.int32, tf.int64]
tf_uint = [tf.uint8, tf.uint16, tf.uint32, tf.uint64]
tf_dtypes = {"real": tf_real, "float": tf_float,
"rand": tf_real + [None, ],
"int": tf_int + tf_uint,
"all": tf_real + tf_int + tf_uint + [None, ]}
torch_float = [torch.float32, torch.float16, torch.float64]
torch_int = [torch.int8, torch.int16, torch.int32, torch.int64]
torch_uint = [torch.uint8]
torch_dtypes = {"real": torch_float, "float": torch_float,
"rand": [torch.float32, torch.float64, None],
"int": torch_int + torch_uint,
"all": torch_float + torch_int + torch_uint + [None, ]}
dtypes = {"pytorch": torch_dtypes,
"jax": np_dtypes, "numpy": np_dtypes, "tensorflow": tf_dtypes}
def test_eye(backend):
"""
Tests linalg.eye against np.eye.
"""
N = 4
M = 6
name = "Jeffrey"
axis_names = ["Sam", "Blinkey"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = linalg.eye(N, dtype=dtype, M=M, name=name, axis_names=axis_names,
backend=backend)
npI = backend_obj.eye(N, dtype=dtype, M=M)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_zeros(backend):
"""
Tests linalg.zeros against np.zeros.
"""
shape = (5, 10, 3)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = linalg.zeros(shape, dtype=dtype, name=name, axis_names=axis_names,
backend=backend)
npI = backend_obj.zeros(shape, dtype=dtype)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_ones(backend):
"""
Tests linalg.ones against np.ones.
"""
shape = (5, 10, 3)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = linalg.ones(shape, dtype=dtype, name=name, axis_names=axis_names,
backend=backend)
npI = backend_obj.ones(shape, dtype=dtype)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_randn(backend):
"""
Tests linalg.randn against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = linalg.randn(shape, dtype=dtype, name=name, axis_names=axis_names,
backend=backend, seed=seed)
npI = backend_obj.randn(shape, dtype=dtype, seed=seed)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
def test_random_uniform(backend):
"""
  Tests linalg.random_uniform against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
boundaries = (-0.3, 10.5)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = linalg.random_uniform(shape, dtype=dtype, name=name,
axis_names=axis_names, backend=backend,
seed=seed, boundaries=boundaries)
npI = backend_obj.random_uniform(shape, dtype=dtype, seed=seed,
boundaries=boundaries)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend
|
[
"jax.config.update",
"tensornetwork.linalg.linalg.zeros",
"tensornetwork.linalg.linalg.randn",
"numpy.testing.assert_allclose",
"tensornetwork.backends.backend_factory.get_backend",
"tensornetwork.linalg.linalg.eye",
"numpy.random.seed",
"tensornetwork.linalg.linalg.ones",
"time.time",
"tensornetwork.linalg.linalg.random_uniform"
] |
[((342, 379), 'jax.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (355, 379), True, 'import jax.config as config\n'), ((1806, 1851), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (1842, 1851), False, 'from tensornetwork import backends\n'), ((2476, 2521), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (2512, 2521), False, 'from tensornetwork import backends\n'), ((3147, 3192), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (3183, 3192), False, 'from tensornetwork import backends\n'), ((3777, 3802), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (3791, 3802), True, 'import numpy as np\n'), ((3893, 3938), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (3929, 3938), False, 'from tensornetwork import backends\n'), ((4548, 4573), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (4562, 4573), True, 'import numpy as np\n'), ((4692, 4737), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (4728, 4737), False, 'from tensornetwork import backends\n'), ((1901, 1988), 'tensornetwork.linalg.linalg.eye', 'linalg.eye', (['N'], {'dtype': 'dtype', 'M': 'M', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend'}), '(N, dtype=dtype, M=M, name=name, axis_names=axis_names, backend=\n backend)\n', (1911, 1988), False, 'from tensornetwork.linalg import linalg\n'), ((2056, 2099), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (2082, 2099), True, 'import numpy as np\n'), ((2571, 2659), 'tensornetwork.linalg.linalg.zeros', 'linalg.zeros', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend'}), '(shape, dtype=dtype, name=name, axis_names=axis_names, backend=\n backend)\n', (2583, 2659), False, 'from tensornetwork.linalg import linalg\n'), ((2730, 2773), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (2756, 2773), True, 'import numpy as np\n'), ((3242, 3329), 'tensornetwork.linalg.linalg.ones', 'linalg.ones', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend'}), '(shape, dtype=dtype, name=name, axis_names=axis_names, backend=\n backend)\n', (3253, 3329), False, 'from tensornetwork.linalg import linalg\n'), ((3398, 3441), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (3424, 3441), True, 'import numpy as np\n'), ((3762, 3773), 'time.time', 'time.time', ([], {}), '()\n', (3771, 3773), False, 'import time\n'), ((3989, 4088), 'tensornetwork.linalg.linalg.randn', 'linalg.randn', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend', 'seed': 'seed'}), '(shape, dtype=dtype, name=name, axis_names=axis_names, backend=\n backend, seed=seed)\n', (4001, 4088), False, 'from tensornetwork.linalg import linalg\n'), ((4170, 4213), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (4196, 4213), True, 'import numpy as np\n'), ((4533, 4544), 'time.time', 'time.time', ([], {}), '()\n', (4542, 4544), False, 'import time\n'), ((4788, 4918), 'tensornetwork.linalg.linalg.random_uniform', 'linalg.random_uniform', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend', 'seed': 'seed', 'boundaries': 'boundaries'}), '(shape, dtype=dtype, name=name, axis_names=axis_names,\n backend=backend, seed=seed, boundaries=boundaries)\n', (4809, 4918), False, 'from tensornetwork.linalg import linalg\n'), ((5111, 5154), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (5137, 5154), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import librosa
import os
import sys
import time
from datetime import datetime
from pathlib import Path
from src.python.audio_transforms import *
from src.python.model_predict import *
from src.python.graphics import plot_graph
# Hardcoding a few variables
max_chroma_sample = 6145
max_spectrogram_sample = 6145
model_classes = [(0, 'artifact'), (1, 'extra'), (2, 'murmur'), (3, 'normal')]
# Directories
DIR_ROOT = Path().resolve()
DIR_PARENT = Path().resolve().parent
def import_wav(filepath):
'''
    Takes a filepath and returns the trimmed
    amplitude array (x) and sample rate (sr)
'''
try:
x, sr = librosa.load(filepath)
x, _ = librosa.effects.trim(x)
except FileNotFoundError:
        raise FileNotFoundError(f'could not find a file at {filepath}')
return x, sr
# ----------------------------------
# MAIN FUNCTION --------------------
# ----------------------------------
def main(wav_path,
max_chroma_sample,
max_spect_sample,
dt_string):
audio_results = {}
base_path = Path(DIR_ROOT, 'demo_files', 'results')
# 0. SAVE RECORD SOMEWHERE
## Placeholder for now
# 1. Open wav file with Librosa
x, sr = import_wav(wav_path)
# 2. Spectogram
audio_results['spectogram'] = amp_to_db(
freq_array = stft_transform(amp_array = x),
sr = sr,
ref = np.max
)
# 3. MFCC
audio_results['mfcc'] = mfcc_spectogram(
amp_array = x,
sr = sr
)
# 4. Chromagram
audio_results['chromagram'] = chromagram(
amp_array = x,
sr = sr
)
# 5. Create Images (User)
for key, value in audio_results.items():
plot_graph(
audio_array = value,
viz_type = key,
out_file = Path(base_path, 'user_images', "_".join([dt_string, key]) + '.png'),
user = True,
dpi = 150
)
# 6. Pad Images
for key, value in audio_results.items():
        audio_results[key] = pad_along_axis(value, max_spect_sample)
    # 7. Create Images (Model)
img_path = {}
for key, value in audio_results.items():
file_path = Path(base_path, 'model_images', "_".join([key, dt_string]) + '.png')
plot_graph(
audio_array = value,
viz_type = key,
out_file = file_path,
user = False,
dpi = 200
)
img_path[key] = str(file_path)
# Return all 3 images to be pushed to model for predictions
return img_path
if __name__ == '__main__':
wav_path = sys.argv[1]
if not Path(wav_path).is_file():
raise FileNotFoundError()
dt_string = str(round(datetime.now().timestamp()))
hb_images = main(
wav_path,
max_chroma_sample,
max_spectrogram_sample,
dt_string
)
results = []
for key, value in hb_images.items():
output, predict = predict_heartbeat(key, value, DIR_ROOT)
results.append(output.detach().numpy()[0])
results = np.array(results)
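    # Average the class scores across the three image types and pick the most likely class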
index = results.mean(axis=0).argmax()
hb_predict = model_classes[index][1].title()
if hb_predict.lower() == 'artifact':
m = "Too much backgound noise. Try again!"
else:
m = f"Your heartbeat is....... {hb_predict}"
print(m)
|
[
"pathlib.Path",
"src.python.graphics.plot_graph",
"numpy.array",
"datetime.datetime.now",
"librosa.effects.trim",
"librosa.load"
] |
[((1090, 1129), 'pathlib.Path', 'Path', (['DIR_ROOT', '"""demo_files"""', '"""results"""'], {}), "(DIR_ROOT, 'demo_files', 'results')\n", (1094, 1129), False, 'from pathlib import Path\n'), ((3100, 3117), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (3108, 3117), True, 'import numpy as np\n'), ((481, 487), 'pathlib.Path', 'Path', ([], {}), '()\n', (485, 487), False, 'from pathlib import Path\n'), ((666, 688), 'librosa.load', 'librosa.load', (['filepath'], {}), '(filepath)\n', (678, 688), False, 'import librosa\n'), ((700, 723), 'librosa.effects.trim', 'librosa.effects.trim', (['x'], {}), '(x)\n', (720, 723), False, 'import librosa\n'), ((2307, 2395), 'src.python.graphics.plot_graph', 'plot_graph', ([], {'audio_array': 'value', 'viz_type': 'key', 'out_file': 'file_path', 'user': '(False)', 'dpi': '(200)'}), '(audio_array=value, viz_type=key, out_file=file_path, user=False,\n dpi=200)\n', (2317, 2395), False, 'from src.python.graphics import plot_graph\n'), ((511, 517), 'pathlib.Path', 'Path', ([], {}), '()\n', (515, 517), False, 'from pathlib import Path\n'), ((2664, 2678), 'pathlib.Path', 'Path', (['wav_path'], {}), '(wav_path)\n', (2668, 2678), False, 'from pathlib import Path\n'), ((2751, 2765), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2763, 2765), False, 'from datetime import datetime\n')]
|
import unittest
import numpy as np
from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, \
get_time_series_for_geometry, get_time_series_for_geometry_collection
from ..helpers import new_test_service_context
class TimeSeriesControllerTest(unittest.TestCase):
def test_get_time_series_for_point_invalid_lat_and_lon(self):
ctx = new_test_service_context()
time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
lon=-150.0, lat=-30.0)
expected_dict = {'results': []}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_point(self):
ctx = new_test_service_context()
time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
lon=2.1, lat=51.4,
start_date=np.datetime64('2017-01-15'),
end_date=np.datetime64('2017-01-29'))
expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
'result': {'average': 3.534773588180542,
'totalCount': 1,
'validCount': 1}},
{'date': '2017-01-25T09:35:51Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-26T10:50:17Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-28T09:58:11Z',
'result': {'average': 20.12085723876953,
'totalCount': 1,
'validCount': 1}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_point_one_valid(self):
ctx = new_test_service_context()
time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
lon=2.1, lat=51.4,
start_date=np.datetime64('2017-01-15'),
end_date=np.datetime64('2017-01-29'),
max_valids=1)
expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
'result': {'average': 3.534773588180542,
'totalCount': 1,
'validCount': 1}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_point_only_valids(self):
ctx = new_test_service_context()
time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
lon=2.1, lat=51.4,
start_date=np.datetime64('2017-01-15'),
end_date=np.datetime64('2017-01-29'),
max_valids=-1)
expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
'result': {'average': 3.534773588180542,
'totalCount': 1,
'validCount': 1}},
{'date': '2017-01-28T09:58:11Z',
'result': {'average': 20.12085723876953,
'totalCount': 1,
'validCount': 1}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_point_with_uncertainty(self):
ctx = new_test_service_context()
time_series = get_time_series_for_point(ctx, 'demo-1w', 'conc_tsm',
lon=2.1, lat=51.4,
start_date=np.datetime64('2017-01-15'),
end_date=np.datetime64('2017-01-29'))
expected_dict = {'results': [{'date': '2017-01-22T00:00:00Z',
'result': {'average': 3.534773588180542,
'uncertainty': 0.0,
'totalCount': 1,
'validCount': 1}},
{'date': '2017-01-29T00:00:00Z',
'result': {'average': 20.12085723876953,
'uncertainty': 0.0,
'totalCount': 1,
'validCount': 1}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_geometry_point(self):
ctx = new_test_service_context()
time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
dict(type="Point", coordinates=[2.1, 51.4]),
start_date=np.datetime64('2017-01-15'),
end_date=np.datetime64('2017-01-29'))
expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
'result': {'average': 3.534773588180542,
'totalCount': 1,
'validCount': 1}},
{'date': '2017-01-25T09:35:51Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-26T10:50:17Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-28T09:58:11Z',
'result': {'average': 20.12085723876953,
'totalCount': 1,
'validCount': 1}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_geometry_polygon(self):
ctx = new_test_service_context()
time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
dict(type="Polygon", coordinates=[[
[1., 51.], [2., 51.], [2., 52.], [1., 52.], [1., 51.]
]]))
expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
'result': {'average': 56.0228561816751,
'totalCount': 1,
'validCount': 122738}},
{'date': '2017-01-25T09:35:51Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-26T10:50:17Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-28T09:58:11Z',
'result': {'average': 49.71656646340396,
'totalCount': 1,
'validCount': 132716}},
{'date': '2017-01-30T10:46:34Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_geometry_polygon_one_valid(self):
ctx = new_test_service_context()
time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
dict(type="Polygon", coordinates=[[
[1., 51.], [2., 51.], [2., 52.], [1., 52.], [1., 51.]
]]), max_valids=1)
expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
'result': {'average': 56.0228561816751,
'totalCount': 1,
'validCount': 122738}}]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_geometries_incl_point(self):
ctx = new_test_service_context()
time_series = get_time_series_for_geometry_collection(ctx,
'demo', 'conc_tsm',
dict(type="GeometryCollection",
geometries=[
dict(type="Point", coordinates=[2.1, 51.4])]),
start_date=np.datetime64('2017-01-15'),
end_date=np.datetime64('2017-01-29'))
expected_dict = {'results': [[{'date': '2017-01-16T10:09:22Z',
'result': {'average': 3.534773588180542,
'totalCount': 1,
'validCount': 1}},
{'date': '2017-01-25T09:35:51Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-26T10:50:17Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-28T09:58:11Z',
'result': {'average': 20.12085723876953,
'totalCount': 1,
'validCount': 1}}]]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_for_geometries_incl_polygon(self):
ctx = new_test_service_context()
time_series = get_time_series_for_geometry_collection(ctx,
'demo', 'conc_tsm',
dict(type="GeometryCollection",
geometries=[dict(type="Polygon", coordinates=[[
[1., 51.], [2., 51.], [2., 52.], [1., 52.],
[1., 51.]
]])]))
expected_dict = {'results': [[{'date': '2017-01-16T10:09:22Z',
'result': {'average': 56.0228561816751,
'totalCount': 1,
'validCount': 122738}},
{'date': '2017-01-25T09:35:51Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-26T10:50:17Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
{'date': '2017-01-28T09:58:11Z',
'result': {'average': 49.71656646340396,
'totalCount': 1,
'validCount': 132716}},
{'date': '2017-01-30T10:46:34Z',
'result': {'average': None, 'totalCount': 1, 'validCount': 0}}]]}
self.assertEqual(expected_dict, time_series)
def test_get_time_series_info(self):
self.maxDiff = None
ctx = new_test_service_context()
info = get_time_series_info(ctx)
expected_dict = self._get_expected_info_dict()
self.assertEqual(expected_dict, info)
@staticmethod
def _get_expected_info_dict():
expected_dict = {'layers': []}
bounds = {'xmin': 0.0, 'ymin': 50.0,
'xmax': 5.0, 'ymax': 52.5}
demo_times = ['2017-01-16T10:09:22Z',
'2017-01-25T09:35:51Z',
'2017-01-26T10:50:17Z',
'2017-01-28T09:58:11Z',
'2017-01-30T10:46:34Z']
demo_variables = ['c2rcc_flags',
'conc_chl',
'conc_tsm',
'kd489',
'quality_flags']
for demo_variable in demo_variables:
dict_variable = {'name': f'demo.{demo_variable}', 'dates': demo_times, 'bounds': bounds}
expected_dict['layers'].append(dict_variable)
demo1w_times = ['2017-01-22T00:00:00Z', '2017-01-29T00:00:00Z', '2017-02-05T00:00:00Z']
for demo_variable in demo_variables:
dict_variable = {'name': f'demo-1w.{demo_variable}', 'dates': demo1w_times, 'bounds': bounds}
expected_dict['layers'].append(dict_variable)
dict_variable = {'name': f'demo-1w.{demo_variable}_stdev', 'dates': demo1w_times, 'bounds': bounds}
expected_dict['layers'].append(dict_variable)
return expected_dict
|
[
"xcube.webapi.controllers.time_series.get_time_series_for_point",
"xcube.webapi.controllers.time_series.get_time_series_info",
"numpy.datetime64"
] |
[((441, 514), 'xcube.webapi.controllers.time_series.get_time_series_for_point', 'get_time_series_for_point', (['ctx', '"""demo"""', '"""conc_tsm"""'], {'lon': '(-150.0)', 'lat': '(-30.0)'}), "(ctx, 'demo', 'conc_tsm', lon=-150.0, lat=-30.0)\n", (466, 514), False, 'from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, get_time_series_for_geometry, get_time_series_for_geometry_collection\n'), ((12635, 12660), 'xcube.webapi.controllers.time_series.get_time_series_info', 'get_time_series_info', (['ctx'], {}), '(ctx)\n', (12655, 12660), False, 'from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, get_time_series_for_geometry, get_time_series_for_geometry_collection\n'), ((943, 970), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (956, 970), True, 'import numpy as np\n'), ((1029, 1056), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (1042, 1056), True, 'import numpy as np\n'), ((2319, 2346), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (2332, 2346), True, 'import numpy as np\n'), ((2405, 2432), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (2418, 2432), True, 'import numpy as np\n'), ((3132, 3159), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (3145, 3159), True, 'import numpy as np\n'), ((3218, 3245), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (3231, 3245), True, 'import numpy as np\n'), ((4237, 4264), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (4250, 4264), True, 'import numpy as np\n'), ((4323, 4350), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (4336, 4350), True, 'import numpy as np\n'), ((5441, 5468), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (5454, 5468), True, 'import numpy as np\n'), ((5530, 5557), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (5543, 5557), True, 'import numpy as np\n'), ((9507, 9534), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (9520, 9534), True, 'import numpy as np\n'), ((9607, 9634), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (9620, 9634), True, 'import numpy as np\n')]
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 30.07.2019
# Last Modified Date: 16.10.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# Distributed under MIT license
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 07/30/2019
#
# This file is part of eval-clevr-instance-retrieval.
# Distributed under terms of the MIT license.
import six
import functools
import sys
from IPython.core import ultratb
import numpy as np
import jacinle.io as io
import jacinle.random as random
from jacinle.cli.argument import JacArgumentParser
from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm
from jacinle.utils.meter import GroupMeters
sys.excepthook = ultratb.FormattedTB(
mode='Plain', color_scheme='Linux', call_pdb=True)
parser = JacArgumentParser()
parser.add_argument('--scene-json', required=True, type='checked_file')
parser.add_argument('--preds-json', required=True, type='checked_file')
args = parser.parse_args()
class Definition(object):
annotation_attribute_names = ['color', 'material', 'shape', 'size']
annotation_relation_names = ['behind', 'front', 'left', 'right']
concepts = {
'color': ['gray', 'red', 'blue', 'green', 'brown', 'purple', 'cyan', 'yellow'],
'material': ['rubber', 'metal'],
'shape': ['cube', 'sphere', 'cylinder'],
'size': ['small', 'large']
}
concept2attribute = {
v: k for k, vs in concepts.items() for v in vs
}
relational_concepts = {
'spatial_relation': ['left', 'right', 'front', 'behind']
}
synonyms = {
"thing": ["thing", "object"],
"sphere": ["sphere", "ball"],
"cube": ["cube", "block"],
"cylinder": ["cylinder"],
"large": ["large", "big"],
"small": ["small", "tiny"],
"metal": ["metallic", "metal", "shiny"],
"rubber": ["rubber", "matte"],
}
word2lemma = {
v: k for k, vs in synonyms.items() for v in vs
}
def_ = Definition()
def get_desc(obj):
names = [obj[k] for k in def_.annotation_attribute_names]
for i, n in enumerate(names):
if n in def_.synonyms:
names[i] = random.choice_list(def_.synonyms[n])
return names
def run_desc_obj(obj, desc):
for d in desc:
dd = def_.word2lemma.get(d, d)
if dd != obj[def_.concept2attribute[dd]]:
return False
return True
def run_desc_pred(all_preds, desc):
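  # Score each object by the minimum of its predicted probabilities over the descriptors in desc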
s = 10000
for d in desc:
s = np.fmin(s, all_preds[d])
return s
def test(index, all_objs, all_preds, meter):
obj = all_objs[index]
nr_descriptors = random.randint(1, 3)
desc = random.choice_list(get_desc(obj), size=nr_descriptors)
if isinstance(desc, six.string_types):
desc = [desc]
filtered_objs = [i for i, o in enumerate(all_objs) if not run_desc_obj(o, desc)]
all_scores = run_desc_pred(all_preds, desc)
rank = (all_scores[filtered_objs] > all_scores[index]).sum()
# print(desc)
# print(all_scores)
# print(all_scores[index])
meter.update('r@01', rank <= 1)
meter.update('r@02', rank <= 2)
meter.update('r@03', rank <= 3)
meter.update('r@04', rank <= 4)
meter.update('r@05', rank <= 5)
def transpose_scene(scene):
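  # Convert a scene stored as {object_index: {key: value}} into {key: array over objects}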
ret = dict()
for k in scene['0']:
ret[k] = np.array([scene[str(o)][k] for o in range(len(scene))])
return ret
def main():
scenes = io.load_json(args.scene_json)['scenes']
preds = io.load(args.preds_json)
if isinstance(preds, dict):
preds = list(preds.values())
if False:
preds = [transpose_scene(s) for s in preds]
# flattened_objs = [o for s in scenes for o in s['objects']]
# flattened_preds = {
# k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
# for k in preds[0]
# }
meter = GroupMeters()
'''
for i, scene in tqdm_gofor(scenes, mininterval=0.5):
for j in range(len(scene['objects'])):
test(j, scene['objects'], preds[i], meter)
'''
for i, pred in tqdm_gofor(preds, mininterval=0.5):
scene = scenes[i]
for j in range(len(scene['objects'])):
test(j, scene['objects'], pred, meter)
print(meter.format_simple('Results:', compressed=False))
if __name__ == '__main__':
main()
|
[
"jacinle.io.load_json",
"IPython.core.ultratb.FormattedTB",
"jacinle.utils.meter.GroupMeters",
"jacinle.random.randint",
"jacinle.cli.argument.JacArgumentParser",
"jacinle.random.choice_list",
"jacinle.io.load",
"numpy.fmin",
"jacinle.utils.tqdm.tqdm_gofor"
] |
[((890, 960), 'IPython.core.ultratb.FormattedTB', 'ultratb.FormattedTB', ([], {'mode': '"""Plain"""', 'color_scheme': '"""Linux"""', 'call_pdb': '(True)'}), "(mode='Plain', color_scheme='Linux', call_pdb=True)\n", (909, 960), False, 'from IPython.core import ultratb\n'), ((976, 995), 'jacinle.cli.argument.JacArgumentParser', 'JacArgumentParser', ([], {}), '()\n', (993, 995), False, 'from jacinle.cli.argument import JacArgumentParser\n'), ((2809, 2829), 'jacinle.random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2823, 2829), True, 'import jacinle.random as random\n'), ((3654, 3678), 'jacinle.io.load', 'io.load', (['args.preds_json'], {}), '(args.preds_json)\n', (3661, 3678), True, 'import jacinle.io as io\n'), ((4023, 4036), 'jacinle.utils.meter.GroupMeters', 'GroupMeters', ([], {}), '()\n', (4034, 4036), False, 'from jacinle.utils.meter import GroupMeters\n'), ((4232, 4266), 'jacinle.utils.tqdm.tqdm_gofor', 'tqdm_gofor', (['preds'], {'mininterval': '(0.5)'}), '(preds, mininterval=0.5)\n', (4242, 4266), False, 'from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm\n'), ((2677, 2701), 'numpy.fmin', 'np.fmin', (['s', 'all_preds[d]'], {}), '(s, all_preds[d])\n', (2684, 2701), True, 'import numpy as np\n'), ((3602, 3631), 'jacinle.io.load_json', 'io.load_json', (['args.scene_json'], {}), '(args.scene_json)\n', (3614, 3631), True, 'import jacinle.io as io\n'), ((2360, 2396), 'jacinle.random.choice_list', 'random.choice_list', (['def_.synonyms[n]'], {}), '(def_.synonyms[n])\n', (2378, 2396), True, 'import jacinle.random as random\n')]
|
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score
from category_encoders import MEstimateEncoder
import numpy as np
from collections import defaultdict
import os
import shap
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
def fit_predict(modelo, enc, data, target, test):
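    """Fit an (encoder, model) pipeline on the training data and return predictions on test."""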
pipe = Pipeline([("encoder", enc), ("model", modelo)])
pipe.fit(data, target)
return pipe.predict(test)
def auc_group(model, data, y_true, dicc, group: str = "", min_samples: int = 50):
aux = data.copy()
aux["target"] = y_true
cats = aux[group].value_counts()
cats = cats[cats > min_samples].index.tolist()
cats = cats + ["all"]
if len(dicc) == 0:
dicc = defaultdict(list, {k: [] for k in cats})
for cat in cats:
if cat != "all":
aux2 = aux[aux[group] == cat]
preds = model.predict_proba(aux2.drop(columns="target"))[:, 1]
truth = aux2["target"]
dicc[cat].append(roc_auc_score(truth, preds))
elif cat == "all":
dicc[cat].append(roc_auc_score(y_true, model.predict_proba(data)[:, 1]))
else:
pass
return dicc
def explain(xgb: bool = True):
"""
Provide a SHAP explanation by fitting MEstimate and GBDT
"""
if xgb:
pipe = Pipeline(
[("encoder", MEstimateEncoder()), ("model", GradientBoostingClassifier())]
)
pipe.fit(X_tr, y_tr)
explainer = shap.Explainer(pipe[1])
shap_values = explainer(pipe[:-1].transform(X_tr))
shap.plots.beeswarm(shap_values)
return pd.DataFrame(np.abs(shap_values.values), columns=X_tr.columns).sum()
else:
pipe = Pipeline(
[("encoder", MEstimateEncoder()), ("model", LogisticRegression())]
)
pipe.fit(X_tr, y_tr)
coefficients = pd.concat(
[pd.DataFrame(X_tr.columns), pd.DataFrame(np.transpose(pipe[1].coef_))],
axis=1,
)
coefficients.columns = ["feat", "val"]
return coefficients.sort_values(by="val", ascending=False)
def calculate_cm(true, preds):
# Obtain the confusion matrix
cm = confusion_matrix(preds, true)
# https://stackoverflow.com/questions/31324218/scikit-learn-how-to-obtain-true-positive-true-negative-false-positive-and-fal
FP = cm.sum(axis=0) - np.diag(cm)
FN = cm.sum(axis=1) - np.diag(cm)
TP = np.diag(cm)
TN = cm.sum() - (FP + FN + TP)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP / (TP + FN)
# Specificity or true negative rate
TNR = TN / (TN + FP)
# Precision or positive predictive value
PPV = TP / (TP + FP)
# Negative predictive value
NPV = TN / (TN + FN)
# Fall out or false positive rate
FPR = FP / (FP + TN)
# False negative rate
FNR = FN / (TP + FN)
# False discovery rate
FDR = FP / (TP + FP)
# Overall accuracy
ACC = (TP + TN) / (TP + FP + FN + TN)
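    # Only the true positive rate of the first class is returned; the other metrics are computed for reference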
return TPR[0]
def metric_calculator(
modelo, data: pd.DataFrame, truth: pd.DataFrame, col: str, group1: str, group2: str
):
aux = data.copy()
aux["target"] = truth
# Filter the data
g1 = data[data[col] == group1]
g2 = data[data[col] == group2]
# Filter the ground truth
g1_true = aux[aux[col] == group1].target
g2_true = aux[aux[col] == group2].target
# Do predictions
p1 = modelo.predict(g1)
p2 = modelo.predict(g2)
# Extract metrics for each group
res1 = calculate_cm(p1, g1_true)
res2 = calculate_cm(p2, g2_true)
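    # Fairness gap: difference in true positive rate between group1 and group2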
return res1 - res2
def plot_rolling(data, roll_mean: int = 5, roll_std: int = 20):
aux = data.rolling(roll_mean).mean().dropna()
stand = data.rolling(roll_std).quantile(0.05, interpolation="lower").dropna()
plt.figure()
for col in data.columns:
plt.plot(aux[col], label=col)
# plt.fill_between(aux.index,(aux[col] - stand[col]),(aux[col] + stand[col]),# color="b",alpha=0.1,)
plt.legend()
plt.show()
def scale_output(data):
return pd.DataFrame(
StandardScaler().fit_transform(data), columns=data.columns, index=data.index
)
def psi(expected, actual, buckettype="bins", buckets=10, axis=0):
"""Calculate the PSI (population stability index) across all variables
Args:
expected: numpy matrix of original values
actual: numpy matrix of new values, same size as expected
buckettype: type of strategy for creating buckets, bins splits into even splits, quantiles splits into quantile buckets
buckets: number of quantiles to use in bucketing variables
axis: axis by which variables are defined, 0 for vertical, 1 for horizontal
Returns:
psi_values: ndarray of psi values for each variable
Author:
<NAME>
github.com/mwburke
worksofchart.com
"""
def _psi(expected_array, actual_array, buckets):
"""Calculate the PSI for a single variable
Args:
expected_array: numpy array of original values
actual_array: numpy array of new values, same size as expected
buckets: number of percentile ranges to bucket the values into
Returns:
psi_value: calculated PSI value
"""
def scale_range(input, min, max):
input += -(np.min(input))
input /= np.max(input) / (max - min)
input += min
return input
breakpoints = np.arange(0, buckets + 1) / (buckets) * 100
if buckettype == "bins":
breakpoints = scale_range(
breakpoints, np.min(expected_array), np.max(expected_array)
)
elif buckettype == "quantiles":
breakpoints = np.stack(
[np.percentile(expected_array, b) for b in breakpoints]
)
expected_percents = np.histogram(expected_array, breakpoints)[0] / len(
expected_array
)
actual_percents = np.histogram(actual_array, breakpoints)[0] / len(actual_array)
def sub_psi(e_perc, a_perc):
"""Calculate the actual PSI value from comparing the values.
Update the actual value to a very small number if equal to zero
"""
if a_perc == 0:
a_perc = 0.0001
if e_perc == 0:
e_perc = 0.0001
value = (e_perc - a_perc) * np.log(e_perc / a_perc)
return value
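        # PSI = sum over buckets of (expected% - actual%) * ln(expected% / actual%)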
psi_value = np.sum(
sub_psi(expected_percents[i], actual_percents[i])
for i in range(0, len(expected_percents))
)
return psi_value
if len(expected.shape) == 1:
psi_values = np.empty(len(expected.shape))
else:
psi_values = np.empty(expected.shape[axis])
for i in range(0, len(psi_values)):
if len(psi_values) == 1:
psi_values = _psi(expected, actual, buckets)
elif axis == 0:
psi_values[i] = _psi(expected[:, i], actual[:, i], buckets)
elif axis == 1:
psi_values[i] = _psi(expected[i, :], actual[i, :], buckets)
return psi_values
def loop_estimators(
estimator_set: list,
normal_data,
normal_data_ood,
shap_data,
shap_data_ood,
performance_ood,
target,
state: str,
error_type: str,
target_shift: bool = False,
output_path: str = "",
):
"""
Loop through the estimators and calculate the performance for each
"""
res = []
for estimator in estimator_set:
## ONLY DATA
X_train, X_test, y_train, y_test = train_test_split(
normal_data, target, test_size=0.33, random_state=42
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(normal_data_ood),
np.nan_to_num(list(performance_ood.values())),
)
res.append([state, error_type, estimator, "Only Data", error_te, error_ood])
if target_shift == False:
#### ONLY SHAP
X_train, X_test, y_train, y_test = train_test_split(
shap_data, target, test_size=0.33, random_state=42
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(
estimator_set[estimator].predict(X_test), y_test
)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(shap_data_ood),
np.nan_to_num(list(performance_ood.values())),
)
res.append([state, error_type, estimator, "Only Shap", error_te, error_ood])
### SHAP + DATA
X_train, X_test, y_train, y_test = train_test_split(
pd.concat([shap_data, normal_data], axis=1),
target,
test_size=0.33,
random_state=42,
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(
estimator_set[estimator].predict(X_test), y_test
)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(
pd.concat([shap_data_ood, normal_data_ood], axis=1)
),
np.nan_to_num(list(performance_ood.values())),
)
res.append(
[state, error_type, estimator, "Data + Shap", error_te, error_ood]
)
folder = os.path.join("results", state + "_" + error_type + ".csv")
columnas = ["state", "error_type", "estimator", "data", "error_te", "error_ood"]
pd.DataFrame(res, columns=columnas).to_csv(folder, index=False)
def loop_estimators_fairness(
estimator_set: list,
normal_data,
normal_data_ood,
target_shift,
target_shift_ood,
shap_data,
shap_data_ood,
performance_ood,
target,
state: str,
error_type: str,
output_path: str = "",
):
"""
Loop through the estimators and calculate the performance for each
Particular fairness case
"""
res = []
for estimator in estimator_set:
## ONLY DATA
X_train, X_test, y_train, y_test = train_test_split(
normal_data, target, test_size=0.33, random_state=42
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(normal_data_ood),
np.nan_to_num(performance_ood),
)
res.append([state, error_type, estimator, "Only Data", error_te, error_ood])
#### ONLY SHAP
X_train, X_test, y_train, y_test = train_test_split(
shap_data, target, test_size=0.33, random_state=42
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(shap_data_ood),
np.nan_to_num(performance_ood),
)
res.append([state, error_type, estimator, "Only Shap", error_te, error_ood])
#### ONLY TARGET
X_train, X_test, y_train, y_test = train_test_split(
target_shift, target, test_size=0.33, random_state=42
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(target_shift_ood),
np.nan_to_num(performance_ood),
)
res.append([state, error_type, estimator, "Only Target", error_te, error_ood])
#### TARGET + DISTRIBUTION
X_train, X_test, y_train, y_test = train_test_split(
pd.concat([target_shift, normal_data], axis=1),
target,
test_size=0.33,
random_state=42,
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(
pd.concat([target_shift_ood, normal_data_ood], axis=1)
),
np.nan_to_num(performance_ood),
)
res.append([state, error_type, estimator, "Data+Target", error_te, error_ood])
### SHAP + DATA
X_train, X_test, y_train, y_test = train_test_split(
pd.concat([shap_data, normal_data, target_shift], axis=1),
target,
test_size=0.33,
random_state=42,
)
estimator_set[estimator].fit(X_train, y_train)
error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
error_ood = mean_absolute_error(
estimator_set[estimator].predict(
pd.concat([shap_data_ood, normal_data_ood, target_shift_ood], axis=1)
),
np.nan_to_num(performance_ood),
)
res.append(
[state, error_type, estimator, "Data+Target+Shap", error_te, error_ood]
)
folder = os.path.join("results", state + "_" + error_type + ".csv")
columnas = ["state", "error_type", "estimator", "data", "error_te", "error_ood"]
pd.DataFrame(res, columns=columnas).to_csv(folder, index=False)
|
[
"numpy.log",
"sklearn.metrics.roc_auc_score",
"numpy.arange",
"numpy.histogram",
"numpy.max",
"numpy.empty",
"numpy.min",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"numpy.abs",
"sklearn.model_selection.train_test_split",
"sklearn.pipeline.Pipeline",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.transpose",
"category_encoders.MEstimateEncoder",
"os.path.join",
"numpy.diag",
"collections.defaultdict",
"numpy.percentile",
"pandas.concat",
"numpy.nan_to_num"
] |
[((492, 539), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('encoder', enc), ('model', modelo)]"], {}), "([('encoder', enc), ('model', modelo)])\n", (500, 539), False, 'from sklearn.pipeline import Pipeline\n'), ((2336, 2365), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['preds', 'true'], {}), '(preds, true)\n', (2352, 2365), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((2582, 2593), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2589, 2593), True, 'import numpy as np\n'), ((9750, 9808), 'os.path.join', 'os.path.join', (['"""results"""', "(state + '_' + error_type + '.csv')"], {}), "('results', state + '_' + error_type + '.csv')\n", (9762, 9808), False, 'import os\n'), ((13496, 13554), 'os.path.join', 'os.path.join', (['"""results"""', "(state + '_' + error_type + '.csv')"], {}), "('results', state + '_' + error_type + '.csv')\n", (13508, 13554), False, 'import os\n'), ((884, 924), 'collections.defaultdict', 'defaultdict', (['list', '{k: [] for k in cats}'], {}), '(list, {k: [] for k in cats})\n', (895, 924), False, 'from collections import defaultdict\n'), ((2523, 2534), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2530, 2534), True, 'import numpy as np\n'), ((2561, 2572), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2568, 2572), True, 'import numpy as np\n'), ((6920, 6950), 'numpy.empty', 'np.empty', (['expected.shape[axis]'], {}), '(expected.shape[axis])\n', (6928, 6950), True, 'import numpy as np\n'), ((7750, 7820), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(normal_data, target, test_size=0.33, random_state=42)\n', (7766, 7820), False, 'from sklearn.model_selection import train_test_split\n'), ((10461, 10531), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(normal_data, target, test_size=0.33, random_state=42)\n', (10477, 10531), False, 'from sklearn.model_selection import train_test_split\n'), ((11009, 11077), 'sklearn.model_selection.train_test_split', 'train_test_split', (['shap_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(shap_data, target, test_size=0.33, random_state=42)\n', (11025, 11077), False, 'from sklearn.model_selection import train_test_split\n'), ((11553, 11624), 'sklearn.model_selection.train_test_split', 'train_test_split', (['target_shift', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(target_shift, target, test_size=0.33, random_state=42)\n', (11569, 11624), False, 'from sklearn.model_selection import train_test_split\n'), ((8354, 8422), 'sklearn.model_selection.train_test_split', 'train_test_split', (['shap_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(shap_data, target, test_size=0.33, random_state=42)\n', (8370, 8422), False, 'from sklearn.model_selection import train_test_split\n'), ((9898, 9933), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': 'columnas'}), '(res, columns=columnas)\n', (9910, 9933), True, 'import pandas as pd\n'), ((10814, 10844), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (10827, 10844), True, 'import numpy as np\n'), ((11358, 11388), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (11371, 11388), True, 'import numpy as np\n'), ((11908, 11938), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (11921, 11938), True, 'import numpy as np\n'), ((12146, 12192), 'pandas.concat', 'pd.concat', (['[target_shift, normal_data]'], {'axis': '(1)'}), '([target_shift, normal_data], axis=1)\n', (12155, 12192), True, 'import pandas as pd\n'), ((12610, 12640), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (12623, 12640), True, 'import numpy as np\n'), ((12836, 12893), 'pandas.concat', 'pd.concat', (['[shap_data, normal_data, target_shift]'], {'axis': '(1)'}), '([shap_data, normal_data, target_shift], axis=1)\n', (12845, 12893), True, 'import pandas as pd\n'), ((13326, 13356), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (13339, 13356), True, 'import numpy as np\n'), ((13644, 13679), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': 'columnas'}), '(res, columns=columnas)\n', (13656, 13679), True, 'import pandas as pd\n'), ((1153, 1180), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['truth', 'preds'], {}), '(truth, preds)\n', (1166, 1180), False, 'from sklearn.metrics import roc_auc_score\n'), ((2043, 2069), 'pandas.DataFrame', 'pd.DataFrame', (['X_tr.columns'], {}), '(X_tr.columns)\n', (2055, 2069), True, 'import pandas as pd\n'), ((5497, 5510), 'numpy.min', 'np.min', (['input'], {}), '(input)\n', (5503, 5510), True, 'import numpy as np\n'), ((5533, 5546), 'numpy.max', 'np.max', (['input'], {}), '(input)\n', (5539, 5546), True, 'import numpy as np\n'), ((5634, 5659), 'numpy.arange', 'np.arange', (['(0)', '(buckets + 1)'], {}), '(0, buckets + 1)\n', (5643, 5659), True, 'import numpy as np\n'), ((5780, 5802), 'numpy.min', 'np.min', (['expected_array'], {}), '(expected_array)\n', (5786, 5802), True, 'import numpy as np\n'), ((5804, 5826), 'numpy.max', 'np.max', (['expected_array'], {}), '(expected_array)\n', (5810, 5826), True, 'import numpy as np\n'), ((6032, 6073), 'numpy.histogram', 'np.histogram', (['expected_array', 'breakpoints'], {}), '(expected_array, breakpoints)\n', (6044, 6073), True, 'import numpy as np\n'), ((6147, 6186), 'numpy.histogram', 'np.histogram', (['actual_array', 'breakpoints'], {}), '(actual_array, breakpoints)\n', (6159, 6186), True, 'import numpy as np\n'), ((6574, 6597), 'numpy.log', 'np.log', (['(e_perc / a_perc)'], {}), '(e_perc / a_perc)\n', (6580, 6597), True, 'import numpy as np\n'), ((9022, 9065), 'pandas.concat', 'pd.concat', (['[shap_data, normal_data]'], {'axis': '(1)'}), '([shap_data, normal_data], axis=1)\n', (9031, 9065), True, 'import pandas as pd\n'), ((12528, 12582), 'pandas.concat', 'pd.concat', (['[target_shift_ood, normal_data_ood]'], {'axis': '(1)'}), '([target_shift_ood, normal_data_ood], axis=1)\n', (12537, 12582), True, 'import pandas as pd\n'), ((13229, 13298), 'pandas.concat', 'pd.concat', (['[shap_data_ood, normal_data_ood, target_shift_ood]'], {'axis': '(1)'}), '([shap_data_ood, normal_data_ood, target_shift_ood], axis=1)\n', (13238, 13298), True, 'import pandas as pd\n'), ((1514, 1532), 'category_encoders.MEstimateEncoder', 'MEstimateEncoder', ([], {}), '()\n', (1530, 1532), False, 'from category_encoders import MEstimateEncoder\n'), ((1545, 1573), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1571, 1573), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1787, 1813), 'numpy.abs', 'np.abs', (['shap_values.values'], {}), '(shap_values.values)\n', (1793, 1813), True, 'import numpy as np\n'), ((1903, 1921), 'category_encoders.MEstimateEncoder', 'MEstimateEncoder', ([], {}), '()\n', (1919, 1921), False, 'from category_encoders import MEstimateEncoder\n'), ((2084, 2111), 'numpy.transpose', 'np.transpose', (['pipe[1].coef_'], {}), '(pipe[1].coef_)\n', (2096, 2111), True, 'import numpy as np\n'), ((9467, 9518), 'pandas.concat', 'pd.concat', (['[shap_data_ood, normal_data_ood]'], {'axis': '(1)'}), '([shap_data_ood, normal_data_ood], axis=1)\n', (9476, 9518), True, 'import pandas as pd\n'), ((5934, 5966), 'numpy.percentile', 'np.percentile', (['expected_array', 'b'], {}), '(expected_array, b)\n', (5947, 5966), True, 'import numpy as np\n')]
|
import numpy as np
import numpy.linalg as LA
from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1
from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2
from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3
from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4
from .path_alg import solve_path, pathalgo_general, h_lambdamax
"""
Classo and pathlasso are the main functions,
they can call every algorithm according
to the method and formulation required
"""
# can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'
def Classo(
matrix,
lam,
typ="R1",
meth="DR",
rho=1.345,
get_lambdamax=False,
true_lam=False,
e=None,
rho_classification=-1.0,
w=None,
intercept=False,
return_sigm=True,
):
if w is not None:
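        # reduce the weighted problem to an unweighted one by rescaling X and C; beta is rescaled back by w at the end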
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R3":
if intercept:
# here we use the fact that for R1 and R3,
# the intercept is simple beta0 = ybar-Xbar .vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
e = len(matrices[0]) / 2
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R3(pb, lam / lambdamax)
else:
beta, s = Classo_R3(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
elif typ == "R4":
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
e = len(matrices[0])
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R4(pb, lam / lambdamax)
else:
beta, s = Classo_R4(pb, lam)
elif typ == "R2":
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "ODE"
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R2(pb, lam / lambdamax)
else:
beta = Classo_R2(pb, lam)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
out = solve_path(
matrices,
lam / lambdamax,
False,
rho_classification,
"C2",
intercept=intercept,
)
else:
out = solve_path(
matrices, lam, False, rho_classification, "C2", intercept=intercept
)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
out = solve_path(
matrices, lam / lambdamax, False, 0, "C1", intercept=intercept
)
else:
out = solve_path(matrices, lam, False, 0, "C1", intercept=intercept)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
else: # LS
if intercept:
# here we use the fact that for R1 and R3,
# the intercept is simple beta0 = ybar-Xbar .vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "DR"
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R1(pb, lam / lambdamax)
else:
beta = Classo_R1(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
if w is not None:
if intercept:
beta[1:] = beta[1:] / w
else:
beta = beta / w
if typ in ["R3", "R4"] and return_sigm:
if get_lambdamax:
return (lambdamax, beta, s)
else:
return (beta, s)
if get_lambdamax:
return (lambdamax, beta)
else:
return beta
def pathlasso(
matrix,
lambdas=False,
n_active=0,
lamin=1e-2,
typ="R1",
meth="Path-Alg",
rho=1.345,
true_lam=False,
e=None,
return_sigm=False,
rho_classification=-1.0,
w=None,
intercept=False,
):
Nactive = n_active
if Nactive == 0:
Nactive = False
if type(lambdas) is bool:
lambdas = lamin ** (np.linspace(0.0, 1, 100))
if lambdas[0] < lambdas[-1]:
lambdass = [
lambdas[i] for i in range(len(lambdas) - 1, -1, -1)
] # reverse the list if needed
else:
lambdass = [lambdas[i] for i in range(len(lambdas))]
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R2":
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R2(pb, lambdass, n_active=Nactive)
elif typ == "R3":
if intercept:
# here we use the fact that for R1 and R3, the intercept is simple beta0 = ybar-Xbar .vdot(beta) so by changing the X to X-Xbar and y to y-ybar we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R3(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
BETA = np.array(BETA)
if intercept:
BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
elif typ == "R4":
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R4(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
BETA = np.array(BETA)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathalgo_general(
matrices,
lambdass,
"C2",
n_active=Nactive,
rho=rho_classification,
intercept=intercept,
)
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathalgo_general(
matrices, lambdass, "C1", n_active=Nactive, intercept=intercept
)
else: # R1
if intercept:
# here we use the fact that for R1 and R3,
# the intercept is simple beta0 = ybar-Xbar .vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R1(pb, lambdass, n_active=n_active)
if intercept:
BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
real_path = [lam * lambdamax for lam in lambdass]
if w is not None:
if intercept:
ww = np.array([1] + list(w))
else:
ww = w
BETA = np.array([beta / ww for beta in BETA])
if typ in ["R3", "R4"] and return_sigm:
return (np.array(BETA), real_path, S)
return (np.array(BETA), real_path)
|
[
"numpy.vdot",
"numpy.array",
"numpy.linspace",
"numpy.mean"
] |
[((9722, 9762), 'numpy.array', 'np.array', (['[(beta / ww) for beta in BETA]'], {}), '([(beta / ww) for beta in BETA])\n', (9730, 9762), True, 'import numpy as np\n'), ((9864, 9878), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (9872, 9878), True, 'import numpy as np\n'), ((5733, 5757), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', '(100)'], {}), '(0.0, 1, 100)\n', (5744, 5757), True, 'import numpy as np\n'), ((7222, 7236), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (7230, 7236), True, 'import numpy as np\n'), ((9822, 9836), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (9830, 9836), True, 'import numpy as np\n'), ((1188, 1206), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1195, 1206), True, 'import numpy as np\n'), ((1208, 1218), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1215, 1218), True, 'import numpy as np\n'), ((1845, 1864), 'numpy.vdot', 'np.vdot', (['Xbar', 'beta'], {}), '(Xbar, beta)\n', (1852, 1864), True, 'import numpy as np\n'), ((7186, 7197), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (7194, 7197), True, 'import numpy as np\n'), ((7986, 8000), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (7994, 8000), True, 'import numpy as np\n'), ((6654, 6672), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (6661, 6672), True, 'import numpy as np\n'), ((6674, 6684), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (6681, 6684), True, 'import numpy as np\n'), ((7950, 7961), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (7958, 7961), True, 'import numpy as np\n'), ((4513, 4531), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4520, 4531), True, 'import numpy as np\n'), ((4533, 4543), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4540, 4543), True, 'import numpy as np\n'), ((4924, 4943), 'numpy.vdot', 'np.vdot', (['Xbar', 'beta'], {}), '(Xbar, beta)\n', (4931, 4943), True, 'import numpy as np\n'), ((9128, 9146), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (9135, 9146), True, 'import numpy as np\n'), ((9148, 9158), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (9155, 9158), True, 'import numpy as np\n')]
|
import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.models.cnn import load_cnn
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
import deep_audio_features.bin.config
import numpy
def test_model(modelpath, ifile, layers_dropped,
test_segmentation=False, verbose=True):
"""Loads a model and predicts each classes probability
Arguments:
modelpath {str} : A path where the model was stored.
ifile {str} : A path of a given wav file,
which will be tested.
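        layers_dropped {int} : Number of final layers to remove
                        from the model before computing its outputs.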
test_segmentation {bool}: If True extracts segment level
predictions of a sequence
verbose {bool}: If True prints the predictions
Returns:
y_pred {np.array} : An array with the probability of each class
that the model predicts.
posteriors {np.array}: An array containing the unormalized
posteriors of each class.
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
# Restore model
model, hop_length, window_length = load_cnn(modelpath)
model = model.to(device)
class_names = model.classes_mapping
max_seq_length = model.max_sequence_length
zero_pad = model.zero_pad
spec_size = model.spec_size
fuse = model.fuse
# Apply layer drop
model = drop_layers(model, layers_dropped)
model.max_sequence_length = max_seq_length
# print('Model:\n{}'.format(model))
# Move to device
model.to(device)
# Create test set
test_set = FeatureExtractorDataset(X=[ifile],
# Random class -- does not matter at all
y=[0],
fe_method="MEL_SPECTROGRAM",
oversampling=False,
max_sequence_length=max_seq_length,
zero_pad=zero_pad,
forced_size=spec_size,
fuse=fuse, show_hist=False,
test_segmentation=test_segmentation,
hop_length=hop_length, window_length=window_length)
# Create test dataloader
test_loader = DataLoader(dataset=test_set, batch_size=1,
num_workers=4, drop_last=False,
shuffle=False)
# Forward a sample
posteriors, y_pred, _ = test(model=model, dataloader=test_loader,
cnn=True,
classifier=True if layers_dropped == 0
else False)
if verbose:
print("--> Unormalized posteriors:\n {}\n".format(posteriors))
print("--> Predictions:\n {}".format([class_names[yy] for yy in y_pred]))
return y_pred, numpy.array(posteriors)
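# A minimal sketch (an illustration, not part of the original API) of turning the
# unnormalized posteriors returned above into per-class probabilities via a softmax;
# 'model.pt' and 'audio.wav' are hypothetical paths:
#
#   y_pred, posteriors = test_model(modelpath='model.pt', ifile='audio.wav',
#                                   layers_dropped=0)
#   probs = numpy.exp(posteriors - posteriors.max(axis=-1, keepdims=True))
#   probs = probs / probs.sum(axis=-1, keepdims=True)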
if __name__ == '__main__':
# Read arguments -- model
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', required=True,
type=str, help='Model')
parser.add_argument('-i', '--input', required=True,
type=str, help='Input file for testing')
parser.add_argument('-s', '--segmentation', required=False,
action='store_true',
help='Return segment predictions')
parser.add_argument('-L', '--layers', required=False, default=0,
help='Number of final layers to cut. Default is 0.')
args = parser.parse_args()
# Get arguments
model = args.model
ifile = args.input
layers_dropped = int(args.layers)
segmentation = args.segmentation
# Test the model
d, p = test_model(modelpath=model, ifile=ifile,
layers_dropped=layers_dropped,
test_segmentation=segmentation)
|
[
"deep_audio_features.models.cnn.load_cnn",
"deep_audio_features.utils.model_editing.drop_layers",
"argparse.ArgumentParser",
"deep_audio_features.dataloading.dataloading.FeatureExtractorDataset",
"deep_audio_features.lib.training.test",
"os.path.realpath",
"numpy.array",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] |
[((1363, 1382), 'deep_audio_features.models.cnn.load_cnn', 'load_cnn', (['modelpath'], {}), '(modelpath)\n', (1371, 1382), False, 'from deep_audio_features.models.cnn import load_cnn\n'), ((1621, 1655), 'deep_audio_features.utils.model_editing.drop_layers', 'drop_layers', (['model', 'layers_dropped'], {}), '(model, layers_dropped)\n', (1632, 1655), False, 'from deep_audio_features.utils.model_editing import drop_layers\n'), ((1825, 2127), 'deep_audio_features.dataloading.dataloading.FeatureExtractorDataset', 'FeatureExtractorDataset', ([], {'X': '[ifile]', 'y': '[0]', 'fe_method': '"""MEL_SPECTROGRAM"""', 'oversampling': '(False)', 'max_sequence_length': 'max_seq_length', 'zero_pad': 'zero_pad', 'forced_size': 'spec_size', 'fuse': 'fuse', 'show_hist': '(False)', 'test_segmentation': 'test_segmentation', 'hop_length': 'hop_length', 'window_length': 'window_length'}), "(X=[ifile], y=[0], fe_method='MEL_SPECTROGRAM',\n oversampling=False, max_sequence_length=max_seq_length, zero_pad=\n zero_pad, forced_size=spec_size, fuse=fuse, show_hist=False,\n test_segmentation=test_segmentation, hop_length=hop_length,\n window_length=window_length)\n", (1848, 2127), False, 'from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset\n'), ((2590, 2683), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': '(1)', 'num_workers': '(4)', 'drop_last': '(False)', 'shuffle': '(False)'}), '(dataset=test_set, batch_size=1, num_workers=4, drop_last=False,\n shuffle=False)\n', (2600, 2683), False, 'from torch.utils.data import DataLoader\n'), ((2790, 2897), 'deep_audio_features.lib.training.test', 'test', ([], {'model': 'model', 'dataloader': 'test_loader', 'cnn': '(True)', 'classifier': '(True if layers_dropped == 0 else False)'}), '(model=model, dataloader=test_loader, cnn=True, classifier=True if \n layers_dropped == 0 else False)\n', (2794, 2897), False, 'from deep_audio_features.lib.training import test\n'), ((3279, 3304), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3302, 3304), False, 'import argparse\n'), ((1267, 1292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1290, 1292), False, 'import torch\n'), ((3182, 3205), 'numpy.array', 'numpy.array', (['posteriors'], {}), '(posteriors)\n', (3193, 3205), False, 'import numpy\n'), ((137, 163), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (153, 163), False, 'import sys, os\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from deepspeech.frontend.utility import IGNORE_ID
from deepspeech.io.utility import pad_sequence
from deepspeech.utils.log import Log
__all__ = ["SpeechCollator"]
logger = Log(__name__).getlog()
class SpeechCollator():
def __init__(self, keep_transcription_text=True):
"""
Padding audio features with zeros to make them have the same shape (or
        a user-defined shape) within one batch.
        If ``keep_transcription_text`` is False, text is token ids; otherwise it is the raw string.
"""
self._keep_transcription_text = keep_transcription_text
def __call__(self, batch):
"""batch examples
Args:
batch ([List]): batch is (audio, text)
audio (np.ndarray) shape (D, T)
text (List[int] or str): shape (U,)
Returns:
tuple(audio, text, audio_lens, text_lens): batched data.
audio : (B, Tmax, D)
audio_lens: (B)
text : (B, Umax)
text_lens: (B)
"""
audios = []
audio_lens = []
texts = []
text_lens = []
for audio, text in batch:
# audio
audios.append(audio.T) # [T, D]
audio_lens.append(audio.shape[1])
# text
# for training, text is token ids
# else text is string, convert to unicode ord
tokens = []
if self._keep_transcription_text:
assert isinstance(text, str), (type(text), text)
tokens = [ord(t) for t in text]
else:
tokens = text # token ids
tokens = tokens if isinstance(tokens, np.ndarray) else np.array(
tokens, dtype=np.int64)
texts.append(tokens)
text_lens.append(tokens.shape[0])
padded_audios = pad_sequence(
audios, padding_value=0.0).astype(np.float32) #[B, T, D]
audio_lens = np.array(audio_lens).astype(np.int64)
padded_texts = pad_sequence(
texts, padding_value=IGNORE_ID).astype(np.int64)
text_lens = np.array(text_lens).astype(np.int64)
return padded_audios, audio_lens, padded_texts, text_lens
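# A minimal usage sketch (an illustration, not part of this module): the collator can
# be called directly on a list of (audio, text) examples, or passed as ``collate_fn``
# to a DataLoader; ``examples`` below is a hypothetical list of (np.ndarray, token ids).
#
#   collate_fn = SpeechCollator(keep_transcription_text=False)
#   padded_audios, audio_lens, padded_texts, text_lens = collate_fn(examples)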
|
[
"numpy.array",
"deepspeech.io.utility.pad_sequence",
"deepspeech.utils.log.Log"
] |
[((804, 817), 'deepspeech.utils.log.Log', 'Log', (['__name__'], {}), '(__name__)\n', (807, 817), False, 'from deepspeech.utils.log import Log\n'), ((2330, 2362), 'numpy.array', 'np.array', (['tokens'], {'dtype': 'np.int64'}), '(tokens, dtype=np.int64)\n', (2338, 2362), True, 'import numpy as np\n'), ((2484, 2523), 'deepspeech.io.utility.pad_sequence', 'pad_sequence', (['audios'], {'padding_value': '(0.0)'}), '(audios, padding_value=0.0)\n', (2496, 2523), False, 'from deepspeech.io.utility import pad_sequence\n'), ((2589, 2609), 'numpy.array', 'np.array', (['audio_lens'], {}), '(audio_lens)\n', (2597, 2609), True, 'import numpy as np\n'), ((2650, 2694), 'deepspeech.io.utility.pad_sequence', 'pad_sequence', (['texts'], {'padding_value': 'IGNORE_ID'}), '(texts, padding_value=IGNORE_ID)\n', (2662, 2694), False, 'from deepspeech.io.utility import pad_sequence\n'), ((2745, 2764), 'numpy.array', 'np.array', (['text_lens'], {}), '(text_lens)\n', (2753, 2764), True, 'import numpy as np\n')]
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Lab #3 - Text Prediction
#
# * <NAME> - 17315
# * <NAME> - 17509
# * <NAME> - 17088
# %%
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.preprocessing.text import Tokenizer
from numpy import array
import random
import collections
from wordcloud import WordCloud
import matplotlib
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
import re
import nltk
nltk.download('stopwords')
# Define the list of stopwords according to nltk
stopwords = stopwords.words('english')
# For the model
# %% [markdown]
# ## Data import and cleaning
#
# ### 1. Open and read the files.
#
# It is worth mentioning that all files were converted to lowercase, URLs are removed and, in some cases, most of the symbols we considered unnecessary. Stopwords, numbers and, finally, apostrophes are also removed. In addition, sentences are split using the symbols **.**, **!** and **?**. We must check that no empty strings remain after these sentences.
#
# #### Case 1: Blogs
# %%
# Instantiate the arrays
blog = []
with open('./files/en_US.blogs.txt', 'r', encoding='utf-8') as blog_txt:
for line in blog_txt:
        # Remove line breaks and convert everything to lowercase
line = line.rstrip('\n').lower()
        # Remove URLs
line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove the remaining special characters, except . ? ! and '
line = re.sub(r"[^\w.?!\d'\s]", '', line)
        # Remove numbers
line = re.sub(r'[0-9]', ' ', line)
        # Remove extra whitespace
line = line.strip(' \t\n\r')
        # Remove all stopwords
line = [word for word in line.split(' ') if word not in stopwords]
line = ' '.join(line)
        # Finally, remove apostrophes
line = line.replace("'", '')
        # Split into possible sentences
dotSentences = line.split('.')
excSentences = line.split('!')
queSentences = line.split('?')
        # Check whether it is worth iterating over several sentences
if len(dotSentences) > 1:
for sentence in dotSentences:
                # For each candidate sentence, strip the punctuation symbols
sentence = re.sub(r'[^\w]', ' ', sentence).strip()
if len(sentence) > 1:
blog.append(sentence)
elif len(excSentences) > 1:
for sentence in excSentences:
sentence = re.sub(r'[^\w]', ' ', sentence)
if len(sentence) > 1:
blog.append(sentence)
elif len(queSentences) > 1:
for sentence in queSentences:
sentence = re.sub(r'[^\w]', ' ', sentence)
if len(sentence) > 1:
blog.append(sentence)
elif len(line.split(' ')) > 1:
line = re.sub(r'[^\w]', ' ', line).strip()
blog.append(line)
# %% [markdown]
# #### Case 2: News
#
# This case followed the same procedure as case 1.
# %%
news = []
with open('./files/en_US.news.txt', 'r', encoding='utf-8') as news_txt:
for line in news_txt:
        # Remove line breaks and convert everything to lowercase
line = line.rstrip('\n').lower()
        # Remove URLs
line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove the remaining special characters, except . ? ! and '
line = re.sub(r"[^\w.?!\d'\s]", '', line)
        # Remove numbers
line = re.sub(r'[0-9]', ' ', line)
        # Remove extra whitespace
line = line.strip(' \t\n\r')
        # Remove all stopwords
line = [word for word in line.split(' ') if word not in stopwords]
line = ' '.join(line)
        # Finally, remove apostrophes
line = line.replace("'", '')
        # Split into possible sentences
dotSentences = line.split('.')
excSentences = line.split('!')
queSentences = line.split('?')
        # Check whether it is worth iterating over several sentences
if len(dotSentences) > 1:
for sentence in dotSentences:
                # For each candidate sentence, strip the punctuation symbols
sentence = re.sub(r'[^\w]', ' ', sentence).strip()
if len(sentence) > 1:
news.append(sentence)
elif len(excSentences) > 1:
for sentence in excSentences:
sentence = re.sub(r'[^\w]', ' ', sentence)
if len(sentence) > 1:
news.append(sentence)
elif len(queSentences) > 1:
for sentence in queSentences:
sentence = re.sub(r'[^\w]', ' ', sentence)
if len(sentence) > 1:
news.append(sentence)
elif len(line.split(' ')) > 1:
line = re.sub(r'[^\w]', ' ', line).strip()
news.append(line)
# %% [markdown]
# #### Case 3: Twitter
#
# In this case, each individual tweet is treated as a sentence. Emojis and symbols such as #, $, %, !, @, etc. must be removed. URLs are also removed, and the symbols **.** **,** **'** are allowed.
# %%
tweets = []
with open('./files/en_US.twitter.txt', 'r', encoding='utf-8') as twitter_txt:
for line in twitter_txt:
        # Remove \n and convert to lowercase
line = line.replace('\n', '').lower()
        # Remove URLs
line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove the remaining special characters, except . , and '
line = re.sub(r"[^\w.,\d'\s]", '', line)
        # Remove out-of-context numbers
line = re.sub('^\d+\s|\s\d+\s|\s\d+$', '', line)
        # Add them to the list of tweets
tweets.append(line.strip())
# %%
complete_data = blog + news + tweets
random.shuffle(complete_data)
# %%
data_size = int(len(complete_data)*0.005)
print('Using ' + str(data_size) + ' data samples')
data = complete_data[:data_size]
# %% [markdown]
# Create a CSV with the words used for training
# %%
df = pd.DataFrame(data, columns=["oraciones"])
df.to_csv('training.csv', index=False)
# %% [markdown]
# Build a tokenizer, which is an integer representation of each word in our data.
# %%
tokenizer = Tokenizer()
tokenizer.fit_on_texts([data])
encoded = tokenizer.texts_to_sequences([data])[0]
# %%
# Get the size of our vocabulary
vocab_size = len(tokenizer.word_index) + 1
# %%
# map 2 words to one word
sequences = list()
for i in range(2, len(encoded)):
sequence = encoded[i-2:i+1]
sequences.append(sequence)
max_length = max([len(seq) for seq in sequences])
sequences = pad_sequences(sequences, maxlen=max_length, padding='pre')
# %% [markdown]
# split into input and output elements
#
# %%
sequences = array(sequences)
X, y = sequences[:, :-1], sequences[:, -1]
y = to_categorical(y, num_classes=vocab_size)
# %% [markdown]
# Define the model
# %%
model = Sequential()
model.add(Embedding(vocab_size, 10, input_length=max_length-1))
model.add(LSTM(50))
model.add(Dense(vocab_size, activation='softmax'))
print(model.summary())
# %% [markdown]
# Compile the model
# %%
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
# %%
# Train the model
model.fit(X, y, epochs=150, verbose=2)
# %%
model.save_weights('deep_no_stopwords')
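# %%
# A minimal prediction sketch (an illustration, not part of the original lab): take the
# last two encoded tokens seen during training and predict the most likely next token.
# seed = pad_sequences([encoded[-2:]], maxlen=max_length - 1, padding='pre')
# predicted_index = int(np.argmax(model.predict(seed), axis=-1)[0])
# index_to_token = {i: t for t, i in tokenizer.word_index.items()}
# print(index_to_token.get(predicted_index, '<unknown>'))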
|
[
"keras.preprocessing.text.Tokenizer",
"random.shuffle",
"nltk.corpus.stopwords.words",
"nltk.download",
"keras.utils.to_categorical",
"keras.models.Sequential",
"numpy.array",
"keras.layers.LSTM",
"keras.layers.Dense",
"pandas.DataFrame",
"re.sub",
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.Embedding"
] |
[((684, 710), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (697, 710), False, 'import nltk\n'), ((767, 793), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (782, 793), False, 'from nltk.corpus import stopwords\n'), ((6048, 6077), 'random.shuffle', 'random.shuffle', (['complete_data'], {}), '(complete_data)\n', (6062, 6077), False, 'import random\n'), ((6305, 6346), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['oraciones']"}), "(data, columns=['oraciones'])\n", (6317, 6346), True, 'import pandas as pd\n'), ((6520, 6531), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (6529, 6531), False, 'from keras.preprocessing.text import Tokenizer\n'), ((6928, 6986), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_length', 'padding': '"""pre"""'}), "(sequences, maxlen=max_length, padding='pre')\n", (6941, 6986), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((7070, 7086), 'numpy.array', 'array', (['sequences'], {}), '(sequences)\n', (7075, 7086), False, 'from numpy import array\n'), ((7134, 7175), 'keras.utils.to_categorical', 'to_categorical', (['y'], {'num_classes': 'vocab_size'}), '(y, num_classes=vocab_size)\n', (7148, 7175), False, 'from keras.utils import to_categorical\n'), ((7229, 7241), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7239, 7241), False, 'from keras.models import Sequential\n'), ((7252, 7306), 'keras.layers.Embedding', 'Embedding', (['vocab_size', '(10)'], {'input_length': '(max_length - 1)'}), '(vocab_size, 10, input_length=max_length - 1)\n', (7261, 7306), False, 'from keras.layers import Embedding\n'), ((7316, 7324), 'keras.layers.LSTM', 'LSTM', (['(50)'], {}), '(50)\n', (7320, 7324), False, 'from keras.layers import LSTM\n'), ((7336, 7375), 'keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': '"""softmax"""'}), "(vocab_size, activation='softmax')\n", (7341, 7375), False, 'from keras.layers import Dense\n'), ((1595, 1638), 're.sub', 're.sub', (['"""^https?:\\\\/\\\\/.[\\\\r\\\\n]"""', '""""""', 'line'], {}), "('^https?:\\\\/\\\\/.[\\\\r\\\\n]', '', line)\n", (1601, 1638), False, 'import re\n'), ((1721, 1757), 're.sub', 're.sub', (['"""[^\\\\w.?!\\\\d\'\\\\s]"""', '""""""', 'line'], {}), '("[^\\\\w.?!\\\\d\'\\\\s]", \'\', line)\n', (1727, 1757), False, 'import re\n'), ((1796, 1822), 're.sub', 're.sub', (['"""[0-9]"""', '""" """', 'line'], {}), "('[0-9]', ' ', line)\n", (1802, 1822), False, 'import re\n'), ((3561, 3604), 're.sub', 're.sub', (['"""^https?:\\\\/\\\\/.[\\\\r\\\\n]"""', '""""""', 'line'], {}), "('^https?:\\\\/\\\\/.[\\\\r\\\\n]', '', line)\n", (3567, 3604), False, 'import re\n'), ((3687, 3723), 're.sub', 're.sub', (['"""[^\\\\w.?!\\\\d\'\\\\s]"""', '""""""', 'line'], {}), '("[^\\\\w.?!\\\\d\'\\\\s]", \'\', line)\n', (3693, 3723), False, 'import re\n'), ((3762, 3788), 're.sub', 're.sub', (['"""[0-9]"""', '""" """', 'line'], {}), "('[0-9]', ' ', line)\n", (3768, 3788), False, 'import re\n'), ((5669, 5712), 're.sub', 're.sub', (['"""^https?:\\\\/\\\\/.[\\\\r\\\\n]"""', '""""""', 'line'], {}), "('^https?:\\\\/\\\\/.[\\\\r\\\\n]', '', line)\n", (5675, 5712), False, 'import re\n'), ((5793, 5828), 're.sub', 're.sub', (['"""[^\\\\w.,\\\\d\'\\\\s]"""', '""""""', 'line'], {}), '("[^\\\\w.,\\\\d\'\\\\s]", \'\', line)\n', (5799, 5828), False, 'import re\n'), ((5885, 5933), 're.sub', 're.sub', (['"""^\\\\d+\\\\s|\\\\s\\\\d+\\\\s|\\\\s\\\\d+$"""', '""""""', 'line'], {}), 
"('^\\\\d+\\\\s|\\\\s\\\\d+\\\\s|\\\\s\\\\d+$', '', line)\n", (5891, 5933), False, 'import re\n'), ((2760, 2791), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (2766, 2791), False, 'import re\n'), ((4726, 4757), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (4732, 4757), False, 'import re\n'), ((2534, 2565), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (2540, 2565), False, 'import re\n'), ((2978, 3009), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (2984, 3009), False, 'import re\n'), ((4500, 4531), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (4506, 4531), False, 'import re\n'), ((4944, 4975), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'sentence'], {}), "('[^\\\\w]', ' ', sentence)\n", (4950, 4975), False, 'import re\n'), ((3149, 3176), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'line'], {}), "('[^\\\\w]', ' ', line)\n", (3155, 3176), False, 'import re\n'), ((5115, 5142), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'line'], {}), "('[^\\\\w]', ' ', line)\n", (5121, 5142), False, 'import re\n')]
|
import hashlib
import json
import math
import os
import dill
import base64
from sys import exit
import requests
from bson import ObjectId
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
#from cryptography.hazmat.primitives.asymmetric import padding
#from cryptography.hazmat.primitives import serialization, hashes
from tqdm import tqdm
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from datetime import datetime
from .Model import Model
from .DataLoader import DataLoader
from .Dataset import Dataset
from .saving.saving import save_data, determine_model, TF_str, mxnet_str, pytorch_str
from .web.urls import TOKEN_URL, HASH_URL, UPLOAD_DATA_URL
VERBOSITY = 1
MIN_VERBOSITY = 1
MID_VERBOSITY = 2
FULL_VERBOSITY = 3
_token = ""
_project = ""
_deployed = False
utcnow = datetime.utcnow
with open(os.path.join(os.path.dirname(__file__), "pub_cred_key.pub"), "rb") as key_file:
#pub_key_encryption = serialization.load_pem_public_key(key_file.read())
pub_key_encryption = PKCS1_OAEP.new(RSA.importKey(key_file.read()), SHA256)
# Terminal color escape codes (adapted from Stack Overflow)
class bcolors:
PURPLE = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ORANGE = '\33[38;5;208m'
levels = {"WARNING": bcolors.ORANGE, "INFO": bcolors.PURPLE, "ERROR": bcolors.FAIL}
NEURO_AI_STR = f"[{bcolors.OKBLUE}Neuro Ai{bcolors.ENDC}]"
def api(token_, project_name, verbosity, deployed):
global _token
global _project
global VERBOSITY
global _deployed
if token_ == "":
token_ = os.environ.get("NPU_API_TOKEN", "")
_token = token_
VERBOSITY = verbosity
verbose_print(f"Verbosity level set to {VERBOSITY}", MID_VERBOSITY)
_deployed = deployed
if _deployed:
npu_print("DEPLOYMENT MODE")
params = {"token": _token, "project_name": project_name}
response = post(TOKEN_URL, json=params)
if response.status_code == 200:
npu_print("Token successfully authenticated")
_project = response.json()
npu_print(f"Using project: {project_name}")
return response
else:
raise ValueError(response.text)
# "API token not valid"
def getToken():
return _token
def auth_header():
return {"authorization": "Bearer " + getToken()}
def get_verbosity():
return VERBOSITY
def get_project():
return _project
def is_deployed():
return _deployed
def get_response(response):
try:
return response.json()
except Exception as e:
raise ConnectionError("Invalid response received. Error: {}".format(response.text))
# https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def add_kwargs_to_params(params, **kwargs):
params = {**params, **kwargs}
return params
def read_in_chunks(file_object, chunk_size=1024):
"""Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k."""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def check_model(model):
from .Task import Task
from .Model import Model
if not isinstance(model, Task) and not isinstance(model, str) and not isinstance(model, Model):
raise ValueError("Model is not a valid format. Please make sure you've compiled it first.")
def check_model_type(model, params):
from .Task import Task
if isinstance(model, Model):
params["model_name"] = model.name
params["model_attr"] = model.attr
elif isinstance(model, str) and not ObjectId.is_valid(model):
params["model_name"] = model
elif model != "" and not isinstance(model, Task):
params["modelId"] = model
def check_data_type(data, param_name, params):
from .Task import Task
if isinstance(data, Dataset):
params[param_name + "_name"] = data.id
elif isinstance(data, str) and not ObjectId.is_valid(data):
params[param_name + "_name"] = data
elif isinstance(data, HubDataset):
params[param_name + "Id"] = data.hub_meta
elif data != "" and not isinstance(data, Task):
params[param_name + "Id"] = data
params[f"{param_name}_hub_ds"] = isinstance(data, HubDataset)
def check_data(data, name=""):
if not isinstance(name, str):
raise ValueError("Name given is not valid. Please supply a string.")
if isinstance(data, dict):
return data
try:
import hub
hub_meta = {}
if hasattr(data, "dataset"):
if hasattr(data, "indexes"):
hub_meta["indexes"] = data.indexes
if hasattr(data, "subpath"):
hub_meta["subpath"] = data.subpath
data = data.dataset
if isinstance(data, hub.Dataset):
encrypted_token = base64.b64encode(
pub_key_encryption.encrypt(
json.dumps(data.token).encode()
)).decode()
#pub_key_encryption.encrypt(
# json.dumps(data.token).encode(),
# padding.OAEP(
# mgf=padding.MGF1(
# algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None))).decode()
hub_meta = {"url": data.url, "schema": data.schema, "token": encrypted_token, **hub_meta}
hub_meta = base64.b64encode(dill.dumps(hub_meta)).decode()
return HubDataset(hub_meta)
except Exception as e:
# print(e)
pass
if isinstance(data, str) and (data.endswith(("npy", "npz")) or ObjectId.is_valid(data) or data == ""):
return data
elif isinstance(data, Dataset):
return data
elif isinstance(data, DataLoader):
response = upload_data_loader(data, name)
else:
response = upload_data(data, name)
status_code = response.status_code
if status_code not in (204, 200, 201):
raise ConnectionAbortedError("Data upload has not worked: {}".format(response.content))
if status_code != 204:
response = get_response(response)
if isinstance(response, dict) and status_code == 200:
message = response.get("message")
npu_print(message)
response = response["id"]
return response
def slice_data(data):
id = data["id"]
start = data["indexes"]
end = None
if isinstance(start, slice):
end = start.stop
start = start.start
return id, start, end
def gen(dl):
for data_part in dl.numpy():
yield save_data(data_part)
def create_callback(encoder):
encoder_len = encoder.len
bar = tqdm(desc=f"{NEURO_AI_STR} Uploading", unit="B", unit_scale=True, total=encoder_len, unit_divisor=1024)
def callback(monitor):
bar.n = monitor.bytes_read
bar.refresh()
if monitor.bytes_read == encoder_len:
bar.close()
return callback
def get_progress_bar_uploader(file, json):
encoder = create_upload(file, json)
callback = create_callback(encoder)
monitor = MultipartEncoderMonitor(encoder, callback)
return monitor
def create_upload(file, _json):
return MultipartEncoder({
'file': ('file', file, 'application/octet-stream', {'Content-Transfer-Encoding': 'binary'}),
'json': (None, json.dumps(_json), 'application/json', {}),
})
def upload_data_loader(dl, name=""):
verbose_print("Hashing data locally...", MID_VERBOSITY)
hash, size, length = dl.hash()
params = {"token": getToken(), "hash": hash, "collection": 1, "chunked": True, "is_last": False, "size": size,
"given_name": name, "input_shape": dl.shape, "project": get_project()}
# params = {"token": getToken(), "hash": hash, "collection": 1, "size": size, "given_name": name}
verbose_print("Checking if data is on servers...", MID_VERBOSITY)
response = get(HASH_URL, params=params)
if response.status_code == 200:
verbose_print("Data already uploaded. Will not reupload.", MID_VERBOSITY)
return response
npu_print("Data not on servers. Starting to upload. Total size of data is {}".format(convert_size(size)))
if length == 1:
return upload_data(next(dl.numpy()), name)
npu_print("{} chunks to upload...".format(length))
for i, data_part in enumerate(dl.numpy()):
verbose_print("Uploading chunk {} out of {}...".format(i + 1, length), MID_VERBOSITY)
if i == length - 1:
params["is_last"] = True
file = save_data(data_part)
monitor = get_progress_bar_uploader(file, params)
response = post(UPLOAD_DATA_URL, data=monitor,
headers={'Content-Type': monitor.content_type})
return response
def upload_data(data, name=""):
verbose_print("Saving data locally...", FULL_VERBOSITY)
generic_file = False
if isinstance(data, str):
file = open(data, "rb")
generic_file = True
else:
file = save_data(data)
verbose_print("Hashing...", FULL_VERBOSITY)
hash = hashlib.md5()
for piece in read_in_chunks(file):
hash.update(piece)
size = file.tell()
hash = hash.hexdigest()
verbose_print("Checking if data is on servers...", MID_VERBOSITY)
params = {"token": getToken(), "hash": hash, "collection": 1, "given_name": name, "project": get_project(),
"generic_file": generic_file}
response = get(HASH_URL, params=params, json=params)
if response.status_code == 200:
verbose_print("Data already on servers. Returning result...", MID_VERBOSITY)
file.close()
return response
npu_print("Data not found on servers. Total size of data is {}. Uploading now...".format(convert_size(size)))
file.seek(0)
monitor = get_progress_bar_uploader(file=file, json=params)
response = post(UPLOAD_DATA_URL, data=monitor,
headers={'Content-Type': monitor.content_type})
if isinstance(data, str):
file.close()
return response
def upload_sample(data, params):
required = (len(data[0]) if isinstance(data, (tuple, list)) else len(data)) > 10
if not required:
return False
data = [d[:10] for d in data] if isinstance(data, (tuple, list)) else data[:10]
def hash_file(file):
hash = hashlib.md5()
for piece in read_in_chunks(file):
hash.update(piece)
# break
hash = hash.hexdigest()
return hash
def validate_model(model, data):
library = determine_model(model)
if isinstance(data, str):
return
# data = convert_to_numpy(data)
if library == pytorch_str:
from torch import ones
elif library == mxnet_str:
from mxnet import nd
ones = nd.ones
elif library == TF_str:
from numpy import ones
else:
return
# raise ValueError("Cannot validate library: {} .".format(library))
placeholder_data = ones(data.shape)
model(placeholder_data)
def determine_data(data):
start = end = None
name = ""
if isinstance(data, dict):
data, start, end = slice_data(data)
if isinstance(data, Dataset):
name = data.id
data = data
return data, name, start, end
def npu_print(val, level="INFO"):
log_str = f"{NEURO_AI_STR} {utcnow_formatted()} - [{levels[level]}{level}{bcolors.ENDC}]: {val}"
print(f"{log_str}")
def verbose_print(str, verbosity):
if VERBOSITY >= verbosity:
npu_print(str)
def utcnow_formatted():
return utcnow().strftime("%H:%M:%S")
def make_request(request_type_function, url, data, headers, json, params, **kwargs):
if params is None:
params = {}
if json is None:
json = {}
if data is None:
data = {}
if headers is None:
headers = {}
try:
response = request_type_function(url, data=data, headers={**headers, **auth_header()}, json=json,
params=params, **kwargs)
response.raise_for_status()
return response
except requests.exceptions.RequestException as _:
response = response.json()
if "error" in response:
npu_print(f"Error: {response['error']}", level="ERROR")
elif "message" in response:
npu_print(f"Error: {response['message']}", level="ERROR")
raise Exception
# exit(1)
def post(url, data=None, headers=None, json=None, params=None, **kwargs):
return make_request(requests.post, url, data, headers, json, params, **kwargs)
def get(url, data=None, headers=None, json=None, params=None, **kwargs):
return make_request(requests.get, url, data, headers, json, params, **kwargs)
class HubDataset:
def __init__(self, hub_meta):
self.hub_meta = hub_meta
|
[
"bson.ObjectId.is_valid",
"numpy.ones",
"hashlib.md5",
"requests_toolbelt.MultipartEncoderMonitor",
"math.pow",
"tqdm.tqdm",
"os.environ.get",
"json.dumps",
"math.log",
"os.path.dirname",
"dill.dumps"
] |
[((3041, 3058), 'math.pow', 'math.pow', (['(1024)', 'i'], {}), '(1024, i)\n', (3049, 3058), False, 'import math\n'), ((7038, 7146), 'tqdm.tqdm', 'tqdm', ([], {'desc': 'f"""{NEURO_AI_STR} Uploading"""', 'unit': '"""B"""', 'unit_scale': '(True)', 'total': 'encoder_len', 'unit_divisor': '(1024)'}), "(desc=f'{NEURO_AI_STR} Uploading', unit='B', unit_scale=True, total=\n encoder_len, unit_divisor=1024)\n", (7042, 7146), False, 'from tqdm import tqdm\n'), ((7457, 7499), 'requests_toolbelt.MultipartEncoderMonitor', 'MultipartEncoderMonitor', (['encoder', 'callback'], {}), '(encoder, callback)\n', (7480, 7499), False, 'from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n'), ((9442, 9455), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (9453, 9455), False, 'import hashlib\n'), ((10688, 10701), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (10699, 10701), False, 'import hashlib\n'), ((11309, 11325), 'numpy.ones', 'ones', (['data.shape'], {}), '(data.shape)\n', (11313, 11325), False, 'from numpy import ones\n'), ((1702, 1737), 'os.environ.get', 'os.environ.get', (['"""NPU_API_TOKEN"""', '""""""'], {}), "('NPU_API_TOKEN', '')\n", (1716, 1737), False, 'import os\n'), ((892, 917), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (907, 917), False, 'import os\n'), ((3004, 3030), 'math.log', 'math.log', (['size_bytes', '(1024)'], {}), '(size_bytes, 1024)\n', (3012, 3030), False, 'import math\n'), ((5998, 6021), 'bson.ObjectId.is_valid', 'ObjectId.is_valid', (['data'], {}), '(data)\n', (6015, 6021), False, 'from bson import ObjectId\n'), ((3999, 4023), 'bson.ObjectId.is_valid', 'ObjectId.is_valid', (['model'], {}), '(model)\n', (4016, 4023), False, 'from bson import ObjectId\n'), ((4346, 4369), 'bson.ObjectId.is_valid', 'ObjectId.is_valid', (['data'], {}), '(data)\n', (4363, 4369), False, 'from bson import ObjectId\n'), ((7707, 7724), 'json.dumps', 'json.dumps', (['_json'], {}), '(_json)\n', (7717, 7724), False, 'import json\n'), ((5801, 5821), 'dill.dumps', 'dill.dumps', (['hub_meta'], {}), '(hub_meta)\n', (5811, 5821), False, 'import dill\n'), ((5315, 5337), 'json.dumps', 'json.dumps', (['data.token'], {}), '(data.token)\n', (5325, 5337), False, 'import json\n')]
|
""" Code to implement ScaleFactor:: decorator supported
in gtlike.
The gtlike feature is documented here:
https://confluence.slac.stanford.edu/display/ST/Science+Tools+Development+Notes?focusedCommentId=103582318#comment-103582318
Author: <NAME>
"""
import operator
from copy import deepcopy
import numpy as np
from uw.like.Models import PowerLaw, PowerLawFlux, FileFunction, PLSuperExpCutoff, Gaussian, Constant, CompositeModel
from uw.darkmatter.spectral import DMFitFunction
def build_scale_factor(model_class):
"""
First, create the ScaleFactorPowerLaw and a comparison PowerLaw
>>> scale = 3.133141
>>> sfpl=ScaleFactorPowerLaw(ScaleFactor=scale)
>>> pl = PowerLaw()
>>> print sfpl.name
ScaleFactorPowerLaw
>>> print sfpl.gtlike['name']
ScaleFactor::PowerLaw
>>> print sfpl.pretty_name
ScaleFactor::PowerLaw
>>> print sfpl.full_name()
ScaleFactor::PowerLaw, e0=1000, ScaleFactor=3.133141
>>> print sfpl.e0 == pl.e0
True
>>> sfpl.default_extra_params == pl.default_extra_params
True
>>> np.all(sfpl.default_p == [1] + pl.default_p)
True
>>> print sfpl.param_names == ['ScaleFactor'] + pl.param_names
True
>>> print np.all(sfpl.default_mappers == Constant.default_mappers + PowerLaw.default_mappers)
True
>>> sfpl.default_extra_params == pl.default_extra_params
True
    >>> sfpl.default_extra_attrs == pl.default_extra_attrs
True
>>> print sfpl.default_oomp_limits == ['ScaleFactor'] + PowerLaw.default_oomp_limits
True
Make sure that default_limits acts correclty
>>> dl=sfpl.default_limits
>>> dl['Norm'] == pl.default_limits['Norm']
True
>>> dl['Index'] == pl.default_limits['Index']
True
>>> dl['ScaleFactor'] == Constant.default_limits['Scale']
True
Make sure the __call__ function is correct
>>> energies=np.logspace(1,5,100)
>>> np.all(sfpl(energies) == scale*pl(energies))
True
And that the gradient follows the chain rule:
>>> grad = sfpl.external_gradient(energies)
>>> np.all(grad[0] == pl(energies))
True
>>> np.all(grad[1:] == scale*pl.external_gradient(energies))
True
Note, we can set default limits for ScaleFactor objects (necessary for XML creation):
>>> print sfpl.mappers == Constant.default_mappers + PowerLaw.default_mappers
True
>>> print np.all(sfpl.default_mappers == Constant.default_mappers + PowerLaw.default_mappers)
True
>>> sfpl.set_default_limits()
>>> sfpl.mappers == [Constant.default_limits['Scale'],PowerLaw.default_limits['Norm'],PowerLaw.default_limits['Index']]
True
Also, you can obtain the unfit parameters either as values of the object or with getp/setp
>>> sfpl.e0 == pl.e0 and sfpl['e0'] == pl.e0 and sfpl.getp('e0') == pl.e0
True
We can create ScaleFactor object for other models. For PowerLawFlux:
>>> sfpl2=ScaleFactorPowerLawFlux(ScaleFactor=scale)
>>> pl2 = PowerLawFlux()
>>> print sfpl2.name
ScaleFactorPowerLawFlux
>>> print sfpl2.gtlike['name']
ScaleFactor::PowerLaw2
>>> sfpl2.emax == pl2.emax and sfpl2.emax == pl2.emax
True
And, of course, the values are just scaled
>>> np.all(sfpl2(energies) == scale*pl2(energies))
True
There is also a ScaleFactorFileFunction object, which acts just like a FileFunction.
>>> from tempfile import NamedTemporaryFile
>>> temp = NamedTemporaryFile()
>>> filename = temp.name
>>> sfpl2.save_profile(filename, emin=1, emax=1e5)
>>> temp.seek(0)
>>> sfff = ScaleFactorFileFunction(ScaleFactor=5.5, normalization=1, file=filename)
>>> np.allclose(sfff(energies),5.5*sfpl2(energies),rtol=1e-10, atol=1e-10)
True
Note, it sets default_extra_attrs correctly:
>>> sfff.default_extra_attrs == FileFunction.default_extra_attrs
True
>>> sfff.file == filename
True
"""
# For a description of creating classes on the fly, see:
# http://jjinux.blogspot.com/2005/03/python-create-new-class-on-fly.html
c = type('ScaleFactor' + model_class.__name__, (CompositeModel,), {})
# Note, default_p, param_names, default_mappers, automatically taken care of by CompositeModel
c.default_extra_params=model_class.default_extra_params
c.default_extra_attrs=model_class.default_extra_attrs
c.gtlike = deepcopy(model_class.gtlike)
c.gtlike['name']='ScaleFactor::%s' % c.gtlike['name']
c.gtlike['param_names'].insert(0,'ScaleFactor')
c.gtlike['topointlike'].insert(0,operator.pos)
c.gtlike['togtlike'].insert(0,operator.pos)
def __init__(self, **kwargs):
scale = Constant(name='ScaleFactor')
scale.default_oomp_limits=['ScaleFactor']
if 'ScaleFactor' in kwargs:
scale['ScaleFactor'] = kwargs.pop('ScaleFactor')
m=model_class(**kwargs)
super(c,self).__init__(scale,m)
self.scale=scale
self.model=m
for p in c.default_extra_params.keys() + c.default_extra_attrs.keys():
# Allow getting and setting the default_extra_params and default_extra_attrs
# directly through the self.model object.
get=lambda self: getattr(self.model,p)
set=lambda self, value: setattr(self.model,p,value)
setattr(c,p,property(get, set, p))
c.__init__ = __init__
c.__call__ = lambda self,e: self.scale.__call__(e)*self.model.__call__(e)
c.pretty_name = property(lambda self: 'ScaleFactor::%s' % self.model.pretty_name)
c.full_name = lambda self: 'ScaleFactor::%s, ScaleFactor=%s' % (self.model.full_name(),self['ScaleFactor'])
def external_gradient(self, e):
a=self.scale.external_gradient(e)*self.model.__call__(e)
b=self.scale.__call__(e)*self.model.external_gradient(e)
return np.concatenate((a,b),axis=0)
c.external_gradient = external_gradient
return c
ScaleFactorPowerLaw=build_scale_factor(PowerLaw)
ScaleFactorPowerLawFlux=build_scale_factor(PowerLawFlux)
ScaleFactorFileFunction=build_scale_factor(FileFunction)
ScaleFactorDMFitFunction=build_scale_factor(DMFitFunction)
ScaleFactorPLSuperExpCutoff=build_scale_factor(PLSuperExpCutoff)
ScaleFactorGaussian=build_scale_factor(Gaussian)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"doctest.testmod",
"uw.like.Models.Constant",
"numpy.concatenate",
"copy.deepcopy"
] |
[((5021, 5049), 'copy.deepcopy', 'deepcopy', (['model_class.gtlike'], {}), '(model_class.gtlike)\n', (5029, 5049), False, 'from copy import deepcopy\n'), ((6931, 6948), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6946, 6948), False, 'import doctest\n'), ((5311, 5339), 'uw.like.Models.Constant', 'Constant', ([], {'name': '"""ScaleFactor"""'}), "(name='ScaleFactor')\n", (5319, 5339), False, 'from uw.like.Models import PowerLaw, PowerLawFlux, FileFunction, PLSuperExpCutoff, Gaussian, Constant, CompositeModel\n'), ((6455, 6485), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': '(0)'}), '((a, b), axis=0)\n', (6469, 6485), True, 'import numpy as np\n')]
|
# This is a class because it stores its model parameters and has a 'prediction' function which returns predictions for input data
import numpy as np
from baseModel import baseModel, ModellingError as me
from datetime import datetime
import pandas as pd
class ModellingError(me): pass
class ConstantMonthlyModel(baseModel):
"""
A constant consumption model: consumption is estimated as the average of all input data
Input_data must respond to the method call 'consumption'
"""
n_parameters = 1
def __init__(self, data):
if len(data) <= 11:#(self.n_parameters + 2):
self.mean = np.nan
self.std = np.nan
#raise ModellingError, "Not enough input data"
if 'temperature' in data.dtype.names:
x = data['temperature']
self.xrange = [min(x), max(x)]
data_pd = pd.DataFrame.from_records(data)
data_pd['ts'] = data_pd['timestamp'].apply(datetime.fromtimestamp)
data_pd = data_pd.set_index(pd.DatetimeIndex(data_pd['ts']))
data_pd.sort_index(inplace=True)
        last_idx_month = data_pd.index[-1].month
        last_month = last_idx_month + 1 if last_idx_month != 12 else 1
self.mean = data_pd[data_pd.index.month==last_month]['consumption'].mean()
self.std = data_pd[data_pd.index.month==last_month]['consumption'].std()
def prediction(self, independent_data):
return np.array([self.mean] * len(independent_data))
def simulation(self, independent_data):
return self.std * np.random.randn(independent_data.size) + self.mean
def parameters(self):
return {'mean': self.mean, 'std': self.std}
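# A minimal usage sketch (an illustration, not shipped with this module): the model
# expects a NumPy record array exposing 'timestamp' and 'consumption' fields
# (and optionally 'temperature').
#
#   rec = np.array([(1388534400 + i * 86400, 10.0 + (i % 3)) for i in range(60)],
#                  dtype=[('timestamp', 'f8'), ('consumption', 'f8')])
#   m = ConstantMonthlyModel(rec)
#   print(m.parameters(), m.prediction(rec[:5]))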
|
[
"pandas.DataFrame.from_records",
"pandas.DatetimeIndex",
"numpy.random.randn"
] |
[((823, 854), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data'], {}), '(data)\n', (848, 854), True, 'import pandas as pd\n'), ((958, 989), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["data_pd['ts']"], {}), "(data_pd['ts'])\n", (974, 989), True, 'import pandas as pd\n'), ((1453, 1491), 'numpy.random.randn', 'np.random.randn', (['independent_data.size'], {}), '(independent_data.size)\n', (1468, 1491), True, 'import numpy as np\n')]
|
# Copyright 2018 <NAME>, <NAME>.
# (Strongly inspired by original Google BERT code and Hugging Face's code)
""" Fine-tuning on A Classification Task with pretrained Transformer """
import itertools
import csv
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import tokenization
import models
import optim
import train
import pdb
import numpy as np
import pandas as pd
from utils import set_seeds, get_device, truncate_tokens_pair
import os
import warnings
def read_explanations(path):
header = []
uid = None
df = pd.read_csv(path, sep='\t', dtype=str)
for name in df.columns:
if name.startswith('[SKIP]'):
if 'UID' in name and not uid:
uid = name
else:
header.append(name)
if not uid or len(df) == 0:
print('Possibly misformatted file: ' + path)
return []
return df.apply(lambda r: (r[uid], ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))), 1).tolist()
tables = '/data/jacob/code/nlp/tfidf/data/annotation/expl-tablestore-export-2017-08-25-230344/tables'
questions = '/data/jacob/code/nlp/tfidf/data/questions/ARC-Elementary+EXPL-Dev.tsv'
def parse_e(e):
l = e.split(' ')
l = [ll.split('|')[0] for ll in l]
return l
class CsvDataset(Dataset):
""" Dataset Class for CSV file """
labels = None
    def __init__(self, pipeline=[]):  # pipeline of preprocessing callables
Dataset.__init__(self)
explanations = []
for path, _, files in os.walk(tables):
for file in files:
explanations += read_explanations(os.path.join(path, file))
if not explanations:
warnings.warn('Empty explanations')
df_q = pd.read_csv(questions, sep='\t', dtype=str)
df_e = pd.DataFrame(explanations, columns=('uid', 'text'))
# pdb.set_trace()
q_list = []
e_list = []
dict_e = {}
num_e = len(df_e['uid'])
num_q = len(df_q['questionID'])
for i in range(num_e):
dict_e[df_e['uid'][i]]= df_e['text'][i]
for i in range(num_q):
if not df_q['explanation'][i] is np.nan:
q_list.append(df_q['Question'][i])
e_list.append(parse_e(df_q['explanation'][i]))
self.q_list = q_list
self.e_list = e_list
self.dict_e = dict_e
self.pipeline = pipeline
self.es = list(dict_e.keys())
self.num_neg = 75
# pdb.set_trace()
# data = []
# with open(file, "r") as f:
# # list of splitted lines : line is also list
# lines = csv.reader(f, delimiter='\t', quotechar=None)
# pdb.set_trace()
# for instance in self.get_instances(lines): # instance : tuple of fields
# for proc in pipeline: # a bunch of pre-processing
# instance = proc(instance)
# data.append(instance)
# # To Tensors
# self.tensors = [torch.tensor(x, dtype=torch.long) for x in zip(*data)]
def __len__(self):
return len(self.q_list)
def __getitem__(self, index):
# pdb.set_trace()
q = self.q_list[index]
e = self.e_list[index]
pos = self.dict_e[np.random.choice(e)]
# neg = []
samples = []
instance = ('1', q, pos)
for proc in self.pipeline:
instance = proc(instance)
samples.append(instance)
for i in range(self.num_neg):
# pdb.set_trace()
neg = self.dict_e[np.random.choice(self.es)]
instance = ('0', q, neg)
for proc in self.pipeline:
instance = proc(instance)
samples.append(instance)
# pdb.set_trace()
data = [torch.tensor(x, dtype=torch.long) for x in zip(*samples)]
# data = [d for d in zip(data)]
return data
class Pipeline():
""" Preprocess Pipeline Class : callable """
def __init__(self):
super().__init__()
def __call__(self, instance):
raise NotImplementedError
class Tokenizing(Pipeline):
""" Tokenizing sentence pair """
def __init__(self, preprocessor, tokenize):
super().__init__()
self.preprocessor = preprocessor # e.g. text normalization
self.tokenize = tokenize # tokenize function
def __call__(self, instance):
label, text_a, text_b = instance
label = self.preprocessor(label)
tokens_a = self.tokenize(self.preprocessor(text_a))
tokens_b = self.tokenize(self.preprocessor(text_b)) \
if text_b else []
return (label, tokens_a, tokens_b)
class AddSpecialTokensWithTruncation(Pipeline):
""" Add special tokens [CLS], [SEP] with truncation """
def __init__(self, max_len=512):
super().__init__()
self.max_len = max_len
def __call__(self, instance):
label, tokens_a, tokens_b = instance
# -3 special tokens for [CLS] text_a [SEP] text_b [SEP]
# -2 special tokens for [CLS] text_a [SEP]
_max_len = self.max_len - 3 if tokens_b else self.max_len - 2
truncate_tokens_pair(tokens_a, tokens_b, _max_len)
# Add Special Tokens
tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']
tokens_b = tokens_b + ['[SEP]'] if tokens_b else []
return (label, tokens_a, tokens_b)
class TokenIndexing(Pipeline):
""" Convert tokens into token indexes and do zero-padding """
def __init__(self, indexer, labels, max_len=512):
super().__init__()
self.indexer = indexer # function : tokens to indexes
# map from a label name to a label index
self.label_map = {name: i for i, name in enumerate(labels)}
self.max_len = max_len
def __call__(self, instance):
label, tokens_a, tokens_b = instance
input_ids = self.indexer(tokens_a + tokens_b)
segment_ids = [0]*len(tokens_a) + [1]*len(tokens_b) # token type ids
input_mask = [1]*(len(tokens_a) + len(tokens_b))
label_id = self.label_map[label]
# zero padding
n_pad = self.max_len - len(input_ids)
input_ids.extend([0]*n_pad)
segment_ids.extend([0]*n_pad)
input_mask.extend([0]*n_pad)
return (input_ids, segment_ids, input_mask, label_id)
class Classifier(nn.Module):
""" Classifier with Transformer """
def __init__(self, cfg, n_labels):
super().__init__()
self.transformer = models.Transformer(cfg)
self.fc = nn.Linear(cfg.dim, cfg.dim)
self.activ = nn.Tanh()
self.drop = nn.Dropout(cfg.p_drop_hidden)
self.classifier = nn.Linear(cfg.dim, n_labels)
def forward(self, input_ids, segment_ids, input_mask):
h = self.transformer(input_ids, segment_ids, input_mask)
# only use the first h in the sequence
pooled_h = self.activ(self.fc(h[:, 0]))
logits = self.classifier(self.drop(pooled_h))
logits = torch.exp(logits).clamp(0, 100)
return logits
#pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
#pretrain_file='../exp/bert/pretrain_100k/model_epoch_3_steps_9732.pt',
def neg_logloss(logits):
score = logits[0] / logits.sum()
loss = -torch.log(score+1e-4)
return loss
def main(task='mrpc',
train_cfg='config/train_mrpc.json',
model_cfg='config/bert_base.json',
data_file='../glue/MRPC/train.tsv',
model_file=None,
pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
data_parallel=True,
vocab='../uncased_L-12_H-768_A-12/vocab.txt',
save_dir='../exp/bert/mrpc',
max_len=128,
mode='train'):
cfg = train.Config.from_json(train_cfg)
model_cfg = models.Config.from_json(model_cfg)
set_seeds(cfg.seed)
tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
pipeline = [Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize),
AddSpecialTokensWithTruncation(max_len),
TokenIndexing(tokenizer.convert_tokens_to_ids,
('0', '1'), max_len)]
dataset = CsvDataset(pipeline)
# print(dataset[0])
# pdb.set_trace()
data_iter = DataLoader(dataset, batch_size=1, shuffle=True)
model = Classifier(model_cfg, 1)
criterion = nn.CrossEntropyLoss()
trainer = train.Trainer(cfg,
model,
data_iter,
optim.optim4GPU(cfg, model),
save_dir, get_device())
if mode == 'train':
def get_loss(model, batch, global_step): # make sure loss is a scalar tensor
# pdb.set_trace()
input_ids, segment_ids, input_mask, label_id = [b[0] for b in batch]
# pdb.set_trace()
logits = model(input_ids, segment_ids, input_mask)
# pdb.set_trace()
loss = neg_logloss(logits)
# loss = criterion(logits, label_id)
return loss
trainer.train(get_loss, model_file, pretrain_file, data_parallel)
elif mode == 'eval':
def evaluate(model, batch):
input_ids, segment_ids, input_mask, label_id = batch
logits = model(input_ids, segment_ids, input_mask)
_, label_pred = logits.max(1)
result = (label_pred == label_id).float() #.cpu().numpy()
accuracy = result.mean()
return accuracy, result
results = trainer.eval(evaluate, model_file, data_parallel)
total_accuracy = torch.cat(results).mean().item()
print('Accuracy:', total_accuracy)
if __name__ == '__main__':
fire.Fire(main)
|
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"fire.Fire",
"torch.nn.Tanh",
"torch.exp",
"os.walk",
"utils.truncate_tokens_pair",
"pandas.DataFrame",
"tokenization.FullTokenizer",
"torch.utils.data.Dataset.__init__",
"models.Config.from_json",
"utils.set_seeds",
"numpy.random.choice",
"train.Config.from_json",
"pandas.isna",
"models.Transformer",
"torch.cat",
"torch.log",
"os.path.join",
"optim.optim4GPU",
"torch.tensor",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"utils.get_device"
] |
[((563, 601), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'dtype': 'str'}), "(path, sep='\\t', dtype=str)\n", (574, 601), True, 'import pandas as pd\n'), ((7755, 7788), 'train.Config.from_json', 'train.Config.from_json', (['train_cfg'], {}), '(train_cfg)\n', (7777, 7788), False, 'import train\n'), ((7805, 7839), 'models.Config.from_json', 'models.Config.from_json', (['model_cfg'], {}), '(model_cfg)\n', (7828, 7839), False, 'import models\n'), ((7845, 7864), 'utils.set_seeds', 'set_seeds', (['cfg.seed'], {}), '(cfg.seed)\n', (7854, 7864), False, 'from utils import set_seeds, get_device, truncate_tokens_pair\n'), ((7882, 7946), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab', 'do_lower_case': '(True)'}), '(vocab_file=vocab, do_lower_case=True)\n', (7908, 7946), False, 'import tokenization\n'), ((8295, 8342), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset, batch_size=1, shuffle=True)\n', (8305, 8342), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8397, 8418), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8416, 8418), True, 'import torch.nn as nn\n'), ((9745, 9760), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (9754, 9760), False, 'import fire\n'), ((1439, 1461), 'torch.utils.data.Dataset.__init__', 'Dataset.__init__', (['self'], {}), '(self)\n', (1455, 1461), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1520, 1535), 'os.walk', 'os.walk', (['tables'], {}), '(tables)\n', (1527, 1535), False, 'import os\n'), ((1738, 1781), 'pandas.read_csv', 'pd.read_csv', (['questions'], {'sep': '"""\t"""', 'dtype': 'str'}), "(questions, sep='\\t', dtype=str)\n", (1749, 1781), True, 'import pandas as pd\n'), ((1797, 1848), 'pandas.DataFrame', 'pd.DataFrame', (['explanations'], {'columns': "('uid', 'text')"}), "(explanations, columns=('uid', 'text'))\n", (1809, 1848), True, 'import pandas as pd\n'), ((5181, 5231), 'utils.truncate_tokens_pair', 'truncate_tokens_pair', (['tokens_a', 'tokens_b', '_max_len'], {}), '(tokens_a, tokens_b, _max_len)\n', (5201, 5231), False, 'from utils import set_seeds, get_device, truncate_tokens_pair\n'), ((6527, 6550), 'models.Transformer', 'models.Transformer', (['cfg'], {}), '(cfg)\n', (6545, 6550), False, 'import models\n'), ((6569, 6596), 'torch.nn.Linear', 'nn.Linear', (['cfg.dim', 'cfg.dim'], {}), '(cfg.dim, cfg.dim)\n', (6578, 6596), True, 'import torch.nn as nn\n'), ((6618, 6627), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6625, 6627), True, 'import torch.nn as nn\n'), ((6648, 6677), 'torch.nn.Dropout', 'nn.Dropout', (['cfg.p_drop_hidden'], {}), '(cfg.p_drop_hidden)\n', (6658, 6677), True, 'import torch.nn as nn\n'), ((6704, 6732), 'torch.nn.Linear', 'nn.Linear', (['cfg.dim', 'n_labels'], {}), '(cfg.dim, n_labels)\n', (6713, 6732), True, 'import torch.nn as nn\n'), ((7286, 7311), 'torch.log', 'torch.log', (['(score + 0.0001)'], {}), '(score + 0.0001)\n', (7295, 7311), False, 'import torch\n'), ((8555, 8582), 'optim.optim4GPU', 'optim.optim4GPU', (['cfg', 'model'], {}), '(cfg, model)\n', (8570, 8582), False, 'import optim\n'), ((8622, 8634), 'utils.get_device', 'get_device', ([], {}), '()\n', (8632, 8634), False, 'from utils import set_seeds, get_device, truncate_tokens_pair\n'), ((3284, 3303), 'numpy.random.choice', 'np.random.choice', (['e'], {}), '(e)\n', (3300, 3303), True, 'import numpy as np\n'), ((3809, 3842), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 
'torch.long'}), '(x, dtype=torch.long)\n', (3821, 3842), False, 'import torch\n'), ((3584, 3609), 'numpy.random.choice', 'np.random.choice', (['self.es'], {}), '(self.es)\n', (3600, 3609), True, 'import numpy as np\n'), ((7024, 7041), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (7033, 7041), False, 'import torch\n'), ((1618, 1642), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1630, 1642), False, 'import os\n'), ((9636, 9654), 'torch.cat', 'torch.cat', (['results'], {}), '(results)\n', (9645, 9654), False, 'import torch\n'), ((976, 986), 'pandas.isna', 'pd.isna', (['s'], {}), '(s)\n', (983, 986), True, 'import pandas as pd\n')]
|
# Training to a set of multiple objects (e.g. ShapeNet or DTU)
# tensorboard logs available in logs/<expname>
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import warnings
import trainlib
from model import make_model, loss
from render import NeRFRenderer
from data import get_split_dataset
import util
import numpy as np
import torch.nn.functional as F
import torch
from model import NeuralRenderer
import torchvision.transforms as transforms
from dotmap import DotMap
from PIL import Image
import pdb
from torchvision.utils import save_image, make_grid
warnings.filterwarnings(action='ignore')
def extra_args(parser):
parser.add_argument(
"--batch_size", "-B", type=int, default=32, help="Object batch size ('SB')"
)
parser.add_argument(
"--nviews",
"-V",
type=str,
default="1",
help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')",
)
parser.add_argument(
"--freeze_enc",
action="store_true",
default=None,
help="Freeze encoder weights and only train MLP",
)
parser.add_argument(
"--recon",
type=float,
default=1.,
help="Loss of reconstruction error",
)
parser.add_argument(
"--swap",
type=float,
default=1.,
help="Weights of swap loss error",
)
parser.add_argument(
"--epoch-period",
type=float,
default=1.,
help="period of using discriminator loss",
)
parser.add_argument(
"--disc_lr",
type=float,
default=1.,
help="Discriminator learning rate ratio",
)
parser.add_argument(
"--cam",
type=float,
default=1.,
help="Loss of camera prediction error",
)
parser.add_argument(
"--no_bbox_step",
type=int,
default=100000,
help="Step to stop using bbox sampling",
)
parser.add_argument(
"--fixed_test",
action="store_true",
default=None,
help="Freeze encoder weights and only train MLP",
)
return parser
args, conf = util.args.parse_args(extra_args, training=True, default_ray_batch_size=128)
device = util.get_cuda(args.gpu_id[0])
train_vis_path = os.path.join(args.visual_path, args.name, 'train')
dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir)
print(
"dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far, dset.lindisp)
)
# make_model: options for the model.
net = make_model(conf["model"]).to(device=device) # PixelNeRFNet
# conf['renderer']
# renderer {
# n_coarse = 64
# n_fine = 32
# # Try using expected depth sample
# n_fine_depth = 16
# # Noise to add to depth sample
# depth_std = 0.01
# # Decay schedule, not used
# sched = []
# # White background color (false : black)
# white_bkgd = True
# }
# Planned to be replaced with ours! # from_config: tells us the model settings
renderer = NeRFRenderer.from_conf(conf["renderer"], lindisp=dset.lindisp,).to(
device=device # NeRFRenderer -> renderer setting
)
# Parallelize # net: pixelNeRF -> wraps the pixelNeRF net
render_par = renderer.bind_parallel(net, args.gpu_id).eval() # -> declares a _RenderWrapper -> its forward function is what runs the NeRFRenderer class!
# it even absorbs self as an attribute!
# renderer.bind_parallel -> _RenderWrapper(net, self, simple_output=simple_output)
nviews = list(map(int, args.nviews.split())) # 1.
class PixelNeRFTrainer(trainlib.Trainer):
def __init__(self):
        super().__init__(net, dset, val_dset, args, conf["train"], device=device) # init handled by the superclass
self.renderer_state_path = "%s/%s/_renderer" % (
self.args.checkpoints_path,
self.args.name,
)
self.lambda_coarse = conf.get_float("loss.lambda_coarse")
self.lambda_fine = conf.get_float("loss.lambda_fine", 1.0)
print(
"lambda coarse {} and fine {}".format(self.lambda_coarse, self.lambda_fine)
)
fine_loss_conf = conf["loss.rgb"]
if "rgb_fine" in conf["loss"]:
print("using fine loss")
fine_loss_conf = conf["loss.rgb_fine"]
self.rgb_fine_crit = loss.get_rgb_loss(fine_loss_conf, False)
if args.resume:
if os.path.exists(self.renderer_state_path):
renderer.load_state_dict(
torch.load(self.renderer_state_path, map_location=device), strict=False
)
        self.z_near = dset.z_near # keep the dataset defaults for now
self.z_far = dset.z_far
self.focal = torch.tensor([2.187719,]) * 10
self.c = torch.tensor([8.000000, 8.000000])
self.use_bbox = args.no_bbox_step > 0
self.recon_loss = torch.nn.MSELoss()
self.cam_loss = torch.nn.MSELoss()
# self.optim.add_param_group({'params': self.neural_renderer.parameters()})
def compute_bce(self, d_out, target):
targets = d_out.new_full(size=d_out.size(), fill_value=target)
loss = F.binary_cross_entropy_with_logits(d_out, targets)
return loss
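    # Illustrative use of compute_bce, matching the loss terms defined further below:
    #   self.compute_bce(d_real, 1)  # discriminator: real images should be scored as real
    #   self.compute_bce(d_fake, 0)  # discriminator: swapped renderings should be scored as fake
    #   self.compute_bce(d_fake, 1)  # generator: tries to make swapped renderings look real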
def post_batch(self, epoch, batch):
renderer.sched_step(args.batch_size)
def extra_save_state(self):
torch.save(renderer.state_dict(), self.renderer_state_path)
def calc_losses_eval(self, data, epoch=None, batch=None, global_step=0):
#######################################################################################
        ###################      Ray construction starts here (read carefully)      ########################
#######################################################################################
# SB: number of batches
if "images" not in data:
return {}
all_images = data["images"].to(device=device) # (SB, NV, 3, H, W)
all_poses = data["poses"].to(device=device)
SB, NV, _, H, W = all_images.shape # SB: number of obj, NV: number of view -> 4, 50, 3, 128, 128
        all_focals = data["focal"] # (SB) # each batch sample has its own focal length
all_c = data.get("c") # (SB)
if self.use_bbox and global_step >= args.no_bbox_step:
self.use_bbox = False
print(">>> Stopped using bbox sampling @ iter", global_step)
all_rgb_gt = []
all_rays = []
curr_nviews = nviews[torch.randint(0, len(nviews), ()).item()]
        if curr_nviews == 1: # builds one source-view index per object in the batch
            image_ord = torch.randint(0, NV, (SB, 1)) # ours -> nviews stays 1 throughout
else: # Pass
image_ord = torch.empty((SB, curr_nviews), dtype=torch.long)
val_num = 4
        ##### Per-object processing
        ##### Unlike the original, RGB pixel sampling is skipped here; only the camera rays from the extrinsics are needed, so pix_inds is unnecessary
        for obj_idx in range(SB): # each index in the batch has its own poses # SB: 4 # could also be done as one batched meshgrid operation
            # the batch size is small, so each sample is handled on its own
            # (each sample effectively acts as its own batch here)
            # memory is tight, so only a few views are sampled per object
            indices = torch.randint(0, NV, (val_num,)) # pick val_num of the NV available views
            # only these few views are used below
images = all_images[obj_idx][indices] # (NV, 3, H, W) # (50, 3, 128, 128)
poses = all_poses[obj_idx][indices] # (NV, 4, 4) # (50, 4, 4) # <- multi-view rotation
focal = self.focal
c = self.c
if curr_nviews > 1: # Pass
# Somewhat inefficient, don't know better way
                image_ord[obj_idx] = torch.from_numpy( # for this sample, pick one of the selected views
                    np.random.choice(indices, curr_nviews, replace=False) # decides which view supplies the source image for each batch entry
                ) # e.g. image_ord[0] = 2 -> sample 0 uses view index 2
images_0to1 = images * 0.5 + 0.5
feat_H, feat_W = 16, 16
            # all views can go through at once, since everything operates on the feature field
            cam_rays = util.gen_rays( # W, H here must match the resolution of the target feature image
                poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c # the poses supply the camera extrinsics
) # (NV, H, W, 8)
            rgb_gt_all = images_0to1 # the images are used exactly as they go into the encoder
rgb_gt_all = (
rgb_gt_all.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
) # (NV * H * W, 3)
            # The original sampled rays here to obtain pix_inds; since we operate on the
            # feature map there is nothing to index with pix_inds, so that step is removed.
rgb_gt = rgb_gt_all # (ray_batch_size, 3)
rays = cam_rays.view(-1, cam_rays.shape[-1]).to(
                device=device # rays are generated at the fixed feature resolution
) # (ray_batch_size, 8)
all_rgb_gt.append(rgb_gt)
all_rays.append(rays)
        all_rgb_gt = torch.stack(all_rgb_gt) # (SB, 5*ray_batch_size, 3) # the selected images per object
        all_rays = torch.stack(all_rays) # (SB, 5*ray_batch_size, 8)
        image_ord = image_ord.to(device) # single-view, so every entry is already index 0
src_images = util.batched_index_select_nd( # NS: number of samples
            all_images, image_ord # picks the randomly chosen source image for every object
        ) # (SB, NS, 3, H, W) <- NV becomes NS, as determined by index_select_nd (each object has 50 views anyway)
        src_poses = util.batched_index_select_nd(all_poses, image_ord) # (SB, NS, 4, 4) <- these source poses are what we want to predict
        # per batch entry, only NS of the poses are kept; these are the true camera poses
all_poses = all_images = None
        # one source image is chosen per batch entry
#######################################################################################
        ###################      Ray construction ends here      ########################
#######################################################################################
# remove
        ############### NeRF encoding step
net.encode(
src_images, # batch, 1, 3, 128, 128
src_poses,
self.focal.to(device=device), # batch
c=self.c.to(device=device) if all_c is not None else None,
)
        # one source image yields val_num feature outputs, for every sample in the batch
        # all_rays: (SB, ray_batch_size, 8) <- all rays from the NV images, stacked over SB
        feat_out = render_par(all_rays, val_num, want_weights=True, training=False) # see the forward function in models.py
        # everything below render_par is to be replaced by the giraffe renderer
        test_out = net.neural_renderer(feat_out)
        # the neural renderer turns the rendered features into RGB here
loss_dict = {}
test_out_pred = test_out.reshape(SB, -1, 3)
rgb_loss = self.recon_loss(test_out_pred, all_rgb_gt)
loss_dict["rc"] = rgb_loss.item() * args.recon
loss = rgb_loss
loss_dict["t"] = loss.item()
return loss_dict
def calc_losses_train_generator(self, data, epoch=None, batch=None, global_step=0):
#######################################################################################
        ###################      Ray construction starts here (read carefully)      ########################
#######################################################################################
if "images" not in data:
return {}
all_images = data["images"].to(device=device) # (SB, NV, 3, H, W)
SB, _, H, W = all_images.shape # SB: number of obj, NV: number of view -> 4, 50, 3, 128, 128
all_poses = data["poses"].to(device=device) # (SB, NV, 4, 4)
        all_focals = data["focal"] # (SB) # each batch sample has its own focal length
        all_c = data.get("c") # (SB)
        # originally this sat inside the per-object loop; here it is done batch-wise
images_0to1 = all_images * 0.5 + 0.5
rgb_gt_all = (
images_0to1.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
) # (B, H, W, 3)
# feat-W, feat-H 받아야 함!
        feat_H = 16 # <- should become configurable via args
        feat_W = 16 # <- should become configurable via args; follows the volume renderer setting, otherwise borrow the giraffe module
        net.encode( # <- the encode step is kept as-is; the forward pass below is the part that is adapted
all_images,
all_poses,
self.focal.to(device=device),
c=self.c.to(device=device)
        ) # the encoder predicts self.rotmat, self.shape and self.appearance
################################################
########################### for generated views
        cam_rays = util.gen_rays( # W, H here must match the resolution of the target feature image
            all_poses, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c # the poses supply the camera extrinsics
) # (NV, H, W, 8)
rays = cam_rays.view(SB, -1, cam_rays.shape[-1]).to(device=device) # (batch * num_ray * num_points, 8)
val_num = 1
        featmap = render_par(rays, val_num, want_weights=True, training=True,) # <- result of outputs.toDict()
rgb_fake = net.neural_renderer(featmap)
################################################
########################### for swapped views
swap_rot = all_poses.flip(0)
        swap_cam_rays = util.gen_rays( # W, H here must match the resolution of the target feature image
            swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c # the poses supply the camera extrinsics
) # (NV, H, W, 8)
swap_rays = swap_cam_rays.view(SB, -1, swap_cam_rays.shape[-1]).to(device=device) # (batch * num_ray * num_points, 8)
val_num = 1
        swap_featmap = render_par(swap_rays, val_num, want_weights=True, training=True,) # <- result of outputs.toDict()
rgb_swap = net.neural_renderer(swap_featmap)
if global_step % self.vis_interval == 0:
            image_grid = make_grid(torch.cat((all_images, rgb_fake, rgb_swap), dim=0), nrow=len(all_images)) # images per row
save_image(image_grid, f'{train_vis_path}/{epoch}_{batch}_out.jpg')
        # TODO: move the neural renderer into the render_par process
        # the discriminator sees the swapped renderings
d_fake = self.discriminator(rgb_swap)
        rgb_loss = self.recon_loss(rgb_fake, all_images) # reconstruction loss against the input images
        # TODO: check whether net exposes the predicted rotmat and whether it matches the ground truth
gen_swap_loss = self.compute_bce(d_fake, 1)
loss_gen = rgb_loss * args.recon + gen_swap_loss * args.swap
return loss_gen, rgb_loss, gen_swap_loss
def calc_losses_train_discriminator(self, data, epoch=None, batch=None, global_step=0):
#######################################################################################
        ###################      Ray construction starts here (read carefully)      ########################
#######################################################################################
if "images" not in data:
return {}
all_images = data["images"].to(device=device) # (SB, NV, 3, H, W)
SB, _, H, W = all_images.shape # SB: number of obj, NV: number of view -> 4, 50, 3, 128, 128
all_poses = data["poses"].to(device=device) # (SB, NV, 4, 4)
        all_focals = data["focal"] # (SB) # each batch sample has its own focal length
        all_c = data.get("c") # (SB)
        # originally this sat inside the per-object loop; here it is done batch-wise
images_0to1 = all_images * 0.5 + 0.5
rgb_gt_all = (
images_0to1.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
) # (B, H, W, 3)
# feat-W, feat-H 받아야 함!
        feat_H = 16 # <- should become configurable via args
        feat_W = 16 # <- should become configurable via args; follows the volume renderer setting, otherwise borrow the giraffe module
        net.encode( # <- the encode step is kept as-is; the forward pass below is the part that is adapted
all_images,
all_poses,
self.focal.to(device=device),
c=self.c.to(device=device)
        ) # the encoder predicts self.rotmat, self.shape and self.appearance
# ################################################
# ########################### for generated views
        # cam_rays = util.gen_rays( # W, H here must match the resolution of the target feature image
        #     all_poses, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c # the poses supply the camera extrinsics
# ) # (NV, H, W, 8)
# rays = cam_rays.view(SB, -1, cam_rays.shape[-1]).to(device=device) # (batch * num_ray * num_points, 8)
# val_num = 1
        # featmap = render_par(rays, val_num, want_weights=True, training=True,) # <- result of outputs.toDict()
# rgb_fake = net.neural_renderer(featmap)
################################################
########################### for swapped views
swap_rot = all_poses.flip(0)
        swap_cam_rays = util.gen_rays( # W, H here must match the resolution of the target feature image
            swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c # the poses supply the camera extrinsics
) # (NV, H, W, 8)
swap_rays = swap_cam_rays.view(SB, -1, swap_cam_rays.shape[-1]).to(device=device) # (batch * num_ray * num_points, 8)
val_num = 1
        swap_featmap = render_par(swap_rays, val_num, want_weights=True, training=True,) # <- result of outputs.toDict()
rgb_swap = net.neural_renderer(swap_featmap)
        # TODO: move the neural renderer into the render_par process
        # the discriminator sees the swapped renderings
d_real = self.discriminator(all_images)
d_fake = self.discriminator(rgb_swap.detach())
disc_swap_loss = self.compute_bce(d_fake, 0)
disc_real_loss = self.compute_bce(d_real, 1)
loss_disc = disc_swap_loss * args.swap + disc_real_loss * args.swap
return loss_disc, disc_swap_loss, disc_real_loss
def train_step(self, data, epoch, batch, global_step):
        # the discriminator is updated first
dict_ = {}
# generator
# dict(net.named_parameters())["neural_renderer.conv_rgb.3.weight"][0,0,0]
# name neural_renderer.conv_rgb.3.weight | param torch.Size([3, 32, 3, 3]) -> [-0.0322, -0.0191, 0.0099]
# discriminator
# name conv_out.weight | param torch.Size([1, 512, 4, 4]) [0, 0, 0] -> [0.0052, 0.0011, 0.0091, 0.0003]
        # ([0.0052, 0.0011, 0.0091, 0.0003], device='cuda:0', <- why does this value never change?
if epoch % args.epoch_period == 0:
disc_loss, disc_swap, disc_real = self.calc_losses_train_discriminator(data, epoch=epoch, batch=batch, global_step=global_step)
self.optim_d.zero_grad()
disc_loss.backward()
self.optim_d.step()
dict_['disc_loss'] = round(disc_loss.item(), 3)
dict_['disc_swap'] = round(disc_swap.item(), 3)
dict_['disc_real'] = round(disc_real.item(), 3)
            # name neural_renderer.conv_rgb.3.weight : tensor([-0.0322, -0.0191, 0.0099], device='cuda:0', grad_fn=<SelectBackward0>) <- unchanged after this step
        # the generator is updated next
gen_loss, gen_rgb, gen_swap = self.calc_losses_train_generator(data, epoch=epoch, batch=batch, global_step=global_step)
self.optim.zero_grad()
gen_loss.backward()
self.optim.step()
        # tensor([-0.0321, -0.0190, 0.0100], device='cuda:0', grad_fn=<SelectBackward0>) <- changes
        # tensor([0.0052, 0.0011, 0.0091, 0.0003], device='cuda:0') <- does not change <- the discriminator is not learning
dict_['gen_loss'] = round(gen_loss.item(), 3)
dict_['gen_rgb'] = round(gen_rgb.item(), 3)
dict_['gen_swap'] = round(gen_swap.item(), 3)
return dict_
def eval_step(self, data, global_step):
renderer.eval()
losses = self.calc_losses_eval(data, global_step=global_step)
renderer.train()
return losses
    # these keep using the existing data loader as-is
def vis_step(self, data, global_step, epoch, batch, idx=None):
if "images" not in data:
return {}
if idx is None:
batch_indices = np.random.randint(0, data["images"].shape[0], 4) # 16 = batch -> (16, 251, 3, 128, 128)
else:
print(idx)
batch_indices = idx
total_psnr = 0
cat_list = []
for batch_idx in batch_indices:
            # one batch index out of the 16 batch objects
images = data["images"][batch_idx].to(device=device) # (NV, 3, H, W)
poses = data["poses"][batch_idx].to(device=device) # (NV, 4, 4)
focal = self.focal # (1)
c = self.c
feat_H, feat_W = 16, 16
NV, _, H, W = images.shape
            cam_rays = util.gen_rays( # built for all of this object's poses
poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c # (251, 16, 16, 8)
) # (NV, H, W, 8)
images_0to1 = images * 0.5 + 0.5 # (NV, 3, H, W) # (251, 3, 128, 128)
val_num = 3
            # curr_nviews could be increased to 4 here
curr_nviews = nviews[torch.randint(0, len(nviews), (1,)).item()] # curr_nviews = 1
            views_src = np.sort(np.random.choice(NV, curr_nviews, replace=False)) # NV: 251 -> views_src: the randomly chosen source view indices
view_dests = np.random.randint(0, NV - curr_nviews, val_num) # ex. 63
for vs in range(curr_nviews):
view_dests += view_dests >= views_src[vs]
views_src = torch.from_numpy(views_src)
# set renderer net to eval mode
            renderer.eval() # <- why is the encoder not put in eval()? # check the renderer's parameters and whether they include the 2D CNN
source_views = (
images_0to1[views_src].repeat(val_num, 1, 1, 1)
.permute(0, 2, 3, 1)
.cpu()
.numpy()
.reshape(-1, H, W, 3) # (3, 128, 128, 3)
)
gt = images_0to1[view_dests].permute(0, 2, 3, 1).cpu().numpy().reshape(val_num, H, W, 3) # (128, 128, 3)
with torch.no_grad(): # cam_rays: (NV, 16, 16, 8)
test_rays_dest = cam_rays[view_dests] # (3, H, W, 8) # -> (val_num, 16, 16, 8)
test_rays_src = cam_rays[views_src].repeat(val_num, 1, 1, 1) # (H, W, 8) # -> (16, 16, 8)
test_images_src = images[views_src].repeat(val_num, 1, 1, 1) # (NS, 3, H, W) # -> (3, 128, 128)
test_images_dest = images[view_dests] # -> # -> (val_num, 3, 128, 128)
net.encode(
test_images_src, # (val_num, 3, 128, 128)
poses[views_src].repeat(val_num, 1, 1), # (val_num, 4, 4)
self.focal.to(device=device),
c=self.c.to(device=device),
)
test_rays_dest = test_rays_dest.reshape(val_num, feat_H * feat_W, -1) # -> (1, 16*16, 8)
test_rays_src = test_rays_src.reshape(val_num, feat_H * feat_W, -1) # -> (1, 16*16, 8)
# test_rays: 1, 16x16, 8
feat_test_dest = render_par(test_rays_dest, val_num = 1, want_weights=True) # -> (1, 16*16, 8)
out_dest = net.neural_renderer(feat_test_dest)
feat_test_src = render_par(test_rays_src, val_num = 1, want_weights=True) # -> (1, 16*16, 8)
out_src = net.neural_renderer(feat_test_src)
rgb_psnr = out_dest.cpu().numpy().reshape(val_num, H, W, 3)
# for vals calculation
psnr = util.psnr(rgb_psnr, gt)
total_psnr += psnr
# source views, gt, test_out
cat = torch.cat((test_images_src[[0]], test_images_dest.reshape(-1, 3, H, W), out_src[[0]].clamp_(0., 1.), out_dest.reshape(-1, 3, H, W).clamp_(0., 1.)), dim=0)
cat_list.append(cat)
# new_cat = torch.stack(cat_list, dim=0).reshape(-1, 3, 128, 128)
new_cat = torch.cat(cat_list, dim=0)
        image_grid = make_grid(new_cat, nrow=len(cat)) # images per row
save_image(image_grid, f'visuals/{args.name}/{epoch}_{batch}_out.jpg')
vals = {"psnr": total_psnr / len(batch_indices)}
print("psnr", total_psnr / len(batch_indices))
# set the renderer network back to train mode
renderer.train()
return None, vals
trainer = PixelNeRFTrainer()
trainer.start()
|
[
"util.get_cuda",
"torch.from_numpy",
"torch.nn.MSELoss",
"data.get_split_dataset",
"torchvision.utils.save_image",
"render.NeRFRenderer.from_conf",
"os.path.exists",
"util.gen_rays",
"torch.randint",
"model.loss.get_rgb_loss",
"util.batched_index_select_nd",
"numpy.random.choice",
"util.psnr",
"model.make_model",
"os.path.dirname",
"torch.empty",
"warnings.filterwarnings",
"torch.cat",
"torch.load",
"torch.stack",
"os.path.join",
"torch.tensor",
"numpy.random.randint",
"torch.no_grad",
"model.loss.item",
"util.args.parse_args",
"torch.nn.functional.binary_cross_entropy_with_logits"
] |
[((630, 670), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""'}), "(action='ignore')\n", (653, 670), False, 'import warnings\n'), ((2238, 2313), 'util.args.parse_args', 'util.args.parse_args', (['extra_args'], {'training': '(True)', 'default_ray_batch_size': '(128)'}), '(extra_args, training=True, default_ray_batch_size=128)\n', (2258, 2313), False, 'import util\n'), ((2323, 2352), 'util.get_cuda', 'util.get_cuda', (['args.gpu_id[0]'], {}), '(args.gpu_id[0])\n', (2336, 2352), False, 'import util\n'), ((2371, 2421), 'os.path.join', 'os.path.join', (['args.visual_path', 'args.name', '"""train"""'], {}), "(args.visual_path, args.name, 'train')\n", (2383, 2421), False, 'import os\n'), ((2443, 2495), 'data.get_split_dataset', 'get_split_dataset', (['args.dataset_format', 'args.datadir'], {}), '(args.dataset_format, args.datadir)\n', (2460, 2495), False, 'from data import get_split_dataset\n'), ((2634, 2659), 'model.make_model', 'make_model', (["conf['model']"], {}), "(conf['model'])\n", (2644, 2659), False, 'from model import make_model, loss\n'), ((3077, 3139), 'render.NeRFRenderer.from_conf', 'NeRFRenderer.from_conf', (["conf['renderer']"], {'lindisp': 'dset.lindisp'}), "(conf['renderer'], lindisp=dset.lindisp)\n", (3099, 3139), False, 'from render import NeRFRenderer\n'), ((4305, 4345), 'model.loss.get_rgb_loss', 'loss.get_rgb_loss', (['fine_loss_conf', '(False)'], {}), '(fine_loss_conf, False)\n', (4322, 4345), False, 'from model import make_model, loss\n'), ((4735, 4759), 'torch.tensor', 'torch.tensor', (['[8.0, 8.0]'], {}), '([8.0, 8.0])\n', (4747, 4759), False, 'import torch\n'), ((4842, 4860), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4858, 4860), False, 'import torch\n'), ((4885, 4903), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4901, 4903), False, 'import torch\n'), ((5117, 5167), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['d_out', 'targets'], {}), '(d_out, targets)\n', (5151, 5167), True, 'import torch.nn.functional as F\n'), ((9023, 9046), 'torch.stack', 'torch.stack', (['all_rgb_gt'], {}), '(all_rgb_gt)\n', (9034, 9046), False, 'import torch\n'), ((9109, 9130), 'torch.stack', 'torch.stack', (['all_rays'], {}), '(all_rays)\n', (9120, 9130), False, 'import torch\n'), ((9273, 9324), 'util.batched_index_select_nd', 'util.batched_index_select_nd', (['all_images', 'image_ord'], {}), '(all_images, image_ord)\n', (9301, 9324), False, 'import util\n'), ((9546, 9596), 'util.batched_index_select_nd', 'util.batched_index_select_nd', (['all_poses', 'image_ord'], {}), '(all_poses, image_ord)\n', (9574, 9596), False, 'import util\n'), ((11058, 11069), 'model.loss.item', 'loss.item', ([], {}), '()\n', (11067, 11069), False, 'from model import make_model, loss\n'), ((12708, 12798), 'util.gen_rays', 'util.gen_rays', (['all_poses', 'feat_W', 'feat_H', 'self.focal', 'self.z_near', 'self.z_far', 'self.c'], {}), '(all_poses, feat_W, feat_H, self.focal, self.z_near, self.\n z_far, self.c)\n', (12721, 12798), False, 'import util\n'), ((13415, 13503), 'util.gen_rays', 'util.gen_rays', (['swap_rot', 'feat_W', 'feat_H', 'self.focal', 'self.z_near', 'self.z_far', 'self.c'], {}), '(swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far,\n self.c)\n', (13428, 13503), False, 'import util\n'), ((17042, 17130), 'util.gen_rays', 'util.gen_rays', (['swap_rot', 'feat_W', 'feat_H', 'self.focal', 'self.z_near', 'self.z_far', 'self.c'], {}), '(swap_rot, feat_W, feat_H, self.focal, self.z_near, 
self.z_far,\n self.c)\n', (17055, 17130), False, 'import util\n'), ((24141, 24167), 'torch.cat', 'torch.cat', (['cat_list'], {'dim': '(0)'}), '(cat_list, dim=0)\n', (24150, 24167), False, 'import torch\n'), ((24252, 24322), 'torchvision.utils.save_image', 'save_image', (['image_grid', 'f"""visuals/{args.name}/{epoch}_{batch}_out.jpg"""'], {}), "(image_grid, f'visuals/{args.name}/{epoch}_{batch}_out.jpg')\n", (24262, 24322), False, 'from torchvision.utils import save_image, make_grid\n'), ((186, 211), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (201, 211), False, 'import os\n'), ((4386, 4426), 'os.path.exists', 'os.path.exists', (['self.renderer_state_path'], {}), '(self.renderer_state_path)\n', (4400, 4426), False, 'import os\n'), ((4687, 4711), 'torch.tensor', 'torch.tensor', (['[2.187719]'], {}), '([2.187719])\n', (4699, 4711), False, 'import torch\n'), ((6573, 6602), 'torch.randint', 'torch.randint', (['(0)', 'NV', '(SB, 1)'], {}), '(0, NV, (SB, 1))\n', (6586, 6602), False, 'import torch\n'), ((6678, 6726), 'torch.empty', 'torch.empty', (['(SB, curr_nviews)'], {'dtype': 'torch.long'}), '((SB, curr_nviews), dtype=torch.long)\n', (6689, 6726), False, 'import torch\n'), ((7166, 7198), 'torch.randint', 'torch.randint', (['(0)', 'NV', '(val_num,)'], {}), '(0, NV, (val_num,))\n', (7179, 7198), False, 'import torch\n'), ((8111, 8184), 'util.gen_rays', 'util.gen_rays', (['poses', 'feat_W', 'feat_H', 'focal', 'self.z_near', 'self.z_far'], {'c': 'c'}), '(poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c)\n', (8124, 8184), False, 'import util\n'), ((14169, 14236), 'torchvision.utils.save_image', 'save_image', (['image_grid', 'f"""{train_vis_path}/{epoch}_{batch}_out.jpg"""'], {}), "(image_grid, f'{train_vis_path}/{epoch}_{batch}_out.jpg')\n", (14179, 14236), False, 'from torchvision.utils import save_image, make_grid\n'), ((20241, 20289), 'numpy.random.randint', 'np.random.randint', (['(0)', "data['images'].shape[0]", '(4)'], {}), "(0, data['images'].shape[0], 4)\n", (20258, 20289), True, 'import numpy as np\n'), ((20867, 20940), 'util.gen_rays', 'util.gen_rays', (['poses', 'feat_W', 'feat_H', 'focal', 'self.z_near', 'self.z_far'], {'c': 'c'}), '(poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c)\n', (20880, 20940), False, 'import util\n'), ((21446, 21493), 'numpy.random.randint', 'np.random.randint', (['(0)', '(NV - curr_nviews)', 'val_num'], {}), '(0, NV - curr_nviews, val_num)\n', (21463, 21493), True, 'import numpy as np\n'), ((21628, 21655), 'torch.from_numpy', 'torch.from_numpy', (['views_src'], {}), '(views_src)\n', (21644, 21655), False, 'import torch\n'), ((14062, 14112), 'torch.cat', 'torch.cat', (['(all_images, rgb_fake, rgb_swap)'], {'dim': '(0)'}), '((all_images, rgb_fake, rgb_swap), dim=0)\n', (14071, 14112), False, 'import torch\n'), ((21325, 21373), 'numpy.random.choice', 'np.random.choice', (['NV', 'curr_nviews'], {'replace': '(False)'}), '(NV, curr_nviews, replace=False)\n', (21341, 21373), True, 'import numpy as np\n'), ((22207, 22222), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22220, 22222), False, 'import torch\n'), ((23728, 23751), 'util.psnr', 'util.psnr', (['rgb_psnr', 'gt'], {}), '(rgb_psnr, gt)\n', (23737, 23751), False, 'import util\n'), ((4490, 4547), 'torch.load', 'torch.load', (['self.renderer_state_path'], {'map_location': 'device'}), '(self.renderer_state_path, map_location=device)\n', (4500, 4547), False, 'import torch\n'), ((7750, 7803), 'numpy.random.choice', 'np.random.choice', (['indices', 
'curr_nviews'], {'replace': '(False)'}), '(indices, curr_nviews, replace=False)\n', (7766, 7803), True, 'import numpy as np\n')]
|
#
# File:
# color4.py
#
# Synopsis:
#    Draws sixteen sample color boxes with RGB labels.
#
# Category:
# Colors
#
# Author:
# <NAME>
#
# Date of initial publication:
# January, 2006
#
# Description:
# This example draws sixteen color boxes using the RGB
# values for named colors. The boxes are labeled with
# the color name and the associated RGB values.
#
# Effects illustrated:
# o Drawing lines and polygons in NDC space.
# o RGB equivalents for some named colors.
# o Converting integer RGB color specifications to floating point.
#
# Output:
# o One plot is produced with sixteen sample color boxes.
#
from __future__ import print_function
import Ngl
import numpy
#
# Define the colors and labels to be used.
#
colors_and_labels = \
[ \
[233, 150, 122], "DarkSalmon", \
[164, 42, 42], "Brown", \
[255, 127, 0], "DarkOrange1", \
[255, 0, 0], "Red", \
[255, 255, 0], "Yellow", \
[ 0, 255, 0], "Green", \
[ 34, 139, 34], "ForestGreen", \
[ 0, 255, 255], "Cyan", \
[ 79, 148, 205], "SteelBlue3", \
[ 0, 0, 255], "Blue", \
[148, 0, 211], "DarkViolet", \
  [255, 0, 255], "Magenta", \
[255, 255, 255], "White", \
[153, 153, 153], "Gray60", \
[102, 102, 102], "Gray40", \
[ 0, 0, 0], "Black" \
]
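#
#  For reference: each integer triple above is divided by 255 in the drawing
#  loop below; for example, DarkSalmon (233, 150, 122) maps to approximately
#  (0.914, 0.588, 0.478).
#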
#
# Open a workstation with a default color table having
# background color "black" and foreground color "white".
#
rlist = Ngl.Resources()
rlist.wkColorMap = "default"
rlist.wkForegroundColor = "White"
rlist.wkBackgroundColor = "Black"
wks_type = "png"
wks = Ngl.open_wks(wks_type,"color4",rlist)
#
# Extract the colors and labels.
#
colors = colors_and_labels[0:len(colors_and_labels):2]
labels = colors_and_labels[1:len(colors_and_labels):2]
#
# Set up arrays and resource lists for drawing the boxes.
# Select "Helvetica-Bold" for all text.
#
x = numpy.zeros(5,'f')
y = numpy.zeros(5,'f')
poly_res = Ngl.Resources()
text_res = Ngl.Resources()
text_res.txFont = "Helvetica-Bold"
#
# Draw the color boxes and titles.
#
for i in range(0,len(colors)):
#
# delx_0 - horizontal spacing between boxes.
# delx_1 - width of a box.
# dely_0 - vertical spacing between boxes.
# dely_1 - height of a box.
#
delx_0, delx_1, dely_0, dely_1 = 0.245, 0.235, 0.22, 0.15
x[0], y[0] = 0.015 + delx_0*(i%4), 0.90 - (i//4)*dely_0
x[1], y[1] = x[0] + delx_1 , y[0]
x[2], y[2] = x[1] , y[1] - dely_1
x[3], y[3] = x[0] , y[2]
x[4], y[4] = x[0] , y[0]
#
# Convert the integer color values obtained from the
# named color chart (as entered above) to floating
# point numbers in the range 0. to 1.
#
r, g, b = colors[i][0]/255., colors[i][1]/255., colors[i][2]/255.
poly_res.gsFillColor = [r,g,b] # Ngl.new_color(wks, r, g, b)
#
# Draw a white outline if the color is black, otherwise draw a colored box.
#
if (labels[i] == "Black"):
Ngl.polyline_ndc(wks, x, y, poly_res)
else:
Ngl.polygon_ndc(wks, x, y, poly_res)
#
# Label the boxes.
#
text_res.txFontHeightF = 0.017
Ngl.text_ndc(wks, labels[i], 0.5*(x[0]+x[1]), y[0] + 0.0125, text_res)
rgb_label = "R={:4.2f} G={:4.2f} B={:4.2f}".format(r, g, b)
text_res.txFontHeightF = 0.015
Ngl.text_ndc(wks, rgb_label, 0.5*(x[0]+x[1]), y[3] - 0.0125, text_res)
#
# Plot top and bottom labels.
#
text_res.txFontHeightF = 0.025
Ngl.text_ndc(wks, "Sixteen Sample Colors", 0.5, 0.96, text_res)
text_res.txFontHeightF = 0.018
Ngl.text_ndc(wks, "The titles below each box indicate Red, Green, and Blue intensity values.", 0.5, 0.035, text_res)
Ngl.frame(wks)
Ngl.end()
|
[
"Ngl.polyline_ndc",
"Ngl.polygon_ndc",
"Ngl.Resources",
"Ngl.end",
"Ngl.open_wks",
"Ngl.text_ndc",
"numpy.zeros",
"Ngl.frame"
] |
[((1636, 1651), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (1649, 1651), False, 'import Ngl\n'), ((1772, 1811), 'Ngl.open_wks', 'Ngl.open_wks', (['wks_type', '"""color4"""', 'rlist'], {}), "(wks_type, 'color4', rlist)\n", (1784, 1811), False, 'import Ngl\n'), ((2069, 2088), 'numpy.zeros', 'numpy.zeros', (['(5)', '"""f"""'], {}), "(5, 'f')\n", (2080, 2088), False, 'import numpy\n'), ((2092, 2111), 'numpy.zeros', 'numpy.zeros', (['(5)', '"""f"""'], {}), "(5, 'f')\n", (2103, 2111), False, 'import numpy\n'), ((2122, 2137), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (2135, 2137), False, 'import Ngl\n'), ((2149, 2164), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (2162, 2164), False, 'import Ngl\n'), ((3569, 3632), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', '"""Sixteen Sample Colors"""', '(0.5)', '(0.96)', 'text_res'], {}), "(wks, 'Sixteen Sample Colors', 0.5, 0.96, text_res)\n", (3581, 3632), False, 'import Ngl\n'), ((3664, 3789), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', '"""The titles below each box indicate Red, Green, and Blue intensity values."""', '(0.5)', '(0.035)', 'text_res'], {}), "(wks,\n 'The titles below each box indicate Red, Green, and Blue intensity values.'\n , 0.5, 0.035, text_res)\n", (3676, 3789), False, 'import Ngl\n'), ((3788, 3802), 'Ngl.frame', 'Ngl.frame', (['wks'], {}), '(wks)\n', (3797, 3802), False, 'import Ngl\n'), ((3803, 3812), 'Ngl.end', 'Ngl.end', ([], {}), '()\n', (3810, 3812), False, 'import Ngl\n'), ((3263, 3337), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', 'labels[i]', '(0.5 * (x[0] + x[1]))', '(y[0] + 0.0125)', 'text_res'], {}), '(wks, labels[i], 0.5 * (x[0] + x[1]), y[0] + 0.0125, text_res)\n', (3275, 3337), False, 'import Ngl\n'), ((3431, 3505), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', 'rgb_label', '(0.5 * (x[0] + x[1]))', '(y[3] - 0.0125)', 'text_res'], {}), '(wks, rgb_label, 0.5 * (x[0] + x[1]), y[3] - 0.0125, text_res)\n', (3443, 3505), False, 'import Ngl\n'), ((3117, 3154), 'Ngl.polyline_ndc', 'Ngl.polyline_ndc', (['wks', 'x', 'y', 'poly_res'], {}), '(wks, x, y, poly_res)\n', (3133, 3154), False, 'import Ngl\n'), ((3167, 3203), 'Ngl.polygon_ndc', 'Ngl.polygon_ndc', (['wks', 'x', 'y', 'poly_res'], {}), '(wks, x, y, poly_res)\n', (3182, 3203), False, 'import Ngl\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import freqent.freqentn as fen
import dynamicstructurefactor.sqw as sqw
from itertools import product
import os
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
savepath = '/media/daniel/storage11/Dropbox/LLM_Danny/frequencySpaceDissipation/tests/freqentn_tests/'
plt.close('all')
def create_sphericalWave(wavelength, period, phi,
v=[0, 0],
n_txy=[100, 100, 100],
max_txy=[1, 1, 1],
r0=[0, 0]):
'''
Inputs
------
wavelength : float
wavelength of spherical wave
period : float
period of spherical wave
phi : float
initial phase of wave
v : array-like
drift velocity of wave, in format [vx, vy]
n_txy : list
list of integers for number of time points, x points, y points
max_txy : list
list of floats for total time and total length in x and y dimensions
r0 : array-like
initial position of spherical wave
'''
n_txy = np.asarray(n_txy)
max_txy = np.asarray(max_txy)
sample_spacing = max_txy / n_txy
tArr = np.linspace(0, max_txy[0], n_txy[0])
xArr = np.linspace(-max_txy[1] / 2, max_txy[1] / 2, n_txy[1])
yArr = np.linspace(-max_txy[2] / 2, max_txy[2] / 2, n_txy[2])
t, x, y = np.meshgrid(tArr, xArr, yArr, indexing='ij')
k = 2 * np.pi / wavelength
w = 2 * np.pi / period
r = np.sqrt((x - r0[0] - (v[0] * t))**2 + (y - r0[1] - (v[1] * t))**2)
wave = np.cos(k * r - w * t + phi)
return wave, t, x, y
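# Illustrative call of create_sphericalWave (the helper is not used below, and
# these parameter values are arbitrary, chosen only to match the grid set up next):
# wave, t, x, y = create_sphericalWave(wavelength=np.pi / 6, period=5, phi=0,
#                                      v=[0.1, 0.0],
#                                      n_txy=[100, 128, 128],
#                                      max_txy=[100, 6 * np.pi, 6 * np.pi])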
# Set up parameters
xmax = 6 * np.pi # total distance in physical units
ymax = 6 * np.pi
tmax = 100
nx = 250 # total number of pixels across
ny = 250
nt = 100
dx = xmax / nx # sampling spacing
dy = ymax / ny
dt = tmax / nt
xArr = np.linspace(-xmax / 2, xmax / 2, nx)
yArr = np.linspace(-ymax / 2, ymax / 2, ny)
tArr = np.linspace(0, tmax, nt)
# Set up grid in real space, remembering to multiply by the
# sampling periods in time and space
tt, xx, yy = np.meshgrid(tArr, xArr, yArr, indexing='ij')
# Spatial and temporal frequency (in radians/length or time)
lambda0 = np.pi / 6
k0 = 2 * np.pi / lambda0
T0 = 5
w0 = 2 * np.pi / T0
lambda1 = np.pi / 6
k1 = 2 * np.pi / lambda1
T1 = 5
w1 = 2 * np.pi / T1
# Center offset
x0 = 0 * dx
y0 = 0 * dy
x1 = 0 * dx
y1 = 0 * dy
# phase difference
phi = 1 * np.pi / 2
# Function and its power spectrum
r0 = ((xx - x0)**2 + (yy - y0)**2)**0.5
r1 = ((xx - x1)**2 + (yy - y1)**2)**0.5
r0t = np.cos(k0 * r0 - w0 * tt)
r1t = np.cos(k1 * r1 - w1 * tt + phi)
data = np.zeros((2, *r0t.shape))
data[0] = r0t
data[1] = r1t
c, freqs = fen.corr_matrix(data, sample_spacing=[dt, dx, dy])
c = fen._nd_gauss_smooth(c, stddev=[1, 2, 2])
idx_array = list(product(np.arange(2), repeat=2))
figReal, axReal = plt.subplots(2, 2, sharex=True, sharey=True)
figImag, axImag = plt.subplots(2, 2, sharex=True, sharey=True)
for idx in idx_array:
aziAvg_real, kr_real = sqw.azimuthal_average_3D(c[..., idx[0], idx[1]].real,
dx=2 * np.pi / xmax)
aziAvg_imag, kr_imag = sqw.azimuthal_average_3D(c[..., idx[0], idx[1]].imag,
dx=2 * np.pi / xmax)
axReal[idx[0], idx[1]].pcolormesh(kr_real, freqs[0], aziAvg_real, vmin=-1, vmax=15)
axImag[idx[0], idx[1]].pcolormesh(kr_imag, freqs[0], aziAvg_imag, vmin=-0.3, vmax=0.3)
axReal[1, 0].set(xlabel=r'$k$ (rad/um)', ylabel=r'$\omega$ (rad/s)')
axReal[0, 0].set(ylabel=r'$\omega$ (rad/s)')
axReal[1, 1].set(xlabel=r'$k$ (rad/um)')
figReal.suptitle(r'$\Re[\langle r_i(\mathbf{{k}}, \omega) r_j^*(\mathbf{{k}}, \omega) \rangle]$')
# figReal.savefig(os.path.join(savepath, 'sphericalWaveCSD_Real_smoothed_sigma1.pdf'), format='pdf')
axImag[1, 0].set(xlabel=r'$k$ (rad/um)', ylabel=r'$\omega$ (rad/s)')
axImag[0, 0].set(ylabel=r'$\omega$ (rad/s)')
axImag[1, 1].set(xlabel=r'$k$ (rad/um)')
figImag.suptitle(r'$\Im[\langle r_i(\mathbf{{k}}, \omega) r_j^*(\mathbf{{k}}, \omega) \rangle]$')
# figImag.savefig(os.path.join(savepath, 'sphericalWaveCSD_Imag_smoothed_sigma1.pdf'), format='pdf')
plt.show()
|
[
"numpy.sqrt",
"numpy.asarray",
"freqent.freqentn._nd_gauss_smooth",
"matplotlib.pyplot.close",
"freqent.freqentn.corr_matrix",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"numpy.meshgrid",
"dynamicstructurefactor.sqw.azimuthal_average_3D",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((326, 342), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (335, 342), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1889), 'numpy.linspace', 'np.linspace', (['(-xmax / 2)', '(xmax / 2)', 'nx'], {}), '(-xmax / 2, xmax / 2, nx)\n', (1864, 1889), True, 'import numpy as np\n'), ((1897, 1933), 'numpy.linspace', 'np.linspace', (['(-ymax / 2)', '(ymax / 2)', 'ny'], {}), '(-ymax / 2, ymax / 2, ny)\n', (1908, 1933), True, 'import numpy as np\n'), ((1941, 1965), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax', 'nt'], {}), '(0, tmax, nt)\n', (1952, 1965), True, 'import numpy as np\n'), ((2077, 2121), 'numpy.meshgrid', 'np.meshgrid', (['tArr', 'xArr', 'yArr'], {'indexing': '"""ij"""'}), "(tArr, xArr, yArr, indexing='ij')\n", (2088, 2121), True, 'import numpy as np\n'), ((2555, 2580), 'numpy.cos', 'np.cos', (['(k0 * r0 - w0 * tt)'], {}), '(k0 * r0 - w0 * tt)\n', (2561, 2580), True, 'import numpy as np\n'), ((2587, 2618), 'numpy.cos', 'np.cos', (['(k1 * r1 - w1 * tt + phi)'], {}), '(k1 * r1 - w1 * tt + phi)\n', (2593, 2618), True, 'import numpy as np\n'), ((2627, 2652), 'numpy.zeros', 'np.zeros', (['(2, *r0t.shape)'], {}), '((2, *r0t.shape))\n', (2635, 2652), True, 'import numpy as np\n'), ((2693, 2743), 'freqent.freqentn.corr_matrix', 'fen.corr_matrix', (['data'], {'sample_spacing': '[dt, dx, dy]'}), '(data, sample_spacing=[dt, dx, dy])\n', (2708, 2743), True, 'import freqent.freqentn as fen\n'), ((2748, 2789), 'freqent.freqentn._nd_gauss_smooth', 'fen._nd_gauss_smooth', (['c'], {'stddev': '[1, 2, 2]'}), '(c, stddev=[1, 2, 2])\n', (2768, 2789), True, 'import freqent.freqentn as fen\n'), ((2860, 2904), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, sharex=True, sharey=True)\n', (2872, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2967), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, sharex=True, sharey=True)\n', (2935, 2967), True, 'import matplotlib.pyplot as plt\n'), ((4189, 4199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4197, 4199), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1102), 'numpy.asarray', 'np.asarray', (['n_txy'], {}), '(n_txy)\n', (1095, 1102), True, 'import numpy as np\n'), ((1117, 1136), 'numpy.asarray', 'np.asarray', (['max_txy'], {}), '(max_txy)\n', (1127, 1136), True, 'import numpy as np\n'), ((1187, 1223), 'numpy.linspace', 'np.linspace', (['(0)', 'max_txy[0]', 'n_txy[0]'], {}), '(0, max_txy[0], n_txy[0])\n', (1198, 1223), True, 'import numpy as np\n'), ((1235, 1289), 'numpy.linspace', 'np.linspace', (['(-max_txy[1] / 2)', '(max_txy[1] / 2)', 'n_txy[1]'], {}), '(-max_txy[1] / 2, max_txy[1] / 2, n_txy[1])\n', (1246, 1289), True, 'import numpy as np\n'), ((1301, 1355), 'numpy.linspace', 'np.linspace', (['(-max_txy[2] / 2)', '(max_txy[2] / 2)', 'n_txy[2]'], {}), '(-max_txy[2] / 2, max_txy[2] / 2, n_txy[2])\n', (1312, 1355), True, 'import numpy as np\n'), ((1371, 1415), 'numpy.meshgrid', 'np.meshgrid', (['tArr', 'xArr', 'yArr'], {'indexing': '"""ij"""'}), "(tArr, xArr, yArr, indexing='ij')\n", (1382, 1415), True, 'import numpy as np\n'), ((1484, 1550), 'numpy.sqrt', 'np.sqrt', (['((x - r0[0] - v[0] * t) ** 2 + (y - r0[1] - v[1] * t) ** 2)'], {}), '((x - r0[0] - v[0] * t) ** 2 + (y - r0[1] - v[1] * t) ** 2)\n', (1491, 1550), True, 'import numpy as np\n'), ((1563, 1590), 'numpy.cos', 'np.cos', (['(k * r - w * t + phi)'], {}), '(k * r - w * t + phi)\n', (1569, 1590), True, 'import 
numpy as np\n'), ((3018, 3092), 'dynamicstructurefactor.sqw.azimuthal_average_3D', 'sqw.azimuthal_average_3D', (['c[..., idx[0], idx[1]].real'], {'dx': '(2 * np.pi / xmax)'}), '(c[..., idx[0], idx[1]].real, dx=2 * np.pi / xmax)\n', (3042, 3092), True, 'import dynamicstructurefactor.sqw as sqw\n'), ((3172, 3246), 'dynamicstructurefactor.sqw.azimuthal_average_3D', 'sqw.azimuthal_average_3D', (['c[..., idx[0], idx[1]].imag'], {'dx': '(2 * np.pi / xmax)'}), '(c[..., idx[0], idx[1]].imag, dx=2 * np.pi / xmax)\n', (3196, 3246), True, 'import dynamicstructurefactor.sqw as sqw\n'), ((2816, 2828), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (2825, 2828), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
""""
Bandidos estocásticos: introducción, algoritmos y experimentos
TFG Informática
Sección 8.4.4
Figuras 26, 27 y 28
Autor: <NAME>
"""
import math
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def computemTeor(n,Delta):
if Delta == 0:
return 0
else :
return max(1,math.ceil(4/(Delta*Delta)*math.log(n*Delta*Delta/4)))
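# Worked instance of the formula above: for n = 1000 and Delta = 0.3,
# m = ceil(4/Delta**2 * log(n*Delta**2/4)) = ceil(44.44 * 3.11) = 139,
# i.e. each arm is explored 139 times before committing to the empirical best.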
def computemOpt(n,Delta):
expectedRegret = np.empty(n//2+1)
X = stats.norm(0,1)
expectedRegret[0] = 0.5*n*Delta
for m in range(1,n//2+1):
expectedRegret[m] = m*Delta+(n-m)*Delta*X.cdf(-m*Delta/math.sqrt(2*m))
mOpt = min(range(n//2+1),key = lambda i: expectedRegret[i])
return mOpt
def samplePseudoRegretEF(n,k,m,arms,gaps):
rwds = k*[0]
for i in range(m):
for j in range(k):
rwds[j] += arms[j].rvs()
maximum = max(rwds)
bestarm = random.choice([i for i in range(k) if rwds[i] == maximum])
return m*sum(gaps)+(n-m*k)*gaps[bestarm]
def samplePseudoRegretUCB(n,k,delta,arms,gaps): # change to pseudo-regret
    T = k*[0] # number of times each arm has been chosen
    meanReward = k*[0] # sample mean of the rewards obtained from each arm
    UCB = k*[np.inf] # upper confidence bound for each arm
regret = 0
for i in range(n):
chosenArm = max(range(k),key=lambda i: UCB[i])
rwd = arms[chosenArm].rvs()
meanReward[chosenArm] = T[chosenArm]/(T[chosenArm]+1)*meanReward[chosenArm] \
+ rwd/(T[chosenArm]+1)
T[chosenArm] +=1
UCB[chosenArm] = meanReward[chosenArm] + math.sqrt((2*math.log(1/delta))/T[chosenArm])
regret += gaps[chosenArm]
return regret
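# Note: with the choice delta = 1/n**2 used in the experiments below, the index
# computed above becomes
#   UCB_i = meanReward_i + sqrt(2*log(n**2)/T_i) = meanReward_i + sqrt(4*log(n)/T_i),
# the standard horizon-dependent UCB index.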
def plotDeltaRegret():
n = 1000
sampleNum = 600
arms = 2*[None]
arms[0] = stats.norm(0,1)
gaps = 2*[0]
nDeltas = 20
Deltas = np.linspace(0,1,nDeltas)
regretEF25 = np.empty(nDeltas)
regretEF50 = np.empty(nDeltas)
regretEF75 = np.empty(nDeltas)
regretEF100 = np.empty(nDeltas)
regretEFmTeor = np.empty(nDeltas)
regretEFOptimo = np.empty(nDeltas)
regretUCB = np.empty(nDeltas)
mTeor = nDeltas*[0]
mOpt = nDeltas*[0]
for i in range(nDeltas):
Delta = Deltas[i]
arms[1]= stats.norm(-Delta,1)
gaps[1] = Delta
regretEF25[i] = 0
for k in range(sampleNum):
regretEF25[i] += samplePseudoRegretEF(n,2,25,arms,gaps)
regretEF25[i] /= sampleNum
regretEF50[i] = 0
for k in range(sampleNum):
regretEF50[i] += samplePseudoRegretEF(n,2,50,arms,gaps)
regretEF50[i] /= sampleNum
regretEF75[i] = 0
for k in range(sampleNum):
regretEF75[i] += samplePseudoRegretEF(n,2,75,arms,gaps)
regretEF75[i] /= sampleNum
regretEF100[i] = 0
for k in range(sampleNum):
regretEF100[i] += samplePseudoRegretEF(n,2,100,arms,gaps)
regretEF100[i] /= sampleNum
regretEFmTeor[i]= 0
mTeor[i] = computemTeor(n,Delta)
for k in range(sampleNum):
regretEFmTeor[i] += samplePseudoRegretEF(n,2,mTeor[i],arms,gaps)
regretEFmTeor[i] /= sampleNum
regretEFOptimo[i] = 0
mOpt[i] = computemOpt(n,Delta)
for k in range(sampleNum):
regretEFOptimo[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
regretEFOptimo[i] /= sampleNum
regretUCB[i] = 0
for k in range(sampleNum):
regretUCB[i] += samplePseudoRegretUCB(n,2,1/(n*n),arms,gaps)
regretUCB[i] /= sampleNum
fig = plt.figure()
plt.plot(Deltas,regretEF25, color='tab:blue',label= 'EP (m = 25)')
plt.plot(Deltas,regretEF50, color='tab:green',label = 'EP (m = 50)')
plt.plot(Deltas,regretEF75, color='tab:olive',label = 'EP (m = 75)')
plt.plot(Deltas,regretEF100, color='tab:red', label = 'EP (m = 100)')
plt.plot(Deltas,regretEFmTeor, color='tab:purple',label = 'EP (m = m_Teor)')
plt.plot(Deltas,regretEFOptimo, color='tab:gray', label = 'EP (m = m_Opt)')
plt.plot(Deltas,regretUCB, color='black', label = 'UCB')
plt.xlabel('∆')
plt.ylabel('Remordimiento esperado')
plt.legend(loc='upper left',ncol = 2)
fig.savefig('UCBDeltaRegret.pdf',format='pdf')
plt.show()
fig = plt.figure()
plt.plot(Deltas, mTeor, color='tab:purple', label = 'm_Teor')
plt.plot(Deltas,mOpt, color = 'tab:gray', label = 'm_Opt')
plt.xlabel('∆')
plt.ylabel('m')
plt.legend(loc='upper left')
fig.savefig('ms.pdf',format='pdf')
plt.show()
def plotDeltaRegret2():
n = 1000
sampleNum = 600
arms = 2*[None]
arms[0] = stats.norm(0,1)
gaps = 2*[0]
nDeltas = 20
Deltas = np.linspace(0,1,nDeltas)
regretEF25 = np.empty(nDeltas)
regretEF100 = np.empty(nDeltas)
regretEFOptimo = np.empty(nDeltas)
regretUCB0 = np.empty(nDeltas)
regretUCB2 = np.empty(nDeltas)
regretUCB4 = np.empty(nDeltas)
regretUCB6 = np.empty(nDeltas)
regretUCB8 = np.empty(nDeltas)
mOpt = nDeltas*[0]
for i in range(nDeltas):
Delta = Deltas[i]
arms[1]= stats.norm(-Delta,1)
gaps[1] = Delta
regretEF25[i] = 0
for k in range(sampleNum):
regretEF25[i] += samplePseudoRegretEF(n,2,25,arms,gaps)
regretEF25[i] /= sampleNum
regretEF100[i] = 0
for k in range(sampleNum):
regretEF100[i] += samplePseudoRegretEF(n,2,100,arms,gaps)
regretEF100[i] /= sampleNum
regretEFOptimo[i] = 0
mOpt[i] = computemOpt(n,Delta)
for k in range(sampleNum):
regretEFOptimo[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
regretEFOptimo[i] /= sampleNum
regretUCB0[i] = 0
for k in range(sampleNum):
regretUCB0[i] += samplePseudoRegretUCB(n,2,1,arms,gaps)
regretUCB0[i] /= sampleNum
regretUCB2[i] = 0
for k in range(sampleNum):
regretUCB2[i] += samplePseudoRegretUCB(n,2,1/100,arms,gaps)
regretUCB2[i] /= sampleNum
regretUCB4[i] = 0
for k in range(sampleNum):
regretUCB4[i] += samplePseudoRegretUCB(n,2,1/10000,arms,gaps)
regretUCB4[i] /= sampleNum
regretUCB6[i] = 0
for k in range(sampleNum):
regretUCB6[i] += samplePseudoRegretUCB(n,2,1/(n*n),arms,gaps)
regretUCB6[i] /= sampleNum
regretUCB8[i] = 0
for k in range(sampleNum):
regretUCB8[i] += samplePseudoRegretUCB(n,2,1/(10**8),arms,gaps)
regretUCB8[i] /= sampleNum
fig = plt.figure()
plt.plot(Deltas,regretEF25, color='tab:blue',label= 'EP (m = 25)')
plt.plot(Deltas,regretEF100, color='tab:red', label = 'EP (m = 100)')
plt.plot(Deltas,regretEFOptimo, color='tab:gray', label = 'EP (m = m_Opt)')
plt.plot(Deltas,regretUCB0, color='salmon', label = 'UCB (δ = 1)')
plt.plot(Deltas,regretUCB2, color='gold', label = 'UCB (δ = 1/100)')
plt.plot(Deltas,regretUCB4, color='mediumspringgreen', label = 'UCB (δ = 1/10⁴)')
plt.plot(Deltas,regretUCB6, color='black', label = 'UCB (δ = 1/10⁶)')
plt.plot(Deltas,regretUCB8, color='indigo', label = 'UCB (δ = 1/10⁸)')
plt.xlabel('∆')
plt.ylabel('Remordimiento esperado')
plt.legend(loc='upper left',ncol = 2)
fig.savefig('UCBDeltaRegret2.pdf',format='pdf')
plt.show()
plotDeltaRegret()
#plotDeltaRegret2()
|
[
"matplotlib.pyplot.ylabel",
"scipy.stats.norm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"math.log",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.empty",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((486, 506), 'numpy.empty', 'np.empty', (['(n // 2 + 1)'], {}), '(n // 2 + 1)\n', (494, 506), True, 'import numpy as np\n'), ((512, 528), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (522, 528), True, 'import scipy.stats as stats\n'), ((1994, 2010), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (2004, 2010), True, 'import scipy.stats as stats\n'), ((2066, 2092), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nDeltas'], {}), '(0, 1, nDeltas)\n', (2077, 2092), True, 'import numpy as np\n'), ((2109, 2126), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2117, 2126), True, 'import numpy as np\n'), ((2145, 2162), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2153, 2162), True, 'import numpy as np\n'), ((2181, 2198), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2189, 2198), True, 'import numpy as np\n'), ((2218, 2235), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2226, 2235), True, 'import numpy as np\n'), ((2257, 2274), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2265, 2274), True, 'import numpy as np\n'), ((2297, 2314), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2305, 2314), True, 'import numpy as np\n'), ((2332, 2349), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (2340, 2349), True, 'import numpy as np\n'), ((3996, 4008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4006, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4081), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF25'], {'color': '"""tab:blue"""', 'label': '"""EP (m = 25)"""'}), "(Deltas, regretEF25, color='tab:blue', label='EP (m = 25)')\n", (4022, 4081), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4154), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF50'], {'color': '"""tab:green"""', 'label': '"""EP (m = 50)"""'}), "(Deltas, regretEF50, color='tab:green', label='EP (m = 50)')\n", (4094, 4154), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4228), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF75'], {'color': '"""tab:olive"""', 'label': '"""EP (m = 75)"""'}), "(Deltas, regretEF75, color='tab:olive', label='EP (m = 75)')\n", (4168, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4302), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF100'], {'color': '"""tab:red"""', 'label': '"""EP (m = 100)"""'}), "(Deltas, regretEF100, color='tab:red', label='EP (m = 100)')\n", (4242, 4302), True, 'import matplotlib.pyplot as plt\n'), ((4309, 4385), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEFmTeor'], {'color': '"""tab:purple"""', 'label': '"""EP (m = m_Teor)"""'}), "(Deltas, regretEFmTeor, color='tab:purple', label='EP (m = m_Teor)')\n", (4317, 4385), True, 'import matplotlib.pyplot as plt\n'), ((4391, 4465), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEFOptimo'], {'color': '"""tab:gray"""', 'label': '"""EP (m = m_Opt)"""'}), "(Deltas, regretEFOptimo, color='tab:gray', label='EP (m = m_Opt)')\n", (4399, 4465), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4527), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB'], {'color': '"""black"""', 'label': '"""UCB"""'}), "(Deltas, regretUCB, color='black', label='UCB')\n", (4480, 4527), True, 'import matplotlib.pyplot as plt\n'), ((4534, 4549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""∆"""'], {}), "('∆')\n", (4544, 4549), True, 'import matplotlib.pyplot as plt\n'), ((4555, 4591), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Remordimiento esperado"""'], {}), "('Remordimiento esperado')\n", (4565, 4591), True, 'import matplotlib.pyplot as plt\n'), ((4597, 4633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'ncol': '(2)'}), "(loc='upper left', ncol=2)\n", (4607, 4633), True, 'import matplotlib.pyplot as plt\n'), ((4692, 4702), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4700, 4702), True, 'import matplotlib.pyplot as plt\n'), ((4720, 4732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4730, 4732), True, 'import matplotlib.pyplot as plt\n'), ((4738, 4797), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'mTeor'], {'color': '"""tab:purple"""', 'label': '"""m_Teor"""'}), "(Deltas, mTeor, color='tab:purple', label='m_Teor')\n", (4746, 4797), True, 'import matplotlib.pyplot as plt\n'), ((4805, 4860), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'mOpt'], {'color': '"""tab:gray"""', 'label': '"""m_Opt"""'}), "(Deltas, mOpt, color='tab:gray', label='m_Opt')\n", (4813, 4860), True, 'import matplotlib.pyplot as plt\n'), ((4869, 4884), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""∆"""'], {}), "('∆')\n", (4879, 4884), True, 'import matplotlib.pyplot as plt\n'), ((4890, 4905), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""m"""'], {}), "('m')\n", (4900, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4911, 4939), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (4921, 4939), True, 'import matplotlib.pyplot as plt\n'), ((4985, 4995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4993, 4995), True, 'import matplotlib.pyplot as plt\n'), ((5108, 5124), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (5118, 5124), True, 'import scipy.stats as stats\n'), ((5180, 5206), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nDeltas'], {}), '(0, 1, nDeltas)\n', (5191, 5206), True, 'import numpy as np\n'), ((5223, 5240), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5231, 5240), True, 'import numpy as np\n'), ((5260, 5277), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5268, 5277), True, 'import numpy as np\n'), ((5300, 5317), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5308, 5317), True, 'import numpy as np\n'), ((5342, 5359), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5350, 5359), True, 'import numpy as np\n'), ((5378, 5395), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5386, 5395), True, 'import numpy as np\n'), ((5414, 5431), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5422, 5431), True, 'import numpy as np\n'), ((5450, 5467), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5458, 5467), True, 'import numpy as np\n'), ((5486, 5503), 'numpy.empty', 'np.empty', (['nDeltas'], {}), '(nDeltas)\n', (5494, 5503), True, 'import numpy as np\n'), ((7294, 7306), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7304, 7306), True, 'import matplotlib.pyplot as plt\n'), ((7312, 7379), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF25'], {'color': '"""tab:blue"""', 'label': '"""EP (m = 25)"""'}), "(Deltas, regretEF25, color='tab:blue', label='EP (m = 25)')\n", (7320, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7452), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEF100'], {'color': '"""tab:red"""', 'label': '"""EP (m = 100)"""'}), "(Deltas, regretEF100, color='tab:red', label='EP (m = 
100)')\n", (7392, 7452), True, 'import matplotlib.pyplot as plt\n'), ((7459, 7533), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretEFOptimo'], {'color': '"""tab:gray"""', 'label': '"""EP (m = m_Opt)"""'}), "(Deltas, regretEFOptimo, color='tab:gray', label='EP (m = m_Opt)')\n", (7467, 7533), True, 'import matplotlib.pyplot as plt\n'), ((7540, 7605), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB0'], {'color': '"""salmon"""', 'label': '"""UCB (δ = 1)"""'}), "(Deltas, regretUCB0, color='salmon', label='UCB (δ = 1)')\n", (7548, 7605), True, 'import matplotlib.pyplot as plt\n'), ((7612, 7679), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB2'], {'color': '"""gold"""', 'label': '"""UCB (δ = 1/100)"""'}), "(Deltas, regretUCB2, color='gold', label='UCB (δ = 1/100)')\n", (7620, 7679), True, 'import matplotlib.pyplot as plt\n'), ((7686, 7771), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB4'], {'color': '"""mediumspringgreen"""', 'label': '"""UCB (δ = 1/10⁴)"""'}), "(Deltas, regretUCB4, color='mediumspringgreen', label='UCB (δ = 1/10⁴)'\n )\n", (7694, 7771), True, 'import matplotlib.pyplot as plt\n'), ((7773, 7841), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB6'], {'color': '"""black"""', 'label': '"""UCB (δ = 1/10⁶)"""'}), "(Deltas, regretUCB6, color='black', label='UCB (δ = 1/10⁶)')\n", (7781, 7841), True, 'import matplotlib.pyplot as plt\n'), ((7848, 7917), 'matplotlib.pyplot.plot', 'plt.plot', (['Deltas', 'regretUCB8'], {'color': '"""indigo"""', 'label': '"""UCB (δ = 1/10⁸)"""'}), "(Deltas, regretUCB8, color='indigo', label='UCB (δ = 1/10⁸)')\n", (7856, 7917), True, 'import matplotlib.pyplot as plt\n'), ((7924, 7939), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""∆"""'], {}), "('∆')\n", (7934, 7939), True, 'import matplotlib.pyplot as plt\n'), ((7945, 7981), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Remordimiento esperado"""'], {}), "('Remordimiento esperado')\n", (7955, 7981), True, 'import matplotlib.pyplot as plt\n'), ((7987, 8023), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'ncol': '(2)'}), "(loc='upper left', ncol=2)\n", (7997, 8023), True, 'import matplotlib.pyplot as plt\n'), ((8083, 8093), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8091, 8093), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2517), 'scipy.stats.norm', 'stats.norm', (['(-Delta)', '(1)'], {}), '(-Delta, 1)\n', (2506, 2517), True, 'import scipy.stats as stats\n'), ((5621, 5642), 'scipy.stats.norm', 'stats.norm', (['(-Delta)', '(1)'], {}), '(-Delta, 1)\n', (5631, 5642), True, 'import scipy.stats as stats\n'), ((407, 438), 'math.log', 'math.log', (['(n * Delta * Delta / 4)'], {}), '(n * Delta * Delta / 4)\n', (415, 438), False, 'import math\n'), ((666, 682), 'math.sqrt', 'math.sqrt', (['(2 * m)'], {}), '(2 * m)\n', (675, 682), False, 'import math\n'), ((1764, 1783), 'math.log', 'math.log', (['(1 / delta)'], {}), '(1 / delta)\n', (1772, 1783), False, 'import math\n')]
|
"""
LCCS Level 3 Classification
| Class name | Code | Numeric code |
|----------------------------------|-----|-----|
| Cultivated Terrestrial Vegetated | A11 | 111 |
| Natural Terrestrial Vegetated | A12 | 112 |
| Cultivated Aquatic Vegetated | A23 | 123 |
| Natural Aquatic Vegetated | A24 | 124 |
| Artificial Surface | B15 | 215 |
| Natural Surface | B16 | 216 |
| Artificial Water | B27 | 227 |
| Natural Water | B28 | 228 |
"""
import logging
import numpy
#: Required input variables
LCCS_L3_REQUIRED_VARIABLES = ["vegetat_veg_cat",
"aquatic_wat_cat",
"cultman_agr_cat",
"artific_urb_cat",
"artwatr_wat_cat"]
#: LCCS Level 3 Colour Scheme
LCCS_L3_COLOUR_SCHEME = {111 : (192, 255, 0, 255),
112 : (0, 128, 0, 255),
123 : (0, 255, 245, 255),
124 : (0, 192, 122, 255),
215 : (255, 0, 255, 255),
216 : (255, 192, 160, 255),
227 : (0, 155, 255, 255),
228 : (0, 0, 255, 255)}
def colour_lccs_level3(classification_array):
""""
Colour classification array using LCCS Level 3 standard
colour scheme. Returns four arays:
* red
* green
* blue
* alpha
"""
red = numpy.zeros_like(classification_array, dtype=numpy.uint8)
green = numpy.zeros_like(red)
blue = numpy.zeros_like(red)
alpha = numpy.zeros_like(red)
for class_id, colours in LCCS_L3_COLOUR_SCHEME.items():
subset = (classification_array == class_id)
red[subset], green[subset], blue[subset], alpha[subset] = colours
return red, green, blue, alpha
def _check_required_variables(classification_data):
"""
    Check required variables are in the xarray
"""
# Check all input variable exist - warning if they don't
for var in LCCS_L3_REQUIRED_VARIABLES:
if var not in classification_data.data_vars:
logging.warning("Required variable {0} not found".format(var))
def classify_lccs_level3(classification_data):
"""
Apply Level 3 LCCS Classification
Requires xarray containing the following variables
* vegetat_veg_cat - Binary mask 1=vegetation, 0=non-vegetation
* aquatic_wat_cat - Binary mask 1=aquatic, 0=non-aquatic
* cultman_agr_cat - Binary mask 1=cultivated/managed, 0=natural
* artific_urb_cat - Binary mask 1=urban, 0=non-urban
* artwatr_wat_cat - Binary mask 1=artificial water, 0=natural water
Returns three arrays:
* level1
* level2
* level3
"""
# Check required input and output variables exist.
_check_required_variables(classification_data)
# Set up arrays for outputs
try:
vegetation = classification_data["vegetat_veg_cat"].values == 1
except KeyError:
raise Exception("No data available for first level of classification "
"(vegetation / non-vegetation), can not proceed")
level3 = numpy.zeros(vegetation.shape, dtype=numpy.uint8)
# Level 1
# Assign level 1 class of primarily vegetated (A,100) or primarily non-vegetated (B,200)
level1 = numpy.where(vegetation, numpy.uint8(100), numpy.uint8(200))
# Level 2
# Assign level 2 class of terrestrial (10) or aquatic (20)
try:
aquatic = classification_data["aquatic_wat_cat"].values == 1
level2 = numpy.where(aquatic, numpy.uint8(20), numpy.uint8(10))
except KeyError:
raise Exception("No data available for second level of classification "
"(aquatic / non-aquatic), can not proceed")
# Level 3
# Assign level 3 (Supercategory) class based on cultivated or artificial
try:
cultivated = classification_data["cultman_agr_cat"].values == 1
# Cultivated Terrestrial Vegetation (A11)
level3[vegetation & ~aquatic & cultivated] = 111
# Cultivated Aquatic Vegetation (A23)
level3[vegetation & aquatic & cultivated] = 123
# Natural Terrestrial Vegetation (A12)
level3[vegetation & ~aquatic & ~cultivated] = 112
# Natural Aquatic Vegetation (A24)
level3[vegetation & aquatic & ~cultivated] = 124
except KeyError:
logging.warning("No cultivated vegetation layer available. Skipping "
"assigning level 3 catergories for vegetation")
try:
urban = classification_data["artific_urb_cat"].values == 1
# Artificial Surface (B15)
level3[~vegetation & ~aquatic & urban] = 215
# Natural Surface (B16)
level3[~vegetation & ~aquatic & ~urban] = 216
except KeyError:
logging.warning("No urban layer available. Skipping assigning "
"level 3 for terrestrial non-vegetation")
try:
artificial_water = classification_data["artwatr_wat_cat"].values == 1
# Artificial Water (B27)
level3[~vegetation & aquatic & artificial_water] = 227
# Natural Water (B28)
level3[~vegetation & aquatic & ~artificial_water] = 228
except KeyError:
logging.warning("No artificial water layer available. Skipping assigning "
"level 3 for aquatic non-vegetation (water)")
return level1, level2, level3
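# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates the expected call pattern, assuming xarray is installed; the tiny
# all-zero masks below are made up purely for demonstration.
if __name__ == "__main__":
    import xarray
    shape = (2, 2)
    demo = xarray.Dataset(
        {var: (("y", "x"), numpy.zeros(shape, dtype=numpy.uint8))
         for var in LCCS_L3_REQUIRED_VARIABLES})
    demo["vegetat_veg_cat"].values[0, :] = 1  # mark the top row as vegetated
    level1, level2, level3 = classify_lccs_level3(demo)
    red, green, blue, alpha = colour_lccs_level3(level3)
    print(level3)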
|
[
"numpy.uint8",
"numpy.zeros",
"logging.warning",
"numpy.zeros_like"
] |
[((1476, 1533), 'numpy.zeros_like', 'numpy.zeros_like', (['classification_array'], {'dtype': 'numpy.uint8'}), '(classification_array, dtype=numpy.uint8)\n', (1492, 1533), False, 'import numpy\n'), ((1546, 1567), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1562, 1567), False, 'import numpy\n'), ((1579, 1600), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1595, 1600), False, 'import numpy\n'), ((1613, 1634), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1629, 1634), False, 'import numpy\n'), ((3163, 3211), 'numpy.zeros', 'numpy.zeros', (['vegetation.shape'], {'dtype': 'numpy.uint8'}), '(vegetation.shape, dtype=numpy.uint8)\n', (3174, 3211), False, 'import numpy\n'), ((3357, 3373), 'numpy.uint8', 'numpy.uint8', (['(100)'], {}), '(100)\n', (3368, 3373), False, 'import numpy\n'), ((3375, 3391), 'numpy.uint8', 'numpy.uint8', (['(200)'], {}), '(200)\n', (3386, 3391), False, 'import numpy\n'), ((3587, 3602), 'numpy.uint8', 'numpy.uint8', (['(20)'], {}), '(20)\n', (3598, 3602), False, 'import numpy\n'), ((3604, 3619), 'numpy.uint8', 'numpy.uint8', (['(10)'], {}), '(10)\n', (3615, 3619), False, 'import numpy\n'), ((4412, 4536), 'logging.warning', 'logging.warning', (['"""No cultivated vegetation layer available. Skipping assigning level 3 catergories for vegetation"""'], {}), "(\n 'No cultivated vegetation layer available. Skipping assigning level 3 catergories for vegetation'\n )\n", (4427, 4536), False, 'import logging\n'), ((4837, 4949), 'logging.warning', 'logging.warning', (['"""No urban layer available. Skipping assigning level 3 for terrestrial non-vegetation"""'], {}), "(\n 'No urban layer available. Skipping assigning level 3 for terrestrial non-vegetation'\n )\n", (4852, 4949), False, 'import logging\n'), ((5277, 5404), 'logging.warning', 'logging.warning', (['"""No artificial water layer available. Skipping assigning level 3 for aquatic non-vegetation (water)"""'], {}), "(\n 'No artificial water layer available. Skipping assigning level 3 for aquatic non-vegetation (water)'\n )\n", (5292, 5404), False, 'import logging\n')]
|
from manimlib.imports import *
from srcs.utils import run
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from sklearn import svm # sklearn = scikit-learn
from sklearn.datasets import make_moons
def mplfig_to_npimage(fig):
""" Converts a matplotlib figure to a RGB frame after updating the canvas"""
# only the Agg backend now supports the tostring_rgb function
canvas = FigureCanvasAgg(fig)
canvas.draw() # update/draw the elements
# get the width and the height to resize the matrix
l,b,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
# exports the canvas to a string buffer and then to a numpy nd.array
buf = canvas.tostring_rgb()
image= np.frombuffer(buf, dtype=np.uint8)
plt.close()
return image.reshape(h, w, 3)
def make_frame_mpl(t):
fig_mpl, ax = plt.subplots(1, figsize=(5, 3), facecolor='white')
    xx = np.linspace(-2, 2, 200) # x vector
    zz = lambda d: np.sinc(xx ** 2) + np.sin(xx + d) # the (time-varying) z vector
ax.set_title("Elevation in y=0")
ax.set_ylim(-1.5, 2.5)
line, = ax.plot(xx, zz(0), lw=3)
    line.set_ydata( zz(np.pi*t))  # update the curve
    return mplfig_to_npimage(fig_mpl) # RGB image of the figure
def make_frame(t):
    X, Y = make_moons(50, noise=0.1, random_state=2) # semi-random data
fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))
fig.subplots_adjust(left=0, right=1, bottom=0)
xx, yy = np.meshgrid(np.linspace(-2, 3, 500), np.linspace(-1, 2, 500))
ax.clear()
ax.axis('off')
ax.set_title("SVC classification", fontsize=16)
classifier = svm.SVC(gamma=2, C=1)
    # gradually changing weights make the data points appear one by one
weights = np.minimum(1, np.maximum(0, t**2+10-np.arange(50)))
classifier.fit(X, Y, sample_weight=weights)
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=plt.cm.bone, alpha=0.8,
vmin=-2.5, vmax=2.5, levels=np.linspace(-2,2,20))
ax.scatter(X[:,0], X[:,1], c=Y, s=50*weights, cmap=plt.cm.bone)
return mplfig_to_npimage(fig)
class manim_with_animation(Scene):
def construct(self):
during_times = ValueTracker(0)
self.img = ImageMobject(make_frame_mpl(0))
self.left_img = ImageMobject(make_frame(0))
self.img.add_updater(lambda d: d.set_array(make_frame_mpl(during_times.get_value())))
self.img.shift(2*RIGHT)
self.left_img.add_updater(lambda d: d.set_array(make_frame(during_times.get_value())))
self.left_img.shift(2*LEFT)
self.play(ShowCreation(self.img), ShowCreation(self.left_img), run_times=2)
for i in range(6):
self.play(during_times.increment_value, 0.5*i, rate_func=linear,run_times=0.5*i)
#
# for i in range(6)[::-1]:
# self.play(during_times.increment_value, 0.1*i, rate_func=linear,run_times=0.1*i)
self.wait()
if __name__=="__main__":
run([manim_with_animation])
|
[
"numpy.arange",
"numpy.sinc",
"matplotlib.pyplot.close",
"sklearn.datasets.make_moons",
"numpy.linspace",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.sin",
"numpy.frombuffer",
"matplotlib.pyplot.subplots",
"srcs.utils.run",
"sklearn.svm.SVC"
] |
[((452, 472), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (467, 472), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((758, 792), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (771, 792), True, 'import numpy as np\n'), ((797, 808), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (806, 808), True, 'import matplotlib.pyplot as plt\n'), ((886, 936), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(5, 3)', 'facecolor': '"""white"""'}), "(1, figsize=(5, 3), facecolor='white')\n", (898, 936), True, 'import matplotlib.pyplot as plt\n'), ((946, 969), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(200)'], {}), '(-2, 2, 200)\n', (957, 969), True, 'import numpy as np\n'), ((1266, 1307), 'sklearn.datasets.make_moons', 'make_moons', (['(50)'], {'noise': '(0.1)', 'random_state': '(2)'}), '(50, noise=0.1, random_state=2)\n', (1276, 1307), False, 'from sklearn.datasets import make_moons\n'), ((1332, 1384), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(4, 4)', 'facecolor': '(1, 1, 1)'}), '(1, figsize=(4, 4), facecolor=(1, 1, 1))\n', (1344, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1636), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(2)', 'C': '(1)'}), '(gamma=2, C=1)\n', (1622, 1636), False, 'from sklearn import svm\n'), ((2962, 2989), 'srcs.utils.run', 'run', (['[manim_with_animation]'], {}), '([manim_with_animation])\n', (2965, 2989), False, 'from srcs.utils import run\n'), ((1461, 1484), 'numpy.linspace', 'np.linspace', (['(-2)', '(3)', '(500)'], {}), '(-2, 3, 500)\n', (1472, 1484), True, 'import numpy as np\n'), ((1486, 1509), 'numpy.linspace', 'np.linspace', (['(-1)', '(2)', '(500)'], {}), '(-1, 2, 500)\n', (1497, 1509), True, 'import numpy as np\n'), ((996, 1012), 'numpy.sinc', 'np.sinc', (['(xx ** 2)'], {}), '(xx ** 2)\n', (1003, 1012), True, 'import numpy as np\n'), ((1015, 1029), 'numpy.sin', 'np.sin', (['(xx + d)'], {}), '(xx + d)\n', (1021, 1029), True, 'import numpy as np\n'), ((1973, 1995), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(20)'], {}), '(-2, 2, 20)\n', (1984, 1995), True, 'import numpy as np\n'), ((1713, 1726), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (1722, 1726), True, 'import numpy as np\n')]
|
import copy
import datetime
import os
import random
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from inference.inference_utils import get_trange, get_tqdm
def init_random_seed(value=0):
random.seed(value)
np.random.seed(value)
torch.manual_seed(value)
torch.cuda.manual_seed(value)
torch.backends.cudnn.deterministic = True
def copy_data_to_device(data, device):
if torch.is_tensor(data):
return data.to(device)
elif isinstance(data, (list, tuple)):
return [copy_data_to_device(elem, device) for elem in data]
elif isinstance(data, dict):
return {name: copy_data_to_device(value, device) for name, value in data.items()}
raise ValueError('Unexpected data type {}'.format(type(data)))
def sum_dicts(current, new):
if current is None:
return new
result = dict(current)
for name, new_value in new.items():
result[name] = result.get(name, 0) + new_value
return result
def norm_dict(current, n):
if n == 0:
return current
return {name: value / (n + 1e-6) for name, value in current.items()}
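# Example of how the two helpers above are used together when averaging
# per-batch metric dicts over an epoch (values here are illustrative):
#   sum_dicts({'loss': 1.0}, {'loss': 0.5})  -> {'loss': 1.5}
#   norm_dict({'loss': 1.5}, 3)              -> {'loss': ~0.5} (with the 1e-6 smoothing term)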
def train_eval_loop(model, train_dataset, val_dataset, criterion,
lr=1e-4, epoch_n=10, batch_size=32,
device='cuda', early_stopping_patience=10, l2_reg_alpha=0,
max_batches_per_epoch_train=10000,
max_batches_per_epoch_val=1000,
data_loader_ctor=DataLoader,
optimizer_ctor=None,
lr_scheduler_ctor=None,
shuffle_train=True,
dataloader_workers_n=0,
clip_grad=10,
save_vis_images_path=None,
save_vis_images_freq=100,
save_models_path=None,
save_models_freq=10):
device = torch.device(device)
model.to(device)
if optimizer_ctor is None:
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_reg_alpha)
else:
optimizer = optimizer_ctor(model.parameters(), lr=lr)
if lr_scheduler_ctor is not None:
lr_scheduler = lr_scheduler_ctor(optimizer)
else:
lr_scheduler = None
train_dataloader = data_loader_ctor(train_dataset, batch_size=batch_size, shuffle=shuffle_train,
num_workers=dataloader_workers_n)
val_dataloader = data_loader_ctor(val_dataset, batch_size=batch_size, shuffle=False,
num_workers=dataloader_workers_n)
best_val_loss = float('inf')
best_val_metrics = None
best_epoch_i = 0
best_model = copy.deepcopy(model)
for epoch_i in get_trange(epoch_n, desc='Epochs'):
try:
epoch_start = datetime.datetime.now()
print('Epoch {}'.format(epoch_i))
model.train()
mean_train_loss = 0
mean_train_metrics = None
train_batches_n = 0
for batch_i, (batch_x, batch_y) in get_tqdm(enumerate(train_dataloader), desc=f'Epoch {epoch_i}',
total=max_batches_per_epoch_train, leave=True):
if batch_i > max_batches_per_epoch_train:
break
batch_x = copy_data_to_device(batch_x, device)
batch_y = copy_data_to_device(batch_y, device)
pred = model(batch_x)
loss, metrics, vis_img = criterion(pred, batch_y)
model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
optimizer.step()
mean_train_loss += float(loss)
mean_train_metrics = sum_dicts(mean_train_metrics, metrics)
if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
save_image(vis_img,
os.path.join(save_vis_images_path,
'epoch{:04d}_iter{:06d}_train.jpg'.format(epoch_i, batch_i)),
nrow=batch_y['images'].shape[0],
normalize=True,
range=(-1, 1))
train_batches_n += 1
mean_train_loss /= train_batches_n
mean_train_metrics = norm_dict(mean_train_metrics, train_batches_n)
print('Epoch: {} iterations, {:0.2f} sec'.format(train_batches_n,
(datetime.datetime.now() - epoch_start).total_seconds()))
print('Mean train loss', mean_train_loss, mean_train_metrics)
if save_models_path is not None and epoch_i % save_models_freq == 0:
torch.save(model, os.path.join(save_models_path, 'model_epoch_{:04d}.pth'.format(epoch_i)))
model.eval()
mean_val_loss = 0
mean_val_metrics = None
val_batches_n = 0
with torch.no_grad():
for batch_i, (batch_x, batch_y) in enumerate(val_dataloader):
if batch_i > max_batches_per_epoch_val:
break
batch_x = copy_data_to_device(batch_x, device)
batch_y = copy_data_to_device(batch_y, device)
pred = model(batch_x)
loss, metrics, vis_img = criterion(pred, batch_y)
mean_val_loss += float(loss)
mean_val_metrics = sum_dicts(mean_val_metrics, metrics)
if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
save_image(vis_img,
os.path.join(save_vis_images_path,
'epoch{:04d}_iter{:06d}_val.jpg'.format(epoch_i, batch_i)),
nrow=batch_y['images'].shape[0],
normalize=True,
range=(-1, 1))
val_batches_n += 1
mean_val_loss /= val_batches_n + 1e-6
mean_val_metrics = norm_dict(mean_val_metrics, val_batches_n)
print('Mean validation loss', mean_val_loss, mean_val_metrics)
if mean_val_loss < best_val_loss:
best_epoch_i = epoch_i
best_val_loss = mean_val_loss
best_val_metrics = mean_val_metrics
best_model = copy.deepcopy(model)
print('New best model!')
if save_models_path is not None:
torch.save(best_model, os.path.join(save_models_path, 'best_model.pth'))
elif epoch_i - best_epoch_i > early_stopping_patience:
print('Model has not improved during the last {} epochs, stopping training early'.format(
early_stopping_patience))
break
if lr_scheduler is not None:
lr_scheduler.step(mean_val_loss)
print()
except KeyboardInterrupt:
print('Interrupted by user')
break
except Exception as ex:
print('Fatal error during training: {}\n{}'.format(ex, traceback.format_exc()))
break
return best_val_loss, best_val_metrics, best_model
def predict_with_model(model, dataset, device='cuda', batch_size=32, num_workers=0, return_labels=False):
"""
:param model: torch.nn.Module - trained model
    :param dataset: torch.utils.data.Dataset - data to apply the model to
    :param device: cuda/cpu
    :param batch_size:
    :return: numpy.array of dimensionality len(dataset) x *
"""
results_by_batch = []
device = torch.device(device)
model.to(device)
model.eval()
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
labels = []
with torch.no_grad():
import tqdm
for batch_x, batch_y in tqdm.tqdm_notebook(dataloader, total=len(dataset)/batch_size):
batch_x = copy_data_to_device(batch_x, device)
if return_labels:
labels.append(batch_y.numpy())
batch_pred = model(batch_x)
results_by_batch.append(batch_pred.detach().cpu().numpy())
if return_labels:
return np.concatenate(results_by_batch, 0), np.concatenate(labels, 0)
else:
return np.concatenate(results_by_batch, 0)
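# --- Hypothetical usage sketch (illustration only, not part of the original file) ---
# Trains a toy linear regressor on random tensors; the criterion must return the
# (loss, metrics_dict, vis_img) triple that train_eval_loop expects.
if __name__ == '__main__':
    from torch.utils.data import TensorDataset
    xs, ys = torch.randn(64, 8), torch.randn(64, 1)
    toy_dataset = TensorDataset(xs, ys)
    toy_model = torch.nn.Linear(8, 1)
    def toy_criterion(pred, target):
        loss = torch.nn.functional.mse_loss(pred, target)
        return loss, {'mse': float(loss)}, None
    best_loss, best_metrics, best_model = train_eval_loop(
        toy_model, toy_dataset, toy_dataset, toy_criterion,
        lr=1e-3, epoch_n=2, batch_size=16, device='cpu')
    print(best_loss, best_metrics)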
|
[
"torch.manual_seed",
"traceback.format_exc",
"torch.utils.data.DataLoader",
"os.path.join",
"random.seed",
"torch.is_tensor",
"datetime.datetime.now",
"numpy.random.seed",
"numpy.concatenate",
"copy.deepcopy",
"torch.no_grad",
"torch.cuda.manual_seed",
"inference.inference_utils.get_trange",
"torch.device"
] |
[((280, 298), 'random.seed', 'random.seed', (['value'], {}), '(value)\n', (291, 298), False, 'import random\n'), ((303, 324), 'numpy.random.seed', 'np.random.seed', (['value'], {}), '(value)\n', (317, 324), True, 'import numpy as np\n'), ((329, 353), 'torch.manual_seed', 'torch.manual_seed', (['value'], {}), '(value)\n', (346, 353), False, 'import torch\n'), ((358, 387), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['value'], {}), '(value)\n', (380, 387), False, 'import torch\n'), ((482, 503), 'torch.is_tensor', 'torch.is_tensor', (['data'], {}), '(data)\n', (497, 503), False, 'import torch\n'), ((1943, 1963), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1955, 1963), False, 'import torch\n'), ((2746, 2766), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2759, 2766), False, 'import copy\n'), ((2787, 2821), 'inference.inference_utils.get_trange', 'get_trange', (['epoch_n'], {'desc': '"""Epochs"""'}), "(epoch_n, desc='Epochs')\n", (2797, 2821), False, 'from inference.inference_utils import get_trange, get_tqdm\n'), ((7909, 7929), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (7921, 7929), False, 'import torch\n'), ((7986, 8073), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=\n num_workers)\n', (7996, 8073), False, 'from torch.utils.data import DataLoader\n'), ((8094, 8109), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8107, 8109), False, 'import torch\n'), ((8601, 8636), 'numpy.concatenate', 'np.concatenate', (['results_by_batch', '(0)'], {}), '(results_by_batch, 0)\n', (8615, 8636), True, 'import numpy as np\n'), ((2862, 2885), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2883, 2885), False, 'import datetime\n'), ((8513, 8548), 'numpy.concatenate', 'np.concatenate', (['results_by_batch', '(0)'], {}), '(results_by_batch, 0)\n', (8527, 8548), True, 'import numpy as np\n'), ((8550, 8575), 'numpy.concatenate', 'np.concatenate', (['labels', '(0)'], {}), '(labels, 0)\n', (8564, 8575), True, 'import numpy as np\n'), ((5146, 5161), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5159, 5161), False, 'import torch\n'), ((6669, 6689), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (6682, 6689), False, 'import copy\n'), ((6825, 6873), 'os.path.join', 'os.path.join', (['save_models_path', '"""best_model.pth"""'], {}), "(save_models_path, 'best_model.pth')\n", (6837, 6873), False, 'import os\n'), ((7421, 7443), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7441, 7443), False, 'import traceback\n'), ((4685, 4708), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4706, 4708), False, 'import datetime\n')]
|
#!/usr/bin/env python3
import numpy as np
import h5py
import matplotlib.pyplot as plt
# import plotly.graph_objects as go
#========= Configuration ===========
DIR ="../data"
file_name = "particle"#"rhoNeutral" #"P"
h5 = h5py.File('../data/'+file_name+'.hdf5','r')
Lx = h5.attrs["Lx"]
Ly = h5.attrs["Ly"]
Lz = h5.attrs["Lz"]
N = h5.attrs["N"]
dp = h5.attrs["dp"]
Nt = h5.attrs["Nt"]
data_num = np.arange(start=0, stop=Nt, step=1, dtype=int)
time = data_num*dp
energy = h5["/energy"]
energy = 3*(np.array(energy[:-1]))/N
fig,ax = plt.subplots(figsize=(6, 6))
plt.plot(time[1:],energy[1:])
ax.set_xlabel("$timestep$")
ax.set_ylabel("$Energy$")
plt.show()
|
[
"matplotlib.pyplot.plot",
"h5py.File",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((225, 273), 'h5py.File', 'h5py.File', (["('../data/' + file_name + '.hdf5')", '"""r"""'], {}), "('../data/' + file_name + '.hdf5', 'r')\n", (234, 273), False, 'import h5py\n'), ((407, 453), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'Nt', 'step': '(1)', 'dtype': 'int'}), '(start=0, stop=Nt, step=1, dtype=int)\n', (416, 453), True, 'import numpy as np\n'), ((544, 572), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (556, 572), True, 'import matplotlib.pyplot as plt\n'), ((573, 603), 'matplotlib.pyplot.plot', 'plt.plot', (['time[1:]', 'energy[1:]'], {}), '(time[1:], energy[1:])\n', (581, 603), True, 'import matplotlib.pyplot as plt\n'), ((661, 671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (669, 671), True, 'import matplotlib.pyplot as plt\n'), ((509, 530), 'numpy.array', 'np.array', (['energy[:-1]'], {}), '(energy[:-1])\n', (517, 530), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Iris classification example, practice on using the high-level API
Algorithms: Neural Network
Reference: https://www.tensorflow.org/get_started/tflearn
Date: Jun 14, 2017
@author: <NAME>
@Library: tensorflow - high-level API with tf.contrib.learn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# import urllib # only python 2
import urllib.request # python 3
import tensorflow as tf
import numpy as np
IRIS_TRAINING = "./iris_dataset/iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "./iris_dataset/iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
def main():
# If the training and test sets aren't stored locally, download them.
if not os.path.exists(IRIS_TRAINING):
raw = urllib.request.urlopen(IRIS_TRAINING_URL).read()
with open(IRIS_TRAINING, 'wb') as f:
f.write(raw)
if not os.path.exists(IRIS_TEST):
raw = urllib.request.urlopen(IRIS_TEST_URL).read()
with open(IRIS_TEST, 'wb') as f:
f.write(raw)
# Load datasets
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename = IRIS_TRAINING,
target_dtype = np.int,
features_dtype = np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename = IRIS_TEST,
target_dtype = np.int,
features_dtype = np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension = 4)]
# Build 3 layer DNN with 10, 20, 10 units respectively
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units = [10, 20, 10],
n_classes = 3,
model_dir = "./tmp/iris_models")
def get_train_inputs():
x = tf.constant(training_set.data)
y = tf.constant(training_set.target)
return x, y
# Fit model
classifier.fit(input_fn = get_train_inputs, steps = 2000)
# # Equivalent to follows:
# classifier.fit(x = training_set.data, y = training_set.target, steps = 1000)
# classifier.fit(x = training_set.data, y = training_set.target, steps = 1000)
def get_test_inputs():
x = tf.constant(test_set.data)
y = tf.constant(test_set.target)
return x, y
# Evaluate accuracy
accuracy_score = classifier.evaluate(input_fn = get_test_inputs, steps = 1)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
# Predict for new example
def new_samples():
return np.array(
[[6.4, 3.2, 4.5, 1.5],
[5.8, 3.1, 5.0, 1.7]], dtype = np.float32)
predictions = list(classifier.predict(input_fn = new_samples))
print("New samples, Class predictions: {}\n".format(predictions))
if __name__ == "__main__":
main()
|
[
"os.path.exists",
"tensorflow.contrib.learn.DNNClassifier",
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.contrib.learn.datasets.base.load_csv_with_header",
"numpy.array",
"tensorflow.constant"
] |
[((1176, 1303), 'tensorflow.contrib.learn.datasets.base.load_csv_with_header', 'tf.contrib.learn.datasets.base.load_csv_with_header', ([], {'filename': 'IRIS_TRAINING', 'target_dtype': 'np.int', 'features_dtype': 'np.float32'}), '(filename=IRIS_TRAINING,\n target_dtype=np.int, features_dtype=np.float32)\n', (1227, 1303), True, 'import tensorflow as tf\n'), ((1332, 1455), 'tensorflow.contrib.learn.datasets.base.load_csv_with_header', 'tf.contrib.learn.datasets.base.load_csv_with_header', ([], {'filename': 'IRIS_TEST', 'target_dtype': 'np.int', 'features_dtype': 'np.float32'}), '(filename=IRIS_TEST,\n target_dtype=np.int, features_dtype=np.float32)\n', (1383, 1455), True, 'import tensorflow as tf\n'), ((1674, 1812), 'tensorflow.contrib.learn.DNNClassifier', 'tf.contrib.learn.DNNClassifier', ([], {'feature_columns': 'feature_columns', 'hidden_units': '[10, 20, 10]', 'n_classes': '(3)', 'model_dir': '"""./tmp/iris_models"""'}), "(feature_columns=feature_columns,\n hidden_units=[10, 20, 10], n_classes=3, model_dir='./tmp/iris_models')\n", (1704, 1812), True, 'import tensorflow as tf\n'), ((841, 870), 'os.path.exists', 'os.path.exists', (['IRIS_TRAINING'], {}), '(IRIS_TRAINING)\n', (855, 870), False, 'import os\n'), ((1001, 1026), 'os.path.exists', 'os.path.exists', (['IRIS_TEST'], {}), '(IRIS_TEST)\n', (1015, 1026), False, 'import os\n'), ((1544, 1597), 'tensorflow.contrib.layers.real_valued_column', 'tf.contrib.layers.real_valued_column', (['""""""'], {'dimension': '(4)'}), "('', dimension=4)\n", (1580, 1597), True, 'import tensorflow as tf\n'), ((1988, 2018), 'tensorflow.constant', 'tf.constant', (['training_set.data'], {}), '(training_set.data)\n', (1999, 2018), True, 'import tensorflow as tf\n'), ((2027, 2059), 'tensorflow.constant', 'tf.constant', (['training_set.target'], {}), '(training_set.target)\n', (2038, 2059), True, 'import tensorflow as tf\n'), ((2378, 2404), 'tensorflow.constant', 'tf.constant', (['test_set.data'], {}), '(test_set.data)\n', (2389, 2404), True, 'import tensorflow as tf\n'), ((2413, 2441), 'tensorflow.constant', 'tf.constant', (['test_set.target'], {}), '(test_set.target)\n', (2424, 2441), True, 'import tensorflow as tf\n'), ((2692, 2764), 'numpy.array', 'np.array', (['[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]]'], {'dtype': 'np.float32'}), '([[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)\n', (2700, 2764), True, 'import numpy as np\n')]
|
# import scipy.signal
from gym.spaces import Box, Discrete
import numpy as np
import torch
from torch import nn
import IPython
# from torch.nn import Parameter
import torch.nn.functional as F
from torch.distributions import Independent, OneHotCategorical, Categorical
from torch.distributions.normal import Normal
# # from torch.distributions.categorical import Categorical
def mlp(sizes, activation, output_activation=nn.Identity):
layers = []
for j in range(len(sizes) - 1):
act = activation if j < len(sizes) - 2 else output_activation
layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]
return nn.Sequential(*layers)
class Actor(nn.Module):
def _distribution(self, obs):
raise NotImplementedError
def _log_prob_from_distribution(self, pi, act):
raise NotImplementedError
def forward(self, obs, act=None):
# Produce action distributions for given observations, and optionally
# compute the log likelihood of given actions under those distributions.
pi = self._distribution(obs)
logp_a = None
if act is not None:
logp_a = self._log_prob_from_distribution(pi, act)
return pi, logp_a
class MLPCategoricalActor(Actor):
def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
super().__init__()
self.logits_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)
def _distribution(self, obs):
logits = self.logits_net(obs)
return Categorical(logits=logits)
def _log_prob_from_distribution(self, pi, act):
return pi.log_prob(act)
class MLPGaussianActor(Actor):
def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
super().__init__()
log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
self.log_std = nn.Parameter(torch.as_tensor(log_std))
self.mu_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)
def _distribution(self, obs):
mu = self.mu_net(obs)
std = torch.exp(self.log_std)
return Normal(mu, std)
def _log_prob_from_distribution(self, pi, act):
return pi.log_prob(act).sum(axis=-1) # Last axis sum needed for Torch Normal distribution
class MLPCritic(nn.Module):
def __init__(self, obs_dim, hidden_sizes, activation):
super().__init__()
self.v_net = mlp([obs_dim] + list(hidden_sizes) + [1], activation)
def forward(self, obs):
return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.
class MLPActorCritic(nn.Module):
def __init__(self, observation_space, action_space,
hidden_sizes=(64, 64), activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
# policy builder depends on action space
if isinstance(action_space, Box):
self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation)
elif isinstance(action_space, Discrete):
self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation)
# build value function critics
self.v = MLPCritic(obs_dim, hidden_sizes, activation)
self.vc = MLPCritic(obs_dim, hidden_sizes, activation)
def step(self, obs):
with torch.no_grad():
pi = self.pi._distribution(obs)
# print("pi dist! ", pi)
a = pi.sample()
logp_a = self.pi._log_prob_from_distribution(pi, a)
v = self.v(obs)
vc = self.vc(obs)
return a.numpy(), v.numpy(), vc.numpy(), logp_a.numpy()
def act(self, obs):
return self.step(obs)[0]
class MEMOActor(nn.Module):
def __init__(self, state_dim, hidden_size, action_dim, activation=nn.Tanh):
super(MEMOActor, self).__init__()
log_std = -0.5 * np.ones(action_dim, dtype=np.float32)
self.log_std = nn.Parameter(torch.as_tensor(log_std))
self.mu_net = mlp([state_dim] + hidden_size + [action_dim], activation)
def _distribution(self, obs):
mu = self.mu_net(obs)
std = torch.exp(self.log_std)
return Normal(mu, std)
def forward(self, obs):
mu = self.mu_net(obs)
std = torch.exp(self.log_std)
return Normal(mu, std)
# the critic is error here would be: reward + gamma*V(s_t+1)-V(s_t)
# http://incompleteideas.net/book/first/ebook/node66.html
class MEMO(nn.Module):
"""Multiple Experts, Multiple Objectives;
"""
def __init__(self, obs_dim, out_dim, encoder_hidden, decoder_hidden, actor_hidden, latent_modes):
        '''
        :param obs_dim:
        :param out_dim:
        :param encoder_hidden:
        :param decoder_hidden:
        :param actor_hidden:
        :param latent_modes:
        '''
super(MEMO, self).__init__()
self.found_contexts = []
self.latent_modes = latent_modes
self.num_embeddings = self.latent_modes
self.embedding_dim = obs_dim
self.vq_encoder = VQEncoder(obs_dim, self.embedding_dim) # original
self.prenet = nn.Linear(self.embedding_dim, self.embedding_dim)
self.vector_quantizer = VectorQuantizer(self.num_embeddings, self.embedding_dim)
self.postnet = nn.Linear(self.embedding_dim, encoder_hidden[-1])
self.vq_decoder = VQDecoder(encoder_hidden[-1], decoder_hidden, obs_dim)
self.action_decoder = MEMOActor(state_dim=obs_dim + self.latent_modes, hidden_size=actor_hidden, action_dim=out_dim)
# self.action_gaussian = GaussianActor(obs_dim=obs_dim + self.latent_modes, act_dim=out_dim,
# hidden_sizes=[128]*4, activation=nn.LeakyReLU)
self.action_vq_dist = None
def compute_quantized_loss(self, state, delta_state, actions):
'''
:param state:
:param delta_state:
:param actions:
:return:
'''
delta_state_enc = self.vq_encoder(delta_state) # In: [B, OBS_DIM]; Out: # [B, OBS_DIM]
encoder_output = self.prenet(delta_state_enc) # In: [B, OBS_DIM]; Out: # [B, OBS_DIM]
quantized, categorical_proposal, categorical_proposal_prob = self.vector_quantizer(encoder_output)
# update the set of known contexts
self.found_contexts = set([t.data.item() for t in categorical_proposal])
        # Straight-through estimator: gradients flow back to the encoder output unchanged
st_quantized = encoder_output + (quantized - encoder_output).detach()
post_quantized = self.postnet(st_quantized)
# print("Post Quantized: ", post_quantized)
reconstruction = self.vq_decoder(post_quantized)
# print("Reconstruction: ", reconstruction)
categorical_proposal_reshape = torch.reshape(categorical_proposal, (-1, 1))
categorical_proposal_onehot = F.one_hot(categorical_proposal_reshape, self.latent_modes).squeeze().float()
# total_max = torch.tensor(0.)
# print("distances max: ", max(total_max, torch.max(categorical_proposal_prob)))
# concat_state_vq = torch.cat([state, categorical_proposal_onehot], dim=-1)
concat_state_vq = torch.cat([state, categorical_proposal_prob], dim=-1)
action_vq_dist = self.action_decoder(concat_state_vq)
return encoder_output, quantized, reconstruction, categorical_proposal, action_vq_dist
# return encoder_output, quantized, reconstruction, categorical_proposal, action_mse
def act(self, state, context_label):
concat_state_vq = torch.cat([state, torch.reshape(torch.as_tensor(context_label), (-1,))], dim=-1)
action_vq_dist = self.action_decoder(concat_state_vq)
action = action_vq_dist.sample()
return action
def forward(self, X, Delta_X, A, kl_beta=1., recon_gamma=1.):
"""
Given input tensor, forward propagate, compute the loss, and backward propagate.
Represents the lifecycle of a single iteration
        :param X: Raw state tensor
        :param Delta_X: State difference tensor
        :param A: Action tensor
        :param kl_beta: KL divergence tempering factor
        :param recon_gamma: State weights
        Note that neither the reconstruction loss nor the context loss can be negative.
"""
encoder_output, quantized, reconstruction, vq_latent_labels, action_vq_dist =\
self.compute_quantized_loss(X, Delta_X, A)
vq_criterion = VQCriterion(beta=kl_beta)
vq_total_loss, recons_loss, vq_loss, commitment_loss = vq_criterion(Delta_X, encoder_output, quantized, reconstruction)
# original formula
loss_pi = (torch.tensor(1.)/(torch.exp(action_vq_dist.log_prob(A)) + torch.tensor(0.1))).sum(axis=-1)
loss = loss_pi * vq_total_loss
return loss, loss_pi, X, vq_latent_labels, vq_total_loss
class VQEncoder(nn.Module):
def __init__(self, in_dim, out_dim):
super(VQEncoder, self).__init__()
self.net = nn.Sequential(
nn.Linear(in_dim, out_dim // 2),
nn.Tanh(),
nn.Linear(out_dim // 2, out_dim),
nn.Tanh()
)
# self.net = nn.Sequential(
# nn.Linear(in_dim, out_dim),
# nn.Tanh(),
# nn.Linear(out_dim, out_dim),
# nn.Tanh(),
# nn.Linear(out_dim, out_dim),
# nn.Tanh()
# )
def forward(self, input):
return self.net(input)
class Clamper(nn.Module):
def __init__(self, min=None, max=None):
super().__init__()
self.min = min
self.max = max
def forward(self, input):
return torch.clamp(input, self.min, self.max)
class VQDecoder(nn.Module):
def __init__(self, obs_dim, hidden_dim, out_dim, activation=nn.Tanh):
super().__init__()
self.initial_act = nn.Tanh()
self.net = mlp([obs_dim] + hidden_dim + [out_dim], activation)
def forward(self, input):
return self.net(self.initial_act(input))
class VectorQuantizer(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
self.num_embeddings = num_embeddings # E_N
self.embedding_dim = embedding_dim # E_D
self.embeddings = nn.Embedding(num_embeddings, embedding_dim)
self.scale = 1. / self.num_embeddings # decimal
print("Quantizer Scale: ", self.scale)
nn.init.uniform_(self.embeddings.weight, -self.scale, self.scale)
def proposal_distribution(self, input):
input_shape = input.shape # [B, OBS_DIM]
flatten_input = input.flatten(end_dim=-2).contiguous() # [B, OBS_DIM]
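        # The next three lines expand the squared Euclidean distance to every
        # embedding vector e: ||x - e||^2 = ||x||^2 + ||e||^2 - 2*x.e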
distances = (flatten_input ** 2).sum(dim=1, keepdim=True) # [B, 1]
distances = distances + (self.embeddings.weight ** 2).sum(dim=1) # [B, E_N]
distances -= 2 * flatten_input @ self.embeddings.weight.t() # [B, E_N]
categorical_posterior = torch.argmin(distances, dim=-1) # [B] # original
categorical_posterior_prob = distances
# categorical_posterior_prob = torch.clamp(distances, 0, 10) # 10 is a hyperparameter
# categorical_posterior_prob = torch.clamp(distances, 0, 5) # 5 is a hyperparameter
return categorical_posterior, categorical_posterior_prob
def forward(self, input):
proposal, proposal_prob = self.proposal_distribution(input) # [B]
quantized = self.embeddings(proposal).contiguous() # [B, OBS_DIM]
return quantized, proposal, proposal_prob
class VQCriterion(nn.Module):
"""
vq_loss: \| \text{sg}[I(x, e)] * e - \text{sg}[z_e(x)] \|_2^2
"""
def __init__(self, beta):
super().__init__()
self.beta = beta
def forward(self, input, encoder_output, quantized, reconstruction):
flatten_quantized = quantized.flatten(end_dim=-2)
flatten_encoder_output = encoder_output.flatten(end_dim=-2)
reconstruction_loss = F.mse_loss(input, reconstruction)
vq_loss = F.mse_loss(flatten_encoder_output.detach(), flatten_quantized)
commitment_loss = F.mse_loss(flatten_encoder_output, flatten_quantized.detach())
total_loss = reconstruction_loss + vq_loss + self.beta * commitment_loss # Original. TODO: review this loss.
return total_loss, reconstruction_loss, vq_loss, commitment_loss
class VDB(nn.Module):
def __init__(self, num_inputs, args):
super(VDB, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.z_size)
self.fc3 = nn.Linear(args.hidden_size, args.z_size)
self.fc4 = nn.Linear(args.z_size, args.hidden_size)
self.fc5 = nn.Linear(args.hidden_size, 1)
self.fc5.weight.data.mul_(0.1)
self.fc5.bias.data.mul_(0.0)
def encoder(self, x):
h = torch.tanh(self.fc1(x))
return self.fc2(h), self.fc3(h)
def reparameterize(self, mu, logvar):
std = torch.exp(logvar / 2)
eps = torch.randn_like(std)
return mu + std * eps
def discriminator(self, z):
h = torch.tanh(self.fc4(z))
return torch.sigmoid(self.fc5(h))
def forward(self, x):
mu, logvar = self.encoder(x)
z = self.reparameterize(mu, logvar)
prob = self.discriminator(z)
return prob, mu, logvar
###########################################################################3
from torch.autograd import Variable
from torch.distributions import Distribution, Normal
class TanhNormal(torch.distributions.Distribution):
"""
Represent distribution of X where
X ~ tanh(Z)
Z ~ N(mean, std)
Note: this is not very numerically stable.
"""
def __init__(self, normal_mean, normal_std, epsilon=1e-6):
"""
:param normal_mean: Mean of the normal distribution
:param normal_std: Std of the normal distribution
:param epsilon: Numerical stability epsilon when computing log-prob.
"""
self.normal_mean = normal_mean
self.normal_std = normal_std
self.normal = Normal(normal_mean, normal_std)
self.epsilon = epsilon
def sample_n(self, n, return_pre_tanh_value=False):
z = self.normal.sample_n(n)
if return_pre_tanh_value:
return torch.tanh(z), z
else:
return torch.tanh(z)
def log_prob(self, value, pre_tanh_value=None):
"""
:param value: some value, x
:param pre_tanh_value: arctanh(x)
:return:
"""
if pre_tanh_value is None:
pre_tanh_value = torch.log(
(1+value) / (1-value)
) / 2
return self.normal.log_prob(pre_tanh_value) - torch.log(
1 - value * value + self.epsilon
)
def sample(self, return_pretanh_value=False):
z = self.normal.sample()
if return_pretanh_value:
return torch.tanh(z), z
else:
return torch.tanh(z)
def rsample(self, return_pretanh_value=False):
z = (
self.normal_mean +
self.normal_std *
Variable(Normal(
np.zeros(self.normal_mean.size()),
np.ones(self.normal_std.size())
).sample())
)
# z.requires_grad_()
if return_pretanh_value:
return torch.tanh(z), z
else:
return torch.tanh(z)
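# --- Hypothetical usage sketch (illustration only, not part of the original file) ---
# Builds the plain actor-critic defined above for a toy continuous-action space
# and queries a single step; the space shapes are made up for demonstration.
if __name__ == '__main__':
    obs_space = Box(low=-1.0, high=1.0, shape=(4,))
    act_space = Box(low=-1.0, high=1.0, shape=(2,))
    ac = MLPActorCritic(obs_space, act_space, hidden_sizes=(32, 32))
    obs = torch.as_tensor(obs_space.sample(), dtype=torch.float32)
    a, v, vc, logp_a = ac.step(obs)
    print('action', a, 'value', v, 'cost value', vc, 'logp', logp_a)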
|
[
"torch.as_tensor",
"torch.distributions.Categorical",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.exp",
"torch.tanh",
"torch.nn.init.uniform_",
"torch.nn.Embedding",
"torch.nn.functional.mse_loss",
"torch.distributions.Normal",
"numpy.ones",
"torch.randn_like",
"torch.nn.functional.one_hot",
"torch.argmin",
"torch.reshape",
"torch.clamp",
"torch.cat",
"torch.log",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad"
] |
[((628, 650), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (641, 650), False, 'from torch import nn\n'), ((1513, 1539), 'torch.distributions.Categorical', 'Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (1524, 1539), False, 'from torch.distributions import Independent, OneHotCategorical, Categorical\n'), ((2037, 2060), 'torch.exp', 'torch.exp', (['self.log_std'], {}), '(self.log_std)\n', (2046, 2060), False, 'import torch\n'), ((2076, 2091), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (2082, 2091), False, 'from torch.distributions import Distribution, Normal\n'), ((4125, 4148), 'torch.exp', 'torch.exp', (['self.log_std'], {}), '(self.log_std)\n', (4134, 4148), False, 'import torch\n'), ((4164, 4179), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (4170, 4179), False, 'from torch.distributions import Distribution, Normal\n'), ((4253, 4276), 'torch.exp', 'torch.exp', (['self.log_std'], {}), '(self.log_std)\n', (4262, 4276), False, 'import torch\n'), ((4292, 4307), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (4298, 4307), False, 'from torch.distributions import Distribution, Normal\n'), ((5074, 5123), 'torch.nn.Linear', 'nn.Linear', (['self.embedding_dim', 'self.embedding_dim'], {}), '(self.embedding_dim, self.embedding_dim)\n', (5083, 5123), False, 'from torch import nn\n'), ((5236, 5285), 'torch.nn.Linear', 'nn.Linear', (['self.embedding_dim', 'encoder_hidden[-1]'], {}), '(self.embedding_dim, encoder_hidden[-1])\n', (5245, 5285), False, 'from torch import nn\n'), ((6716, 6760), 'torch.reshape', 'torch.reshape', (['categorical_proposal', '(-1, 1)'], {}), '(categorical_proposal, (-1, 1))\n', (6729, 6760), False, 'import torch\n'), ((7115, 7168), 'torch.cat', 'torch.cat', (['[state, categorical_proposal_prob]'], {'dim': '(-1)'}), '([state, categorical_proposal_prob], dim=-1)\n', (7124, 7168), False, 'import torch\n'), ((9586, 9624), 'torch.clamp', 'torch.clamp', (['input', 'self.min', 'self.max'], {}), '(input, self.min, self.max)\n', (9597, 9624), False, 'import torch\n'), ((9782, 9791), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9789, 9791), False, 'from torch import nn\n'), ((10190, 10233), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim'], {}), '(num_embeddings, embedding_dim)\n', (10202, 10233), False, 'from torch import nn\n'), ((10347, 10412), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.embeddings.weight', '(-self.scale)', 'self.scale'], {}), '(self.embeddings.weight, -self.scale, self.scale)\n', (10363, 10412), False, 'from torch import nn\n'), ((10859, 10890), 'torch.argmin', 'torch.argmin', (['distances'], {'dim': '(-1)'}), '(distances, dim=-1)\n', (10871, 10890), False, 'import torch\n'), ((11868, 11901), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['input', 'reconstruction'], {}), '(input, reconstruction)\n', (11878, 11901), True, 'import torch.nn.functional as F\n'), ((12387, 12426), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'args.hidden_size'], {}), '(num_inputs, args.hidden_size)\n', (12396, 12426), False, 'from torch import nn\n'), ((12446, 12486), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', 'args.z_size'], {}), '(args.hidden_size, args.z_size)\n', (12455, 12486), False, 'from torch import nn\n'), ((12506, 12546), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', 'args.z_size'], {}), '(args.hidden_size, args.z_size)\n', (12515, 12546), False, 'from torch import nn\n'), ((12566, 
12606), 'torch.nn.Linear', 'nn.Linear', (['args.z_size', 'args.hidden_size'], {}), '(args.z_size, args.hidden_size)\n', (12575, 12606), False, 'from torch import nn\n'), ((12626, 12656), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', '(1)'], {}), '(args.hidden_size, 1)\n', (12635, 12656), False, 'from torch import nn\n'), ((12894, 12915), 'torch.exp', 'torch.exp', (['(logvar / 2)'], {}), '(logvar / 2)\n', (12903, 12915), False, 'import torch\n'), ((12930, 12951), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (12946, 12951), False, 'import torch\n'), ((14018, 14049), 'torch.distributions.Normal', 'Normal', (['normal_mean', 'normal_std'], {}), '(normal_mean, normal_std)\n', (14024, 14049), False, 'from torch.distributions import Distribution, Normal\n'), ((575, 608), 'torch.nn.Linear', 'nn.Linear', (['sizes[j]', 'sizes[j + 1]'], {}), '(sizes[j], sizes[j + 1])\n', (584, 608), False, 'from torch import nn\n'), ((1779, 1813), 'numpy.ones', 'np.ones', (['act_dim'], {'dtype': 'np.float32'}), '(act_dim, dtype=np.float32)\n', (1786, 1813), True, 'import numpy as np\n'), ((1850, 1874), 'torch.as_tensor', 'torch.as_tensor', (['log_std'], {}), '(log_std)\n', (1865, 1874), False, 'import torch\n'), ((3316, 3331), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3329, 3331), False, 'import torch\n'), ((3866, 3903), 'numpy.ones', 'np.ones', (['action_dim'], {'dtype': 'np.float32'}), '(action_dim, dtype=np.float32)\n', (3873, 3903), True, 'import numpy as np\n'), ((3940, 3964), 'torch.as_tensor', 'torch.as_tensor', (['log_std'], {}), '(log_std)\n', (3955, 3964), False, 'import torch\n'), ((8948, 8979), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(out_dim // 2)'], {}), '(in_dim, out_dim // 2)\n', (8957, 8979), False, 'from torch import nn\n'), ((8993, 9002), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9000, 9002), False, 'from torch import nn\n'), ((9016, 9048), 'torch.nn.Linear', 'nn.Linear', (['(out_dim // 2)', 'out_dim'], {}), '(out_dim // 2, out_dim)\n', (9025, 9048), False, 'from torch import nn\n'), ((9062, 9071), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9069, 9071), False, 'from torch import nn\n'), ((14277, 14290), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14287, 14290), False, 'import torch\n'), ((14648, 14691), 'torch.log', 'torch.log', (['(1 - value * value + self.epsilon)'], {}), '(1 - value * value + self.epsilon)\n', (14657, 14691), False, 'import torch\n'), ((14900, 14913), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14910, 14913), False, 'import torch\n'), ((15334, 15347), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (15344, 15347), False, 'import torch\n'), ((14227, 14240), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14237, 14240), False, 'import torch\n'), ((14527, 14563), 'torch.log', 'torch.log', (['((1 + value) / (1 - value))'], {}), '((1 + value) / (1 - value))\n', (14536, 14563), False, 'import torch\n'), ((14850, 14863), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (14860, 14863), False, 'import torch\n'), ((15284, 15297), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (15294, 15297), False, 'import torch\n'), ((7521, 7551), 'torch.as_tensor', 'torch.as_tensor', (['context_label'], {}), '(context_label)\n', (7536, 7551), False, 'import torch\n'), ((8591, 8608), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (8603, 8608), False, 'import torch\n'), ((6799, 6857), 'torch.nn.functional.one_hot', 'F.one_hot', (['categorical_proposal_reshape', 'self.latent_modes'], {}), 
'(categorical_proposal_reshape, self.latent_modes)\n', (6808, 6857), True, 'import torch.nn.functional as F\n'), ((8649, 8666), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (8661, 8666), False, 'import torch\n')]
|
"""This module contains the code related to the DAG and the scheduler."""
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from networkx.drawing import nx_pydot
from pipeline.shared import ensure_list
BLUE = "#547482"
YELLOW_TO_RED = ["#C8B05C", "#C89D64", "#F1B05D", "#EE8445", "#C87259", "#6C4A4D"]
class Scheduler:
"""This class allows to schedule tasks.
The functionality is inspired by func:`networkx.topological_sort` which allows to
loop over a directed acyclic graph such that all preceding nodes are executed before
a dependent node.
The scheduler keeps track of all unfinished tasks and their dependencies in the
`task_dict`. If a task has no dependencies, it is eligible to be executed. All
    submitted tasks are removed from `task_dict`. If a task finishes, it is removed as a
dependency from all tasks in `task_dict`.
The scheduler can take task priorities into account and proposes only tasks
with the highest priorities.
"""
def __init__(self, dag, unfinished_tasks, priority):
self.dag = dag
self.task_dict = self._create_task_dependency_dict(unfinished_tasks)
self.submitted_tasks = set()
self.priority = priority
def _create_task_dependency_dict(self, unfinished_tasks):
"""Create a task-dependency dictionary.
For each unfinished task, this function collects the tasks which have to be
executed in advance.
"""
task_dict = {}
for id_ in unfinished_tasks:
task_dict[id_] = {
preceding_task
for dependency in ensure_list(self.dag.nodes[id_].get("depends_on", []))
for preceding_task in self.dag.predecessors(dependency)
if preceding_task in unfinished_tasks
}
return task_dict
def propose(self, n_proposals=1):
"""Propose a number of tasks.
This function proposes tasks which can be executed. If a task is proposed,
remove it from the `task_dict`.
Parameters
----------
n_proposals : int
Number of tasks which should be proposed. For any nonnegative number, return
a set of task ids. For `-1` return all possible tasks.
Returns
-------
proposals : set
A set of task ids which should be executed.
"""
# Get task candidates.
candidates = [id_ for id_ in self.task_dict if len(self.task_dict[id_]) == 0]
if self.priority:
candidates = sorted(
candidates,
key=lambda id_: self.dag.nodes[id_]["priority"],
reverse=True,
)
if 0 <= n_proposals:
proposals = set(candidates[:n_proposals])
elif n_proposals == -1:
proposals = set(candidates)
else:
raise NotImplementedError
self.submitted_tasks = self.submitted_tasks.union(proposals)
for id_ in proposals:
del self.task_dict[id_]
return proposals
def process_finished(self, finished_tasks):
"""Process finished tasks.
The executor passes an id or a list of ids of finished tasks back to the
scheduler. The scheduler removes the ids from the set of submitted tasks and
removes the finished tasks from the dependency sets of all unfinished tasks in
`task_dict`.
Parameters
----------
finished_tasks : str or list
An id or a list of ids of finished tasks.
"""
finished_tasks = ensure_list(finished_tasks)
for id_ in finished_tasks:
self.submitted_tasks.remove(id_)
for id__ in self.task_dict:
self.task_dict[id__].discard(id_)
@property
def are_tasks_left(self):
return len(self.task_dict) != 0 or len(self.submitted_tasks) != 0
def create_dag(tasks, config):
"""Create a directed acyclic graph (DAG) capturing dependencies between functions.
Parameters
----------
tasks : dict
Dictionary containing tasks.
Returns
-------
dag : nx.DiGraph
The directed acyclic graph.
"""
dag_dict = _create_dag_dict(tasks)
dag = nx.DiGraph(dag_dict).reverse()
dag = _insert_tasks_in_dag(dag, tasks)
dag = _assign_priority_to_nodes(dag, config)
_draw_dag(dag, config)
return dag
def _create_dag_dict(tasks):
dag_dict = {}
for id_, task_info in tasks.items():
# Add the task to the graph as a node.
depends_on = ensure_list(task_info.get("depends_on", [])).copy()
depends_on.extend(ensure_list(task_info.get("template", [])))
depends_on.append(task_info["config"])
dag_dict[id_] = depends_on
# If the task produces anything, register the output as a node.
for target in ensure_list(task_info.get("produces", [])):
dag_dict[target] = [id_]
return dag_dict
def _insert_tasks_in_dag(dag, tasks):
for id_ in dag.nodes:
if id_ in tasks:
dag.nodes[id_].update(**tasks[id_], _is_task=True)
else:
dag.nodes[id_].update(_is_task=False)
return dag
def _assign_priority_to_nodes(dag, config):
"""Assign a priority to a node.
Task priorities trickle down from the last nodes in the DAG to the first nodes. The
total priority of a task is its own priority plus the discounted sum of priorities
of its targets.
"""
discount_factor = config["priority_discount_factor"]
reversed_dag = dag.reverse()
for id_ in nx.topological_sort(reversed_dag):
if reversed_dag.nodes[id_]["_is_task"] and config["priority_scheduling"]:
sum_priorities = 0
for pre in reversed_dag.predecessors(id_):
for pre_task in reversed_dag.predecessors(pre):
sum_priorities += dag.nodes[pre_task].get("priority", 0)
dag.nodes[id_]["priority"] = (
dag.nodes[id_].get("priority", 0) + discount_factor * sum_priorities
)
else:
pass
return dag
def _draw_dag(dag, config):
fig, ax = plt.subplots(figsize=(16, 12))
fig.suptitle("Task Graph", fontsize=24)
# Relabel absolute paths to path names.
project_directory = Path(config["project_directory"])
mapping = {
node: Path(node).relative_to(project_directory)
for node in dag.nodes
if Path(node).is_absolute()
}
dag = nx.relabel_nodes(dag, mapping)
layout = nx_pydot.pydot_layout(dag, prog="dot")
nx.draw_networkx_edges(dag, pos=layout, ax=ax)
nx.draw_networkx_labels(dag, pos=layout, ax=ax)
# Draw non-task nodes.
non_task_nodes = [node for node in dag.nodes if not dag.nodes[node]["_is_task"]]
nx.draw_networkx_nodes(
dag, pos=layout, nodelist=non_task_nodes, node_color=BLUE, ax=ax
)
task_nodes = [node for node in dag.nodes if dag.nodes[node]["_is_task"]]
if config["priority_scheduling"]:
node_size = np.array([dag.nodes[node]["priority"] for node in task_nodes])
node_size_demeaned = node_size - node_size.min()
node_size_relative = node_size_demeaned / node_size_demeaned.max()
node_size = node_size_relative * 1_000 + 300
cmap = LinearSegmentedColormap.from_list("cmap", YELLOW_TO_RED)
priority_kwargs = {
"node_size": node_size,
"node_color": node_size_relative,
"cmap": cmap,
}
else:
priority_kwargs = {"node_color": BLUE}
im = nx.draw_networkx_nodes(
dag, pos=layout, nodelist=task_nodes, **priority_kwargs, ax=ax
)
if config["priority_scheduling"]:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="3%", pad=0.1)
fig.colorbar(im, cax=cax, orientation="vertical")
cax.set_title("Priority")
path = Path(config["hidden_build_directory"], ".dag.png")
path.parent.mkdir(parents=True, exist_ok=True)
plt.savefig(path)
plt.close()
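# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# Drives the Scheduler on a hand-built two-task graph (t1 produces a file that
# t2 depends on), mimicking the propose / process_finished loop an executor runs.
if __name__ == "__main__":
    toy_dag = nx.DiGraph()
    toy_dag.add_node("t1", _is_task=True, produces=["out.txt"])
    toy_dag.add_node("out.txt", _is_task=False)
    toy_dag.add_node("t2", _is_task=True, depends_on=["out.txt"])
    toy_dag.add_edge("t1", "out.txt")
    toy_dag.add_edge("out.txt", "t2")
    scheduler = Scheduler(toy_dag, unfinished_tasks={"t1", "t2"}, priority=False)
    while scheduler.are_tasks_left:
        for id_ in scheduler.propose(n_proposals=1):
            print("executing", id_)  # a real executor would run the task here
            scheduler.process_finished(id_)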
|
[
"networkx.relabel_nodes",
"matplotlib.pyplot.savefig",
"networkx.topological_sort",
"pathlib.Path",
"networkx.drawing.nx_pydot.pydot_layout",
"networkx.DiGraph",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"networkx.draw_networkx_nodes",
"matplotlib.pyplot.close",
"numpy.array",
"networkx.draw_networkx_labels",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"pipeline.shared.ensure_list",
"networkx.draw_networkx_edges",
"matplotlib.pyplot.subplots"
] |
[((5769, 5802), 'networkx.topological_sort', 'nx.topological_sort', (['reversed_dag'], {}), '(reversed_dag)\n', (5788, 5802), True, 'import networkx as nx\n'), ((6347, 6377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (6359, 6377), True, 'import matplotlib.pyplot as plt\n'), ((6492, 6525), 'pathlib.Path', 'Path', (["config['project_directory']"], {}), "(config['project_directory'])\n", (6496, 6525), False, 'from pathlib import Path\n'), ((6680, 6710), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['dag', 'mapping'], {}), '(dag, mapping)\n', (6696, 6710), True, 'import networkx as nx\n'), ((6725, 6763), 'networkx.drawing.nx_pydot.pydot_layout', 'nx_pydot.pydot_layout', (['dag'], {'prog': '"""dot"""'}), "(dag, prog='dot')\n", (6746, 6763), False, 'from networkx.drawing import nx_pydot\n'), ((6769, 6815), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['dag'], {'pos': 'layout', 'ax': 'ax'}), '(dag, pos=layout, ax=ax)\n', (6791, 6815), True, 'import networkx as nx\n'), ((6820, 6867), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['dag'], {'pos': 'layout', 'ax': 'ax'}), '(dag, pos=layout, ax=ax)\n', (6843, 6867), True, 'import networkx as nx\n'), ((6985, 7078), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['dag'], {'pos': 'layout', 'nodelist': 'non_task_nodes', 'node_color': 'BLUE', 'ax': 'ax'}), '(dag, pos=layout, nodelist=non_task_nodes, node_color\n =BLUE, ax=ax)\n', (7007, 7078), True, 'import networkx as nx\n'), ((7757, 7848), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['dag'], {'pos': 'layout', 'nodelist': 'task_nodes', 'ax': 'ax'}), '(dag, pos=layout, nodelist=task_nodes, **\n priority_kwargs, ax=ax)\n', (7779, 7848), True, 'import networkx as nx\n'), ((8106, 8156), 'pathlib.Path', 'Path', (["config['hidden_build_directory']", '""".dag.png"""'], {}), "(config['hidden_build_directory'], '.dag.png')\n", (8110, 8156), False, 'from pathlib import Path\n'), ((8212, 8229), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (8223, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8234, 8245), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8243, 8245), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3781), 'pipeline.shared.ensure_list', 'ensure_list', (['finished_tasks'], {}), '(finished_tasks)\n', (3765, 3781), False, 'from pipeline.shared import ensure_list\n'), ((7224, 7286), 'numpy.array', 'np.array', (["[dag.nodes[node]['priority'] for node in task_nodes]"], {}), "([dag.nodes[node]['priority'] for node in task_nodes])\n", (7232, 7286), True, 'import numpy as np\n'), ((7488, 7544), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""cmap"""', 'YELLOW_TO_RED'], {}), "('cmap', YELLOW_TO_RED)\n", (7521, 7544), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((7915, 7938), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (7934, 7938), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((4416, 4436), 'networkx.DiGraph', 'nx.DiGraph', (['dag_dict'], {}), '(dag_dict)\n', (4426, 4436), True, 'import networkx as nx\n'), ((6556, 6566), 'pathlib.Path', 'Path', (['node'], {}), '(node)\n', (6560, 6566), False, 'from pathlib import Path\n'), ((6639, 6649), 'pathlib.Path', 'Path', (['node'], {}), '(node)\n', (6643, 6649), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
# coding: utf-8
# ## E2E Xgboost MLFLOW
# In[45]:
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, pandas_udf,udf,lit
import azure.synapse.ml.predict as pcontext
import azure.synapse.ml.predict.utils._logger as synapse_predict_logger
import numpy as np
import pandas as pd
import xgboost as xgb
import mlflow
# In[46]:
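# NOTE: 'spark' is not created in this script; it is the SparkSession provided
# automatically by the Azure Synapse notebook runtime.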
spark.conf.set("spark.synapse.ml.predict.enabled","true")
# ## Train and Save Model
# ### Training
# In[47]:
data = np.random.rand(5, 10) # 5 entities, each contains 10 features
label = np.random.randint(1, size=5) # binary target
dtrain = xgb.DMatrix(data, label=label)
xgr = xgb.XGBRFRegressor(objective='reg:linear', n_estimators=10, seed=123)
xgr.fit(data, label)
# In[48]:
xgr.save_model('./model.json')
# In[49]:
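# Wrap the saved xgboost JSON model as an MLflow pyfunc model so it can be registered below.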
mlflow.pyfunc.save_model(
data_path='./model.json',
path='./xgboost_pyfunc_model_path',
loader_module='mlflow.xgboost')
# In[50]:
MODEL_URI = './xgboost_pyfunc_model_path'
RETURN_TYPES = 'float'
# In[51]:
model = pcontext.bind_model(
return_types = RETURN_TYPES,
runtime = 'mlflow',
model_alias = 'xgb_model',
model_uri = MODEL_URI,).register()
# In[52]:
type(model)
# In[53]:
data = np.random.rand(5, 10)
df = spark.createDataFrame(pd.DataFrame(data))
df.createOrReplaceTempView("data")
df.show()
# In[54]:
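# The SQL PREDICT call resolves the 'xgb_model' alias registered via bind_model above.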
predictions = spark.sql(
"""
SELECT PREDICT('xgb_model', *) AS predict FROM data
"""
).show()
|
[
"mlflow.pyfunc.save_model",
"numpy.random.rand",
"azure.synapse.ml.predict.bind_model",
"numpy.random.randint",
"pandas.DataFrame",
"xgboost.DMatrix",
"xgboost.XGBRFRegressor"
] |
[((500, 521), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (514, 521), True, 'import numpy as np\n'), ((571, 599), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'size': '(5)'}), '(1, size=5)\n', (588, 599), True, 'import numpy as np\n'), ((626, 656), 'xgboost.DMatrix', 'xgb.DMatrix', (['data'], {'label': 'label'}), '(data, label=label)\n', (637, 656), True, 'import xgboost as xgb\n'), ((664, 733), 'xgboost.XGBRFRegressor', 'xgb.XGBRFRegressor', ([], {'objective': '"""reg:linear"""', 'n_estimators': '(10)', 'seed': '(123)'}), "(objective='reg:linear', n_estimators=10, seed=123)\n", (682, 733), True, 'import xgboost as xgb\n'), ((814, 937), 'mlflow.pyfunc.save_model', 'mlflow.pyfunc.save_model', ([], {'data_path': '"""./model.json"""', 'path': '"""./xgboost_pyfunc_model_path"""', 'loader_module': '"""mlflow.xgboost"""'}), "(data_path='./model.json', path=\n './xgboost_pyfunc_model_path', loader_module='mlflow.xgboost')\n", (838, 937), False, 'import mlflow\n'), ((1234, 1255), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (1248, 1255), True, 'import numpy as np\n'), ((1283, 1301), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1295, 1301), True, 'import pandas as pd\n'), ((1047, 1161), 'azure.synapse.ml.predict.bind_model', 'pcontext.bind_model', ([], {'return_types': 'RETURN_TYPES', 'runtime': '"""mlflow"""', 'model_alias': '"""xgb_model"""', 'model_uri': 'MODEL_URI'}), "(return_types=RETURN_TYPES, runtime='mlflow',\n model_alias='xgb_model', model_uri=MODEL_URI)\n", (1066, 1161), True, 'import azure.synapse.ml.predict as pcontext\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import g_functions as g_f
R1 = 2
R2 = .6
M = 500
Delta = .1
NB_POINTS = 2**10
EPSILON_IMAG = 1e-8
parameters = {
'M' : M,
'R1' : R1,
'R2' : R2,
'NB_POINTS' : NB_POINTS,
'EPSILON_IMAG' : EPSILON_IMAG,
'verbosity' : 1,
'ENSAMBLE' : 'Wishart'
}
# Compute sample
S, Y = g_f.make_sample(parameters, Delta)
# Compute rho from theory
rho_theory = g_f.find_rho(parameters, Delta)
# Compute denoising function from theory
denoiser_plot = np.zeros(parameters["NB_POINTS"])
for (i_z, z) in enumerate(rho_theory["zs"]):
denoiser_plot[i_z] = g_f.denoiser(z, parameters, Delta)
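# denoiser_plot is computed for reference; the corresponding plot call below is commented out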
plt.hist(g_f.find_spectrum(Y), 80, density=True)
# plt.hist(g_f.find_spectrum(g_f.denoise_sample(Y, parameters, Delta)), 160, density=True)
plt.plot(rho_theory['zs'],rho_theory['rho'],color='red')
# plt.plot(rho_theory['zs'],denoiser_plot)
plt.title(f"R2 = {parameters['R2']}, R1 = {parameters['R1']}")
plt.ylabel("Frequency")
plt.xlabel("Singular value")
plt.show()
|
[
"g_functions.find_rho",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"g_functions.denoiser",
"numpy.zeros",
"g_functions.find_spectrum",
"g_functions.make_sample",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] |
[((398, 432), 'g_functions.make_sample', 'g_f.make_sample', (['parameters', 'Delta'], {}), '(parameters, Delta)\n', (413, 432), True, 'import g_functions as g_f\n'), ((474, 505), 'g_functions.find_rho', 'g_f.find_rho', (['parameters', 'Delta'], {}), '(parameters, Delta)\n', (486, 505), True, 'import g_functions as g_f\n'), ((563, 596), 'numpy.zeros', 'np.zeros', (["parameters['NB_POINTS']"], {}), "(parameters['NB_POINTS'])\n", (571, 596), True, 'import numpy as np\n'), ((846, 904), 'matplotlib.pyplot.plot', 'plt.plot', (["rho_theory['zs']", "rho_theory['rho']"], {'color': '"""red"""'}), "(rho_theory['zs'], rho_theory['rho'], color='red')\n", (854, 904), True, 'import matplotlib.pyplot as plt\n'), ((946, 1008), 'matplotlib.pyplot.title', 'plt.title', (['f"""R2 = {parameters[\'R2\']}, R1 = {parameters[\'R1\']}"""'], {}), '(f"R2 = {parameters[\'R2\']}, R1 = {parameters[\'R1\']}")\n', (955, 1008), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1019, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1061), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Singular value"""'], {}), "('Singular value')\n", (1043, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1070, 1072), True, 'import matplotlib.pyplot as plt\n'), ((667, 701), 'g_functions.denoiser', 'g_f.denoiser', (['z', 'parameters', 'Delta'], {}), '(z, parameters, Delta)\n', (679, 701), True, 'import g_functions as g_f\n'), ((714, 734), 'g_functions.find_spectrum', 'g_f.find_spectrum', (['Y'], {}), '(Y)\n', (731, 734), True, 'import g_functions as g_f\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import datetime as dt
from statsmodels.stats.multitest import fdrcorrection
from pylab import savefig
# FUNCTIONS YOU CAN USE:
# analyses(filepath) spits out a nifty heatmap to let you check correlation between variables
#
# regress(option, df) churns out a saucy graph of the linear regression for the variables you provided, where
# option is 'snr_total' or 'tsnr', whichever you want to make the dependent variable of your model
# df is the pandas DataFrame containing your data. To modify which variables you want in your model, you'll
# have to directly modify the regress function
# NOTABLE FILENAMES
# ../data/extractions/p2_BOLD.csv - all dates for p2_BOLD
# ../data/extractions/p2Xs4X35mm_BOLD.csv - all dates for p2Xs4X35mm_BOLD
# ../data/extractions/anat.csv - all possible dates for anatomical data
def filter(option, df):
is_p2 = df['Filetype'] == "task-rest_acq-p2_bold.json"
is_x = df['Filetype'] == "task-rest_acq-p2Xs4X35mm_bold.json"
if option == 'x':
return df[is_x]
elif option == 'p2':
return df[is_p2]
def analyses(filepath):
files = pd.read_csv(filepath)
# FIRST CHECK: CONVERSION SOFTWARE VERSIONS
check = files.iloc[0, 7]
valid = True
for i in files.index:
if check != files.iloc[i, 7]:
valid = False
print("All Conversion Softwares are the same: " + str(valid))
# SECOND CHECK: HEATMAP
figure = sns.heatmap(files.corr(), cmap=sns.diverging_palette(h_neg=240, h_pos=10, n=9, sep=1, center="dark"), center=0)
figure
save = figure.get_figure()
save.savefig('heatmap.svg', pad_inches = 0.1)
def add_seasonal_simple(df, col='Date', start='2017-01-01'):
    # Add very simplistic seasonal regressors: cos and sin of the time elapsed since a reference date, with a one-year period
time_delta = df[col] - np.datetime64(start)
time_delta_rad = time_delta.apply(lambda d: d.days) * 2 * np.pi / 365.25
df['Seasonal (sin)'] = np.sin(time_delta_rad)
df['Seasonal (cos)'] = np.cos(time_delta_rad)
def Ftest(model, var_prefix, queue, prints=False):
var_columns = [c for c in model.params.index if c.startswith(var_prefix)]
if var_columns:
f_test = model.f_test(' = '.join(var_columns) + " = 0")
if f_test.pvalue < 0.05:
if var_prefix == "Shim":
for i in range(8):
queue.append("Shim" + str(i+1))
elif var_prefix == "IOPD":
for i in range(6):
queue.append("IOPD" + str(i+1))
if prints:
print("%s F-test: %s" % (var_prefix, f_test))
return f_test
else:
if prints:
print("No %s variables in the model" % var_prefix)
return None
# copy pasted from nipy function, renamed from _orthogonalize
def orthogonalize(X):
""" Orthogonalize every column of design `X` w.r.t preceding columns
Parameters
----------
X: array of shape(n, p), the data to be orthogonalized
Returns
-------
X: after orthogonalization
Notes
-----
X is changed in place. the columns are not normalized
"""
if X.size == X.shape[0]:
return X
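    # subtract from column i its projection onto the span of the preceding columns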
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(X[:, i], np.dot(X[:, :i], np.linalg.pinv(X[:, :i])))
return X
def regress(target_variable, model_df, plot=True, print_summary=True, add_qa=True, add_seasonal=True, real_data=False):
"""
creates a regression graph plotted against actual data from certain QA metrics
Parameters
----------
target_variable: takes str value of either snr_total or tsnr to model against
model_df : takes pandas DataFrame with data to be used for predictive modeling
plot : boolean to turn the plotted graph on/off
print_summary : boolean to turn the printed summary of OLS regression on/off
add_qa : boolean to add/not add snr_total_qa into list of variables to be modeled
add_seasonal : boolean to add/not add seasonal variables into list of variables to be modeled
real_data : boolean to indicate whether or not the pandas DataFrame being fed in is from real data or not
"""
if type(model_df) is not pd.core.frame.DataFrame:
return "DataFrame must be of type pandas.core.frame.DataFrame"
########## adding seasonal curves to the model
add_seasonal_simple(model_df)
########## Converting date to a format that can be parsed by statsmodels API
model_df = model_df.copy()
date_df = model_df['Date']
model_df['Date'] = pd.to_datetime(model_df['Date'], format="%Y%m%d")
model_df['Date'] = model_df['Date'].map(lambda x: x.toordinal())
f_tests_todo = ['IOPD']
excluded_cols = ['Date', 'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6', 'Seasonal (sin)', 'Seasonal (cos)']
seasonal_cols = ['Seasonal (sin)', 'Seasonal (cos)',]
cols = ['Date']
if not real_data:
# preparing model_df for orthogonalization
cols += ['AcquisitionTime', 'SAR', 'TxRefAmp',
'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6']
if add_seasonal:
cols += seasonal_cols
else:
cols += ['age', 'sex_male', 'PatientWeight',]
if add_seasonal:
cols += seasonal_cols
if add_qa:
cols += ['snr_total_qa']
cols += ['IOPD1_real', 'IOPD2_real', 'IOPD3_real', 'IOPD4_real', 'IOPD5_real', 'IOPD6_real']
if add_seasonal:
f_tests_todo += ['Seasonal']
cols.append(target_variable)
model_df = model_df[cols]
# There is apparently a sample date (20170626) with SAR being unknown None/NaN
# For now we will just filter out those samples
if 'SAR' in model_df.columns:
finite_SAR = np.isfinite(model_df['SAR'])
if not np.all(finite_SAR):
print("Following dates didn't have SAR, excluding them: %s" % str(model_df['Date'][~finite_SAR]))
model_df = model_df[finite_SAR]
orthogonalized_df = model_df.drop(target_variable, axis=1) # avoid orthogonalizing target variable
cols = cols[:-1] # remove target variable from column list
# orthogonalize dataframe after its conversion to NumPy array, then convert back and replace in original model_df
model_array = orthogonalize(orthogonalized_df.to_numpy())
orthogonalized_df = pd.DataFrame(model_array)
orthogonalized_df.columns = [cols]
orthogonalized_df[target_variable] = pd.Series(model_df[target_variable])
model_df = orthogonalized_df
# add datetime64[ns] formatted date time
model_df.columns=[x[0] for x in model_df.columns]
model_df['Date'] = pd.to_datetime(model_df['Date'])
model_df = model_df.drop('Date', axis=1)
model_df['Date'] = date_df
########## Assigning independent and dependent variables
model_vars = []
for item in model_df.std().iteritems():
if item[0] != 'Date' and item[0] != target_variable:
model_vars.append(item[0])
X = model_df[model_vars]
y = model_df[target_variable]
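    # mean-center the regressors so the added constant absorbs their baseline levels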
X = X.sub(X.mean())
X = sm.add_constant(X)
model_df = sm.add_constant(model_df)
########## modeling predictions
model = sm.OLS(y, X).fit()
predictions = model.predict(X)
################ CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
significant_variables = []
F_tests_pvals = {
v: float(Ftest(model, v, significant_variables).pvalue)
for v in f_tests_todo
}
# get p-values
for key, value in dict(model.pvalues).items():
if key not in significant_variables and value < 0.05 or key.lower() == 'const':
            # collect statistically significant variables (the constant term is always kept)
significant_variables.append(key)
######## set statistically insignificant variables to 0, then predict
    partial_fits = {} # one partial-fit prediction per significant variable
for variable in significant_variables:
X2 = X.copy(True) # prepare for mods
for col in X2:
if col != variable:
X2[col] = 0
partial_fits[str(variable)] = model.predict(X2)
if print_summary:
print("Statistically significant variables: " + str(significant_variables))
################ END CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
# Functionality for carrying out FDR correction
outvars = {} # dict containing all predictive variables and their p values from the model
for var in cols:
is_f_test = False
for f_test in f_tests_todo:
if var.startswith(f_test):
is_f_test = True
break
if is_f_test:
continue
if var not in excluded_cols:
var_pvalue = getattr(model.pvalues, var)
outvars[var] = var_pvalue
outvars.update(F_tests_pvals) # add previously conducted F test p values to the outvars
FDR_tuple = fdrcorrection(list(outvars.values())) # actual FDR test conduct
t_f = list(FDR_tuple[0]) # split tuple into true/false array
FDR_pvals = list(FDR_tuple[1]) # split tuple into p value array
print("FDR-corrected p-values:")
for (var, value), fdr_pval, is_sign in zip(outvars.items(), FDR_pvals, t_f):
print("%15s | Original p-value: %8.3g" % (var, value) +
" | FDR-corrected p-value: %8.3g%s" % (fdr_pval, '**' if is_sign else ''))
print("\n")
# giving additional data
if print_summary:
print(model.summary())
print("AIC: " + str(model.aic))
print("BIC: " + str(model.bic))
if not plot:
return model
######### converting the above predictions to a format that can be plotted
plot_df = predictions.to_frame() # new DataFrame containing only data needed for the plot
plot_df.columns = ['full fit']
plot_df = plot_df.join(model_df['Date'])
plot_df = plot_df.join(model_df[target_variable])
summation_df = None
for key, value in partial_fits.items():
column = value.to_frame()
column.columns = ['partial fit']
if summation_df is None:
summation_df = column # used to add up the values
else:
summation_df = summation_df.add(column, axis=1)
plot_df = pd.concat([plot_df, summation_df], axis=1)
# plotting the graph
plt.figure(figsize=(15, 6))
ax = sns.lineplot(x="Date", y=target_variable, data=plot_df, color="#000000")
# plotting partial fit
ax_partial = plt.twinx()
sns.lineplot(x="Date", y="full fit", data=plot_df, color="r", ax=ax)
if partial_fits:
sns.lineplot(x="Date", y="partial fit", data=plot_df, color="#ffcccc", ax=ax_partial)
plt.ylim(145, 305)
ax_partial.legend(['partial fit'])
ax.legend(['actual', 'full fit'], loc='upper left')
plt.savefig("test.svg")
return model
def scrape_var_significance(targets, p_var, df):
dummy = [] # dud list for Seasonal f test comparison
columns = ['Variable', p_var + ' p value', 'R2 value']
result = pd.DataFrame(columns = columns)
raw_pvals = []
for target in targets:
input_df = pd.DataFrame(df,columns=['Date', 'sid', 'ses', target, 'age', 'tsnr',
'snr_total_qa', 'IOPD1_real', 'IOPD2_real', 'IOPD3_real',
'IOPD4_real', 'IOPD5_real', 'IOPD6_real', 'sex_male', 'PatientWeight'])
model = regress(target, input_df, plot=False, print_summary=False, real_data=True)
if p_var == 'Seasonal':
seasonal_ftest = Ftest(model, 'Seasonal', dummy).pvalue
result.loc[len(result)] = [target, seasonal_ftest, model.rsquared]
raw_pvals.append(seasonal_ftest)
else:
var_pval = model.pvalues[p_var]
result.loc[len(result)] = [target, var_pval, model.rsquared]
raw_pvals.append(var_pval)
fdr_df = pd.DataFrame({'FDR-corrected': fdrcorrection(raw_pvals)[1].tolist()})
result = result.join(fdr_df)
return result
|
[
"numpy.linalg.pinv",
"pandas.read_csv",
"numpy.isfinite",
"numpy.sin",
"statsmodels.api.OLS",
"pandas.to_datetime",
"matplotlib.pyplot.twinx",
"numpy.datetime64",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"statsmodels.stats.multitest.fdrcorrection",
"matplotlib.pyplot.savefig",
"seaborn.diverging_palette",
"seaborn.lineplot",
"statsmodels.api.add_constant",
"numpy.cos",
"pandas.Series",
"matplotlib.pyplot.figure",
"numpy.all",
"pandas.concat"
] |
[((1308, 1329), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (1319, 1329), True, 'import pandas as pd\n'), ((2167, 2189), 'numpy.sin', 'np.sin', (['time_delta_rad'], {}), '(time_delta_rad)\n', (2173, 2189), True, 'import numpy as np\n'), ((2217, 2239), 'numpy.cos', 'np.cos', (['time_delta_rad'], {}), '(time_delta_rad)\n', (2223, 2239), True, 'import numpy as np\n'), ((4849, 4898), 'pandas.to_datetime', 'pd.to_datetime', (["model_df['Date']"], {'format': '"""%Y%m%d"""'}), "(model_df['Date'], format='%Y%m%d')\n", (4863, 4898), True, 'import pandas as pd\n'), ((6673, 6698), 'pandas.DataFrame', 'pd.DataFrame', (['model_array'], {}), '(model_array)\n', (6685, 6698), True, 'import pandas as pd\n'), ((6779, 6815), 'pandas.Series', 'pd.Series', (['model_df[target_variable]'], {}), '(model_df[target_variable])\n', (6788, 6815), True, 'import pandas as pd\n'), ((6976, 7008), 'pandas.to_datetime', 'pd.to_datetime', (["model_df['Date']"], {}), "(model_df['Date'])\n", (6990, 7008), True, 'import pandas as pd\n'), ((7422, 7440), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (7437, 7440), True, 'import statsmodels.api as sm\n'), ((7461, 7486), 'statsmodels.api.add_constant', 'sm.add_constant', (['model_df'], {}), '(model_df)\n', (7476, 7486), True, 'import statsmodels.api as sm\n'), ((10718, 10760), 'pandas.concat', 'pd.concat', (['[plot_df, summation_df]'], {'axis': '(1)'}), '([plot_df, summation_df], axis=1)\n', (10727, 10760), True, 'import pandas as pd\n'), ((10795, 10822), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (10805, 10822), True, 'import matplotlib.pyplot as plt\n'), ((10833, 10905), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': 'target_variable', 'data': 'plot_df', 'color': '"""#000000"""'}), "(x='Date', y=target_variable, data=plot_df, color='#000000')\n", (10845, 10905), True, 'import seaborn as sns\n'), ((10955, 10966), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (10964, 10966), True, 'import matplotlib.pyplot as plt\n'), ((10971, 11039), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': '"""full fit"""', 'data': 'plot_df', 'color': '"""r"""', 'ax': 'ax'}), "(x='Date', y='full fit', data=plot_df, color='r', ax=ax)\n", (10983, 11039), True, 'import seaborn as sns\n'), ((11290, 11313), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.svg"""'], {}), "('test.svg')\n", (11301, 11313), True, 'import matplotlib.pyplot as plt\n'), ((11516, 11545), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (11528, 11545), True, 'import pandas as pd\n'), ((2042, 2062), 'numpy.datetime64', 'np.datetime64', (['start'], {}), '(start)\n', (2055, 2062), True, 'import numpy as np\n'), ((6073, 6101), 'numpy.isfinite', 'np.isfinite', (["model_df['SAR']"], {}), "(model_df['SAR'])\n", (6084, 6101), True, 'import numpy as np\n'), ((11069, 11159), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Date"""', 'y': '"""partial fit"""', 'data': 'plot_df', 'color': '"""#ffcccc"""', 'ax': 'ax_partial'}), "(x='Date', y='partial fit', data=plot_df, color='#ffcccc', ax=\n ax_partial)\n", (11081, 11159), True, 'import seaborn as sns\n'), ((11163, 11181), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(145)', '(305)'], {}), '(145, 305)\n', (11171, 11181), True, 'import matplotlib.pyplot as plt\n'), ((11618, 11826), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'columns': "['Date', 'sid', 'ses', target, 'age', 'tsnr', 'snr_total_qa', 
'IOPD1_real',\n 'IOPD2_real', 'IOPD3_real', 'IOPD4_real', 'IOPD5_real', 'IOPD6_real',\n 'sex_male', 'PatientWeight']"}), "(df, columns=['Date', 'sid', 'ses', target, 'age', 'tsnr',\n 'snr_total_qa', 'IOPD1_real', 'IOPD2_real', 'IOPD3_real', 'IOPD4_real',\n 'IOPD5_real', 'IOPD6_real', 'sex_male', 'PatientWeight'])\n", (11630, 11826), True, 'import pandas as pd\n'), ((1680, 1749), 'seaborn.diverging_palette', 'sns.diverging_palette', ([], {'h_neg': '(240)', 'h_pos': '(10)', 'n': '(9)', 'sep': '(1)', 'center': '"""dark"""'}), "(h_neg=240, h_pos=10, n=9, sep=1, center='dark')\n", (1701, 1749), True, 'import seaborn as sns\n'), ((6117, 6135), 'numpy.all', 'np.all', (['finite_SAR'], {}), '(finite_SAR)\n', (6123, 6135), True, 'import numpy as np\n'), ((7545, 7557), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (7551, 7557), True, 'import statsmodels.api as sm\n'), ((3511, 3535), 'numpy.linalg.pinv', 'np.linalg.pinv', (['X[:, :i]'], {}), '(X[:, :i])\n', (3525, 3535), True, 'import numpy as np\n'), ((12473, 12497), 'statsmodels.stats.multitest.fdrcorrection', 'fdrcorrection', (['raw_pvals'], {}), '(raw_pvals)\n', (12486, 12497), False, 'from statsmodels.stats.multitest import fdrcorrection\n')]
|
"""Implement the Unit class."""
import numpy as np
from .. import config, constants
__all__ = ["Pixels", "Degrees", "Munits", "Percent"]
class _PixelUnits:
def __mul__(self, val):
return val * config.frame_width / config.pixel_width
def __rmul__(self, val):
return val * config.frame_width / config.pixel_width
class Percent:
def __init__(self, axis):
if np.array_equal(axis, constants.X_AXIS):
self.length = config.frame_width
if np.array_equal(axis, constants.Y_AXIS):
self.length = config.frame_height
if np.array_equal(axis, constants.Z_AXIS):
raise NotImplementedError("length of Z axis is undefined")
def __mul__(self, val):
return val / 100 * self.length
def __rmul__(self, val):
return val / 100 * self.length
Pixels = _PixelUnits()
Degrees = constants.PI / 180
Munits = 1
|
[
"numpy.array_equal"
] |
[((399, 437), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.X_AXIS'], {}), '(axis, constants.X_AXIS)\n', (413, 437), True, 'import numpy as np\n'), ((495, 533), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.Y_AXIS'], {}), '(axis, constants.Y_AXIS)\n', (509, 533), True, 'import numpy as np\n'), ((592, 630), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.Z_AXIS'], {}), '(axis, constants.Z_AXIS)\n', (606, 630), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D linear elasticity example
Solve the equilibrium equation -\nabla \cdot \sigma(x) = f(x) for x\in\Omega
with the strain-displacement equation:
\epsilon = 1/2(\nabla u + \nabla u^T)
and the constitutive law:
\sigma = 2*\mu*\epsilon + \lambda*(\nabla\cdot u)I,
where \mu and \lambda are Lame constants, I is the identity tensor.
Dirichlet boundary conditions: u(x)=\hat{u} for x\in\Gamma_D
Neumann boundary conditions: \sigma n = \hat{t} for x\in \Gamma_N,
where n is the normal vector.
For this example:
\Omega is a quarter annulus in the 1st quadrant, centered at origin
with inner radius 1, outer radius 4
Symmetry (Dirichlet) boundary conditions on the bottom and left
u_x(x,y) = 0 for x=0
u_y(x,y) = 0 for y=0
and pressure boundary conditions for the curved boundaries:
\sigma n = P_int n on the interior boundary with P_int = 10 MPa
\sigma n = P_ext n on the exterior boundary with P_ext = 0 MPa.
Use DEM
"""
import tensorflow as tf
import numpy as np
import time
from utils.tfp_loss import tfp_function_factory
from utils.Geom_examples import QuarterAnnulus
from utils.Solvers import Elasticity2D_DEM_dist
from utils.Plotting import plot_field_2d
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
#make figures bigger on HiDPI monitors
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
np.random.seed(42)
tf.random.set_seed(42)
class Elast_ThickCylinder(Elasticity2D_DEM_dist):
'''
Class including the symmetry boundary conditions for the thick cylinder problem
'''
def __init__(self, layers, train_op, num_epoch, print_epoch, model_data, data_type):
super().__init__(layers, train_op, num_epoch, print_epoch, model_data, data_type)
@tf.function
def dirichletBound(self, X, xPhys, yPhys):
# multiply by x,y for strong imposition of boundary conditions
u_val = X[:,0:1]
v_val = X[:,1:2]
u_val = xPhys*u_val
v_val = yPhys*v_val
return u_val, v_val
#define the model properties
model_data = dict()
model_data["radius_int"] = 1.
model_data["radius_ext"] = 4.
model_data["E"] = 1e2
model_data["nu"] = 0.3
model_data["state"] = "plane strain"
model_data["inner_pressure"] = 10.
model_data["outer_pressure"] = 0.
# generate the model geometry
geomDomain = QuarterAnnulus(model_data["radius_int"], model_data["radius_ext"])
# define the input and output data set
numElemU = 10
numElemV = 10
numGauss = 5
#xPhys, yPhys = myQuad.getRandomIntPts(numPtsU*numPtsV)
xPhys, yPhys, Wint = geomDomain.getQuadIntPts(numElemU, numElemV, numGauss)
data_type = "float32"
Xint = np.concatenate((xPhys,yPhys),axis=1).astype(data_type)
Wint = np.array(Wint).astype(data_type)
# prepare boundary points in the format Xbnd = [Xcoord, Ycoord, norm_x, norm_y] and
# Wbnd for boundary integration weights and
# Ybnd = [trac_x, trac_y], where Xcoord, Ycoord are the x and y coordinates of the point,
# norm_x, norm_y are the x and y components of the unit normals
# trac_x, trac_y are the x and y components of the traction vector at each point
# inner curved boundary, include both x and y directions
xPhysBnd, yPhysBnd , xNorm, yNorm, Wbnd = geomDomain.getQuadEdgePts(numElemV, numGauss, 4)
Xbnd = np.concatenate((xPhysBnd, yPhysBnd), axis=1).astype(data_type)
Wbnd = np.array(Wbnd).astype(data_type)
plt.scatter(xPhys, yPhys, s=0.1)
plt.scatter(xPhysBnd, yPhysBnd, s=1, c='red')
plt.title("Boundary and interior integration points")
plt.show()
# define loading
Ybnd_x = -model_data["inner_pressure"]*xNorm
Ybnd_y = -model_data["inner_pressure"]*yNorm
Ybnd = np.concatenate((Ybnd_x, Ybnd_y), axis=1).astype(data_type)
#define the model
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(20, "swish")
l2 = tf.keras.layers.Dense(20, "swish")
l3 = tf.keras.layers.Dense(20, "swish")
l4 = tf.keras.layers.Dense(2, None)
train_op = tf.keras.optimizers.Adam()
train_op2 = "TFP-BFGS"
num_epoch = 1000
print_epoch = 100
pred_model = Elast_ThickCylinder([l1, l2, l3, l4], train_op, num_epoch,
print_epoch, model_data, data_type)
#convert the training data to tensors
Xint_tf = tf.convert_to_tensor(Xint)
Wint_tf = tf.convert_to_tensor(Wint)
Xbnd_tf = tf.convert_to_tensor(Xbnd)
Wbnd_tf = tf.convert_to_tensor(Wbnd)
Ybnd_tf = tf.convert_to_tensor(Ybnd)
#training
t0 = time.time()
print("Training (ADAM)...")
pred_model.network_learn(Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
t1 = time.time()
print("Time taken (ADAM)", t1-t0, "seconds")
print("Training (TFP-BFGS)...")
loss_func = tfp_function_factory(pred_model, Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
# convert initial model parameters to a 1D tf.Tensor
init_params = tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables)
# train the model with L-BFGS solver
results = tfp.optimizer.bfgs_minimize(
value_and_gradients_function=loss_func, initial_position=init_params,
max_iterations=10000, tolerance=1e-14)
# after training, the final optimized parameters are still in results.position
# so we have to manually put them back to the model
loss_func.assign_new_model_parameters(results.position)
t2 = time.time()
print("Time taken (BFGS)", t2-t1, "seconds")
print("Time taken (all)", t2-t0, "seconds")
def cart2pol(x, y):
rho = np.sqrt(np.array(x)**2 + np.array(y)**2)
phi = np.arctan2(y, x)
return rho, phi
# define the exact displacements
def exact_disp(x,y,model):
nu = model["nu"]
r = np.hypot(x,y)
a = model["radius_int"]
b = model["radius_ext"]
mu = model["E"]/(2*(1+nu))
p1 = model["inner_pressure"]
p0 = model["outer_pressure"]
dispxy = 1/(2*mu*(b**2-a**2))*((1-2*nu)*(p1*a**2-p0*b**2)+(p1-p0)*a**2*b**2/r**2)
ux = x*dispxy
uy = y*dispxy
return ux, uy
#define the exact stresses
def exact_stresses(x,y,model):
r = np.hypot(x,y)
a = model["radius_int"]
b = model["radius_ext"]
p1 = model["inner_pressure"]
p0 = model["outer_pressure"]
term_fact = a**2*b**2/(b**2-a**2)
term_one = p1/b**2 - p0/a**2 + (p1-p0)/r**2
term_two = 2*(p1-p0)/r**4
sigma_xx = term_fact*(term_one - term_two*x**2)
sigma_yy = term_fact*(term_one - term_two*y**2)
sigma_xy = term_fact*(-term_two*x*y)
return sigma_xx, sigma_yy, sigma_xy
print("Testing...")
numPtsUTest = 2*numElemU*numGauss
numPtsVTest = 2*numElemV*numGauss
xPhysTest, yPhysTest = geomDomain.getUnifIntPts(numPtsUTest, numPtsVTest, [1,1,1,1])
XTest = np.concatenate((xPhysTest,yPhysTest),axis=1).astype(data_type)
XTest_tf = tf.convert_to_tensor(XTest)
YTest = pred_model(XTest_tf).numpy()
xPhysTest = xPhysTest.astype(data_type)
yPhysTest = yPhysTest.astype(data_type)
stress_xx_comp, stress_yy_comp, stress_xy_comp = pred_model.constitutiveEq(xPhysTest, yPhysTest)
stress_xx_comp = stress_xx_comp.numpy()
stress_yy_comp = stress_yy_comp.numpy()
stress_xy_comp = stress_xy_comp.numpy()
# plot the displacement
plot_field_2d(XTest, YTest[:,0], numPtsUTest, numPtsVTest, title="Computed x-displacement")
plot_field_2d(XTest, YTest[:,1], numPtsUTest, numPtsVTest, title="Computed y-displacement")
# comparison with exact solution
ux_exact, uy_exact = exact_disp(xPhysTest, yPhysTest, model_data)
ux_test = YTest[:,0:1]
uy_test = YTest[:,1:2]
err_norm = np.sqrt(np.sum((ux_exact-ux_test)**2+(uy_exact-uy_test)**2))
ex_norm = np.sqrt(np.sum(ux_exact**2 + uy_exact**2))
rel_err_l2 = err_norm/ex_norm
print("Relative L2 error: ", rel_err_l2)
stress_xx_exact, stress_yy_exact, stress_xy_exact = exact_stresses(xPhysTest,
yPhysTest, model_data)
stress_xx_err = stress_xx_exact - stress_xx_comp
stress_yy_err = stress_yy_exact - stress_yy_comp
stress_xy_err = stress_xy_exact - stress_xy_comp
C_inv = np.linalg.inv(pred_model.Emat.numpy())
energy_err = 0.
energy_norm = 0.
numPts = len(xPhysTest)
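# accumulate the energy norm of the stress error, err^T C^{-1} err, and the same quantity for the exact stresses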
for i in range(numPts):
err_pt = np.array([stress_xx_err[i,0],stress_yy_err[i,0],stress_xy_err[i,0]])
norm_pt = np.array([stress_xx_exact[i,0],stress_yy_exact[i,0],stress_xy_exact[i,0]])
energy_err = energy_err + err_pt@C_inv@err_pt.T
energy_norm = energy_norm + norm_pt@C_inv@norm_pt.T
print("Relative energy error: ", np.sqrt(energy_err/energy_norm))
plot_field_2d(XTest, ux_exact-YTest[:,0:1], numPtsUTest, numPtsVTest, title="Error for x-displacement")
plot_field_2d(XTest, uy_exact-YTest[:,1:2], numPtsUTest, numPtsVTest, title="Error for y-displacement")
# plot the stresses
plot_field_2d(XTest, stress_xx_comp, numPtsUTest, numPtsVTest, title="Computed sigma_xx")
plot_field_2d(XTest, stress_yy_comp, numPtsUTest, numPtsVTest, title="Computed sigma_yy")
plot_field_2d(XTest, stress_xy_comp, numPtsUTest, numPtsVTest, title="Computed sigma_xy")
plot_field_2d(XTest, stress_xx_err, numPtsUTest, numPtsVTest, title="Error for sigma_xx")
plot_field_2d(XTest, stress_yy_err, numPtsUTest, numPtsVTest, title="Error for sigma_yy")
plot_field_2d(XTest, stress_xy_err, numPtsUTest, numPtsVTest, title="Error for sigma_xy")
|
[
"numpy.sqrt",
"utils.Geom_examples.QuarterAnnulus",
"numpy.array",
"tensorflow.keras.layers.Dense",
"numpy.arctan2",
"utils.tfp_loss.tfp_function_factory",
"tensorflow.dynamic_stitch",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.concatenate",
"tensorflow.convert_to_tensor",
"numpy.hypot",
"tensorflow.keras.backend.set_floatx",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.show",
"utils.Plotting.plot_field_2d",
"tensorflow.random.set_seed",
"tensorflow_probability.optimizer.bfgs_minimize",
"tensorflow.keras.optimizers.Adam",
"numpy.sum"
] |
[((1429, 1447), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1443, 1447), True, 'import numpy as np\n'), ((1448, 1470), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (1466, 1470), True, 'import tensorflow as tf\n'), ((2429, 2495), 'utils.Geom_examples.QuarterAnnulus', 'QuarterAnnulus', (["model_data['radius_int']", "model_data['radius_ext']"], {}), "(model_data['radius_int'], model_data['radius_ext'])\n", (2443, 2495), False, 'from utils.Geom_examples import QuarterAnnulus\n'), ((3458, 3490), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xPhys', 'yPhys'], {'s': '(0.1)'}), '(xPhys, yPhys, s=0.1)\n', (3469, 3490), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3536), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xPhysBnd', 'yPhysBnd'], {'s': '(1)', 'c': '"""red"""'}), "(xPhysBnd, yPhysBnd, s=1, c='red')\n", (3502, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3537, 3590), 'matplotlib.pyplot.title', 'plt.title', (['"""Boundary and interior integration points"""'], {}), "('Boundary and interior integration points')\n", (3546, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3599, 3601), True, 'import matplotlib.pyplot as plt\n'), ((3796, 3834), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['data_type'], {}), '(data_type)\n', (3823, 3834), True, 'import tensorflow as tf\n'), ((3840, 3874), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)', '"""swish"""'], {}), "(20, 'swish')\n", (3861, 3874), True, 'import tensorflow as tf\n'), ((3880, 3914), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)', '"""swish"""'], {}), "(20, 'swish')\n", (3901, 3914), True, 'import tensorflow as tf\n'), ((3920, 3954), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)', '"""swish"""'], {}), "(20, 'swish')\n", (3941, 3954), True, 'import tensorflow as tf\n'), ((3960, 3990), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)', 'None'], {}), '(2, None)\n', (3981, 3990), True, 'import tensorflow as tf\n'), ((4002, 4028), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (4026, 4028), True, 'import tensorflow as tf\n'), ((4281, 4307), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Xint'], {}), '(Xint)\n', (4301, 4307), True, 'import tensorflow as tf\n'), ((4318, 4344), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Wint'], {}), '(Wint)\n', (4338, 4344), True, 'import tensorflow as tf\n'), ((4355, 4381), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Xbnd'], {}), '(Xbnd)\n', (4375, 4381), True, 'import tensorflow as tf\n'), ((4392, 4418), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Wbnd'], {}), '(Wbnd)\n', (4412, 4418), True, 'import tensorflow as tf\n'), ((4429, 4455), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Ybnd'], {}), '(Ybnd)\n', (4449, 4455), True, 'import tensorflow as tf\n'), ((4473, 4484), 'time.time', 'time.time', ([], {}), '()\n', (4482, 4484), False, 'import time\n'), ((4589, 4600), 'time.time', 'time.time', ([], {}), '()\n', (4598, 4600), False, 'import time\n'), ((4691, 4768), 'utils.tfp_loss.tfp_function_factory', 'tfp_function_factory', (['pred_model', 'Xint_tf', 'Wint_tf', 'Xbnd_tf', 'Wbnd_tf', 'Ybnd_tf'], {}), '(pred_model, Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)\n', (4711, 4768), False, 'from utils.tfp_loss import tfp_function_factory\n'), ((4836, 4900), 
'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['loss_func.idx', 'pred_model.trainable_variables'], {}), '(loss_func.idx, pred_model.trainable_variables)\n', (4853, 4900), True, 'import tensorflow as tf\n'), ((4948, 5088), 'tensorflow_probability.optimizer.bfgs_minimize', 'tfp.optimizer.bfgs_minimize', ([], {'value_and_gradients_function': 'loss_func', 'initial_position': 'init_params', 'max_iterations': '(10000)', 'tolerance': '(1e-14)'}), '(value_and_gradients_function=loss_func,\n initial_position=init_params, max_iterations=10000, tolerance=1e-14)\n', (4975, 5088), True, 'import tensorflow_probability as tfp\n'), ((5298, 5309), 'time.time', 'time.time', ([], {}), '()\n', (5307, 5309), False, 'import time\n'), ((6676, 6703), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['XTest'], {}), '(XTest)\n', (6696, 6703), True, 'import tensorflow as tf\n'), ((7066, 7163), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'YTest[:, 0]', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed x-displacement"""'}), "(XTest, YTest[:, 0], numPtsUTest, numPtsVTest, title=\n 'Computed x-displacement')\n", (7079, 7163), False, 'from utils.Plotting import plot_field_2d\n'), ((7158, 7255), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'YTest[:, 1]', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed y-displacement"""'}), "(XTest, YTest[:, 1], numPtsUTest, numPtsVTest, title=\n 'Computed y-displacement')\n", (7171, 7255), False, 'from utils.Plotting import plot_field_2d\n'), ((8385, 8495), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', '(ux_exact - YTest[:, 0:1])', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for x-displacement"""'}), "(XTest, ux_exact - YTest[:, 0:1], numPtsUTest, numPtsVTest,\n title='Error for x-displacement')\n", (8398, 8495), False, 'from utils.Plotting import plot_field_2d\n'), ((8489, 8599), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', '(uy_exact - YTest[:, 1:2])', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for y-displacement"""'}), "(XTest, uy_exact - YTest[:, 1:2], numPtsUTest, numPtsVTest,\n title='Error for y-displacement')\n", (8502, 8599), False, 'from utils.Plotting import plot_field_2d\n'), ((8614, 8708), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xx_comp', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed sigma_xx"""'}), "(XTest, stress_xx_comp, numPtsUTest, numPtsVTest, title=\n 'Computed sigma_xx')\n", (8627, 8708), False, 'from utils.Plotting import plot_field_2d\n'), ((8704, 8798), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_yy_comp', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed sigma_yy"""'}), "(XTest, stress_yy_comp, numPtsUTest, numPtsVTest, title=\n 'Computed sigma_yy')\n", (8717, 8798), False, 'from utils.Plotting import plot_field_2d\n'), ((8794, 8888), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xy_comp', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Computed sigma_xy"""'}), "(XTest, stress_xy_comp, numPtsUTest, numPtsVTest, title=\n 'Computed sigma_xy')\n", (8807, 8888), False, 'from utils.Plotting import plot_field_2d\n'), ((8885, 8979), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xx_err', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for sigma_xx"""'}), "(XTest, stress_xx_err, numPtsUTest, numPtsVTest, title=\n 'Error for sigma_xx')\n", (8898, 8979), False, 'from utils.Plotting import plot_field_2d\n'), ((8975, 9069), 'utils.Plotting.plot_field_2d', 'plot_field_2d', 
(['XTest', 'stress_yy_err', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for sigma_yy"""'}), "(XTest, stress_yy_err, numPtsUTest, numPtsVTest, title=\n 'Error for sigma_yy')\n", (8988, 9069), False, 'from utils.Plotting import plot_field_2d\n'), ((9065, 9159), 'utils.Plotting.plot_field_2d', 'plot_field_2d', (['XTest', 'stress_xy_err', 'numPtsUTest', 'numPtsVTest'], {'title': '"""Error for sigma_xy"""'}), "(XTest, stress_xy_err, numPtsUTest, numPtsVTest, title=\n 'Error for sigma_xy')\n", (9078, 9159), False, 'from utils.Plotting import plot_field_2d\n'), ((5482, 5498), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (5492, 5498), True, 'import numpy as np\n'), ((5609, 5623), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (5617, 5623), True, 'import numpy as np\n'), ((5983, 5997), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (5991, 5997), True, 'import numpy as np\n'), ((7423, 7484), 'numpy.sum', 'np.sum', (['((ux_exact - ux_test) ** 2 + (uy_exact - uy_test) ** 2)'], {}), '((ux_exact - ux_test) ** 2 + (uy_exact - uy_test) ** 2)\n', (7429, 7484), True, 'import numpy as np\n'), ((7494, 7531), 'numpy.sum', 'np.sum', (['(ux_exact ** 2 + uy_exact ** 2)'], {}), '(ux_exact ** 2 + uy_exact ** 2)\n', (7500, 7531), True, 'import numpy as np\n'), ((8050, 8123), 'numpy.array', 'np.array', (['[stress_xx_err[i, 0], stress_yy_err[i, 0], stress_xy_err[i, 0]]'], {}), '([stress_xx_err[i, 0], stress_yy_err[i, 0], stress_xy_err[i, 0]])\n', (8058, 8123), True, 'import numpy as np\n'), ((8133, 8212), 'numpy.array', 'np.array', (['[stress_xx_exact[i, 0], stress_yy_exact[i, 0], stress_xy_exact[i, 0]]'], {}), '([stress_xx_exact[i, 0], stress_yy_exact[i, 0], stress_xy_exact[i, 0]])\n', (8141, 8212), True, 'import numpy as np\n'), ((8350, 8383), 'numpy.sqrt', 'np.sqrt', (['(energy_err / energy_norm)'], {}), '(energy_err / energy_norm)\n', (8357, 8383), True, 'import numpy as np\n'), ((2739, 2777), 'numpy.concatenate', 'np.concatenate', (['(xPhys, yPhys)'], {'axis': '(1)'}), '((xPhys, yPhys), axis=1)\n', (2753, 2777), True, 'import numpy as np\n'), ((2801, 2815), 'numpy.array', 'np.array', (['Wint'], {}), '(Wint)\n', (2809, 2815), True, 'import numpy as np\n'), ((3354, 3398), 'numpy.concatenate', 'np.concatenate', (['(xPhysBnd, yPhysBnd)'], {'axis': '(1)'}), '((xPhysBnd, yPhysBnd), axis=1)\n', (3368, 3398), True, 'import numpy as np\n'), ((3424, 3438), 'numpy.array', 'np.array', (['Wbnd'], {}), '(Wbnd)\n', (3432, 3438), True, 'import numpy as np\n'), ((3717, 3757), 'numpy.concatenate', 'np.concatenate', (['(Ybnd_x, Ybnd_y)'], {'axis': '(1)'}), '((Ybnd_x, Ybnd_y), axis=1)\n', (3731, 3757), True, 'import numpy as np\n'), ((6602, 6648), 'numpy.concatenate', 'np.concatenate', (['(xPhysTest, yPhysTest)'], {'axis': '(1)'}), '((xPhysTest, yPhysTest), axis=1)\n', (6616, 6648), True, 'import numpy as np\n'), ((5439, 5450), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5447, 5450), True, 'import numpy as np\n'), ((5456, 5467), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5464, 5467), True, 'import numpy as np\n')]
|
import datetime
import os
import sys
from cmath import inf
from typing import Any
import hypothesis.extra.numpy as xps
import hypothesis.strategies as st
import numpy
import pytest
from hypothesis import assume, given
from eopf.product.utils import (
apply_xpath,
conv,
convert_to_unix_time,
is_date,
parse_xml,
reverse_conv,
translate_structure,
)
@pytest.fixture
def tree(EMBEDED_TEST_DATA_FOLDER: str):
snippet_path = os.path.join(EMBEDED_TEST_DATA_FOLDER, "snippet_xfdumanifest.xml")
with open(snippet_path) as f:
return parse_xml(f)
@st.composite
def value_with_type(draw, elements=st.integers(), expected_type=int, expected_container_type=None):
if isinstance(expected_type, st.SearchStrategy):
expected_type = draw(expected_type)
if expected_container_type is not None:
if isinstance(expected_container_type, st.SearchStrategy):
expected_container_type = draw(expected_container_type)
return (draw(elements), expected_type, expected_container_type)
return (draw(elements), expected_type)
@st.composite
def numpy_value(draw, dtype_st=xps.scalar_dtypes(), allow_infinity=True, allow_nan=True):
return draw(xps.from_dtype(draw(dtype_st), allow_infinity=allow_infinity, allow_nan=allow_nan))
@pytest.mark.unit
def test_parse_xml(tree):
"""Given an input xml,
the output of the function must match the expected output"""
result = ""
display_namespaces = True
for element in tree.iter():
tag = element.tag
result += f"{tag}\n"
if display_namespaces:
display_namespaces = False
for key, value in element.nsmap.items():
result += f"{key} : {value}\n"
attributes = element.attrib
for key, value in attributes.items():
result += f"{key} : {value}\n"
textual_content = element.text
if textual_content and textual_content.strip():
result += textual_content + "\n"
file_path = os.path.join(os.path.abspath("tests/data"), "solutions.txt")
with open(file_path, "r") as f:
expected = f.read()
assert result == expected
@pytest.mark.unit
def test_translate_structure(tree):
"""Given an input xml,
the output of the function must match the expected output"""
MAP = {
"title": "concat('',metadataSection/metadataObject[@ID='generalProductInformation']/metadataWrap/xmlData/"
"sentinel3:generalProductInformation/sentinel3:productName/text())",
"Conventions": "'CF-1.9'",
}
NAMESPACES = {
"xfdu": "urn:ccsds:schema:xfdu:1",
"gml": "http://www.opengis.net/gml",
"sentinel-safe": "http://www.esa.int/safe/sentinel/1.1",
"sentinel3": "http://www.esa.int/safe/sentinel/sentinel-3/1.0",
"olci": "http://www.esa.int/safe/sentinel/sentinel-3/olci/1.0",
}
result = translate_structure(MAP, tree, NAMESPACES)
assert result == {
"title": "S3A_OL_1_EFR____20220116T092821_20220116T093121_20220117T134858_0179_081_036_2160_LN1_O_NT_002.SEN3",
"Conventions": "CF-1.9",
}
@pytest.mark.unit
def test_apply_xpath(tree):
"""Given an input xml,
the output of the function must match the expected output"""
MAP = {
"title": "concat('',metadataSection/metadataObject[@ID='generalProductInformation']/metadataWrap/xmlData/"
"sentinel3:generalProductInformation/sentinel3:productName/text())",
"Conventions": "'CF-1.9'",
}
NAMESPACES = {
"xfdu": "urn:ccsds:schema:xfdu:1",
"gml": "http://www.opengis.net/gml",
"sentinel-safe": "http://www.esa.int/safe/sentinel/1.1",
"sentinel3": "http://www.esa.int/safe/sentinel/sentinel-3/1.0",
"olci": "http://www.esa.int/safe/sentinel/sentinel-3/olci/1.0",
}
result = {attr: apply_xpath(tree, MAP[attr], NAMESPACES) for attr in MAP}
assert result == {
"title": "S3A_OL_1_EFR____20220116T092821_20220116T093121_20220117T134858_0179_081_036_2160_LN1_O_NT_002.SEN3",
"Conventions": "CF-1.9",
}
@pytest.mark.unit
def test_is_date():
string_date_1 = "2020-03-31T17:19:29.230522Z" # Zulu time
string_date_2 = "2020-03-31T17:19:29.230522GMT+3" # GMT+3 Time
string_date_3 = "some_random_string"
dt_date = datetime.datetime(2020, 3, 31, 17, 19, 29, 230522)
assert is_date(string_date_1)
assert is_date(string_date_2)
assert is_date(str(dt_date))
assert not is_date(string_date_3)
@pytest.mark.unit
def test_convert_unix_time():
import pytz
    # Define a datetime-like string and verify that its conversion matches the datetime object and the expected unix time (microseconds)
string_date = "2020-03-31T17:19:29.230522Z"
dt_date = datetime.datetime(2020, 3, 31, 17, 19, 29, 230522, pytz.UTC)
expected_unix_time = 1585675169230522
assert convert_to_unix_time(string_date) == convert_to_unix_time(dt_date) == expected_unix_time
    # Define a datetime-like string in a non-UTC time zone (GMT-3) and verify that it does not match the expected unix time
string_date = "2020-03-31T17:19:29.230522GMT-3"
assert convert_to_unix_time(string_date) != convert_to_unix_time(dt_date)
assert convert_to_unix_time(string_date) != expected_unix_time
#
try:
string_date = "a string that is not a valid date"
convert_to_unix_time(string_date)
except ValueError:
assert True
@pytest.mark.unit
@given(
value_and_types=st.one_of(
value_with_type(
st.lists(elements=st.floats(allow_infinity=False, allow_nan=False), unique=True, min_size=10),
float,
list,
),
value_with_type(st.lists(elements=st.integers(), unique=True, min_size=10), int, list),
value_with_type(st.lists(elements=st.booleans(), unique=True, min_size=2), int, list),
value_with_type(st.sets(elements=st.floats(allow_infinity=False, allow_nan=False), min_size=10), float, set),
value_with_type(st.sets(elements=st.integers(), min_size=10), int, set),
value_with_type(st.sets(elements=st.booleans(), min_size=2), int, set),
value_with_type(st.dictionaries(st.text(), st.integers(), min_size=10), int, dict),
value_with_type(st.dictionaries(st.text(), st.booleans(), min_size=10), int, dict),
value_with_type(
st.dictionaries(st.text(), st.floats(allow_infinity=False, allow_nan=False), min_size=10),
float,
dict,
),
value_with_type(xps.arrays(xps.floating_dtypes(), 10, unique=True), float, list),
value_with_type(xps.arrays(xps.integer_dtypes(), 10, unique=True), int, list),
value_with_type(xps.arrays(xps.boolean_dtypes(), 10, unique=True), int, list),
),
)
def test_conv_sequences(value_and_types: tuple[Any, type, type]):
values, type_, container_type = value_and_types
assume(inf not in values)
converted_list = conv(values)
assert isinstance(converted_list, container_type)
# Check if size of converted value doesn't change
assert len(converted_list) == len(values)
# Check if type of each item from converted value is correct
if isinstance(converted_list, dict):
iterator = converted_list.values()
original = values.values()
else:
iterator = converted_list
original = values
for converted_value, value in zip(sorted(iterator), sorted(original)):
assert isinstance(converted_value, type_)
conv_value = conv(value)
# check if converted values are the same or both are nan
assert converted_value == conv_value or (converted_value != converted_value and conv_value != conv_value)
@pytest.mark.unit
@pytest.mark.parametrize("EPSILON", [0.1])
@given(value=numpy_value(xps.floating_dtypes(), allow_infinity=False, allow_nan=False))
def test_epsilon_on_fp_conv(value, EPSILON):
converted_value = conv(value)
assert value - converted_value < EPSILON
assert converted_value - value < EPSILON
@pytest.mark.unit
@given(
value_and_type=st.one_of(
value_with_type(
elements=numpy_value(xps.floating_dtypes(), allow_infinity=False, allow_nan=False),
expected_type=float,
),
value_with_type(
elements=numpy_value(xps.integer_dtypes(), allow_infinity=False, allow_nan=False),
expected_type=int,
),
value_with_type(
elements=st.datetimes(),
expected_type=int,
),
),
)
def test_conv(value_and_type):
value, expected_type = value_and_type
converted_value = conv(value)
assert isinstance(converted_value, expected_type)
@pytest.mark.unit
@pytest.mark.parametrize(
"sysmax, maxint",
[
(numpy.int64(sys.maxsize), numpy.int64(9223372036854775807)),
],
)
def test_maxint_conv(sysmax, maxint):
# Robustness
assert conv(sysmax) == maxint
@pytest.mark.unit
@given(
value_and_types=st.one_of(
value_with_type(
st.integers(min_value=numpy.iinfo("int64").min, max_value=numpy.iinfo("int64").max),
int,
xps.integer_dtypes(endianness="=", sizes=(64,)),
),
value_with_type(
st.integers(min_value=numpy.iinfo("int32").min, max_value=numpy.iinfo("int32").max),
int,
xps.integer_dtypes(endianness="=", sizes=(32,)),
),
value_with_type(
st.integers(min_value=numpy.iinfo("int16").min, max_value=numpy.iinfo("int16").max),
int,
xps.integer_dtypes(endianness="=", sizes=(16,)),
),
value_with_type(
st.integers(min_value=numpy.iinfo("int8").min, max_value=numpy.iinfo("int8").max),
int,
xps.integer_dtypes(endianness="=", sizes=(8,)),
),
value_with_type(st.floats(width=16), float, xps.floating_dtypes(endianness="=", sizes=(16,))),
value_with_type(st.floats(width=32), float, xps.floating_dtypes(endianness="=", sizes=(32,))),
value_with_type(st.floats(width=64), float, xps.floating_dtypes(endianness="=", sizes=(64,))),
),
)
def test_reverse_conv(value_and_types):
value, current_type, data_type = value_and_types
# verify if the current data type is as expected (int or float)
assert isinstance(value, current_type)
# convert value to given data type (int64, int32, float64 etc .. )
converted_value = reverse_conv(data_type, value)
# check if conversion is performed according to given data (int -> numpy.int64, float -> numpy.float64)
assert numpy.issubdtype(type(converted_value), data_type)
# check if converted data type is changed and not match with old one
assert type(converted_value) != current_type
|
[
"numpy.iinfo",
"eopf.product.utils.convert_to_unix_time",
"hypothesis.extra.numpy.integer_dtypes",
"eopf.product.utils.conv",
"hypothesis.extra.numpy.boolean_dtypes",
"datetime.datetime",
"eopf.product.utils.is_date",
"numpy.int64",
"hypothesis.strategies.booleans",
"hypothesis.strategies.text",
"eopf.product.utils.translate_structure",
"hypothesis.assume",
"hypothesis.strategies.datetimes",
"eopf.product.utils.reverse_conv",
"hypothesis.extra.numpy.scalar_dtypes",
"hypothesis.strategies.integers",
"eopf.product.utils.parse_xml",
"os.path.join",
"hypothesis.strategies.floats",
"eopf.product.utils.apply_xpath",
"pytest.mark.parametrize",
"os.path.abspath",
"hypothesis.extra.numpy.floating_dtypes"
] |
[((7718, 7759), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""EPSILON"""', '[0.1]'], {}), "('EPSILON', [0.1])\n", (7741, 7759), False, 'import pytest\n'), ((457, 523), 'os.path.join', 'os.path.join', (['EMBEDED_TEST_DATA_FOLDER', '"""snippet_xfdumanifest.xml"""'], {}), "(EMBEDED_TEST_DATA_FOLDER, 'snippet_xfdumanifest.xml')\n", (469, 523), False, 'import os\n'), ((637, 650), 'hypothesis.strategies.integers', 'st.integers', ([], {}), '()\n', (648, 650), True, 'import hypothesis.strategies as st\n'), ((1142, 1161), 'hypothesis.extra.numpy.scalar_dtypes', 'xps.scalar_dtypes', ([], {}), '()\n', (1159, 1161), True, 'import hypothesis.extra.numpy as xps\n'), ((2906, 2948), 'eopf.product.utils.translate_structure', 'translate_structure', (['MAP', 'tree', 'NAMESPACES'], {}), '(MAP, tree, NAMESPACES)\n', (2925, 2948), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((4324, 4374), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(31)', '(17)', '(19)', '(29)', '(230522)'], {}), '(2020, 3, 31, 17, 19, 29, 230522)\n', (4341, 4374), False, 'import datetime\n'), ((4386, 4408), 'eopf.product.utils.is_date', 'is_date', (['string_date_1'], {}), '(string_date_1)\n', (4393, 4408), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((4420, 4442), 'eopf.product.utils.is_date', 'is_date', (['string_date_2'], {}), '(string_date_2)\n', (4427, 4442), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((4758, 4818), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(31)', '(17)', '(19)', '(29)', '(230522)', 'pytz.UTC'], {}), '(2020, 3, 31, 17, 19, 29, 230522, pytz.UTC)\n', (4775, 4818), False, 'import datetime\n'), ((6892, 6917), 'hypothesis.assume', 'assume', (['(inf not in values)'], {}), '(inf not in values)\n', (6898, 6917), False, 'from hypothesis import assume, given\n'), ((6939, 6951), 'eopf.product.utils.conv', 'conv', (['values'], {}), '(values)\n', (6943, 6951), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((7915, 7926), 'eopf.product.utils.conv', 'conv', (['value'], {}), '(value)\n', (7919, 7926), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((8610, 8621), 'eopf.product.utils.conv', 'conv', (['value'], {}), '(value)\n', (8614, 8621), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((10433, 10463), 'eopf.product.utils.reverse_conv', 'reverse_conv', (['data_type', 'value'], {}), '(data_type, value)\n', (10445, 10463), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((573, 585), 'eopf.product.utils.parse_xml', 'parse_xml', (['f'], {}), '(f)\n', (582, 585), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((2036, 2065), 'os.path.abspath', 'os.path.abspath', (['"""tests/data"""'], {}), "('tests/data')\n", (2051, 2065), False, 'import os\n'), ((3858, 3898), 'eopf.product.utils.apply_xpath', 'apply_xpath', (['tree', 'MAP[attr]', 'NAMESPACES'], {}), '(tree, 
MAP[attr], NAMESPACES)\n', (3869, 3898), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((4491, 4513), 'eopf.product.utils.is_date', 'is_date', (['string_date_3'], {}), '(string_date_3)\n', (4498, 4513), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((4873, 4906), 'eopf.product.utils.convert_to_unix_time', 'convert_to_unix_time', (['string_date'], {}), '(string_date)\n', (4893, 4906), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((4910, 4939), 'eopf.product.utils.convert_to_unix_time', 'convert_to_unix_time', (['dt_date'], {}), '(dt_date)\n', (4930, 4939), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((5135, 5168), 'eopf.product.utils.convert_to_unix_time', 'convert_to_unix_time', (['string_date'], {}), '(string_date)\n', (5155, 5168), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((5172, 5201), 'eopf.product.utils.convert_to_unix_time', 'convert_to_unix_time', (['dt_date'], {}), '(dt_date)\n', (5192, 5201), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((5213, 5246), 'eopf.product.utils.convert_to_unix_time', 'convert_to_unix_time', (['string_date'], {}), '(string_date)\n', (5233, 5246), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((5351, 5384), 'eopf.product.utils.convert_to_unix_time', 'convert_to_unix_time', (['string_date'], {}), '(string_date)\n', (5371, 5384), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((7506, 7517), 'eopf.product.utils.conv', 'conv', (['value'], {}), '(value)\n', (7510, 7517), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((8895, 8907), 'eopf.product.utils.conv', 'conv', (['sysmax'], {}), '(sysmax)\n', (8899, 8907), False, 'from eopf.product.utils import apply_xpath, conv, convert_to_unix_time, is_date, parse_xml, reverse_conv, translate_structure\n'), ((7785, 7806), 'hypothesis.extra.numpy.floating_dtypes', 'xps.floating_dtypes', ([], {}), '()\n', (7804, 7806), True, 'import hypothesis.extra.numpy as xps\n'), ((8759, 8783), 'numpy.int64', 'numpy.int64', (['sys.maxsize'], {}), '(sys.maxsize)\n', (8770, 8783), False, 'import numpy\n'), ((8785, 8817), 'numpy.int64', 'numpy.int64', (['(9223372036854775807)'], {}), '(9223372036854775807)\n', (8796, 8817), False, 'import numpy\n'), ((9128, 9175), 'hypothesis.extra.numpy.integer_dtypes', 'xps.integer_dtypes', ([], {'endianness': '"""="""', 'sizes': '(64,)'}), "(endianness='=', sizes=(64,))\n", (9146, 9175), True, 'import hypothesis.extra.numpy as xps\n'), ((9339, 9386), 'hypothesis.extra.numpy.integer_dtypes', 'xps.integer_dtypes', ([], {'endianness': '"""="""', 'sizes': '(32,)'}), "(endianness='=', sizes=(32,))\n", (9357, 9386), True, 'import hypothesis.extra.numpy as xps\n'), ((9550, 9597), 'hypothesis.extra.numpy.integer_dtypes', 'xps.integer_dtypes', ([], {'endianness': '"""="""', 'sizes': 
'(16,)'}), "(endianness='=', sizes=(16,))\n", (9568, 9597), True, 'import hypothesis.extra.numpy as xps\n'), ((9759, 9805), 'hypothesis.extra.numpy.integer_dtypes', 'xps.integer_dtypes', ([], {'endianness': '"""="""', 'sizes': '(8,)'}), "(endianness='=', sizes=(8,))\n", (9777, 9805), True, 'import hypothesis.extra.numpy as xps\n'), ((9842, 9861), 'hypothesis.strategies.floats', 'st.floats', ([], {'width': '(16)'}), '(width=16)\n', (9851, 9861), True, 'import hypothesis.strategies as st\n'), ((9870, 9918), 'hypothesis.extra.numpy.floating_dtypes', 'xps.floating_dtypes', ([], {'endianness': '"""="""', 'sizes': '(16,)'}), "(endianness='=', sizes=(16,))\n", (9889, 9918), True, 'import hypothesis.extra.numpy as xps\n'), ((9945, 9964), 'hypothesis.strategies.floats', 'st.floats', ([], {'width': '(32)'}), '(width=32)\n', (9954, 9964), True, 'import hypothesis.strategies as st\n'), ((9973, 10021), 'hypothesis.extra.numpy.floating_dtypes', 'xps.floating_dtypes', ([], {'endianness': '"""="""', 'sizes': '(32,)'}), "(endianness='=', sizes=(32,))\n", (9992, 10021), True, 'import hypothesis.extra.numpy as xps\n'), ((10048, 10067), 'hypothesis.strategies.floats', 'st.floats', ([], {'width': '(64)'}), '(width=64)\n', (10057, 10067), True, 'import hypothesis.strategies as st\n'), ((10076, 10124), 'hypothesis.extra.numpy.floating_dtypes', 'xps.floating_dtypes', ([], {'endianness': '"""="""', 'sizes': '(64,)'}), "(endianness='=', sizes=(64,))\n", (10095, 10124), True, 'import hypothesis.extra.numpy as xps\n'), ((6177, 6186), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (6184, 6186), True, 'import hypothesis.strategies as st\n'), ((6188, 6201), 'hypothesis.strategies.integers', 'st.integers', ([], {}), '()\n', (6199, 6201), True, 'import hypothesis.strategies as st\n'), ((6269, 6278), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (6276, 6278), True, 'import hypothesis.strategies as st\n'), ((6280, 6293), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (6291, 6293), True, 'import hypothesis.strategies as st\n'), ((6374, 6383), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (6381, 6383), True, 'import hypothesis.strategies as st\n'), ((6385, 6433), 'hypothesis.strategies.floats', 'st.floats', ([], {'allow_infinity': '(False)', 'allow_nan': '(False)'}), '(allow_infinity=False, allow_nan=False)\n', (6394, 6433), True, 'import hypothesis.strategies as st\n'), ((6532, 6553), 'hypothesis.extra.numpy.floating_dtypes', 'xps.floating_dtypes', ([], {}), '()\n', (6551, 6553), True, 'import hypothesis.extra.numpy as xps\n'), ((6622, 6642), 'hypothesis.extra.numpy.integer_dtypes', 'xps.integer_dtypes', ([], {}), '()\n', (6640, 6642), True, 'import hypothesis.extra.numpy as xps\n'), ((6709, 6729), 'hypothesis.extra.numpy.boolean_dtypes', 'xps.boolean_dtypes', ([], {}), '()\n', (6727, 6729), True, 'import hypothesis.extra.numpy as xps\n'), ((8448, 8462), 'hypothesis.strategies.datetimes', 'st.datetimes', ([], {}), '()\n', (8460, 8462), True, 'import hypothesis.strategies as st\n'), ((5542, 5590), 'hypothesis.strategies.floats', 'st.floats', ([], {'allow_infinity': '(False)', 'allow_nan': '(False)'}), '(allow_infinity=False, allow_nan=False)\n', (5551, 5590), True, 'import hypothesis.strategies as st\n'), ((5709, 5722), 'hypothesis.strategies.integers', 'st.integers', ([], {}), '()\n', (5720, 5722), True, 'import hypothesis.strategies as st\n'), ((5805, 5818), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (5816, 5818), True, 'import 
hypothesis.strategies as st\n'), ((5899, 5947), 'hypothesis.strategies.floats', 'st.floats', ([], {'allow_infinity': '(False)', 'allow_nan': '(False)'}), '(allow_infinity=False, allow_nan=False)\n', (5908, 5947), True, 'import hypothesis.strategies as st\n'), ((6017, 6030), 'hypothesis.strategies.integers', 'st.integers', ([], {}), '()\n', (6028, 6030), True, 'import hypothesis.strategies as st\n'), ((6098, 6111), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (6109, 6111), True, 'import hypothesis.strategies as st\n'), ((8133, 8154), 'hypothesis.extra.numpy.floating_dtypes', 'xps.floating_dtypes', ([], {}), '()\n', (8152, 8154), True, 'import hypothesis.extra.numpy as xps\n'), ((8298, 8318), 'hypothesis.extra.numpy.integer_dtypes', 'xps.integer_dtypes', ([], {}), '()\n', (8316, 8318), True, 'import hypothesis.extra.numpy as xps\n'), ((9036, 9056), 'numpy.iinfo', 'numpy.iinfo', (['"""int64"""'], {}), "('int64')\n", (9047, 9056), False, 'import numpy\n'), ((9072, 9092), 'numpy.iinfo', 'numpy.iinfo', (['"""int64"""'], {}), "('int64')\n", (9083, 9092), False, 'import numpy\n'), ((9247, 9267), 'numpy.iinfo', 'numpy.iinfo', (['"""int32"""'], {}), "('int32')\n", (9258, 9267), False, 'import numpy\n'), ((9283, 9303), 'numpy.iinfo', 'numpy.iinfo', (['"""int32"""'], {}), "('int32')\n", (9294, 9303), False, 'import numpy\n'), ((9458, 9478), 'numpy.iinfo', 'numpy.iinfo', (['"""int16"""'], {}), "('int16')\n", (9469, 9478), False, 'import numpy\n'), ((9494, 9514), 'numpy.iinfo', 'numpy.iinfo', (['"""int16"""'], {}), "('int16')\n", (9505, 9514), False, 'import numpy\n'), ((9669, 9688), 'numpy.iinfo', 'numpy.iinfo', (['"""int8"""'], {}), "('int8')\n", (9680, 9688), False, 'import numpy\n'), ((9704, 9723), 'numpy.iinfo', 'numpy.iinfo', (['"""int8"""'], {}), "('int8')\n", (9715, 9723), False, 'import numpy\n')]
|
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn import feature_selection as fs
from sklearn import naive_bayes
from sklearn import model_selection
from sklearn import metrics
from sklearn import linear_model
from sklearn import svm
from imblearn.under_sampling import NeighbourhoodCleaningRule
from imblearn.over_sampling import SMOTE, RandomOverSampler
COLUMN_NAMES = ['sex', 'length', 'diameter', 'height',
'whole weight', 'shucked weight', 'viscera weight',
'shell weight', 'rings']
# feature selection
def cal_features_mutual_info(data):
y = data['rings']
features = data.loc[:, data.columns != 'rings']
info = fs.mutual_info_regression(features, y)
print('========== mutual info ==============')
for idx, col in enumerate(COLUMN_NAMES):
if col == 'rings':
break
name = COLUMN_NAMES[idx]
print('{0} ==> {1}'.format(name, info[idx]))
def cal_feature_variance(data):
vt = fs.VarianceThreshold()
vt.fit_transform(data)
print('======== variance ================')
for idx, col in enumerate(COLUMN_NAMES):
print('{0} ==> {1}'.format(col, vt.variances_[idx]))
def draw_class_hist(Y):
bins = [x for x in range(1, 29, 5)]
Y.plot.hist(bins=bins)
plt.show()
# data loading / preprocessing
def preprocessing(data):
_, v = np.unique(data['sex'], return_inverse=True)
data['sex'] = v
def load_data():
data = pd.read_csv('../uci_data/abalone.data.txt', header=None, names=COLUMN_NAMES)
preprocessing(data)
print(data.describe())
return data
def oversampling(X, Y):
# some class has only one sample
# to apply SMOTE we first oversample it randomly
X_resampled, Y_resampled = RandomOverSampler().fit_sample(X, Y)
X_resampled, Y_resampled = SMOTE().fit_sample(X_resampled, Y_resampled)
return (X_resampled, Y_resampled)
def undersampling(X, Y):
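    # NeighbourhoodCleaningRule removes majority-class samples whose nearest
    # neighbours disagree with them, cleaning the class boundary rather than
    # dropping samples at random.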
rus = NeighbourhoodCleaningRule(ratio='majority')
x_new, y_new = rus.fit_sample(X, Y)
return (x_new, y_new)
# metrics
# 1. metrics for multi-class classification problem
def cal_metrics(y_test, y_pred, label):
acc = metrics.accuracy_score(y_test, y_pred)
print('{0} acc: {1}'.format(label, acc))
prec = metrics.precision_score(y_test, y_pred, average='weighted')
print('{0} precision: {1}'.format(label, prec))
recall = metrics.recall_score(y_test, y_pred, average='weighted')
print('{0} recall: {1}'.format(label, recall))
# models
def gaussian_naive_bayes(x_train, y_train, x_test, y_test):
model = naive_bayes.GaussianNB()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
cal_metrics(y_test, y_pred, 'gaussianNB')
def multinomial_naive_bayes(x_train, y_train, x_test, y_test):
model = naive_bayes.MultinomialNB()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
cal_metrics(y_test, y_pred, 'multinomialNB')
def logistics_regression(x_train, y_train, x_test, y_test):
model = linear_model.LogisticRegression(solver='sag', multi_class='multinomial')
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
    cal_metrics(y_test, y_pred, 'logistic regression')
def select_features_by_stat_info(data):
cal_features_mutual_info(data)
cal_feature_variance(data)
print('==================')
# ignore features with low variance
    return ['sex', 'length', 'whole weight',
'shucked weight', 'viscera weight',
'shell weight']
def select_feature_by_L1(data_train, data_test):
all_cols = ['sex', 'length', 'diameter', 'height',
'whole weight', 'shucked weight', 'viscera weight',
'shell weight']
Y = data_train['rings']
X = data_train[all_cols]
X_test = data_test[all_cols]
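    # the L1-penalised linear SVM pushes the weights of uninformative features to zero;
    # SelectFromModel then keeps only the features whose weight magnitude exceeds the threshold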
svc = svm.LinearSVC(penalty='l1', dual=False).fit(X, Y)
model = fs.SelectFromModel(svc, threshold=0.5, prefit=True)
return (model.transform(X), model.transform(X_test))
if __name__ == '__main__':
data = load_data()
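    # plain 80/20 split in file order (no shuffling or stratification)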
split_point = math.floor(len(data) * 0.8)
data_train = data[: split_point]
data_test = data[split_point:]
y_train = data_train['rings']
y_test = data_test['rings']
print('======== select features by stat info ========')
selected_features = select_features_by_stat_info(data)
x_train = data_train[selected_features]
x_test = data_test[selected_features]
gaussian_naive_bayes(x_train, y_train, x_test, y_test)
logistics_regression(x_train, y_train, x_test, y_test)
multinomial_naive_bayes(x_train, y_train, x_test, y_test)
print('=========== select features by L1 =============')
x_train, x_test = select_feature_by_L1(data_train, data_test)
gaussian_naive_bayes(x_train, y_train, x_test, y_test)
logistics_regression(x_train, y_train, x_test, y_test)
multinomial_naive_bayes(x_train, y_train, x_test, y_test)
print('============ under sampling ==============')
x_res, y_res = undersampling(x_train, y_train)
gaussian_naive_bayes(x_res, y_res, x_test, y_test)
logistics_regression(x_res, y_res, x_test, y_test)
multinomial_naive_bayes(x_res, y_res, x_test, y_test)
print('============ over sampling ==============')
x_res, y_res = oversampling(x_train, y_train)
gaussian_naive_bayes(x_res, y_res, x_test, y_test)
logistics_regression(x_res, y_res, x_test, y_test)
multinomial_naive_bayes(x_res, y_res, x_test, y_test)
#draw_class_hist(data['rings'])
|
[
"numpy.unique",
"sklearn.feature_selection.VarianceThreshold",
"pandas.read_csv",
"imblearn.under_sampling.NeighbourhoodCleaningRule",
"sklearn.feature_selection.SelectFromModel",
"imblearn.over_sampling.SMOTE",
"sklearn.svm.LinearSVC",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.linear_model.LogisticRegression",
"imblearn.over_sampling.RandomOverSampler",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.naive_bayes.GaussianNB",
"sklearn.feature_selection.mutual_info_regression",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.show"
] |
[((714, 752), 'sklearn.feature_selection.mutual_info_regression', 'fs.mutual_info_regression', (['features', 'y'], {}), '(features, y)\n', (739, 752), True, 'from sklearn import feature_selection as fs\n'), ((1027, 1049), 'sklearn.feature_selection.VarianceThreshold', 'fs.VarianceThreshold', ([], {}), '()\n', (1047, 1049), True, 'from sklearn import feature_selection as fs\n'), ((1331, 1341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1339, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1411, 1454), 'numpy.unique', 'np.unique', (["data['sex']"], {'return_inverse': '(True)'}), "(data['sex'], return_inverse=True)\n", (1420, 1454), True, 'import numpy as np\n'), ((1504, 1580), 'pandas.read_csv', 'pd.read_csv', (['"""../uci_data/abalone.data.txt"""'], {'header': 'None', 'names': 'COLUMN_NAMES'}), "('../uci_data/abalone.data.txt', header=None, names=COLUMN_NAMES)\n", (1515, 1580), True, 'import pandas as pd\n'), ((1981, 2024), 'imblearn.under_sampling.NeighbourhoodCleaningRule', 'NeighbourhoodCleaningRule', ([], {'ratio': '"""majority"""'}), "(ratio='majority')\n", (2006, 2024), False, 'from imblearn.under_sampling import NeighbourhoodCleaningRule\n'), ((2205, 2243), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2227, 2243), False, 'from sklearn import metrics\n'), ((2301, 2360), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (2324, 2360), False, 'from sklearn import metrics\n'), ((2427, 2483), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (2447, 2483), False, 'from sklearn import metrics\n'), ((2618, 2642), 'sklearn.naive_bayes.GaussianNB', 'naive_bayes.GaussianNB', ([], {}), '()\n', (2640, 2642), False, 'from sklearn import naive_bayes\n'), ((2832, 2859), 'sklearn.naive_bayes.MultinomialNB', 'naive_bayes.MultinomialNB', ([], {}), '()\n', (2857, 2859), False, 'from sklearn import naive_bayes\n'), ((3049, 3121), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'solver': '"""sag"""', 'multi_class': '"""multinomial"""'}), "(solver='sag', multi_class='multinomial')\n", (3080, 3121), False, 'from sklearn import linear_model\n'), ((3942, 3993), 'sklearn.feature_selection.SelectFromModel', 'fs.SelectFromModel', (['svc'], {'threshold': '(0.5)', 'prefit': '(True)'}), '(svc, threshold=0.5, prefit=True)\n', (3960, 3993), True, 'from sklearn import feature_selection as fs\n'), ((1794, 1813), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {}), '()\n', (1811, 1813), False, 'from imblearn.over_sampling import SMOTE, RandomOverSampler\n'), ((1862, 1869), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {}), '()\n', (1867, 1869), False, 'from imblearn.over_sampling import SMOTE, RandomOverSampler\n'), ((3880, 3919), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'penalty': '"""l1"""', 'dual': '(False)'}), "(penalty='l1', dual=False)\n", (3893, 3919), False, 'from sklearn import svm\n')]
|
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from losses import focal_loss,weighted_binary_crossentropy
from utils import Dataset
class DeepFM(object):
def __init__(self, params):
self.feature_size = params['feature_size']
self.field_size = params['field_size']
self.embedding_size = params['embedding_size']
self.deep_layers = params['deep_layers']
self.l2_reg_coef = params['l2_reg']
self.learning_rate = params['learning_rate']
self.pos_ratio = params['pos_ratio']
self.keep_prob_v = params['keep_prob']
self.activate = tf.nn.relu
self.weight = {}
self.saver=None
self.checkpoint_dir = params['checkpoint_dir']
self.build()
def build(self):
"""
feature_size: N
field_size: F
embedding_size: K
batch_size: None
"""
self.feat_index = tf.placeholder(tf.int32, shape=[None, None], name='feature_index')
self.feat_value = tf.placeholder(tf.float32, shape=[None, None], name='feature_value')
self.label = tf.placeholder(tf.float32, shape=[None,1], name='label')
self.keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob') # scaler
self.is_training= tf.placeholder(tf.bool, shape=[],name='is_training')
        #1. ------------------------- define the weights -----------------------------------------
        # weights for the first-order (linear) terms of the FM part
self.weight['first_order'] = tf.Variable(tf.random_normal([self.feature_size, 1], 0.0, 0.05), # N * 1
name='first_order')
        # weights between the one-hot encoded input layer and the dense embedding layer, i.e. the DNN input embeddings
self.weight['embedding_weight'] = tf.Variable(tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.05), # N*K
name='embedding_weight')
        # weights and biases of the deep part; its initial input dimension is input_size = F*K
num_layer = len(self.deep_layers)
input_size = self.field_size * self.embedding_size
# glorot_normal = np.sqrt(2.0 / (input_size + self.deep_layers[0])) # for sigmoid
he_normal = np.sqrt(2.0 /input_size) # for relu
self.weight['layer_0'] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(input_size, self.deep_layers[0])), dtype=np.float32)
self.weight['bias_0'] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(1, self.deep_layers[0])), dtype=np.float32)
        # create the weight and bias of every layer in the deep network
for i in range(1, num_layer):
he_normal = np.sqrt(2.0 / (self.deep_layers[i - 1]))
self.weight['layer_' + str(i)] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32)
self.weight['bias_' + str(i)] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(1, self.deep_layers[i])),dtype=np.float32)
        # output size of the deep part + first-order output size + second-order output size
last_layer_size = self.deep_layers[-1] + self.field_size + self.embedding_size
glorot_normal = np.sqrt(2.0 / (last_layer_size + 1))
        # create the weight and bias of the final output layer
self.weight['last_layer'] = tf.Variable(np.random.normal(loc=0, scale=glorot_normal, size=(last_layer_size, 1)), dtype=np.float32)
self.weight['last_bias'] = tf.Variable(tf.constant(0.0), dtype=np.float32)
        #2. ---------------------- forward propagation ------------------------------------
# None*F*K
self.embedding_index = tf.nn.embedding_lookup(self.weight['embedding_weight'],self.feat_index)
# [None*F*K] .*[None*F*1] = None*F*K
self.embedding_part = tf.multiply(self.embedding_index, tf.reshape(self.feat_value, [-1, self.field_size, 1]))
        # first-order features of the FM part
# None * F*1
self.embedding_first = tf.nn.embedding_lookup(self.weight['first_order'],
self.feat_index)
#[None*F*1].*[None*F*1] = None*F*1
self.embedding_first = tf.multiply(self.embedding_first, tf.reshape(self.feat_value, [-1, self.field_size, 1]))
# None*F
self.first_order = tf.reduce_sum(self.embedding_first, 2)
        # second-order (pairwise interaction) features None*K
self.sum_second_order = tf.reduce_sum(self.embedding_part, 1)
self.sum_second_order_square = tf.square(self.sum_second_order)
self.square_second_order = tf.square(self.embedding_part)
self.square_second_order_sum = tf.reduce_sum(self.square_second_order, 1)
# 1/2*((a+b)^2 - a^2 - b^2)=ab
# None*K
self.second_order = 0.5 * tf.subtract(self.sum_second_order_square, self.square_second_order_sum)
        # output of the FM part None*(F+K)
self.fm_part = tf.concat([self.first_order, self.second_order], axis=1)
        # DNN part
# None*(F*K)
self.deep_embedding = tf.reshape(self.embedding_part, [-1, self.field_size * self.embedding_size])
        # fully connected layers
for i in range(0, len(self.deep_layers)):
self.deep_embedding = tf.add(tf.matmul(self.deep_embedding, self.weight["layer_%d" % i]),
self.weight["bias_%d" % i])
# self.deep_embedding =tf.matmul(self.deep_embedding, self.weight["layer_%d" % i])
self.bn_out = tf.layers.batch_normalization(self.deep_embedding, training=self.is_training)
# self.bn_out = tf.layers.dropout(self.deep_embedding, rate=self.keep_prob,training=self.is_training)
self.deep_embedding = self.activate(self.bn_out)
self.deep_embedding = tf.layers.dropout(self.deep_embedding, rate =1.0-self.keep_prob, training= self.is_training)
        # concatenate the FM output and the DNN output  None*(F+K+layers[-1])
din_all = tf.concat([self.fm_part, self.deep_embedding], axis=1)
#None*1
self.out = tf.add(tf.matmul(din_all, self.weight['last_layer']), self.weight['last_bias'])
        #3. ------------------ define the loss ---------------------------------------
        # loss part None*1
self.prob = tf.nn.sigmoid(self.out)
# self.entropy_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels= self.label, logits= self.out))
# self.entropy_loss = -tf.reduce_mean(
# self.label * tf.log(tf.clip_by_value(self.prob, 1e-10, 1.0))+ (1 - self.label)* tf.log(tf.clip_by_value(1-self.prob,1e-10,1.0)))
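        # focal loss down-weights easy examples (gamma) and re-balances the classes (alpha)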
self.entropy_loss = focal_loss(self.prob, self.label, alpha=0.5, gamma=2)
# self.entropy_loss = weighted_binary_crossentropy(self.prob, self.label, pos_ratio=self.pos_ratio)
        # L2 regularization: sum(w^2)/2*l2_reg_coef
self.reg_loss = tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight["last_layer"])
for i in range(len(self.deep_layers)):
self.reg_loss += tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight["layer_%d" % i])
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight['layer_1']))
# print(self.entropy_loss.shape.as_list(), self.reg_loss.shape.as_list())
self.loss = self.entropy_loss + self.reg_loss
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self.learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,3000, 0.99,staircase=False)
opt = tf.train.AdamOptimizer(self.learning_rate)
# opt = tf.train.GradientDescentOptimizer(self.learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
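        # clip the global gradient norm to 5 to keep updates stable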
clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
with tf.control_dependencies(update_ops):
# self.train_op = opt.minimize(self.loss, global_step = self.global_step)
self.train_op = opt.apply_gradients(zip(clip_gradients, trainable_params), global_step=self.global_step)
self.saver = tf.train.Saver(max_to_keep=3)
def train(self, sess, feat_index, feat_value, label):
_, step = sess.run([self.train_op, self.global_step], feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.label: label,
self.keep_prob: self.keep_prob_v,
self.is_training:True})
return step
def predict(self, sess, feat_index, feat_value, batch_size=None):
if batch_size is None:
prob = sess.run([self.prob], feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.keep_prob: 1,
self.is_training:False})[0]
else:
data =Dataset(feat_value, feat_index, [None]*len(feat_index), batch_size, shuffle=False)
probs =[]
for feat_index, feat_value, _ in data:
prob = sess.run([self.prob], feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.keep_prob: 1,
self.is_training:False})[0]
probs.append(prob.ravel())
prob = np.concatenate(probs)
return prob.ravel()
def evaluate(self, sess, feat_index, feat_value, label, batch_size=None):
tloss, entloss,regloss = 0,0,0
if batch_size is None:
tloss, entloss,regloss = sess.run([self.loss, self.entropy_loss, self.reg_loss],feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.label: label,
self.keep_prob: 1,
self.is_training:False})
else:
data = Dataset(feat_value,feat_index,label, batch_size, shuffle=False)
for i, (feat_index, feat_value, label) in enumerate(data,1):
_tloss, _entloss, _regloss = sess.run([self.loss, self.entropy_loss, self.reg_loss],feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.label: label,
self.keep_prob: 1,
self.is_training:False})
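                # incremental (running) mean of the per-batch losses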
tloss = tloss+ (_tloss-tloss)/i
entloss = entloss + (_entloss-entloss)/i
regloss = regloss + (_regloss-regloss)/i
return tloss, entloss, regloss
def save(self, sess, path, global_step):
if self.saver is not None:
self.saver.save(sess, save_path=path, global_step= global_step)
def restore(self, sess, path):
model_file = tf.train.latest_checkpoint(path)
if model_file is not None:
print('restore model:', model_file)
self.saver.restore(sess, save_path=model_file)
if __name__ == '__main__':
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
params ={'feature_size':None,
'field_size':None,
'embedding_size':4,
'deep_layers':[32,32,32],
'epoch':200,
'batch_size':128,
'learning_rate':0.001,
'l2_reg': 0.001,
             'keep_prob':0.7,
             'pos_ratio':0.5,   # assumed placeholder; DeepFM.__init__ requires params['pos_ratio']
'checkpoint_dir':os.path.join(BASE_PATH,'data/deepfm'),
'training_model':True}
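    # NOTE: feature_size/field_size above are expected to be filled in from the preprocessed data;
    # train_data, test_data, Xi/Xv/Y, val_Xi/val_Xv/val_y, top_ratio_hit_rate and
    # calc_threshold_vs_depth are assumed to be provided elsewhere (e.g. in utils);
    # they are not defined in this file.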
with tf.Session() as sess:
model = DeepFM(params)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer()) # global_step counter etc.
sys.stdout.flush()
if params['training_model']:
#---------------training---------------------------------
for i in range(params['epoch']):
print('epoch ={}'.format(i).center(50,'-'))
for j, (xi, xv, y) in enumerate(train_data):
                    step = model.train(sess, xi, xv, y)  # train() returns only the global step
if j %1000 ==0:
train_loss,train_entropy,train_reg = model.evaluate(sess, Xi,Xv, Y)
val_loss,val_entropy, val_reg = model.evaluate(sess, val_Xi, val_Xv, val_y)
print('---batch= %d--- \n train_loss=%f,\t train_entropy=%f,\t train_reg=%f \n val_loss=%f,\t val_entropy=%f,\t val_reg=%f' % (
j,train_loss,train_entropy,train_reg, val_loss,val_entropy,val_reg))
if i%10 ==0 or i == params['epoch']-1:
model.save(sess, model.checkpoint_dir, i)
prob = model.predict(sess, Xi, Xv)
            hit_rate, top_k = top_ratio_hit_rate(np.array(Y).ravel(), np.array(prob).ravel(), top_ratio=0.001) # ravel returns a view, flatten returns a copy
print('top-k={}, train-hit-rate={}'.format(top_k ,hit_rate))
#-----------------test-----------------------------------
probs =[]
test_y=[]
for xi, xv, y in test_data:
                prob = model.predict(sess, xi, xv)  # 1-D np.ndarray of probabilities
                probs.extend(prob.ravel().tolist())
test_y.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(test_y).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('top-k={}, test-hit-rate={}'.format(top_k ,hit_rate))
calc_threshold_vs_depth(np.asarray(test_y).ravel(), np.asarray(probs).ravel())
else:
model.restore(sess, os.path.split(model.checkpoint_dir)[0])
probs=[]
Y =[]
for xi, xv, y in train_data:
                prob = model.predict(sess, xi, xv)  # 1-D np.ndarray of probabilities
                probs.extend(prob.ravel().tolist())
Y.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(Y).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('train-top-k={}, train-hit-rate={}'.format(top_k ,hit_rate))
probs=[]
test_y=[]
for xi, xv, y in test_data:
                prob = model.predict(sess, xi, xv)  # 1-D np.ndarray of probabilities
                probs.extend(prob.ravel().tolist())
test_y.extend(y.tolist())
hit_rate, top_k = top_ratio_hit_rate(np.array(test_y).ravel(), np.array(probs).ravel(), top_ratio=0.001)
print('test-top-k={}, test-hit-rate={}'.format(top_k ,hit_rate))
|
[
"tensorflow.local_variables_initializer",
"numpy.sqrt",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.reduce_sum",
"tensorflow.gradients",
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.embedding_lookup",
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"os.path.split",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.layers.dropout",
"tensorflow.layers.batch_normalization",
"tensorflow.train.exponential_decay",
"tensorflow.square",
"tensorflow.matmul",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"sys.stdout.flush",
"numpy.concatenate",
"numpy.random.normal",
"tensorflow.Variable",
"losses.focal_loss",
"tensorflow.reshape",
"tensorflow.subtract",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.Saver",
"os.path.join",
"utils.Dataset",
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"os.path.abspath",
"tensorflow.get_collection"
] |
[((945, 1011), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""feature_index"""'}), "(tf.int32, shape=[None, None], name='feature_index')\n", (959, 1011), True, 'import tensorflow as tf\n'), ((1038, 1106), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None]', 'name': '"""feature_value"""'}), "(tf.float32, shape=[None, None], name='feature_value')\n", (1052, 1106), True, 'import tensorflow as tf\n'), ((1128, 1185), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""label"""'}), "(tf.float32, shape=[None, 1], name='label')\n", (1142, 1185), True, 'import tensorflow as tf\n'), ((1210, 1264), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]', 'name': '"""keep_prob"""'}), "(tf.float32, shape=[], name='keep_prob')\n", (1224, 1264), True, 'import tensorflow as tf\n'), ((1300, 1353), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]', 'name': '"""is_training"""'}), "(tf.bool, shape=[], name='is_training')\n", (1314, 1353), True, 'import tensorflow as tf\n'), ((2200, 2225), 'numpy.sqrt', 'np.sqrt', (['(2.0 / input_size)'], {}), '(2.0 / input_size)\n', (2207, 2225), True, 'import numpy as np\n'), ((3212, 3248), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (last_layer_size + 1))'], {}), '(2.0 / (last_layer_size + 1))\n', (3219, 3248), True, 'import numpy as np\n'), ((3625, 3697), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (["self.weight['embedding_weight']", 'self.feat_index'], {}), "(self.weight['embedding_weight'], self.feat_index)\n", (3647, 3697), True, 'import tensorflow as tf\n'), ((3943, 4010), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (["self.weight['first_order']", 'self.feat_index'], {}), "(self.weight['first_order'], self.feat_index)\n", (3965, 4010), True, 'import tensorflow as tf\n'), ((4272, 4310), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.embedding_first', '(2)'], {}), '(self.embedding_first, 2)\n', (4285, 4310), True, 'import tensorflow as tf\n'), ((4367, 4404), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.embedding_part', '(1)'], {}), '(self.embedding_part, 1)\n', (4380, 4404), True, 'import tensorflow as tf\n'), ((4444, 4476), 'tensorflow.square', 'tf.square', (['self.sum_second_order'], {}), '(self.sum_second_order)\n', (4453, 4476), True, 'import tensorflow as tf\n'), ((4512, 4542), 'tensorflow.square', 'tf.square', (['self.embedding_part'], {}), '(self.embedding_part)\n', (4521, 4542), True, 'import tensorflow as tf\n'), ((4582, 4624), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.square_second_order', '(1)'], {}), '(self.square_second_order, 1)\n', (4595, 4624), True, 'import tensorflow as tf\n'), ((4841, 4897), 'tensorflow.concat', 'tf.concat', (['[self.first_order, self.second_order]'], {'axis': '(1)'}), '([self.first_order, self.second_order], axis=1)\n', (4850, 4897), True, 'import tensorflow as tf\n'), ((4966, 5042), 'tensorflow.reshape', 'tf.reshape', (['self.embedding_part', '[-1, self.field_size * self.embedding_size]'], {}), '(self.embedding_part, [-1, self.field_size * self.embedding_size])\n', (4976, 5042), True, 'import tensorflow as tf\n'), ((5846, 5900), 'tensorflow.concat', 'tf.concat', (['[self.fm_part, self.deep_embedding]'], {'axis': '(1)'}), '([self.fm_part, self.deep_embedding], axis=1)\n', (5855, 5900), True, 'import tensorflow as tf\n'), ((6143, 6166), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['self.out'], {}), '(self.out)\n', (6156, 6166), 
True, 'import tensorflow as tf\n'), ((6509, 6562), 'losses.focal_loss', 'focal_loss', (['self.prob', 'self.label'], {'alpha': '(0.5)', 'gamma': '(2)'}), '(self.prob, self.label, alpha=0.5, gamma=2)\n', (6519, 6562), False, 'from losses import focal_loss, weighted_binary_crossentropy\n'), ((7268, 7319), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (7279, 7319), True, 'import tensorflow as tf\n'), ((7349, 7446), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.learning_rate', 'self.global_step', '(3000)', '(0.99)'], {'staircase': '(False)'}), '(self.learning_rate, self.global_step, 3000, 0.99,\n staircase=False)\n', (7375, 7446), True, 'import tensorflow as tf\n'), ((7455, 7497), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (7477, 7497), True, 'import tensorflow as tf\n'), ((7589, 7631), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (7606, 7631), True, 'import tensorflow as tf\n'), ((7660, 7684), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (7682, 7684), True, 'import tensorflow as tf\n'), ((7705, 7746), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'trainable_params'], {}), '(self.loss, trainable_params)\n', (7717, 7746), True, 'import tensorflow as tf\n'), ((7775, 7811), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', '(5)'], {}), '(gradients, 5)\n', (7797, 7811), True, 'import tensorflow as tf\n'), ((8086, 8115), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(3)'}), '(max_to_keep=3)\n', (8100, 8115), True, 'import tensorflow as tf\n'), ((10976, 11008), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['path'], {}), '(path)\n', (11002, 11008), True, 'import tensorflow as tf\n'), ((11214, 11239), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (11229, 11239), False, 'import os\n'), ((11526, 11564), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/deepfm"""'], {}), "(BASE_PATH, 'data/deepfm')\n", (11538, 11564), False, 'import os\n'), ((11607, 11619), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11617, 11619), True, 'import tensorflow as tf\n'), ((11798, 11816), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11814, 11816), False, 'import sys\n'), ((1517, 1568), 'tensorflow.random_normal', 'tf.random_normal', (['[self.feature_size, 1]', '(0.0)', '(0.05)'], {}), '([self.feature_size, 1], 0.0, 0.05)\n', (1533, 1568), True, 'import tensorflow as tf\n'), ((1770, 1839), 'tensorflow.random_normal', 'tf.random_normal', (['[self.feature_size, self.embedding_size]', '(0.0)', '(0.05)'], {}), '([self.feature_size, self.embedding_size], 0.0, 0.05)\n', (1786, 1839), True, 'import tensorflow as tf\n'), ((2284, 2369), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'he_normal', 'size': '(input_size, self.deep_layers[0])'}), '(loc=0, scale=he_normal, size=(input_size, self.deep_layers[0])\n )\n', (2300, 2369), True, 'import numpy as np\n'), ((2428, 2499), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'he_normal', 'size': '(1, self.deep_layers[0])'}), '(loc=0, scale=he_normal, size=(1, self.deep_layers[0]))\n', (2444, 2499), True, 'import numpy as np\n'), ((2625, 2663), 'numpy.sqrt', 'np.sqrt', (['(2.0 / self.deep_layers[i - 1])'], 
{}), '(2.0 / self.deep_layers[i - 1])\n', (2632, 2663), True, 'import numpy as np\n'), ((3326, 3397), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'glorot_normal', 'size': '(last_layer_size, 1)'}), '(loc=0, scale=glorot_normal, size=(last_layer_size, 1))\n', (3342, 3397), True, 'import numpy as np\n'), ((3464, 3480), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (3475, 3480), True, 'import tensorflow as tf\n'), ((3808, 3861), 'tensorflow.reshape', 'tf.reshape', (['self.feat_value', '[-1, self.field_size, 1]'], {}), '(self.feat_value, [-1, self.field_size, 1])\n', (3818, 3861), True, 'import tensorflow as tf\n'), ((4173, 4226), 'tensorflow.reshape', 'tf.reshape', (['self.feat_value', '[-1, self.field_size, 1]'], {}), '(self.feat_value, [-1, self.field_size, 1])\n', (4183, 4226), True, 'import tensorflow as tf\n'), ((4716, 4787), 'tensorflow.subtract', 'tf.subtract', (['self.sum_second_order_square', 'self.square_second_order_sum'], {}), '(self.sum_second_order_square, self.square_second_order_sum)\n', (4727, 4787), True, 'import tensorflow as tf\n'), ((5402, 5479), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.deep_embedding'], {'training': 'self.is_training'}), '(self.deep_embedding, training=self.is_training)\n', (5431, 5479), True, 'import tensorflow as tf\n'), ((5689, 5786), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['self.deep_embedding'], {'rate': '(1.0 - self.keep_prob)', 'training': 'self.is_training'}), '(self.deep_embedding, rate=1.0 - self.keep_prob, training=\n self.is_training)\n', (5706, 5786), True, 'import tensorflow as tf\n'), ((5943, 5988), 'tensorflow.matmul', 'tf.matmul', (['din_all', "self.weight['last_layer']"], {}), "(din_all, self.weight['last_layer'])\n", (5952, 5988), True, 'import tensorflow as tf\n'), ((6741, 6791), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.l2_reg_coef'], {}), '(self.l2_reg_coef)\n', (6773, 6791), True, 'import tensorflow as tf\n'), ((7825, 7860), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (7848, 7860), True, 'import tensorflow as tf\n'), ((9266, 9287), 'numpy.concatenate', 'np.concatenate', (['probs'], {}), '(probs)\n', (9280, 9287), True, 'import numpy as np\n'), ((9948, 10013), 'utils.Dataset', 'Dataset', (['feat_value', 'feat_index', 'label', 'batch_size'], {'shuffle': '(False)'}), '(feat_value, feat_index, label, batch_size, shuffle=False)\n', (9955, 10013), False, 'from utils import Dataset\n'), ((11677, 11710), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11708, 11710), True, 'import tensorflow as tf\n'), ((11729, 11761), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (11759, 11761), True, 'import tensorflow as tf\n'), ((2723, 2820), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'he_normal', 'size': '(self.deep_layers[i - 1], self.deep_layers[i])'}), '(loc=0, scale=he_normal, size=(self.deep_layers[i - 1],\n self.deep_layers[i]))\n', (2739, 2820), True, 'import numpy as np\n'), ((2947, 3018), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'he_normal', 'size': '(1, self.deep_layers[i])'}), '(loc=0, scale=he_normal, size=(1, self.deep_layers[i]))\n', (2963, 3018), True, 'import numpy as np\n'), ((5151, 5210), 'tensorflow.matmul', 'tf.matmul', (['self.deep_embedding', "self.weight['layer_%d' % i]"], 
{}), "(self.deep_embedding, self.weight['layer_%d' % i])\n", (5160, 5210), True, 'import tensorflow as tf\n'), ((6895, 6945), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.l2_reg_coef'], {}), '(self.l2_reg_coef)\n', (6927, 6945), True, 'import tensorflow as tf\n'), ((13867, 13902), 'os.path.split', 'os.path.split', (['model.checkpoint_dir'], {}), '(model.checkpoint_dir)\n', (13880, 13902), False, 'import os\n'), ((14205, 14216), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (14213, 14216), True, 'import numpy as np\n'), ((14226, 14241), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (14234, 14241), True, 'import numpy as np\n'), ((14640, 14656), 'numpy.array', 'np.array', (['test_y'], {}), '(test_y)\n', (14648, 14656), True, 'import numpy as np\n'), ((14666, 14681), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (14674, 14681), True, 'import numpy as np\n'), ((12917, 12928), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (12925, 12928), True, 'import numpy as np\n'), ((12938, 12955), 'numpy.array', 'np.array', (['prob[0]'], {}), '(prob[0])\n', (12946, 12955), True, 'import numpy as np\n'), ((13561, 13577), 'numpy.array', 'np.array', (['test_y'], {}), '(test_y)\n', (13569, 13577), True, 'import numpy as np\n'), ((13587, 13602), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (13595, 13602), True, 'import numpy as np\n'), ((13753, 13771), 'numpy.asarray', 'np.asarray', (['test_y'], {}), '(test_y)\n', (13763, 13771), True, 'import numpy as np\n'), ((13781, 13798), 'numpy.asarray', 'np.asarray', (['probs'], {}), '(probs)\n', (13791, 13798), True, 'import numpy as np\n')]
|