code (stringlengths 31 to 1.05M) | apis (list) | extract_api (stringlengths 97 to 1.91M) |
---|---|---|
"""
Functions for explaining classifiers that use tabular data (matrices).
"""
import collections
import json
import copy
import numpy as np
import sklearn
import sklearn.preprocessing
from . import lime_base
from . import explanation
class TableDomainMapper(explanation.DomainMapper):
"""Maps feature ids to names, generates table views, etc"""
def __init__(self, feature_names, feature_values, scaled_row, categorical_features, discretized_feature_names=None):
"""Init.
Args:
feature_names: list of feature names, in order
feature_values: list of strings with the values of the original row
scaled_row: scaled row
categorical_features: list of categorical feature ids (ints)
discretized_feature_names: if not None, used in place of feature_names
for the explanation (set when features have been discretized)
"""
self.exp_feature_names = feature_names
if discretized_feature_names is not None:
self.exp_feature_names = discretized_feature_names
self.feature_names = feature_names
self.feature_values = feature_values
self.scaled_row = scaled_row
self.all_categorical = len(categorical_features) == len(scaled_row)
self.categorical_features = categorical_features
def map_exp_ids(self, exp):
"""Maps ids to feature names.
Args:
exp: list of tuples [(id, weight), (id,weight)]
Returns:
list of tuples (feature_name, weight)
"""
return [(self.exp_feature_names[x[0]], x[1]) for x in exp]
def visualize_instance_html(self,
exp,
label,
random_id,
show_table=True,
show_contributions=None,
show_scaled=None,
show_all=False):
"""Shows the current example in a table format.
Args:
exp: list of tuples [(id, weight), (id,weight)]
label: label id (integer)
random_id: random_id being used, appended to div ids etc. in the
html.
show_table: if False, don't show table visualization.
show_contributions: if True, add an additional bar plot with weights
multiplied by example. By default, this is true if there are any
continuous features.
show_scaled: if True, display scaled values in table.
show_all: if True, show zero-weighted features in the table.
"""
if show_contributions is None:
show_contributions = not self.all_categorical
if show_scaled is None:
show_scaled = not self.all_categorical
show_scaled = json.dumps(show_scaled)
weights = [0] * len(self.feature_names)
scaled_exp = []
for i, value in exp:
weights[i] = value * self.scaled_row[i]
scaled_exp.append((i, value * self.scaled_row[i]))
scaled_exp = json.dumps(self.map_exp_ids(scaled_exp))
row = ['%.2f' % a if i not in self.categorical_features else 'N/A'
for i, a in enumerate(self.scaled_row)]
out_list = list(zip(self.feature_names, self.feature_values,
row, weights))
if not show_all:
out_list = [out_list[x[0]] for x in exp]
out = u''
if show_contributions:
out += u'''<script>
var cur_width = parseInt(d3.select('#model%s').select('svg').style('width'));
console.log(cur_width);
var svg_contrib = d3.select('#model%s').append('svg');
exp.ExplainFeatures(svg_contrib, %d, %s, '%s', true);
cur_width = Math.max(cur_width, parseInt(svg_contrib.style('width'))) + 'px';
d3.select('#model%s').style('width', cur_width);
</script>
''' % (random_id, random_id, label, scaled_exp,
'Feature contributions', random_id)
if show_table:
out += u'<div id="mytable%s"></div>' % random_id
out += u'''<script>
var tab = d3.select('#mytable%s');
exp.ShowTable(tab, %s, %d, %s);
</script>
''' % (random_id, json.dumps(out_list), label, show_scaled)
return out
class LimeTabularExplainer(object):
"""Explains predictions on tabular (i.e. matrix) data.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to the
means and stds in the training data. For categorical features, perturb by
sampling according to the training distribution, and making a binary feature
that is 1 when the value is the same as the instance being explained."""
def __init__(self, training_data, feature_names=None, categorical_features=None,
categorical_names=None, kernel_width=3, verbose=False,
class_names=None, feature_selection='auto',
discretize_continuous=True):
"""Init function.
Args:
training_data: numpy 2d array
feature_names: list of names (strings) corresponding to the columns
in the training data.
categorical_features: list of indices (ints) corresponding to the
categorical columns. Everything else will be considered
continuous.
categorical_names: map from int to list of names, where
categorical_names[x][y] represents the name of the yth value of
column x.
kernel_width: kernel width for the exponential kernel
verbose: if true, print local prediction values from linear model
class_names: list of class names, ordered according to whatever the
classifier is using. If not present, class names will be '0',
'1', ...
feature_selection: feature selection method. can be
'forward_selection', 'lasso_path', 'none' or 'auto'.
See function 'explain_instance_with_data' in lime_base.py for
details on what each of the options does.
discretize_continuous: if True, all non-categorical features will be
discretized into quartiles.
"""
self.categorical_names = categorical_names
self.categorical_features = categorical_features
if self.categorical_names is None:
self.categorical_names = {}
if self.categorical_features is None:
self.categorical_features = []
self.discretizer = None
if discretize_continuous:
self.discretizer = QuartileDiscretizer(training_data, categorical_features, feature_names)
categorical_features = range(training_data.shape[1])
discretized_training_data = self.discretizer.discretize(training_data)
kernel = lambda d: np.sqrt(np.exp(-(d**2) / kernel_width ** 2))
self.feature_selection = feature_selection
self.base = lime_base.LimeBase(kernel, verbose)
self.scaler = None
self.class_names = class_names
self.feature_names = feature_names
self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)
self.scaler.fit(training_data)
self.feature_values = {}
self.feature_frequencies = {}
for feature in categorical_features:
feature_count = collections.defaultdict(lambda: 0.0)
column = training_data[:, feature]
if self.discretizer is not None:
column = discretized_training_data[:, feature]
feature_count[0] = 0.
feature_count[1] = 0.
feature_count[2] = 0.
feature_count[3] = 0.
for value in column:
feature_count[value] += 1
values, frequencies = map(list, zip(*(feature_count.items())))
#print feature, values, frequencies
self.feature_values[feature] = values
self.feature_frequencies[feature] = (np.array(frequencies) /
sum(frequencies))
self.scaler.mean_[feature] = 0
self.scaler.scale_[feature] = 1
#print self.feature_frequencies
def explain_instance(self, data_row, classifier_fn, labels=(1,),
top_labels=None, num_features=10, num_samples=5000):
"""Generates explanations for a prediction.
First, we generate neighborhood data by randomly perturbing features from
the instance (see __data_inverse). We then learn locally weighted linear
models on this neighborhood data to explain each of the classes in an
interpretable way (see lime_base.py).
Args:
data_row: 1d numpy array, corresponding to a row
classifier_fn: classifier prediction probability function, which
takes a numpy array of perturbed samples and outputs prediction
probabilities. For scikit-learn classifiers, this is
classifier.predict_proba.
labels: iterable with labels to be explained.
top_labels: if not None, ignore labels and produce explanations for
the K labels with highest prediction probabilities, where K is
this parameter.
num_features: maximum number of features present in explanation
num_samples: size of the neighborhood to learn the linear model
Returns:
An Explanation object (see explanation.py) with the corresponding
explanations.
"""
data, inverse = self.__data_inverse(data_row, num_samples)
scaled_data = (data - self.scaler.mean_) / self.scaler.scale_
distances = np.sqrt(np.sum((scaled_data - scaled_data[0]) ** 2, axis=1))
yss = classifier_fn(inverse)
if self.class_names is None:
self.class_names = [str(x) for x in range(yss[0].shape[0])]
else:
self.class_names = list(self.class_names)
feature_names = copy.deepcopy(self.feature_names)
if feature_names is None:
feature_names = [str(x) for x in range(data_row.shape[0])]
round_stuff = lambda x: ['%.2f' % a for a in x]
values = round_stuff(data_row)
for i in self.categorical_features:
name = int(data_row[i])
if i in self.categorical_names:
name = self.categorical_names[i][name]
feature_names[i] = '%s=%s' % (feature_names[i], name)
values[i] = 'True'
categorical_features = self.categorical_features
discretized_feature_names=None
if self.discretizer is not None:
categorical_features = range(data.shape[1])
discretized_instance = self.discretizer.discretize(data_row)
discretized_feature_names = copy.deepcopy(feature_names)
for f in self.discretizer.names:
discretized_feature_names[f] = self.discretizer.names[f][int(discretized_instance[f])]
#values[f] = 'True'
domain_mapper = TableDomainMapper(
feature_names, values, scaled_data[0],
categorical_features=categorical_features,
discretized_feature_names=discretized_feature_names)
ret_exp = explanation.Explanation(domain_mapper=domain_mapper,
class_names=self.class_names)
ret_exp.predict_proba = yss[0]
if top_labels:
labels = np.argsort(yss[0])[-top_labels:]
ret_exp.top_labels = list(labels)
ret_exp.top_labels.reverse()
for label in labels:
ret_exp.intercept[label], ret_exp.local_exp[label] = self.base.explain_instance_with_data(
scaled_data, yss, distances, label, num_features,
feature_selection=self.feature_selection)
return ret_exp
def __data_inverse(self,
data_row,
num_samples):
"""Generates a neighborhood around a prediction.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to
the means and stds in the training data. For categorical features,
perturb by sampling according to the training distribution, and making a
binary feature that is 1 when the value is the same as the instance
being explained.
Args:
data_row: 1d numpy array, corresponding to a row
num_samples: size of the neighborhood to learn the linear model
Returns:
A tuple (data, inverse), where:
data: dense num_samples * K matrix, where categorical features
are encoded with either 0 (not equal to the corresponding value
in data_row) or 1. The first row is the original instance.
inverse: same as data, except the categorical features are not
binary, but categorical (as the original data)
"""
data = np.zeros((num_samples, data_row.shape[0]))
categorical_features = range(data_row.shape[0])
if self.discretizer is None:
data = np.random.normal(0, 1, num_samples * data_row.shape[0]).reshape(
num_samples, data_row.shape[0])
data = data * self.scaler.scale_ + self.scaler.mean_
categorical_features = self.categorical_features
first_row = data_row
else:
first_row = self.discretizer.discretize(data_row)
data[0] = data_row.copy()
inverse = data.copy()
for column in categorical_features:
values = self.feature_values[column]
freqs = self.feature_frequencies[column]
#print self.feature_frequencies[column], column
inverse_column = np.random.choice(values, size=num_samples, replace=True, p=freqs)
binary_column = np.array([1 if x == first_row[column] else 0 for x in inverse_column])
binary_column[0] = 1
inverse_column[0] = data[0, column]
data[:, column] = binary_column
inverse[:, column] = inverse_column
# if column not in self.categorical_features:
# print values, column,
# print inverse[1:, column]
if self.discretizer is not None:
inverse[1:] = self.discretizer.undiscretize(inverse[1:])
#print zip(inverse[:,10], data[:,10])
return data, inverse
class QuartileDiscretizer:
"""Discretizes data into quartiles."""
def __init__(self, data, categorical_features, feature_names):
"""Initializer
Args:
data: numpy 2d array
categorical_features: list of indices (ints) corresponding to the
categorical columns. These features will not be discretized.
Everything else will be considered continuous, and will be
discretized.
feature_names: list of names (strings) corresponding to the columns
in the training data.
"""
to_discretize = [x for x in range(data.shape[1]) if x not in categorical_features]
self.names = {}
self.lambdas = {}
self.ranges = {}
self.means = {}
self.stds = {}
self.mins = {}
self.maxs = {}
for feature in to_discretize:
qts = np.percentile(data[:,feature], [25, 50, 75])
boundaries = np.min(data[:, feature]), np.max(data[:, feature])
name = feature_names[feature]
self.names[feature] = ['%s <= %.2f' % (name, qts[0]), '%.2f < %s <= %.2f' % (qts[0], name, qts[1]), '%.2f < %s <= %.2f' % (qts[1], name, qts[2]), '%s > %.2f' % (name, qts[2])]
self.lambdas[feature] = lambda x, qts=qts: np.searchsorted(qts, x)
discretized = self.lambdas[feature](data[:, feature])
self.means[feature] = [np.mean(data[discretized == x, feature]) for x in range(4)]
self.stds[feature] = [np.std(data[discretized == x, feature]) + 0.000000000001 for x in range(4)]
self.mins[feature] = [boundaries[0], qts[0], qts[1], qts[2]]
self.maxs[feature] = [qts[0], qts[1],qts[2], boundaries[1]]
def discretize(self, data):
"""Discretizes the data.
Args:
data: numpy 2d or 1d array
Returns:
numpy array of same dimension, discretized.
"""
ret = data.copy()
for feature in self.lambdas:
if len(data.shape) == 1:
ret[feature] = int(self.lambdas[feature](ret[feature]))
else:
ret[:,feature] = self.lambdas[feature](ret[:,feature]).astype(int)
return ret
def undiscretize(self, data):
ret = data.copy()
for feature in self.means:
mins = self.mins[feature]
maxs = self.maxs[feature]
means = self.means[feature]
stds = self.stds[feature]
get_inverse = lambda q: max(mins[q], min(np.random.normal(means[q], stds[q]), maxs[q]))
if len(data.shape) == 1:
q = int(ret[feature])
ret[feature] = get_inverse(q)
else:
ret[:,feature] = [get_inverse(int(x)) for x in ret[:, feature]]
return ret
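As a usage illustration (an addition, not part of the original module), the sketch below shows one plausible way to drive LimeTabularExplainer with a scikit-learn classifier. The iris dataset, the random forest, and the lime.lime_tabular import path are assumptions made for the example.

# Illustrative usage sketch; assumes scikit-learn is installed and that the
# module above is importable as lime.lime_tabular.
import sklearn.datasets
import sklearn.ensemble
from lime.lime_tabular import LimeTabularExplainer

iris = sklearn.datasets.load_iris()
rf = sklearn.ensemble.RandomForestClassifier(n_estimators=100)
rf.fit(iris.data, iris.target)

explainer = LimeTabularExplainer(iris.data,
                                 feature_names=iris.feature_names,
                                 class_names=iris.target_names,
                                 discretize_continuous=True)
# Explain the first training instance for the top predicted class.
exp = explainer.explain_instance(iris.data[0], rf.predict_proba,
                                 num_features=2, top_labels=1)
print(exp.predict_proba)                 # class probabilities for the instance
print(exp.local_exp[exp.top_labels[0]])  # [(feature_id, weight), ...]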
|
[
"numpy.random.normal",
"numpy.mean",
"numpy.random.choice",
"numpy.searchsorted",
"numpy.std",
"json.dumps",
"numpy.min",
"numpy.max",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"collections.defaultdict",
"numpy.exp",
"numpy.argsort",
"copy.deepcopy",
"numpy.percentile"
] |
[((2727, 2750), 'json.dumps', 'json.dumps', (['show_scaled'], {}), '(show_scaled)\n', (2737, 2750), False, 'import json\n'), ((7312, 7365), 'sklearn.preprocessing.StandardScaler', 'sklearn.preprocessing.StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (7348, 7365), False, 'import sklearn\n'), ((10183, 10216), 'copy.deepcopy', 'copy.deepcopy', (['self.feature_names'], {}), '(self.feature_names)\n', (10196, 10216), False, 'import copy\n'), ((13244, 13286), 'numpy.zeros', 'np.zeros', (['(num_samples, data_row.shape[0])'], {}), '((num_samples, data_row.shape[0]))\n', (13252, 13286), True, 'import numpy as np\n'), ((7549, 7586), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (7572, 7586), False, 'import collections\n'), ((9892, 9943), 'numpy.sum', 'np.sum', (['((scaled_data - scaled_data[0]) ** 2)'], {'axis': '(1)'}), '((scaled_data - scaled_data[0]) ** 2, axis=1)\n', (9898, 9943), True, 'import numpy as np\n'), ((10999, 11027), 'copy.deepcopy', 'copy.deepcopy', (['feature_names'], {}), '(feature_names)\n', (11012, 11027), False, 'import copy\n'), ((14046, 14111), 'numpy.random.choice', 'np.random.choice', (['values'], {'size': 'num_samples', 'replace': '(True)', 'p': 'freqs'}), '(values, size=num_samples, replace=True, p=freqs)\n', (14062, 14111), True, 'import numpy as np\n'), ((14140, 14212), 'numpy.array', 'np.array', (['[(1 if x == first_row[column] else 0) for x in inverse_column]'], {}), '([(1 if x == first_row[column] else 0) for x in inverse_column])\n', (14148, 14212), True, 'import numpy as np\n'), ((15798, 15843), 'numpy.percentile', 'np.percentile', (['data[:, feature]', '[25, 50, 75]'], {}), '(data[:, feature], [25, 50, 75])\n', (15811, 15843), True, 'import numpy as np\n'), ((7037, 7072), 'numpy.exp', 'np.exp', (['(-d ** 2 / kernel_width ** 2)'], {}), '(-d ** 2 / kernel_width ** 2)\n', (7043, 7072), True, 'import numpy as np\n'), ((8190, 8211), 'numpy.array', 'np.array', (['frequencies'], {}), '(frequencies)\n', (8198, 8211), True, 'import numpy as np\n'), ((11653, 11671), 'numpy.argsort', 'np.argsort', (['yss[0]'], {}), '(yss[0])\n', (11663, 11671), True, 'import numpy as np\n'), ((15868, 15892), 'numpy.min', 'np.min', (['data[:, feature]'], {}), '(data[:, feature])\n', (15874, 15892), True, 'import numpy as np\n'), ((15894, 15918), 'numpy.max', 'np.max', (['data[:, feature]'], {}), '(data[:, feature])\n', (15900, 15918), True, 'import numpy as np\n'), ((16204, 16227), 'numpy.searchsorted', 'np.searchsorted', (['qts', 'x'], {}), '(qts, x)\n', (16219, 16227), True, 'import numpy as np\n'), ((16329, 16369), 'numpy.mean', 'np.mean', (['data[discretized == x, feature]'], {}), '(data[discretized == x, feature])\n', (16336, 16369), True, 'import numpy as np\n'), ((4304, 4324), 'json.dumps', 'json.dumps', (['out_list'], {}), '(out_list)\n', (4314, 4324), False, 'import json\n'), ((13399, 13454), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(num_samples * data_row.shape[0])'], {}), '(0, 1, num_samples * data_row.shape[0])\n', (13415, 13454), True, 'import numpy as np\n'), ((16423, 16462), 'numpy.std', 'np.std', (['data[discretized == x, feature]'], {}), '(data[discretized == x, feature])\n', (16429, 16462), True, 'import numpy as np\n'), ((17443, 17478), 'numpy.random.normal', 'np.random.normal', (['means[q]', 'stds[q]'], {}), '(means[q], stds[q])\n', (17459, 17478), True, 'import numpy as np\n')]
|
import os
import numpy as np
import pandas as pd
from databroker.assets.handlers_base import HandlerBase
class APBBinFileHandler(HandlerBase):
"Read electrometer *.bin files"
def __init__(self, fpath):
# It's a text config file, which we don't store in the resources yet, parsing for now
fpath_txt = f"{os.path.splitext(fpath)[0]}.txt"
with open(fpath_txt, "r") as fp:
content = fp.readlines()
content = [x.strip() for x in content]
_ = int(content[0].split(":")[1])
# Gains = [int(x) for x in content[1].split(":")[1].split(",")]
# Offsets = [int(x) for x in content[2].split(":")[1].split(",")]
# FAdiv = float(content[3].split(":")[1])
# FArate = float(content[4].split(":")[1])
# trigger_timestamp = float(content[5].split(":")[1].strip().replace(",", "."))
raw_data = np.fromfile(fpath, dtype=np.int32)
columns = ["timestamp", "i0", "it", "ir", "iff", "aux1", "aux2", "aux3", "aux4"]
num_columns = len(columns) + 1 # TODO: figure out why 1
raw_data = raw_data.reshape((raw_data.size // num_columns, num_columns))
derived_data = np.zeros((raw_data.shape[0], raw_data.shape[1] - 1))
derived_data[:, 0] = (
raw_data[:, -2] + raw_data[:, -1] * 8.0051232 * 1e-9
) # Unix timestamp with nanoseconds
for i in range(num_columns - 2):
derived_data[:, i + 1] = raw_data[:, i] # ((raw_data[:, i] ) - Offsets[i]) / Gains[i]
self.df = pd.DataFrame(data=derived_data, columns=columns)
self.raw_data = raw_data
def __call__(self):
return self.df
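For illustration (an addition, not from the original file), the handler can be exercised directly on a matching .bin/.txt pair on disk; the file path below is a placeholder assumption.

# Illustrative usage sketch; assumes /data/scan_0001.bin and the matching
# /data/scan_0001.txt config file exist, and that APBBinFileHandler (defined
# above) is in scope.
handler = APBBinFileHandler("/data/scan_0001.bin")
df = handler()                 # DataFrame with timestamp, i0, it, ir, iff, aux1..aux4
print(df.head())
print(handler.raw_data.shape)  # (n_samples, 10) raw int32 table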
|
[
"pandas.DataFrame",
"numpy.fromfile",
"os.path.splitext",
"numpy.zeros"
] |
[((892, 926), 'numpy.fromfile', 'np.fromfile', (['fpath'], {'dtype': 'np.int32'}), '(fpath, dtype=np.int32)\n', (903, 926), True, 'import numpy as np\n'), ((1187, 1239), 'numpy.zeros', 'np.zeros', (['(raw_data.shape[0], raw_data.shape[1] - 1)'], {}), '((raw_data.shape[0], raw_data.shape[1] - 1))\n', (1195, 1239), True, 'import numpy as np\n'), ((1540, 1588), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'derived_data', 'columns': 'columns'}), '(data=derived_data, columns=columns)\n', (1552, 1588), True, 'import pandas as pd\n'), ((331, 354), 'os.path.splitext', 'os.path.splitext', (['fpath'], {}), '(fpath)\n', (347, 354), False, 'import os\n')]
|
import numpy as np
import pandas as pd
import sys
import re
# question type definition
S = 0 # [S, col, corr [,rate]]
MS = 1 # [MS, [cols,..], [corr,..] [,rate]]
Num = 2 # [Num, [cols,..], [corr,..] [,rate]]
SS = 3 # [SS, [start,end], [corr,...] [,rate]]
# the list of question type and reference
# [type, column, answer[, num_candidate]]
QuestionReferences = None
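# Illustrative example (an assumption, not from the original source): the
# reference file that is eval()'ed into QuestionReferences might look like
# [
#     [S, 1, 3],                 # single choice: column 1, correct answer 3
#     [MS, [2, 3], [1, 4]],      # multi select: columns 2-3, correct answers 1 and 4
#     [SS, [4, 6], [2, 5, 1]],   # sequential single: columns 4..6, one answer per column
#     [Num, [7, 8], [1, 0], 50], # numeric: columns 7-8 must equal [1, 0]; optional weight 50
# ]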
def get_num_squestions(qref):
numq = 0
for q in qref:
if q[0] == MS:
numq += len(q[1])
elif q[0] == SS:
numq += q[1][1]-q[1][0]+1
else: numq += 1
return numq
def ascoringS(answer, q):
if answer[q[1]] == q[2]:
return 1
else:
return 0
def ascoringMS(answer, columns, i, ref):
ans = answer[columns]
if ref[i] in ans:
return 1
else:
return 0
def ascoringSS(answer, i, columns, ref):
ans = answer[columns[0]+i]
if ans == ref[i]:
return 1
else:
return 0
def ascoringNum(answer, columns, ref):
for i,p in enumerate(columns):
if answer[p] != ref[i]:
return 0
return 1
def ascoring(df, q):
if q[0] == S:
return df.apply(ascoringS, axis=1, raw=True, args=(q,))
elif q[0] == MS:
res = None
for i in range(len(q[2])):
rr = df.apply(ascoringMS, axis=1, raw=True, args=(q[1], i,q[2]))
if res is None:
res = rr
else:
res = pd.concat([res, rr], axis=1)
return res
elif q[0] == Num:
return df.apply(ascoringNum, axis=1, raw=True, args=(q[1], q[2]))
elif q[0] == SS:
res = None
for i in range(q[1][1]-q[1][0]+1):
rr = df.apply(ascoringSS, axis=1, raw=True, args=(i, q[1], q[2]))
if res is None:
res = rr
else:
res = pd.concat([res, rr], axis=1)
return res
else:
print(f"ERROR: Undefined question type: {q[0]}")
exit()
def get_maxcolms(qref):
maxcol = 0
for q in qref:
if q[0] == S:
if maxcol < q[1]: maxcol = q[1]
else:
if maxcol < max(q[1]): maxcol = max(q[1])
return maxcol
def get_sq2p(qref):
num_squestions = get_num_squestions(qref)
sq2p = np.zeros(num_squestions, dtype=int)
numq = 0
numsq = 0
for q in qref:
if q[0] == MS:
for i in q[1]:
sq2p[numsq] = numq
numsq += 1
elif q[0] == SS:
for i in range(q[1][1]-q[1][0]+1):
sq2p[numsq] = numq
numsq += 1
else:
sq2p[numsq] = numq
numsq += 1
numq += 1
return sq2p
def correctRate(scorelist_v):
return sum(scorelist_v) / len(scorelist_v)
def print_crate(marubatu, points_alloc):
print("====================================", file=sys.stderr)
print("Correct rate for each small question", file=sys.stderr)
print(" and allocation of points", file=sys.stderr)
print(" No: rate, points, q_type", file=sys.stderr)
print("------------------------------------", file=sys.stderr)
crate = marubatu.iloc[:,1:].apply(correctRate, raw=True)
sq2p = get_sq2p(QuestionReferences)
for i,rate in enumerate(crate):
q = QuestionReferences[sq2p[i]]
if q[0] == S: kind = f' S[{q[1]}]'
elif q[0] == MS: kind = f' MS{q[1]}'
elif q[0] == SS: kind = f' SS{q[1]}'
else: kind = f'Num{q[1]}'
print(f"{i+1:3d}:{rate*100.0:3.0f}%, {points_alloc[i]:2}, {kind:}", file=sys.stderr)
def totalscore(scorelist, points_alloc):
if len(scorelist) != len(points_alloc)+1:
print("ERROR: in totalscore()", file=sys.stderr)
print(scorelist, file=sys.stderr)
print(points_alloc, file=sys.stderr)
exit()
return sum(scorelist[1:] * points_alloc)
# return sum(scorelist[1:]) * 3
def get_points_alloc(qref, desired_pscore):
num_squestions = get_num_squestions(qref)
points_alloc = np.zeros(num_squestions, dtype=int)
num = 0
sum_palloc = 0
for q in qref:
weight = 100
if len(q) >= 4:
weight = q[3]
if q[0] == MS:
inum = len(q[1])
elif q[0] == SS:
inum = q[1][1]-q[1][0]+1
else:
inum = 1
for i in range(inum):
points_alloc[num] = weight
sum_palloc += weight
num += 1
basic_unit_float = desired_pscore * 100.0 / sum_palloc
for i in range(num_squestions):
points_float = desired_pscore * points_alloc[i] / sum_palloc
points = round(points_float)
if points <= 0: points = 1
points_alloc[i] = points
return points_alloc, basic_unit_float
def marksheetScoring(filename, crate, desired_pscore):
maxcolms = get_maxcolms(QuestionReferences)
df = pd.read_csv(filename, header=None, dtype=object, skipinitialspace=True, usecols=list(range(maxcolms+1)))
df.fillna('-1', inplace=True)
df.replace('*', -1, inplace=True) # multi-mark col.
df = df.astype('int')
df[0] = df[0]+200000000
df = df.sort_values(by=0, ascending=True)
print(f"Marksheet-answer: #students={df.shape[0]}, #columns={df.shape[1]}(including id-number)", file=sys.stderr)
marubatu = df[[0]]
for q in QuestionReferences:
ascore = ascoring(df, q)
marubatu = pd.concat([marubatu, ascore], axis=1, ignore_index=True)
marubatu.to_csv(filename+'.marubatu', index=False, header=False)
points_alloc, basic_unit_float = get_points_alloc(QuestionReferences, desired_pscore)
perfect_score = sum(points_alloc)
print(f"#Small_questions={len(points_alloc)}", file=sys.stderr)
print(f"Perfect_score={perfect_score} (desired_perfect_score={desired_pscore})", file=sys.stderr)
basic_point_unit = round(basic_unit_float)
basic_point_unit = basic_point_unit if basic_point_unit >= 1 else 1
print(f"Basic_points_unit(weight=100)={basic_point_unit}, (float_unit = {basic_unit_float:5.2f})", file=sys.stderr)
if crate:
print_crate(marubatu, points_alloc)
id_scores = pd.concat([marubatu[0], marubatu.apply(totalscore, axis=1, raw=True, args=(points_alloc,))], axis=1, ignore_index=True)
# scores = marubatu.apply(totalscore, axis=1, raw=True, args=(points_alloc))
# print(scores, file=sys.stderr)
# id_scores = pd.concat([marubatu[0], scores], axis=1, ignore_index=True)
return id_scores
### for Twins upload file
def read_twins_upload_file(twinsfilename):
twins = pd.read_csv(twinsfilename, skiprows=1, header=None, skipinitialspace=True)
twins.columns=['科目番号', '学籍番号', '学期区分', '学期評価', '総合評価']
twins['総合評価'].fillna('0', inplace=True)
# scores = twins['総合評価'].astype(int, inplace=True) # inplace does not work
scores = twins['総合評価'].astype(int)
del twins['総合評価']
twins = pd.concat([twins, scores], axis=1, ignore_index=True)
twins.columns=['科目番号', '学籍番号', '学期区分', '学期評価', '総合評価']
id_scores = pd.concat([twins['学籍番号'], twins['総合評価']], axis=1, ignore_index=True)
return id_scores, twins
### adjusting
# adjust
def point_adjust(point, xp, yp, xmax):
gradient1 = yp / xp
gradient2 = (xmax - yp)/(xmax - xp)
if point <= xp:
point = gradient1 * point
elif point <= xmax:
point = gradient2 * point + (xmax * (1.0-gradient2))
return point
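# Worked example (illustrative): with xp=60, yp=70, xmax=100, a raw score of 60
# is lifted to 70 (gradient1 = 70/60), a raw 80 maps to
# 0.75 * 80 + 100 * (1 - 0.75) = 85 (gradient2 = (100 - 70) / (100 - 60)),
# and the curve passes through (0, 0), (60, 70) and (100, 100).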
def adjust(id_scores, params):
xp, yp, xmax = params
adjustfunc = lambda p: point_adjust(p, xp, yp, xmax)
id_scores = pd.concat([id_scores[0], id_scores[1].map(adjustfunc).astype(int)], axis=1, ignore_index=True)
return id_scores
# a2djust
def get_points_abcd(params, id_scores):
score_list = np.sort(id_scores[1])[::-1]
num = len(score_list)
points_list = [score_list[0]]
cp = 0
for p in params:
cp += p
points_list.append(score_list[round(num * cp / 100.0)])
return points_list
def point_a2djust(p, p_max, p_ap, p_a, p_b, p_c):
if p >= p_ap:
newpoint = 90 + (10/(p_max-p_ap)) * (p-p_ap)
elif p >= p_a:
newpoint = 80 + (10/(p_ap-p_a)) * (p-p_a)
elif p >= p_b:
newpoint = 70 + (10/(p_a-p_b)) * (p-p_b)
elif p >= p_c:
newpoint = 60 + (10/(p_b-p_c)) * (p-p_c)
else:
newpoint = (60.0/p_c) * p
return round(newpoint)
def a2djust(id_scores, params):
# rate_ap, rate_a, rate_b, rate_c = params
p_max, p_ap, p_a, p_b, p_c = get_points_abcd(params, id_scores)
print(f"A2djust: rate_ap={params[0]}, rate_a={params[1]}, rate_b={params[2]}, rate_c={params[3]}", file=sys.stderr)
print(f"A2djust: p_max={p_max}, p_ap={p_ap}, p_a={p_a}, p_b={p_b}, p_c={p_c}", file=sys.stderr)
a2djustfunc = lambda p: point_a2djust(p, p_max, p_ap, p_a, p_b, p_c)
new_id_scores = pd.concat([id_scores[0], id_scores[1].map(a2djustfunc).astype(int)], axis=1, ignore_index=True)
return new_id_scores
# interval
def finterval(x, minval, maxval):
if x < minval: return minval
elif x > maxval: return maxval
else: return x
def interval(id_scores, minmax):
min, max = minmax
func = lambda x: finterval(x, min, max)
scores = id_scores.iloc[:,1].map(func).astype(int)
id_scores = pd.concat([id_scores[0], scores], axis=1, ignore_index=True)
return id_scores
#### print statistics
Pgakuruimei = re.compile(r'.*学群(.+学類).*')
def ex_gakuruimei(str):
mobj = Pgakuruimei.match(str)
if mobj:
return mobj.group(1)
if str.find('体育専門学群') != -1:
return '体育専門学群'
if str.find('芸術専門学群') != -1:
return '芸術専門学群'
return '不明学類'
def read_meibo(filename):
meibo = pd.read_csv(filename, skiprows=4, header=None, skipinitialspace=True)
if meibo[0][0] != 1:
print("Score Error in reading meibo file.", file=sys.stderr)
exit()
meibo = meibo[[3,1,2,4,5]]
meibo.columns = ['学籍番号', '所属学類', '学年', '氏名', '氏名カナ']
meibo['所属学類'] = meibo['所属学類'].map(ex_gakuruimei)
return meibo
def mk_gakurui_dicset(meibo):
dicset = {}
for i in range(meibo.shape[0]):
# gakuruimei = ex_gakuruimei(meibo['所属学類'][i])
gakuruimei = meibo['所属学類'][i]
if gakuruimei in dicset:
dicset[gakuruimei].add(meibo['学籍番号'][i])
else:
dicset[gakuruimei] = set([meibo['学籍番号'][i]])
return dicset
def gakurui_statistics(id_scores, meibofilename):
meibo = read_meibo(meibofilename)
gdicset = mk_gakurui_dicset(meibo)
res = []
for gname in gdicset:
aset = gdicset[gname]
selectstudents = [no in aset for no in id_scores.iloc[:,0]]
scores = id_scores.iloc[:,1][selectstudents]
res.append([gname, scores.describe()])
return res
def print_stat(scores):
print("==================", file=sys.stderr)
print("Score statistics", file=sys.stderr)
print("------------------", file=sys.stderr)
print(scores.describe(), file=sys.stderr)
def print_stat_gakurui(id_scores, meibofilename):
gakurui_sta_list = gakurui_statistics(id_scores, meibofilename)
print("==================", file=sys.stderr)
print("Gakurui statistics", file=sys.stderr)
print("------------------", file=sys.stderr)
notfirst = False
for gakuruiinfo in gakurui_sta_list:
if notfirst:
print('-------', file=sys.stderr)
else:
notfirst = True
print(gakuruiinfo[0], file=sys.stderr)
print(gakuruiinfo[1], file=sys.stderr)
def print_abcd(scores):
all = len(scores)
aplus = scores[scores>=90]
a = scores[scores<90]
aa = a[a>=80]
b = scores[scores<80]
bb = b[b>=70]
c = scores[scores<70]
cc = c[c>=60]
d = scores[scores<60]
print("=================", file=sys.stderr)
print("ABCD distribution", file=sys.stderr)
print("-----------------", file=sys.stderr)
print(f"a+ = {len(aplus)}, {len(aplus)*100/all:4.1f}%", file=sys.stderr)
print(f"a = {len(aa)}, {len(aa)*100/all:4.1f}%", file=sys.stderr)
print(f"b = {len(bb)}, {len(bb)*100/all:4.1f}%", file=sys.stderr)
print(f"c = {len(cc)}, {len(cc)*100/all:4.1f}%", file=sys.stderr)
print(f"d = {len(d)}, {len(d)*100/all:4.1f}%", file=sys.stderr)
def print_distribution(scores):
maxscores = max(scores)
numinterval = maxscores // 10 + 1
counts = np.zeros(numinterval, dtype=int)
for c in scores:
cat = c // 10
counts[cat] += 1
print("==================", file=sys.stderr)
print("Score distribution", file=sys.stderr)
print("------------------", file=sys.stderr)
print("L.score: num:", file=sys.stderr)
maxcount = max(counts)
if maxcount > 80:
unit = 80.0/maxcount
else:
unit = 1.0
for i in range(numinterval):
cat = numinterval - i - 1
print(f"{10*cat:5}- :{counts[cat]:4}: ", end="", file=sys.stderr)
for x in range(int(counts[cat]*unit)):
print("*", end="", file=sys.stderr)
print("", file=sys.stderr)
#### join
def print_only_ids(df, idlabel, ncol):
num = 0
for i in df[idlabel]:
if num == 0:
print(" ", end="", file=sys.stderr)
elif num%ncol == 0:
print(", \n ", end="", file=sys.stderr)
else:
print(", ", end="", file=sys.stderr)
print(f"{i}", end="", file=sys.stderr)
num += 1
print("", file=sys.stderr)
def join(id_scores, joinfilename, how):
# id_scores_join = pd.read_csv(joinfilename, header=None, dtype=int, skipinitialspace=True)
id_scores_join = pd.read_csv(joinfilename, header=None, dtype=object, skipinitialspace=True)
id_scores_join.fillna('0', inplace=True)
id_scores_join = id_scores_join.astype('int')
new_id_scores = pd.merge(id_scores, id_scores_join, on=0, how=how)
outer_id_scores = pd.merge(id_scores, id_scores_join, on=0, how='outer', indicator='from')
nrow_left = id_scores.shape[0]
nrow_right = id_scores_join.shape[0]
nrow_new = new_id_scores.shape[0]
nrow_outer = outer_id_scores.shape[0]
print(f"Join({how}): left({nrow_left}) + right({nrow_right}) = {how}-join({nrow_new})", file=sys.stderr)
left_only = outer_id_scores[outer_id_scores['from']=='left_only']
right_only = outer_id_scores[outer_id_scores['from']=='right_only']
print(f" #left_only = {left_only.shape[0]}: keep left scores", file=sys.stderr)
if left_only.shape[0] > 0:
print_only_ids(left_only, 0, 5)
if how == 'left':
print(f" #right_only = {right_only.shape[0]}: ignored by 'left-join'", file=sys.stderr)
else:
print(f" #right_only = {right_only.shape[0]}: keep right scores", file=sys.stderr)
if right_only.shape[0] > 0:
print_only_ids(right_only, 0, 5)
scores_sum = new_id_scores.iloc[:,1:].fillna(0).apply(sum, axis=1, raw=True)
joined_new_id_scores = pd.concat([new_id_scores.iloc[:,0], scores_sum], axis=1, ignore_index=True)
joined_new_id_scores.fillna(0, inplace=True)
# joined_new_id_scores.astype(int, inplace=True) # the inplace option is ineffective
joined_new_id_scores = joined_new_id_scores.astype(int)
return joined_new_id_scores
def twinsjoin(twins, id_scores, joinfilename):
del twins['総合評価']
id_scores.columns=['学籍番号', '総合評価']
newtwins = pd.merge(twins, id_scores, on='学籍番号', how='left')
# check correctness
twins_outer = pd.merge(twins, id_scores, on='学籍番号', how='outer', indicator='from')
left_only = twins_outer[twins_outer['from']=='left_only']
right_only = twins_outer[twins_outer['from']=='right_only']
if left_only.shape[0] > 0 or right_only.shape[0] > 0:
print("WARNING!!: occur something wrongs in 'twinsjoin'", file=sys.stderr)
print("WARNING!!: occur something wrongs in 'twinsjoin'", file=sys.stderr)
"""
nrow_left = twins.shape[0]
nrow_right = id_scores.shape[0]
nrow_new = newtwins.shape[0]
nrow_outer = twins_outer.shape[0]
print(f"Join(for Twins file): left({nrow_left}) + right({nrow_right}) = LEFT-join({nrow_new})", file=sys.stderr)
print(f" #left_only = {left_only.shape[0]}: keep twins scores (or put a zero score)", file=sys.stderr)
if left_only.shape[0] > 0:
print_only_ids(left_only, '学籍番号', 5)
print(f" #right_only = {right_only.shape[0]}: ignored", file=sys.stderr)
if right_only.shape[0] > 0:
print_only_ids(right_only, '学籍番号', 5)
"""
newtwins['総合評価'].fillna(0, inplace=True)
newscores = newtwins['総合評価'].astype('int')
del newtwins['総合評価']
newtwins = pd.concat([twins, newscores], axis=1, ignore_index=True)
newtwins.columns=['科目番号', '学籍番号', '学期区分', '学期評価', '総合評価']
new_id_scores = newtwins[['学籍番号', '総合評価']]
return newtwins, new_id_scores
#### record
def record(meibofilename, csvfilename2s):
df = read_meibo(meibofilename)
df.rename(columns={'学籍番号':0}, inplace=True)
for csvfilename2 in csvfilename2s:
df2 = pd.read_csv(csvfilename2, header=None, skipinitialspace=True)
df = pd.merge(df, df2, on=0, how='outer')
df.rename(columns={0:'学籍番号'}, inplace=True)
df = df.sort_values(by=['所属学類','学籍番号'], ascending=True)
return df
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='support tools of scoring for performance evaluation', prog='score')
parser.add_argument('csvfile')
parser.add_argument('-marksheet', nargs=2, default=None, metavar=('ref', 'desired_pscore'))
parser.add_argument('-crate', action='store_true', default=False)
parser.add_argument('-join', default=None, metavar='csvfile2')
parser.add_argument('-record', nargs='+', default=None, metavar=('csvfile2'))
parser.add_argument('-twins', action='store_true', default=False)
parser.add_argument('-adjust', nargs=3, type=float, default=None, metavar=('x', 'y', 'xmax'))
parser.add_argument('-a2djust', nargs=4, type=float, default=None, metavar=('A+', 'A', 'B', 'C'))
parser.add_argument('-interval', nargs=2, type=int, default=None, metavar=('min', 'max'))
parser.add_argument('-distribution', action='store_true', default=False)
parser.add_argument('-abcd', action='store_true', default=False)
parser.add_argument('-statistics', action='store_true', default=False)
parser.add_argument('-gakuruistat', default=None, metavar='csv-meibo-utf8')
parser.add_argument('-nostdout', action='store_true', default=False)
parser.add_argument('-output', default=None, metavar='filename')
args = parser.parse_args()
if args.marksheet and args.twins:
print("scoring error: exclusive options: -marksheet and -twins", file=sys.stderr)
exit()
if args.record and args.twins:
print("scoring error: exclusive options: -record and -twins", file=sys.stderr)
exit()
if args.record:
print("NOTICE:", file=sys.stderr)
print("-record option ignores all other options but -output option", file=sys.stderr)
df = record(args.csvfile, args.record)
if args.output:
df.to_excel(args.output, index=False)
else:
df.to_csv(sys.stdout, index=False)
exit()
if args.marksheet:
QuestionReferences = eval(open(args.marksheet[0]).read())
id_scores = marksheetScoring(args.csvfile, args.crate, int(args.marksheet[1]))
else:
if args.twins:
id_scores, twins = read_twins_upload_file(args.csvfile)
else:
# id_scores = pd.read_csv(args.csvfile, header=None, dtype=int, skipinitialspace=True)
id_scores = pd.read_csv(args.csvfile, header=None, dtype=object, skipinitialspace=True)
id_scores.fillna('0', inplace=True)
id_scores = id_scores.astype('int')
if args.join:
if args.twins:
id_scores = join(id_scores, args.join, 'left')
else:
id_scores = join(id_scores, args.join, 'outer')
if args.adjust:
id_scores = adjust(id_scores, args.adjust)
if args.a2djust:
id_scores = a2djust(id_scores, args.a2djust)
if args.interval:
id_scores = interval(id_scores, args.interval)
if args.twins:
twins, id_scores = twinsjoin(twins, id_scores, args.join)
if args.statistics:
print_stat(id_scores.iloc[:,1])
if args.abcd:
print_abcd(id_scores.iloc[:,1])
if args.gakuruistat:
print_stat_gakurui(id_scores, args.gakuruistat)
if args.distribution:
print_distribution(id_scores.iloc[:,1])
if not args.nostdout or args.output:
if args.output:
output = args.output
else:
output = sys.stdout
if args.twins:
twins.to_csv(output, index=False, encoding='cp932')
else:
id_scores.to_csv(output, index=False, header=False)
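# Example invocations (illustrative; the file names are assumptions and the
# script is assumed to be saved as score.py):
#   python score.py marksheet.csv -marksheet ref.py 100 -crate -distribution
#   python score.py scores.csv -join extra.csv -adjust 60 70 100 -statistics -output final.csv
#   python score.py twins_upload.csv -twins -join scores.csv -interval 0 100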
|
[
"pandas.read_csv",
"re.compile",
"argparse.ArgumentParser",
"pandas.merge",
"numpy.sort",
"numpy.zeros",
"pandas.concat"
] |
[((9321, 9347), 're.compile', 're.compile', (['""".*学群(.+学類).*"""'], {}), "('.*学群(.+学類).*')\n", (9331, 9347), False, 'import re\n'), ((2271, 2309), 'numpy.zeros', 'np.zeros', (['num_squestions'], {'dtype': 'np.int'}), '(num_squestions, dtype=np.int)\n', (2279, 2309), True, 'import numpy as np\n'), ((4017, 4055), 'numpy.zeros', 'np.zeros', (['num_squestions'], {'dtype': 'np.int'}), '(num_squestions, dtype=np.int)\n', (4025, 4055), True, 'import numpy as np\n'), ((6548, 6622), 'pandas.read_csv', 'pd.read_csv', (['twinsfilename'], {'skiprows': '(1)', 'header': 'None', 'skipinitialspace': '(True)'}), '(twinsfilename, skiprows=1, header=None, skipinitialspace=True)\n', (6559, 6622), True, 'import pandas as pd\n'), ((6868, 6921), 'pandas.concat', 'pd.concat', (['[twins, scores]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([twins, scores], axis=1, ignore_index=True)\n', (6877, 6921), True, 'import pandas as pd\n'), ((6997, 7065), 'pandas.concat', 'pd.concat', (["[twins['学籍番号'], twins['総合評価']]"], {'axis': '(1)', 'ignore_index': '(True)'}), "([twins['学籍番号'], twins['総合評価']], axis=1, ignore_index=True)\n", (7006, 7065), True, 'import pandas as pd\n'), ((9200, 9260), 'pandas.concat', 'pd.concat', (['[id_scores[0], scores]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([id_scores[0], scores], axis=1, ignore_index=True)\n', (9209, 9260), True, 'import pandas as pd\n'), ((9620, 9689), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'skiprows': '(4)', 'header': 'None', 'skipinitialspace': '(True)'}), '(filename, skiprows=4, header=None, skipinitialspace=True)\n', (9631, 9689), True, 'import pandas as pd\n'), ((12285, 12320), 'numpy.zeros', 'np.zeros', (['numinterval'], {'dtype': 'np.int'}), '(numinterval, dtype=np.int)\n', (12293, 12320), True, 'import numpy as np\n'), ((13517, 13592), 'pandas.read_csv', 'pd.read_csv', (['joinfilename'], {'header': 'None', 'dtype': 'object', 'skipinitialspace': '(True)'}), '(joinfilename, header=None, dtype=object, skipinitialspace=True)\n', (13528, 13592), True, 'import pandas as pd\n'), ((13708, 13758), 'pandas.merge', 'pd.merge', (['id_scores', 'id_scores_join'], {'on': '(0)', 'how': 'how'}), '(id_scores, id_scores_join, on=0, how=how)\n', (13716, 13758), True, 'import pandas as pd\n'), ((13781, 13853), 'pandas.merge', 'pd.merge', (['id_scores', 'id_scores_join'], {'on': '(0)', 'how': '"""outer"""', 'indicator': '"""from"""'}), "(id_scores, id_scores_join, on=0, how='outer', indicator='from')\n", (13789, 13853), True, 'import pandas as pd\n'), ((14819, 14895), 'pandas.concat', 'pd.concat', (['[new_id_scores.iloc[:, 0], scores_sum]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([new_id_scores.iloc[:, 0], scores_sum], axis=1, ignore_index=True)\n', (14828, 14895), True, 'import pandas as pd\n'), ((15244, 15293), 'pandas.merge', 'pd.merge', (['twins', 'id_scores'], {'on': '"""学籍番号"""', 'how': '"""left"""'}), "(twins, id_scores, on='学籍番号', how='left')\n", (15252, 15293), True, 'import pandas as pd\n'), ((15336, 15404), 'pandas.merge', 'pd.merge', (['twins', 'id_scores'], {'on': '"""学籍番号"""', 'how': '"""outer"""', 'indicator': '"""from"""'}), "(twins, id_scores, on='学籍番号', how='outer', indicator='from')\n", (15344, 15404), True, 'import pandas as pd\n'), ((16498, 16554), 'pandas.concat', 'pd.concat', (['[twins, newscores]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([twins, newscores], axis=1, ignore_index=True)\n', (16507, 16554), True, 'import pandas as pd\n'), ((17186, 17295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 
'"""support tools of scoring for performance evaluation"""', 'prog': '"""score"""'}), "(description=\n 'support tools of scoring for performance evaluation', prog='score')\n", (17209, 17295), False, 'import argparse\n'), ((5394, 5450), 'pandas.concat', 'pd.concat', (['[marubatu, ascore]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([marubatu, ascore], axis=1, ignore_index=True)\n', (5403, 5450), True, 'import pandas as pd\n'), ((7693, 7714), 'numpy.sort', 'np.sort', (['id_scores[1]'], {}), '(id_scores[1])\n', (7700, 7714), True, 'import numpy as np\n'), ((16890, 16951), 'pandas.read_csv', 'pd.read_csv', (['csvfilename2'], {'header': 'None', 'skipinitialspace': '(True)'}), '(csvfilename2, header=None, skipinitialspace=True)\n', (16901, 16951), True, 'import pandas as pd\n'), ((16965, 17001), 'pandas.merge', 'pd.merge', (['df', 'df2'], {'on': '(0)', 'how': '"""outer"""'}), "(df, df2, on=0, how='outer')\n", (16973, 17001), True, 'import pandas as pd\n'), ((19531, 19606), 'pandas.read_csv', 'pd.read_csv', (['args.csvfile'], {'header': 'None', 'dtype': 'object', 'skipinitialspace': '(True)'}), '(args.csvfile, header=None, dtype=object, skipinitialspace=True)\n', (19542, 19606), True, 'import pandas as pd\n'), ((1454, 1482), 'pandas.concat', 'pd.concat', (['[res, rr]'], {'axis': '(1)'}), '([res, rr], axis=1)\n', (1463, 1482), True, 'import pandas as pd\n'), ((1852, 1880), 'pandas.concat', 'pd.concat', (['[res, rr]'], {'axis': '(1)'}), '([res, rr], axis=1)\n', (1861, 1880), True, 'import pandas as pd\n')]
|
"""Generalized Gell-Mann matrices."""
from typing import Union
from scipy import sparse
import numpy as np
def gen_gell_mann(
ind_1: int, ind_2: int, dim: int, is_sparse: bool = False
) -> Union[np.ndarray, sparse.lil_matrix]:
r"""
Produce a generalized Gell-Mann operator [WikGM2]_.
Construct a :code:`dim`-by-:code:`dim` Hermitian operator. These matrices
span the entire space of :code:`dim`-by-:code:`dim` matrices as
:code:`ind_1` and :code:`ind_2` range from 0 to :code:`dim-1`, inclusive,
and they generalize the Pauli operators when :code:`dim = 2` and the
Gell-Mann operators when :code:`dim = 3`.
Examples
==========
The generalized Gell-Mann matrix for :code:`ind_1 = 0`, :code:`ind_2 = 1`
and :code:`dim = 2` is given as
.. math::
G_{0, 1, 2} = \begin{pmatrix}
0 & 1 \\
1 & 0
\end{pmatrix}.
This can be obtained in :code:`toqito` as follows.
>>> from toqito.matrices import gen_gell_mann
>>> gen_gell_mann(0, 1, 2)
[[0., 1.],
[1., 0.]])
The generalized Gell-Mann matrix :code:`ind_1 = 2`, :code:`ind_2 = 3`, and
:code:`dim = 4` is given as
.. math::
G_{2, 3, 4} = \begin{pmatrix}
0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0
\end{pmatrix}.
This can be obtained in :code:`toqito` as follows.
>>> from toqito.matrices import gen_gell_mann
>>> gen_gell_mann(2, 3, 4)
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 1., 0.]])
References
==========
.. [WikGM2] Wikipedia: Gell-Mann matrices,
https://en.wikipedia.org/wiki/Gell-Mann_matrices
:param ind_1: A non-negative integer from 0 to :code:`dim-1` (inclusive).
:param ind_2: A non-negative integer from 0 to :code:`dim-1` (inclusive).
:param dim: The dimension of the Gell-Mann operator.
:param is_sparse: If set to :code:`True`, the returned Gell-Mann
operator is a sparse lil_matrix and if set to
:code:`False`, the returned Gell-Mann operator is a
dense :code:`numpy` array.
:return: The generalized Gell-Mann operator.
"""
if ind_1 == ind_2:
if ind_1 == 0:
gm_op = sparse.eye(dim)
else:
scalar = np.sqrt(2 / (ind_1 * (ind_1 + 1)))
diag = np.ones((ind_1, 1))
diag = np.append(diag, -ind_1)
diag = scalar * np.append(diag, np.zeros((dim - ind_1 - 1, 1)))
gm_op = sparse.lil_matrix((dim, dim))
gm_op.setdiag(diag)
else:
e_mat = sparse.lil_matrix((dim, dim))
e_mat[ind_1, ind_2] = 1
if ind_1 < ind_2:
gm_op = e_mat + e_mat.conj().T
else:
gm_op = 1j * e_mat - 1j * e_mat.conj().T
if not is_sparse:
return gm_op.todense()
return gm_op
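As a quick sanity check (an addition for illustration, not from the original module), the generated operators can be verified to be Hermitian and, for off-diagonal index pairs, traceless:

# Illustrative check; assumes toqito and numpy are installed.
import numpy as np
from toqito.matrices import gen_gell_mann

gm = gen_gell_mann(1, 2, 4)
print(np.allclose(gm, np.asarray(gm).conj().T))  # True: Hermitian
print(np.isclose(np.trace(gm), 0.0))         # True: traceless since ind_1 != ind_2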
|
[
"scipy.sparse.lil_matrix",
"numpy.sqrt",
"numpy.ones",
"scipy.sparse.eye",
"numpy.append",
"numpy.zeros"
] |
[((2791, 2820), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(dim, dim)'], {}), '((dim, dim))\n', (2808, 2820), False, 'from scipy import sparse\n'), ((2437, 2452), 'scipy.sparse.eye', 'sparse.eye', (['dim'], {}), '(dim)\n', (2447, 2452), False, 'from scipy import sparse\n'), ((2488, 2522), 'numpy.sqrt', 'np.sqrt', (['(2 / (ind_1 * (ind_1 + 1)))'], {}), '(2 / (ind_1 * (ind_1 + 1)))\n', (2495, 2522), True, 'import numpy as np\n'), ((2542, 2561), 'numpy.ones', 'np.ones', (['(ind_1, 1)'], {}), '((ind_1, 1))\n', (2549, 2561), True, 'import numpy as np\n'), ((2581, 2604), 'numpy.append', 'np.append', (['diag', '(-ind_1)'], {}), '(diag, -ind_1)\n', (2590, 2604), True, 'import numpy as np\n'), ((2702, 2731), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(dim, dim)'], {}), '((dim, dim))\n', (2719, 2731), False, 'from scipy import sparse\n'), ((2649, 2679), 'numpy.zeros', 'np.zeros', (['(dim - ind_1 - 1, 1)'], {}), '((dim - ind_1 - 1, 1))\n', (2657, 2679), True, 'import numpy as np\n')]
|
import matplotlib.widgets as mwidgets
class Slider(mwidgets.Slider):
"""Slider widget to select a value from a floating point range.
Parameters
----------
ax : :class:`~matplotlib.axes.Axes` instance
The parent axes for the widget
value_range : (float, float)
(min, max) value allowed for value.
label : str
The slider label.
value : float
Initial value. If None, set to value in middle of value range.
on_slide : function
Callback function for slide event. Function should expect slider value.
value_fmt : str
Format string for formatting the slider text.
slidermin, slidermax : float
Used to constrain the value of this slider to the values
of other sliders.
dragging : bool
If True, slider is responsive to mouse.
pad : float
Padding (in axes coordinates) between `label`/`value_fmt` and slider.
Attributes
----------
value : float
Current slider value.
"""
def __init__(self, ax, value_range, label='', value=None, on_slide=None,
value_fmt='%1.2f', slidermin=None, slidermax=None,
dragging=True, pad=0.02):
mwidgets.AxesWidget.__init__(self, ax)
self.valmin, self.valmax = value_range
if value is None:
value = 0.5 * (self.valmin + self.valmax)
self.val = value
self.valinit = value
self.valfmt = value_fmt
y0 = 0.5
x_low = [self.valmin, value]
x_high = [value, self.valmax]
self.line_low, = ax.plot(x_low, [y0, y0], color='0.5', lw=2)
self.line_high, = ax.plot(x_high, [y0, y0], color='0.7', lw=2)
self.val_handle, = ax.plot(value, y0, 'o',
mec='0.4', mfc='0.6', markersize=8)
ax.set_xlim(value_range)
ax.set_navigate(False)
ax.set_axis_off()
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-pad, y0, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.show_value = False if value_fmt is None else True
if self.show_value:
self.valtext = ax.text(1 + pad, y0, value_fmt % value,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
self.cnt = 0
self.observers = {}
if on_slide is not None:
self.on_changed(on_slide)
# Attributes for matplotlib.widgets.Slider compatibility
self.closedmin = self.closedmax = True
@property
def value(self):
return self.val
@value.setter
def value(self, value):
self.val = value
self.line_low.set_xdata([self.valmin, value])
self.line_high.set_xdata([value, self.valmax])
self.val_handle.set_xdata([value])
if self.show_value:
self.valtext.set_text(self.valfmt % value)
def set_val(self, value):
"""Set value of slider."""
# Override matplotlib.widgets.Slider to update graphics objects.
self.value = value
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in self.observers.items():
func(value)
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
ax = plt.subplot2grid((10, 1), (0, 0), rowspan=8)
ax_slider = plt.subplot2grid((10, 1), (9, 0))
a0 = 5
x = np.arange(0.0, 1.0, 0.001)
y = np.sin(6 * np.pi * x)
line, = ax.plot(x, a0 * y, lw=2, color='red')
ax.axis([x.min(), x.max(), -10, 10])
def update(val):
amp = samp.value
line.set_ydata(amp * y)
samp = Slider(ax_slider, (0.1, 10.0), on_slide=update,
label='Amplitude:', value=a0)
plt.show()
|
[
"numpy.sin",
"matplotlib.widgets.AxesWidget.__init__",
"matplotlib.pyplot.subplot2grid",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((3822, 3866), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(10, 1)', '(0, 0)'], {'rowspan': '(8)'}), '((10, 1), (0, 0), rowspan=8)\n', (3838, 3866), True, 'import matplotlib.pyplot as plt\n'), ((3883, 3916), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(10, 1)', '(9, 0)'], {}), '((10, 1), (9, 0))\n', (3899, 3916), True, 'import matplotlib.pyplot as plt\n'), ((3937, 3963), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(0.001)'], {}), '(0.0, 1.0, 0.001)\n', (3946, 3963), True, 'import numpy as np\n'), ((3972, 3993), 'numpy.sin', 'np.sin', (['(6 * np.pi * x)'], {}), '(6 * np.pi * x)\n', (3978, 3993), True, 'import numpy as np\n'), ((4278, 4288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4286, 4288), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1253), 'matplotlib.widgets.AxesWidget.__init__', 'mwidgets.AxesWidget.__init__', (['self', 'ax'], {}), '(self, ax)\n', (1243, 1253), True, 'import matplotlib.widgets as mwidgets\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
import logging
log = logging.getLogger(__name__) # __name__ is "foo.bar" here
import numpy as np
import numbers
np.seterr(all='ignore')
def findSlice(array,lims):
start = np.ravel(np.argwhere(array>lims[0]))[0]
stop = np.ravel(np.argwhere(array<lims[1]))[-1]
return slice(int(start),int(stop))
def approx(values,approx_values):
""" returns array where every value is replaced by the closest in approx_values
This function is useful for rebinning; careful, it can be slow with many bins...
Example:
-------
approx( np.arange(0,1,0.1), [0,0.3,0.7] )
array([ 0. , 0. , 0.3, 0.3, 0.3, 0.7, 0.7, 0.7, 0.7, 0.7])
"""
# make sure they are arrays
values = np.asarray(values)
approx_values = np.asarray(approx_values)
# create outer (pairwise) difference
diff = np.abs(values[:,np.newaxis] - approx_values)
args = np.argmin(diff,axis=1)
values = approx_values[args]
#values = np.asarray( [ approx_values[np.argmin(np.abs(v-approx_values))] for v in values] )
return values
def rebin(values,bins):
""" returns array where every value is replaced by the closest in approx_values
This funciton is useful for rebinning
Example:
-------
approx( np.arange(0,1,0.1), [0,0.3,0.7] )
array([ 0. , 0. , 0.3, 0.3, 0.3, 0.7, 0.7, 0.7, 0.7, 0.7])
"""
# make sure they are arrays
bins = np.asarray(bins)
idx = np.digitize(values,bins)
idx[idx > bins.shape[0]-1] = bins.shape[0]-1
return (bins[idx]+bins[idx-1])/2
def reshapeToBroadcast(what,ref):
""" expand the 1d array 'what' to allow broadbasting to match
multidimentional array 'ref'. The two arrays have to same the same
dimensions along the first axis
"""
if what.shape == ref.shape: return what
assert what.shape[0] == ref.shape[0],\
"automatic reshaping requires same first dimention"
shape = [ref.shape[0],] + [1,]*(ref.ndim-1)
return what.reshape(shape)
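# Illustrative example (an addition, not from the original source):
# reshapeToBroadcast(np.arange(5), np.zeros((5, 3, 2))).shape -> (5, 1, 1),
# so the result broadcasts against the (5, 3, 2) reference along the first axis.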
def removeBackground(x,data,xlims=None,max_iter=100,background_regions=[],**kw):
from dualtree import dualtree
if data.ndim == 1: data = data[np.newaxis,:]
if xlims is not None:
idx = findSlice(x,xlims)
x = x[idx]
data = data[:,idx].copy()
else:
data = data.copy(); # create local copy
# has to be a list of lists ..
if background_regions != [] and isinstance(background_regions[0],numbers.Real):
background_regions = [background_regions,]
background_regions = [findSlice(x,brange) for brange in background_regions]
for i in range(len(data)):
data[i] = data[i] - dualtree.baseline(data[i],max_iter=max_iter,
background_regions=background_regions,**kw)
return x,np.squeeze(data)
def find_hist_ranges(hist,x=None,max_frac=0.1):
high_idx = np.squeeze( np.argwhere(hist>np.nanmax(hist)*max_frac) )
# remove consecutive indices
edges = high_idx[ np.gradient(high_idx).astype(int) != 1 ]
edges = [high_idx[0],] + list(edges) + [high_idx[-1],]
if x is not None:
edges = x[edges]
if len(edges)%2 == 1:
edges = edges[:-1]
n_ranges = int(len(edges)/2)
ranges = edges.reshape( (n_ranges,2) )
return ranges
|
[
"logging.getLogger",
"numpy.abs",
"numpy.digitize",
"numpy.asarray",
"numpy.squeeze",
"numpy.argwhere",
"numpy.nanmax",
"numpy.argmin",
"dualtree.dualtree.baseline",
"numpy.gradient",
"numpy.seterr"
] |
[((109, 136), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'import logging\n'), ((202, 225), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (211, 225), True, 'import numpy as np\n'), ((815, 833), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (825, 833), True, 'import numpy as np\n'), ((854, 879), 'numpy.asarray', 'np.asarray', (['approx_values'], {}), '(approx_values)\n', (864, 879), True, 'import numpy as np\n'), ((922, 967), 'numpy.abs', 'np.abs', (['(values[:, np.newaxis] - approx_values)'], {}), '(values[:, np.newaxis] - approx_values)\n', (928, 967), True, 'import numpy as np\n'), ((978, 1001), 'numpy.argmin', 'np.argmin', (['diff'], {'axis': '(1)'}), '(diff, axis=1)\n', (987, 1001), True, 'import numpy as np\n'), ((1521, 1537), 'numpy.asarray', 'np.asarray', (['bins'], {}), '(bins)\n', (1531, 1537), True, 'import numpy as np\n'), ((1548, 1573), 'numpy.digitize', 'np.digitize', (['values', 'bins'], {}), '(values, bins)\n', (1559, 1573), True, 'import numpy as np\n'), ((2820, 2836), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (2830, 2836), True, 'import numpy as np\n'), ((273, 301), 'numpy.argwhere', 'np.argwhere', (['(array > lims[0])'], {}), '(array > lims[0])\n', (284, 301), True, 'import numpy as np\n'), ((323, 351), 'numpy.argwhere', 'np.argwhere', (['(array < lims[1])'], {}), '(array < lims[1])\n', (334, 351), True, 'import numpy as np\n'), ((2696, 2791), 'dualtree.dualtree.baseline', 'dualtree.baseline', (['data[i]'], {'max_iter': 'max_iter', 'background_regions': 'background_regions'}), '(data[i], max_iter=max_iter, background_regions=\n background_regions, **kw)\n', (2713, 2791), False, 'from dualtree import dualtree\n'), ((2932, 2947), 'numpy.nanmax', 'np.nanmax', (['hist'], {}), '(hist)\n', (2941, 2947), True, 'import numpy as np\n'), ((3017, 3038), 'numpy.gradient', 'np.gradient', (['high_idx'], {}), '(high_idx)\n', (3028, 3038), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
m_f = np.load('objects/simulation_model_freq.npy')[:50]
m_p = np.load('objects/simulation_model_power.npy')[:50]
eeg_f = np.load('objects/real_eeg_freq.npy0.npy')[:50]
eeg_p = np.load('objects/real_eeg_power_0.npy')[:50]
plt.figure()
plt.semilogy(eeg_f, eeg_p,linewidth=2.0,c = 'b')
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
plt.title('Power spectrum (scipy.signal.welch)')
plt.show()
|
[
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((53, 76), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (66, 76), True, 'import matplotlib.pyplot as plt\n'), ((301, 313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (311, 313), True, 'import matplotlib.pyplot as plt\n'), ((314, 362), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['eeg_f', 'eeg_p'], {'linewidth': '(2.0)', 'c': '"""b"""'}), "(eeg_f, eeg_p, linewidth=2.0, c='b')\n", (326, 362), True, 'import matplotlib.pyplot as plt\n'), ((363, 391), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency [Hz]"""'], {}), "('frequency [Hz]')\n", (373, 391), True, 'import matplotlib.pyplot as plt\n'), ((392, 429), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Linear spectrum [V RMS]"""'], {}), "('Linear spectrum [V RMS]')\n", (402, 429), True, 'import matplotlib.pyplot as plt\n'), ((430, 478), 'matplotlib.pyplot.title', 'plt.title', (['"""Power spectrum (scipy.signal.welch)"""'], {}), "('Power spectrum (scipy.signal.welch)')\n", (439, 478), True, 'import matplotlib.pyplot as plt\n'), ((479, 489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (487, 489), True, 'import matplotlib.pyplot as plt\n'), ((84, 128), 'numpy.load', 'np.load', (['"""objects/simulation_model_freq.npy"""'], {}), "('objects/simulation_model_freq.npy')\n", (91, 128), True, 'import numpy as np\n'), ((140, 185), 'numpy.load', 'np.load', (['"""objects/simulation_model_power.npy"""'], {}), "('objects/simulation_model_power.npy')\n", (147, 185), True, 'import numpy as np\n'), ((199, 240), 'numpy.load', 'np.load', (['"""objects/real_eeg_freq.npy0.npy"""'], {}), "('objects/real_eeg_freq.npy0.npy')\n", (206, 240), True, 'import numpy as np\n'), ((254, 293), 'numpy.load', 'np.load', (['"""objects/real_eeg_power_0.npy"""'], {}), "('objects/real_eeg_power_0.npy')\n", (261, 293), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.statistics Provides statistical functions.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import numpy as np
# Import astronomical modules
from astropy.stats import sigma_clip, sigma_clipped_stats
# Import the relevant PTS classes and modules
from . import general
from ..basics.mask import Mask
# -----------------------------------------------------------------
# Calculate sigma-to-FWHM and FWHM-to-sigma conversion factors
sigma_to_fwhm = (8 * np.log(2))**0.5
fwhm_to_sigma = 1.0 / sigma_to_fwhm
# -----------------------------------------------------------------
def sigma_clip_mask_list(data, sigma=3.0, mask=None):
"""
    Sigma-clip a 1D sequence and flag the clipped elements in a mask.
    :param data: 1D sequence of values to clip
    :param sigma: sigma level used for the clipping
    :param mask: optional existing mask (list or array) to update
    :return: the new or updated mask, with clipped elements set to True
"""
masked_list = sigma_clip(data, sigma=sigma, iters=None, copy=False)
new_mask = copy.deepcopy(mask) if mask is not None else [0]*len(data)
for i, masked in enumerate(masked_list.mask):
if masked: new_mask[i] = True
# Return the new or updated mask
return new_mask
# -----------------------------------------------------------------
def sigma_clip_mask(data, sigma_level=3.0, mask=None):
"""
    Sigma-clip the (unmasked) pixel values of a 2D frame and return a Mask
    flagging the clipped pixels.
    :param data: 2D data array
    :param sigma_level: sigma level used for the clipping
    :param mask: optional existing mask of pixels to ignore
    :return: the new or updated Mask
"""
# Split the x, y and z values of the data, without the masked values
x_values, y_values, z_values = general.split_xyz(data, mask=mask)
# Sigma-clip z-values that are outliers
masked_z_values = sigma_clip(z_values, sigma=sigma_level, iters=None, copy=False)
# Copy the mask or create a new one if none was provided
new_mask = copy.deepcopy(mask) if mask is not None else Mask(np.zeros_like(data))
for i, masked in enumerate(masked_z_values.mask):
if masked:
x = x_values[i]
y = y_values[i]
new_mask[y,x] = True
#if not isinstance(new_mask, Mask): print(new_mask, mask)
# Assert the mask is of type 'Mask'
assert isinstance(new_mask, Mask)
# Return the new or updated mask
return new_mask
# -----------------------------------------------------------------
def sigma_clipped_median(data, sigma=3.0, mask=None):
"""
    Calculate the sigma-clipped median of the data.
    :param data: input data array
    :param sigma: sigma level used for the clipping
    :param mask: optional mask of values to ignore
    :return: the sigma-clipped median
"""
# Calculate the sigma-clipped mean and median
_, median, _ = sigma_clipped_stats(data, mask=mask, sigma=sigma)
# Return the median value
return median
# -----------------------------------------------------------------
def sigma_clipped_statistics(data, sigma=3.0, mask=None):
"""
    Calculate the sigma-clipped mean, median and standard deviation of the data.
    :param data: input data array
    :param sigma: sigma level used for the clipping
    :param mask: optional mask of values to ignore
    :return: (mean, median, stddev) after sigma clipping
"""
# Calculate the sigma-clipped mean and median
mean, median, stddev = sigma_clipped_stats(data, mask=mask, sigma=sigma)
# Return the statistical parameters
return mean, median, stddev
# -----------------------------------------------------------------
def sigma_clip_split(input_list, criterion, sigma=3.0, only_high=False, only_low=False, nans="low"):
"""
    Split a list of items into a valid and an invalid group by sigma-clipping
    the value of a criterion function evaluated for each item.
    :param input_list: items to split
    :param criterion: function mapping an item to a scalar value
    :param sigma: sigma level used for the clipping
    :param only_high: only reject clipped items with values above the mean
    :param only_low: only reject clipped items with values below the mean
    :param nans: how NaN criterion values should be treated (currently unused)
    :return: (valid_list, invalid_list)
"""
# Initialize an empty list of widths
determinants = []
# Loop over all the star candidates and calculate their width
for item in input_list: determinants.append(criterion(item))
    # Use sigma clipping to separate stars and unidentified objects
mask = sigma_clip_mask_list(determinants, sigma=sigma)
# Calculate the mean value of the determinants that are not masked
mean = np.ma.mean(np.ma.masked_array(determinants, mask=mask))
    # Create a separate list for the stars and for the ufos
valid_list = []
invalid_list = []
# Loop over all items in the input list, putting them in either the valid or invalid list
for index, item in enumerate(input_list):
value = criterion(item)
if only_high:
if mask[index] and value > mean: invalid_list.append(item)
else: valid_list.append(item)
elif only_low:
if mask[index] and value < mean: invalid_list.append(item)
else: valid_list.append(item)
else:
if mask[index]: invalid_list.append(item)
else: valid_list.append(item)
# Return the valid and invalid lists
return valid_list, invalid_list
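# Illustrative sketch (hypothetical names, not part of the original module):
# split detected sources on their FWHM, rejecting only the high-FWHM outliers.
#
#   stars, ufos = sigma_clip_split(sources, criterion=lambda s: s.fwhm,
#                                   sigma=3.0, only_high=True)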
# -----------------------------------------------------------------
def cutoff(values, method, limit):
"""
    Determine a cutoff value for a list of values, either from a percentage
    of the sorted values or from sigma clipping.
    :param values: input values
    :param method: "percentage" or "sigma_clip"
    :param limit: fraction of values above the cutoff ("percentage") or the
                  sigma level ("sigma_clip")
"""
# Percentage method
if method == "percentage":
# Create a sorted list for the input values
sorted_values = sorted(values)
# Determine the splitting point
split = (1.0-limit) * len(sorted_values)
index = int(round(split))
# Return the corresponding value in the sorted list
return sorted_values[index]
# Sigma-clipping method
elif method == "sigma_clip":
# Perform sigma clipping on the input list
masked_values = sigma_clip(np.array(values), sigma=limit, iters=None, copy=False)
# Calculate the maximum of the masked array
return np.ma.max(masked_values)
    else: raise ValueError("Invalid cutoff method (must be 'percentage' or 'sigma_clip')")
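# Illustrative examples (hypothetical values, not part of the original module):
#   cutoff(widths, "percentage", 0.1)   # value above which roughly 10% of widths lie
#   cutoff(widths, "sigma_clip", 3.0)   # largest width surviving 3-sigma clipping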
# -----------------------------------------------------------------
|
[
"numpy.ma.max",
"astropy.stats.sigma_clip",
"numpy.log",
"numpy.array",
"copy.deepcopy",
"numpy.ma.masked_array",
"astropy.stats.sigma_clipped_stats",
"numpy.zeros_like"
] |
[((1256, 1309), 'astropy.stats.sigma_clip', 'sigma_clip', (['data'], {'sigma': 'sigma', 'iters': 'None', 'copy': '(False)'}), '(data, sigma=sigma, iters=None, copy=False)\n', (1266, 1309), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats\n'), ((1979, 2042), 'astropy.stats.sigma_clip', 'sigma_clip', (['z_values'], {'sigma': 'sigma_level', 'iters': 'None', 'copy': '(False)'}), '(z_values, sigma=sigma_level, iters=None, copy=False)\n', (1989, 2042), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats\n'), ((2854, 2903), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['data'], {'mask': 'mask', 'sigma': 'sigma'}), '(data, mask=mask, sigma=sigma)\n', (2873, 2903), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats\n'), ((3263, 3312), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['data'], {'mask': 'mask', 'sigma': 'sigma'}), '(data, mask=mask, sigma=sigma)\n', (3282, 3312), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats\n'), ((957, 966), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (963, 966), True, 'import numpy as np\n'), ((1326, 1345), 'copy.deepcopy', 'copy.deepcopy', (['mask'], {}), '(mask)\n', (1339, 1345), False, 'import copy\n'), ((2120, 2139), 'copy.deepcopy', 'copy.deepcopy', (['mask'], {}), '(mask)\n', (2133, 2139), False, 'import copy\n'), ((4150, 4193), 'numpy.ma.masked_array', 'np.ma.masked_array', (['determinants'], {'mask': 'mask'}), '(determinants, mask=mask)\n', (4168, 4193), True, 'import numpy as np\n'), ((2170, 2189), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (2183, 2189), True, 'import numpy as np\n'), ((5778, 5802), 'numpy.ma.max', 'np.ma.max', (['masked_values'], {}), '(masked_values)\n', (5787, 5802), True, 'import numpy as np\n'), ((5655, 5671), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (5663, 5671), True, 'import numpy as np\n')]
|
import sys
import numpy as np
from skimage.measure import label
def getSegType(mid):
m_type = np.uint64
if mid<2**8:
m_type = np.uint8
elif mid<2**16:
m_type = np.uint16
elif mid<2**32:
m_type = np.uint32
return m_type
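# Quick illustration (not part of the original module): the smallest unsigned
# dtype able to hold a given maximum segment id is returned, e.g.
#   getSegType(200)     -> np.uint8
#   getSegType(70000)   -> np.uint32   (70000 >= 2**16, so uint16 is too small)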
def seg2Count(seg,do_sort=True,rm_zero=False):
sm = seg.max()
if sm==0:
return None,None
if sm>1:
segIds,segCounts = np.unique(seg,return_counts=True)
if rm_zero:
segCounts = segCounts[segIds>0]
segIds = segIds[segIds>0]
if do_sort:
sort_id = np.argsort(-segCounts)
segIds=segIds[sort_id]
segCounts=segCounts[sort_id]
else:
segIds=np.array([1])
segCounts=np.array([np.count_nonzero(seg)])
return segIds, segCounts
def removeSeg(seg, did, invert=False):
sm = seg.max()
did = did[did<=sm]
if invert:
rl = np.zeros(1+sm).astype(seg.dtype)
rl[did] = did
else:
rl = np.arange(1+sm).astype(seg.dtype)
rl[did] = 0
return rl[seg]
def remove_small(seg, thres=100,bid=None):
if thres>0:
if bid is None:
uid, uc = np.unique(seg, return_counts=True)
bid = uid[uc<thres]
if len(bid)>0:
sz = seg.shape
seg = removeSeg(seg,bid)
return seg
def relabel(seg, uid=None,nid=None,do_sort=False,do_type=False):
if seg is None or seg.max()==0:
return seg
if do_sort:
uid,_ = seg2Count(seg,do_sort=True)
else:
# get the unique labels
if uid is None:
uid = np.unique(seg)
else:
uid = np.array(uid)
uid = uid[uid>0] # leave 0 as 0, the background seg-id
# get the maximum label for the segment
mid = int(max(uid)) + 1
# create an array from original segment id to reduced id
# format opt
m_type = seg.dtype
if do_type:
mid2 = len(uid) if nid is None else max(nid)+1
m_type = getSegType(mid2)
mapping = np.zeros(mid, dtype=m_type)
if nid is None:
mapping[uid] = np.arange(1,1+len(uid), dtype=m_type)
else:
mapping[uid] = nid.astype(m_type)
# if uid is given, need to remove bigger seg id
seg[seg>=mid] = 0
return mapping[seg]
def get_bb(seg, do_count=False):
dim = len(seg.shape)
a=np.where(seg>0)
if len(a[0])==0:
return [-1]*dim*2
out=[]
for i in range(dim):
out+=[a[i].min(), a[i].max()]
if do_count:
out+=[len(a[0])]
return out
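# Illustrative example (not part of the original module): for a 2D mask with a
# single foreground blob, get_bb returns [row_min, row_max, col_min, col_max]
# (plus the foreground voxel count when do_count=True), e.g.
#   m = np.zeros((5, 5), int); m[1:3, 2:4] = 1
#   get_bb(m)                 # -> [1, 2, 2, 3]
#   get_bb(m, do_count=True)  # -> [1, 2, 2, 3, 4]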
def label_chunk(get_chunk, numC, rr=1, rm_sz=0, m_type=np.uint64):
# label chunks or slices
mid = 0
seg = [None]*numC
for zi in range(numC):
        print('%d/%d [%d], ' % (zi, numC, mid), end='')  # keep progress on one line
sys.stdout.flush()
tmp = get_chunk(zi)>0
sz = tmp.shape
numD = len(sz)
if numD==2:
tmp = tmp[np.newaxis]
seg_c = np.zeros(sz).astype(m_type)
bb=get_bb(tmp)
print(bb)
seg_c[bb[0]:bb[1]+1,bb[2]:bb[3]+1,bb[4]:bb[5]+1] = \
label(tmp[bb[0]:bb[1]+1,bb[2]:bb[3]+1,bb[4]:bb[5]+1]).astype(m_type)
if rm_sz>0:
# preserve continuous id
seg_c = remove_small(seg_c, rm_sz)
seg_c = relabel(seg_c).astype(m_type)
if zi == 0: # first seg, relabel seg index
print('_%d_'%0)
slice_b = seg_c[-1]
seg[zi] = seg_c[:,::rr,::rr] # save a low-res one
mid += seg[zi].max()
rlA = np.arange(mid+1,dtype=m_type)
else: # link to previous slice
slice_t = seg_c[0]
slices = label(np.stack([slice_b>0, slice_t>0],axis=0)).astype(m_type)
# create mapping for seg cur
lc = np.unique(seg_c);lc=lc[lc>0]
rl_c = np.zeros(int(lc.max())+1, dtype=int)
# merge curr seg
# for 1 pre seg id -> slices id -> cur seg ids
l0_p = np.unique(slice_b*(slices[0]>0))
bbs = get_bb_label2d_v2(slice_b,uid=l0_p)[:,1:]
#bbs2 = get_bb_label2d_v2(slices[1])
print('_%d_'%len(l0_p))
for i,l in enumerate(l0_p):
bb = bbs[i]
sid = np.unique(slices[0,bb[0]:bb[1]+1,bb[2]:bb[3]+1]*(slice_b[bb[0]:bb[1]+1,bb[2]:bb[3]+1]==l))
sid = sid[sid>0]
# multiple ids
if len(sid)==1:
#bb = bbs2[bbs2[:,0]==sid,1:]
#cid = np.unique(slice_t[bb[0]:bb[1]+1,bb[2]:bb[3]+1]*(slices[1,bb[0]:bb[1]+1,bb[2]:bb[3]+1]==sid))
cid = np.unique(slice_t*(slices[1]==sid))
else:
cid = np.unique(slice_t*np.in1d(slices[1].reshape(-1),sid).reshape(sz[-2:]))
rl_c[cid[cid>0]] = l
# new id
new_num = np.where(rl_c==0)[0][1:] # except the first one
new_id = np.arange(mid+1,mid+1+len(new_num),dtype=m_type)
rl_c[new_num] = new_id
slice_b = rl_c[seg_c[-1]] # save a high-res
seg[zi] = rl_c[seg_c[:,::rr,::rr]]
mid += len(new_num)
# update global id
rlA = np.hstack([rlA,new_id])
# merge prev seg
# for 1 cur seg id -> slices id -> prev seg ids
l1_c = np.unique(slice_t*(slices[1]>0))
for l in l1_c:
sid = np.unique(slices[1]*(slice_t==l))
sid = sid[sid>0]
pid = np.unique(slice_b*np.in1d(slices[0].reshape(-1),sid).reshape(sz[-2:]))
pid = pid[pid>0]
# get all previous m-to-1 labels
pid_p = np.where(np.in1d(rlA,rlA[pid]))[0]
if len(pid_p)>1:
rlA[pid_p] = pid.max()
# memory reduction: each seg
m2_type = getSegType(seg[zi].max())
seg[zi] = seg[zi].astype(m2_type)
# memory reduction: final output
m2_type = getSegType(rlA.max())
rlA = rlA.astype(m2_type)
print('output type:',m2_type)
return rlA[np.vstack(seg)]
def get_bb_label3d_v2(seg,do_count=False, uid=None):
sz = seg.shape
assert len(sz)==3
if uid is None:
uid = np.unique(seg)
uid = uid[uid>0]
um = int(uid.max())
out = np.zeros((1+um,7+do_count),dtype=np.uint32)
out[:,0] = np.arange(out.shape[0])
out[:,1] = sz[0]
out[:,3] = sz[1]
out[:,5] = sz[2]
# for each slice
zids = np.where((seg>0).sum(axis=1).sum(axis=1)>0)[0]
for zid in zids:
sid = np.unique(seg[zid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,1] = np.minimum(out[sid,1],zid)
out[sid,2] = np.maximum(out[sid,2],zid)
# for each row
rids = np.where((seg>0).sum(axis=0).sum(axis=1)>0)[0]
for rid in rids:
sid = np.unique(seg[:,rid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,3] = np.minimum(out[sid,3],rid)
out[sid,4] = np.maximum(out[sid,4],rid)
# for each col
cids = np.where((seg>0).sum(axis=0).sum(axis=0)>0)[0]
for cid in cids:
sid = np.unique(seg[:,:,cid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,5] = np.minimum(out[sid,5],cid)
out[sid,6] = np.maximum(out[sid,6],cid)
if do_count:
ui,uc = np.unique(seg,return_counts=True)
out[ui[ui<=um],-1]=uc[ui<=um]
return out[uid]
|
[
"numpy.unique",
"numpy.minimum",
"numpy.hstack",
"numpy.where",
"numpy.in1d",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.count_nonzero",
"numpy.stack",
"numpy.vstack",
"skimage.measure.label",
"numpy.maximum",
"sys.stdout.flush",
"numpy.arange"
] |
[((2022, 2049), 'numpy.zeros', 'np.zeros', (['mid'], {'dtype': 'm_type'}), '(mid, dtype=m_type)\n', (2030, 2049), True, 'import numpy as np\n'), ((2348, 2365), 'numpy.where', 'np.where', (['(seg > 0)'], {}), '(seg > 0)\n', (2356, 2365), True, 'import numpy as np\n'), ((6322, 6371), 'numpy.zeros', 'np.zeros', (['(1 + um, 7 + do_count)'], {'dtype': 'np.uint32'}), '((1 + um, 7 + do_count), dtype=np.uint32)\n', (6330, 6371), True, 'import numpy as np\n'), ((6381, 6404), 'numpy.arange', 'np.arange', (['out.shape[0]'], {}), '(out.shape[0])\n', (6390, 6404), True, 'import numpy as np\n'), ((410, 444), 'numpy.unique', 'np.unique', (['seg'], {'return_counts': '(True)'}), '(seg, return_counts=True)\n', (419, 444), True, 'import numpy as np\n'), ((712, 725), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (720, 725), True, 'import numpy as np\n'), ((2758, 2776), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2774, 2776), False, 'import sys\n'), ((6101, 6115), 'numpy.vstack', 'np.vstack', (['seg'], {}), '(seg)\n', (6110, 6115), True, 'import numpy as np\n'), ((6248, 6262), 'numpy.unique', 'np.unique', (['seg'], {}), '(seg)\n', (6257, 6262), True, 'import numpy as np\n'), ((6583, 6602), 'numpy.unique', 'np.unique', (['seg[zid]'], {}), '(seg[zid])\n', (6592, 6602), True, 'import numpy as np\n'), ((6661, 6689), 'numpy.minimum', 'np.minimum', (['out[sid, 1]', 'zid'], {}), '(out[sid, 1], zid)\n', (6671, 6689), True, 'import numpy as np\n'), ((6709, 6737), 'numpy.maximum', 'np.maximum', (['out[sid, 2]', 'zid'], {}), '(out[sid, 2], zid)\n', (6719, 6737), True, 'import numpy as np\n'), ((6849, 6871), 'numpy.unique', 'np.unique', (['seg[:, rid]'], {}), '(seg[:, rid])\n', (6858, 6871), True, 'import numpy as np\n'), ((6929, 6957), 'numpy.minimum', 'np.minimum', (['out[sid, 3]', 'rid'], {}), '(out[sid, 3], rid)\n', (6939, 6957), True, 'import numpy as np\n'), ((6977, 7005), 'numpy.maximum', 'np.maximum', (['out[sid, 4]', 'rid'], {}), '(out[sid, 4], rid)\n', (6987, 7005), True, 'import numpy as np\n'), ((7121, 7146), 'numpy.unique', 'np.unique', (['seg[:, :, cid]'], {}), '(seg[:, :, cid])\n', (7130, 7146), True, 'import numpy as np\n'), ((7203, 7231), 'numpy.minimum', 'np.minimum', (['out[sid, 5]', 'cid'], {}), '(out[sid, 5], cid)\n', (7213, 7231), True, 'import numpy as np\n'), ((7251, 7279), 'numpy.maximum', 'np.maximum', (['out[sid, 6]', 'cid'], {}), '(out[sid, 6], cid)\n', (7261, 7279), True, 'import numpy as np\n'), ((7312, 7346), 'numpy.unique', 'np.unique', (['seg'], {'return_counts': '(True)'}), '(seg, return_counts=True)\n', (7321, 7346), True, 'import numpy as np\n'), ((588, 610), 'numpy.argsort', 'np.argsort', (['(-segCounts)'], {}), '(-segCounts)\n', (598, 610), True, 'import numpy as np\n'), ((1174, 1208), 'numpy.unique', 'np.unique', (['seg'], {'return_counts': '(True)'}), '(seg, return_counts=True)\n', (1183, 1208), True, 'import numpy as np\n'), ((1608, 1622), 'numpy.unique', 'np.unique', (['seg'], {}), '(seg)\n', (1617, 1622), True, 'import numpy as np\n'), ((1655, 1668), 'numpy.array', 'np.array', (['uid'], {}), '(uid)\n', (1663, 1668), True, 'import numpy as np\n'), ((3527, 3559), 'numpy.arange', 'np.arange', (['(mid + 1)'], {'dtype': 'm_type'}), '(mid + 1, dtype=m_type)\n', (3536, 3559), True, 'import numpy as np\n'), ((3780, 3796), 'numpy.unique', 'np.unique', (['seg_c'], {}), '(seg_c)\n', (3789, 3796), True, 'import numpy as np\n'), ((3972, 4008), 'numpy.unique', 'np.unique', (['(slice_b * (slices[0] > 0))'], {}), '(slice_b * (slices[0] > 0))\n', (3981, 4008), True, 'import 
numpy as np\n'), ((5234, 5258), 'numpy.hstack', 'np.hstack', (['[rlA, new_id]'], {}), '([rlA, new_id])\n', (5243, 5258), True, 'import numpy as np\n'), ((5366, 5402), 'numpy.unique', 'np.unique', (['(slice_t * (slices[1] > 0))'], {}), '(slice_t * (slices[1] > 0))\n', (5375, 5402), True, 'import numpy as np\n'), ((754, 775), 'numpy.count_nonzero', 'np.count_nonzero', (['seg'], {}), '(seg)\n', (770, 775), True, 'import numpy as np\n'), ((917, 933), 'numpy.zeros', 'np.zeros', (['(1 + sm)'], {}), '(1 + sm)\n', (925, 933), True, 'import numpy as np\n'), ((995, 1012), 'numpy.arange', 'np.arange', (['(1 + sm)'], {}), '(1 + sm)\n', (1004, 1012), True, 'import numpy as np\n'), ((2924, 2936), 'numpy.zeros', 'np.zeros', (['sz'], {}), '(sz)\n', (2932, 2936), True, 'import numpy as np\n'), ((3070, 3131), 'skimage.measure.label', 'label', (['tmp[bb[0]:bb[1] + 1, bb[2]:bb[3] + 1, bb[4]:bb[5] + 1]'], {}), '(tmp[bb[0]:bb[1] + 1, bb[2]:bb[3] + 1, bb[4]:bb[5] + 1])\n', (3075, 3131), False, 'from skimage.measure import label\n'), ((4241, 4351), 'numpy.unique', 'np.unique', (['(slices[0, bb[0]:bb[1] + 1, bb[2]:bb[3] + 1] * (slice_b[bb[0]:bb[1] + 1, bb\n [2]:bb[3] + 1] == l))'], {}), '(slices[0, bb[0]:bb[1] + 1, bb[2]:bb[3] + 1] * (slice_b[bb[0]:bb[1\n ] + 1, bb[2]:bb[3] + 1] == l))\n', (4250, 4351), True, 'import numpy as np\n'), ((5448, 5485), 'numpy.unique', 'np.unique', (['(slices[1] * (slice_t == l))'], {}), '(slices[1] * (slice_t == l))\n', (5457, 5485), True, 'import numpy as np\n'), ((4624, 4663), 'numpy.unique', 'np.unique', (['(slice_t * (slices[1] == sid))'], {}), '(slice_t * (slices[1] == sid))\n', (4633, 4663), True, 'import numpy as np\n'), ((4872, 4891), 'numpy.where', 'np.where', (['(rl_c == 0)'], {}), '(rl_c == 0)\n', (4880, 4891), True, 'import numpy as np\n'), ((3666, 3710), 'numpy.stack', 'np.stack', (['[slice_b > 0, slice_t > 0]'], {'axis': '(0)'}), '([slice_b > 0, slice_t > 0], axis=0)\n', (3674, 3710), True, 'import numpy as np\n'), ((5723, 5745), 'numpy.in1d', 'np.in1d', (['rlA', 'rlA[pid]'], {}), '(rlA, rlA[pid])\n', (5730, 5745), True, 'import numpy as np\n')]
|
"""
LFW dataloading
"""
import argparse
import time
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import os
import glob
import matplotlib.pyplot as plt
class LFWDataset(Dataset):
def __init__(self, path_to_folder: str, transform) -> None:
self.imgs_path = path_to_folder
file_list = glob.glob(self.imgs_path + "*")
# print(file_list)
self.data = []
for class_path in file_list:
class_name = class_path.split("\\")[-1]
for img_path in glob.glob(class_path + "\\*.jpg"):
self.data.append([img_path, class_name])
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, index: int) -> torch.Tensor:
entry = self.data[index]
image = Image.open(entry[0])
label = entry[1]
return self.transform(image), label
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-path_to_folder', default='lfw/', type=str)
parser.add_argument('-batch_size', default=1028, type=int)
parser.add_argument('-num_workers', default=0, type=int)
parser.add_argument('-visualize_batch', action='store_true')
parser.add_argument('-get_timing', action='store_true')
parser.add_argument('-batches_to_check', default=5, type=int)
args = parser.parse_args()
lfw_trans = transforms.Compose([
transforms.RandomAffine(5, (0.1, 0.1), (0.5, 2.0)),
transforms.ToTensor()
])
# Define dataset
dataset = LFWDataset(args.path_to_folder, lfw_trans)
# Define dataloader
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers
)
if args.visualize_batch:
        # Visualize a batch of images
figure = plt.figure(figsize=(14, 8))
cols, rows = int(len(dataloader)/2), 2
batch = next(iter(dataloader))
images = batch[0]
labels = batch[1]
for i in range(1, cols * rows + 1):
img, label = images[i - 1], labels[i - 1]
figure.add_subplot(rows, cols, i)
plt.title(label)
plt.axis("off")
plt.imshow(img.permute(1,2,0), cmap="gray")
plt.savefig("visualization.jpg")
if args.get_timing:
# lets do some repetitions
res = [ ]
for _ in range(5):
start = time.time()
for batch_idx, batch in enumerate(dataloader):
if batch_idx > args.batches_to_check:
break
end = time.time()
res.append(end - start)
res = np.array(res)
print(f'Timing: {np.mean(res)}+-{np.std(res)}')
|
[
"numpy.mean",
"PIL.Image.open",
"matplotlib.pyplot.savefig",
"torchvision.transforms.RandomAffine",
"argparse.ArgumentParser",
"numpy.std",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.figure",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.title",
"torchvision.transforms.ToTensor",
"time.time",
"glob.glob"
] |
[((1044, 1069), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1067, 1069), False, 'import argparse\n'), ((1762, 1859), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers'}), '(dataset, batch_size=args.batch_size, shuffle=False, num_workers=\n args.num_workers)\n', (1772, 1859), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((398, 429), 'glob.glob', 'glob.glob', (["(self.imgs_path + '*')"], {}), "(self.imgs_path + '*')\n", (407, 429), False, 'import glob\n'), ((895, 915), 'PIL.Image.open', 'Image.open', (['entry[0]'], {}), '(entry[0])\n', (905, 915), False, 'from PIL import Image\n'), ((1990, 2017), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (2000, 2017), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2453), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization.jpg"""'], {}), "('visualization.jpg')\n", (2432, 2453), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2853), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2848, 2853), True, 'import numpy as np\n'), ((597, 630), 'glob.glob', 'glob.glob', (["(class_path + '\\\\*.jpg')"], {}), "(class_path + '\\\\*.jpg')\n", (606, 630), False, 'import glob\n'), ((1540, 1590), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['(5)', '(0.1, 0.1)', '(0.5, 2.0)'], {}), '(5, (0.1, 0.1), (0.5, 2.0))\n', (1563, 1590), False, 'from torchvision import transforms\n'), ((1600, 1621), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1619, 1621), False, 'from torchvision import transforms\n'), ((2312, 2328), 'matplotlib.pyplot.title', 'plt.title', (['label'], {}), '(label)\n', (2321, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2356), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2349, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2595, 2606), 'time.time', 'time.time', ([], {}), '()\n', (2604, 2606), False, 'import time\n'), ((2764, 2775), 'time.time', 'time.time', ([], {}), '()\n', (2773, 2775), False, 'import time\n'), ((2879, 2891), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (2886, 2891), True, 'import numpy as np\n'), ((2895, 2906), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (2901, 2906), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from rsgeo.geometry import Polygon # noqa
class TestPolygon:
def setup_method(self):
self.p = Polygon([(0, 0), (1, 1), (1, 0), (0, 0)])
def test_repr(self):
str_repr = str(self.p)
exp = "Polygon([(0, 0), (1, 1), (1, 0), (0, 0)])"
assert str_repr == exp
def test_seq_to_2darray(self):
seq = [(1, 2), (3, 4)]
res = self.p._seq_to_2darray(seq)
np.testing.assert_array_equal(res, np.array([[1, 2], [3, 4]]))
def test_seq_to_2darray_sad_case(self):
seq = [(1, 2, 3), (4, 5, 6)]
with pytest.raises(ValueError):
_ = self.p._seq_to_2darray(seq)
@pytest.mark.parametrize("x, expected", [
(np.array([1, 2, 3]), np.array([1, 2, 3])),
(np.array([[1], [2], [3]]), np.array([1, 2, 3])),
])
def test_to_1d(self, x, expected):
result = self.p._to_1d(x)
np.testing.assert_array_equal(result, expected)
def test_to_1d_sad_case(self):
x = np.array([(1, 2, 3), (4, 5, 6)])
with pytest.raises(ValueError):
_ = self.p._to_1d(x)
def test_contains(self, xs, ys):
res = self.p.contains(xs, ys)
np.testing.assert_array_equal(res, np.array([False, False, False, True]))
def test_distance(self, xs, ys):
result = self.p.distance(xs, ys)
np.testing.assert_array_equal(result, np.array([0, 0, 1.4142135623730951, 0]))
|
[
"numpy.testing.assert_array_equal",
"numpy.array",
"pytest.raises",
"rsgeo.geometry.Polygon"
] |
[((143, 184), 'rsgeo.geometry.Polygon', 'Polygon', (['[(0, 0), (1, 1), (1, 0), (0, 0)]'], {}), '([(0, 0), (1, 1), (1, 0), (0, 0)])\n', (150, 184), False, 'from rsgeo.geometry import Polygon\n'), ((922, 969), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'expected'], {}), '(result, expected)\n', (951, 969), True, 'import numpy as np\n'), ((1018, 1050), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {}), '([(1, 2, 3), (4, 5, 6)])\n', (1026, 1050), True, 'import numpy as np\n'), ((483, 509), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (491, 509), True, 'import numpy as np\n'), ((606, 631), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (619, 631), False, 'import pytest\n'), ((1064, 1089), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1077, 1089), False, 'import pytest\n'), ((1243, 1280), 'numpy.array', 'np.array', (['[False, False, False, True]'], {}), '([False, False, False, True])\n', (1251, 1280), True, 'import numpy as np\n'), ((1407, 1446), 'numpy.array', 'np.array', (['[0, 0, 1.4142135623730951, 0]'], {}), '([0, 0, 1.4142135623730951, 0])\n', (1415, 1446), True, 'import numpy as np\n'), ((733, 752), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (741, 752), True, 'import numpy as np\n'), ((754, 773), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (762, 773), True, 'import numpy as np\n'), ((785, 810), 'numpy.array', 'np.array', (['[[1], [2], [3]]'], {}), '([[1], [2], [3]])\n', (793, 810), True, 'import numpy as np\n'), ((812, 831), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (820, 831), True, 'import numpy as np\n')]
|
import numpy as np
from sim.sim2d import sim_run
# Simulator options.
options = {}
options['FIG_SIZE'] = [8,8]
options['OBSTACLES'] = True
class ModelPredictiveControl:
def __init__(self):
self.horizon = 20
self.dt = 0.2
# Reference or set point the controller will achieve.
self.reference1 = [10, 0, 45*3.14/180]
self.reference2 = None
self.x_obs = 5
self.y_obs = 0.1
def plant_model(self,prev_state, dt, pedal, steering):
x_t = prev_state[0]
y_t = prev_state[1]
psi_t = prev_state[2]
v_t = prev_state[3]
v_t = v_t + dt * pedal - v_t/25.0
x_t = x_t + dt * v_t * np.cos(psi_t)
y_t = y_t + dt * v_t * np.sin(psi_t)
psi_t += dt * v_t *np.tan(steering)/2.5
return [x_t, y_t, psi_t, v_t]
def cost_function(self,u, *args):
state = args[0]
ref = args[1]
cost = 0.0
for k in range(self.horizon):
v_start = state[3]
state = self.plant_model(state, self.dt, u[k*2], u[k*2+1])
            # Position and heading error relative to the reference state.
            x_diff = abs(state[0] - ref[0])
            y_diff = abs(state[1] - ref[1])
            psi_diff = abs(state[2] - ref[2])
            # Euclidean distance from the predicted state to the obstacle.
            obs_dist_x = abs(state[0] - self.x_obs)
            obs_dist_y = abs(state[1] - self.y_obs)
            obs_dist = np.sqrt(obs_dist_x**2 + obs_dist_y**2)
            # Tracking cost: position, heading, and speed-change terms.
            cost += np.sqrt(x_diff**2+y_diff**2 + psi_diff**2 + (state[3] - v_start)**2)
            # Repulsive obstacle cost, growing as 1/distance^2.
            cost += 1/obs_dist**2*10
            # Heavy penalty on speeds above 10 km/h.
            speed_kph = state[3]*3.6
            if speed_kph > 10.0:
                cost += speed_kph * 100
return cost
sim_run(options, ModelPredictiveControl)
|
[
"numpy.sqrt",
"numpy.tan",
"sim.sim2d.sim_run",
"numpy.cos",
"numpy.sin"
] |
[((1671, 1711), 'sim.sim2d.sim_run', 'sim_run', (['options', 'ModelPredictiveControl'], {}), '(options, ModelPredictiveControl)\n', (1678, 1711), False, 'from sim.sim2d import sim_run\n'), ((1370, 1412), 'numpy.sqrt', 'np.sqrt', (['(obs_dist_x ** 2 + obs_dist_y ** 2)'], {}), '(obs_dist_x ** 2 + obs_dist_y ** 2)\n', (1377, 1412), True, 'import numpy as np\n'), ((1431, 1509), 'numpy.sqrt', 'np.sqrt', (['(x_diff ** 2 + y_diff ** 2 + psi_diff ** 2 + (state[3] - v_start) ** 2)'], {}), '(x_diff ** 2 + y_diff ** 2 + psi_diff ** 2 + (state[3] - v_start) ** 2)\n', (1438, 1509), True, 'import numpy as np\n'), ((690, 703), 'numpy.cos', 'np.cos', (['psi_t'], {}), '(psi_t)\n', (696, 703), True, 'import numpy as np\n'), ((735, 748), 'numpy.sin', 'np.sin', (['psi_t'], {}), '(psi_t)\n', (741, 748), True, 'import numpy as np\n'), ((777, 793), 'numpy.tan', 'np.tan', (['steering'], {}), '(steering)\n', (783, 793), True, 'import numpy as np\n')]
|
import numpy as np
import random
import cv2
import os
import json
from csv_utils import load_csv
import rect
import mask
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
"""
description: Plots one bounding box on image img,
this function comes from YoLov5 project.
arguments:
x(list): a box likes [x1,y1,x2,y2]
        img(np array): an OpenCV image object in BGR format
color(tuple): color to draw rectangle, such as (0,255,0)
label(str): the class name
line_thickness(int): the thickness of the line
return:
no return
"""
tl = (
line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
) # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(
img,
label,
(c1[0], c1[1] - 2),
0,
tl / 3,
[225, 255, 255],
thickness=tf,
lineType=cv2.LINE_AA,
)
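# Illustrative usage (hypothetical values, not part of the original module):
# draw a green box labelled 'defect' on a blank canvas.
#   canvas = np.zeros((480, 640, 3), dtype=np.uint8)
#   plot_one_box([50, 60, 200, 220], canvas, color=(0, 255, 0), label='defect')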
def plot_one_polygon(pts, img, color=None, label=None, line_thickness=None):
"""
    description: Plots one closed polygon on image img,
                 this function is adapted from the YoLov5 project.
    arguments:
        pts(np array): a numpy array of polygon vertices with shape [N,1,2]
        img(np array): an OpenCV image object in BGR format
        color(tuple): color to draw the polygon, such as (0,255,0)
        label(str): the class name
        line_thickness(int): the thickness of the line
return:
no return
"""
tl = (
line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
) # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
cv2.polylines(img, [pts], isClosed=True, color=color, thickness=tl)
if label:
c1 = (int(np.min(pts[:,:,0])), int(np.min(pts[:,:,1])))
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(
img,
label,
(c1[0], c1[1] - 2),
0,
tl / 3,
[225, 255, 255],
thickness=tf,
lineType=cv2.LINE_AA,
)
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('--path_imgs', required=True, help='the path to the input image folder')
ap.add_argument('--path_out', required=True, help='the path to the output folder')
    ap.add_argument('--path_csv', default='labels.csv', help='[optional] the path of a csv file that corresponds to path_imgs, default="labels.csv" in path_imgs')
    ap.add_argument('--class_map_json', default=None, help='[optional] the path of a class map json file')
args = vars(ap.parse_args())
path_imgs = args['path_imgs']
path_csv = args['path_csv'] if args['path_csv']!='labels.csv' else os.path.join(path_imgs, args['path_csv'])
output_path = args['path_out']
if args['class_map_json']:
with open(args['class_map_json']) as f:
class_map = json.load(f)
print(f'loaded class map: {class_map}')
else:
class_map = None
if not os.path.isfile(path_csv):
raise Exception(f'Not found file: {path_csv}')
assert path_imgs!=output_path, 'output path must be different with input path'
if not os.path.isdir(output_path):
os.makedirs(output_path)
fname_to_shape, class_map = load_csv(path_csv, path_imgs, class_map)
min_id = min(class_map.values())
colors = [(0,0,255),(255,0,0),(0,255,0),(102,51,153),(255,140,0),(105,105,105),(127,25,27),(9,200,100)]
color_map = {}
for cls in class_map:
i = class_map[cls]
if min_id != 0:
i -= 1
if i < len(colors):
color_map[cls] = colors[i]
else:
color_map[cls] = tuple([random.randint(0,255) for _ in range(3)])
for im_name in fname_to_shape:
print(f'[FILE] {im_name}')
shapes = fname_to_shape[im_name]
im = cv2.imread(shapes[0].fullpath)
for shape in shapes:
if isinstance(shape, rect.Rect):
box = shape.up_left + shape.bottom_right
plot_one_box(box, im, label=shape.category, color=color_map[shape.category])
elif isinstance(shape, mask.Mask):
pts = np.array([[x,y] for x,y in zip(shape.X,shape.Y)])
pts = pts.reshape((-1, 1, 2))
plot_one_polygon(pts, im, label=shape.category, color=color_map[shape.category])
outname = os.path.join(output_path, im_name)
cv2.imwrite(outname, im)
|
[
"cv2.rectangle",
"cv2.imwrite",
"argparse.ArgumentParser",
"os.makedirs",
"cv2.polylines",
"csv_utils.load_csv",
"os.path.join",
"cv2.putText",
"os.path.isfile",
"os.path.isdir",
"numpy.min",
"json.load",
"cv2.getTextSize",
"cv2.imread",
"random.randint"
] |
[((882, 951), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color'], {'thickness': 'tl', 'lineType': 'cv2.LINE_AA'}), '(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\n', (895, 951), False, 'import cv2\n'), ((2169, 2236), 'cv2.polylines', 'cv2.polylines', (['img', '[pts]'], {'isClosed': '(True)', 'color': 'color', 'thickness': 'tl'}), '(img, [pts], isClosed=True, color=color, thickness=tl)\n', (2182, 2236), False, 'import cv2\n'), ((2843, 2868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2866, 2868), False, 'import argparse\n'), ((4015, 4055), 'csv_utils.load_csv', 'load_csv', (['path_csv', 'path_imgs', 'class_map'], {}), '(path_csv, path_imgs, class_map)\n', (4023, 4055), False, 'from csv_utils import load_csv\n'), ((1152, 1202), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color', '(-1)', 'cv2.LINE_AA'], {}), '(img, c1, c2, color, -1, cv2.LINE_AA)\n', (1165, 1202), False, 'import cv2\n'), ((1221, 1332), 'cv2.putText', 'cv2.putText', (['img', 'label', '(c1[0], c1[1] - 2)', '(0)', '(tl / 3)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (1232, 1332), False, 'import cv2\n'), ((2501, 2551), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color', '(-1)', 'cv2.LINE_AA'], {}), '(img, c1, c2, color, -1, cv2.LINE_AA)\n', (2514, 2551), False, 'import cv2\n'), ((2570, 2681), 'cv2.putText', 'cv2.putText', (['img', 'label', '(c1[0], c1[1] - 2)', '(0)', '(tl / 3)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (2581, 2681), False, 'import cv2\n'), ((3456, 3497), 'os.path.join', 'os.path.join', (['path_imgs', "args['path_csv']"], {}), "(path_imgs, args['path_csv'])\n", (3468, 3497), False, 'import os\n'), ((3744, 3768), 'os.path.isfile', 'os.path.isfile', (['path_csv'], {}), '(path_csv)\n', (3758, 3768), False, 'import os\n'), ((3921, 3947), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (3934, 3947), False, 'import os\n'), ((3957, 3981), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (3968, 3981), False, 'import os\n'), ((4600, 4630), 'cv2.imread', 'cv2.imread', (['shapes[0].fullpath'], {}), '(shapes[0].fullpath)\n', (4610, 4630), False, 'import cv2\n'), ((5135, 5169), 'os.path.join', 'os.path.join', (['output_path', 'im_name'], {}), '(output_path, im_name)\n', (5147, 5169), False, 'import os\n'), ((5178, 5202), 'cv2.imwrite', 'cv2.imwrite', (['outname', 'im'], {}), '(outname, im)\n', (5189, 5202), False, 'import cv2\n'), ((776, 798), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (790, 798), False, 'import random\n'), ((1029, 1086), 'cv2.getTextSize', 'cv2.getTextSize', (['label', '(0)'], {'fontScale': '(tl / 3)', 'thickness': 'tf'}), '(label, 0, fontScale=tl / 3, thickness=tf)\n', (1044, 1086), False, 'import cv2\n'), ((2123, 2145), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2137, 2145), False, 'import random\n'), ((2378, 2435), 'cv2.getTextSize', 'cv2.getTextSize', (['label', '(0)'], {'fontScale': '(tl / 3)', 'thickness': 'tf'}), '(label, 0, fontScale=tl / 3, thickness=tf)\n', (2393, 2435), False, 'import cv2\n'), ((3636, 3648), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3645, 3648), False, 'import json\n'), ((2269, 2289), 'numpy.min', 'np.min', (['pts[:, :, 0]'], 
{}), '(pts[:, :, 0])\n', (2275, 2289), True, 'import numpy as np\n'), ((2294, 2314), 'numpy.min', 'np.min', (['pts[:, :, 1]'], {}), '(pts[:, :, 1])\n', (2300, 2314), True, 'import numpy as np\n'), ((4433, 4455), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (4447, 4455), False, 'import random\n')]
|
""" This is a modified version of simple.py script bundled with Read Until API"""
import argparse
import logging
import sys
import traceback
import time
import numpy
import read_until
import cffi
import os
import h5py
import glob
import concurrent.futures
import dyss
def _get_parser():
parser = argparse.ArgumentParser('Dyss -- a tiny program for selective sequencing on MinION')
parser.add_argument('--port', type=int, default=8000,
help='MinKNOW server port.')
parser.add_argument('--analysis_delay', type=int, default=1,
help='Period to wait before starting analysis.')
parser.add_argument('--run_time', type=int, default=900,
help='Period to run the analysis.')
parser.add_argument('--min_chunk_size', type=int, default=3500,
help='Minimum read chunk size to receive.')
parser.add_argument('--control_group', default=2, type=int,
help='Inverse proportion of channels in control group.')
parser.add_argument('--batch_size', default=30, type=int,
                        help='Number of reads to pull from the client per analysis batch.')
parser.add_argument(
'--debug', help="Print all debugging information",
action="store_const", dest="log_level",
const=logging.DEBUG, default=logging.WARNING,
)
parser.add_argument(
'--verbose', help="Print verbose messaging.",
action="store_const", dest="log_level",
const=logging.INFO,
)
parser.add_argument('--num_scouts', default=14, type=int,
help='number of scouts. Default is 14')
parser.add_argument('--num_packs', default=3, type=int,
help='number of packs. Default is 3')
parser.add_argument('--reference', required=True,
help='reference seqence to be amplified. Currently reference size is bounded by 100Kbp.')
parser.add_argument('--model', required=True,
help='model file.')
parser.add_argument('--param', required=True,
help='training data.')
parser.add_argument('--power', default=9, type=int,
help='chunking power. Integer type. Default is 9.')
parser.add_argument('--referencesize', default=400000, type=int,
help='Reference size(Event num = 2*bp). Default is 400000')
return parser
def signal_based_analysis(client, classifier, batch_size=30, delay=1, throttle=0.5, control_group=16):
"""A tiny analysis function based on raw signal comparison.
:param client: an instance of a `ReadUntilClient` object.
:param batch_size: number of reads to pull from `client` at a time.
:param delay: number of seconds to wait before starting analysis.
:param throttle: minimum interval between requests to `client`.
    :param classifier: an instance of a Dyss classifier object used to classify raw signal chunks.
    :param control_group: inverse proportion of channels left untouched as a control group.
"""
logger = logging.getLogger('Dyss')
logger.warn(
        'Initializing Dyss classification. '
        'When the debug or verbose flag is on, it logs literally all inputs. '
        'If you want to apply this application to a real sequencing experiment, '
        'it is highly recommended to turn these flags off.'
)
# we sleep a little simply to ensure the client has started initialised
logger.info('Starting analysis of reads in {}s.'.format(delay))
time.sleep(delay)
while client.is_running:
        # If there are too many queries, reject them all.
if client.queue_length > 300:
read_batch = client.get_read_chunks(batch_size = client.queue_length, last = True)
for (channel, read) in read_batch:
read.raw_data = read_until.NullRaw
if channel % control_group != 0:
client.unblock_read(channel, read.number)
client.stop_receiving_read(channel, read.number)
t0 = time.time()
        # Then run the usual classification step
read_batch = client.get_read_chunks(batch_size=batch_size, last=True)
# convert the read data into a numpy array of correct type
queries = [(read.id, channel,read.number,
numpy.fromstring(read.raw_data, client.signal_dtype).tolist()
) for (channel,read) in read_batch if channel % control_group != 0]
querylen = len(queries)
# clear the raw reads from allocated memory
for (channel,read) in read_batch:
read.raw_data = read_until.NullRaw
if channel % control_group == 0:
client.stop_receiving_read(channel, read.number)
result = classifier.batch_classify(queries)
if result is not None:
for (status,channel,number,id) in result:
if status == 0:
                    # The i th read doesn't seem to be in the target region. Reject it.
client.unblock_read(channel, number)
client.stop_receiving_read(channel, number)
logger.info('Rejected {} {} {}'.format(id,channel,number))
elif status == 1:
# The i th read seems to be in the target region.
client.stop_receiving_read(channel, number)
logger.info('Accepted {} {} {}'.format(id,channel,number))
else:
logger.info('Chunked {} {} {}'.format(id,channel, number))
# else, the i th read didn't have enough signal. Keep going.
# limit the rate at which we make requests
t1 = time.time()
if t0 + throttle > t1:
time.sleep(throttle + t0 - t1)
logger.info('process {} reads in {}.'.format(querylen, t1-t0))
logger.info('Finished analysis of reads.')
classifier.free()
def main():
args = _get_parser().parse_args()
logging.basicConfig(format='[%(asctime)s - %(name)s] %(message)s',
datefmt='%H:%M:%S', level=args.log_level)
logger = logging.getLogger('Manager')
classifier = dyss.Dyss(num_scouts=args.num_scouts,
num_packs=args.num_packs,
reference=args.reference,
model=args.model,
param=args.param,
power=args.power,
referencesize=args.referencesize)
read_until_client = read_until.ReadUntilClient(
mk_port=args.port, one_chunk=False, filter_strands=True
)
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = list()
futures.append(executor.submit(
read_until_client.run, runner_kwargs={
'run_time':args.run_time, 'min_chunk_size':args.min_chunk_size
}
))
futures.append(executor.submit(
signal_based_analysis, read_until_client, classifier,
batch_size=args.batch_size, delay=args.analysis_delay,control_group=args.control_group
))
for f in concurrent.futures.as_completed(futures):
if f.exception() is not None:
logger.warning(f.exception())
if __name__=="__main__":
main()
|
[
"logging.getLogger",
"logging.basicConfig",
"argparse.ArgumentParser",
"dyss.Dyss",
"time.sleep",
"read_until.ReadUntilClient",
"numpy.fromstring",
"time.time"
] |
[((301, 390), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Dyss -- a tiny program for selective sequencing on MinION"""'], {}), "(\n 'Dyss -- a tiny program for selective sequencing on MinION')\n", (324, 390), False, 'import argparse\n'), ((3065, 3090), 'logging.getLogger', 'logging.getLogger', (['"""Dyss"""'], {}), "('Dyss')\n", (3082, 3090), False, 'import logging\n'), ((3528, 3545), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (3538, 3545), False, 'import time\n'), ((6012, 6125), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s - %(name)s] %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'args.log_level'}), "(format='[%(asctime)s - %(name)s] %(message)s', datefmt=\n '%H:%M:%S', level=args.log_level)\n", (6031, 6125), False, 'import logging\n'), ((6158, 6186), 'logging.getLogger', 'logging.getLogger', (['"""Manager"""'], {}), "('Manager')\n", (6175, 6186), False, 'import logging\n'), ((6204, 6390), 'dyss.Dyss', 'dyss.Dyss', ([], {'num_scouts': 'args.num_scouts', 'num_packs': 'args.num_packs', 'reference': 'args.reference', 'model': 'args.model', 'param': 'args.param', 'power': 'args.power', 'referencesize': 'args.referencesize'}), '(num_scouts=args.num_scouts, num_packs=args.num_packs, reference=\n args.reference, model=args.model, param=args.param, power=args.power,\n referencesize=args.referencesize)\n', (6213, 6390), False, 'import dyss\n'), ((6533, 6620), 'read_until.ReadUntilClient', 'read_until.ReadUntilClient', ([], {'mk_port': 'args.port', 'one_chunk': '(False)', 'filter_strands': '(True)'}), '(mk_port=args.port, one_chunk=False,\n filter_strands=True)\n', (6559, 6620), False, 'import read_until\n'), ((4057, 4068), 'time.time', 'time.time', ([], {}), '()\n', (4066, 4068), False, 'import time\n'), ((5720, 5731), 'time.time', 'time.time', ([], {}), '()\n', (5729, 5731), False, 'import time\n'), ((5775, 5805), 'time.sleep', 'time.sleep', (['(throttle + t0 - t1)'], {}), '(throttle + t0 - t1)\n', (5785, 5805), False, 'import time\n'), ((4335, 4387), 'numpy.fromstring', 'numpy.fromstring', (['read.raw_data', 'client.signal_dtype'], {}), '(read.raw_data, client.signal_dtype)\n', (4351, 4387), False, 'import numpy\n')]
|
import numpy as np
import xarray as xr
from numpy import asarray
import scipy.sparse
from itertools import product
from .util import get_shape_of_data
from .grid_stretching_transforms import scs_transform
from .constants import R_EARTH_m
def get_troposphere_mask(ds):
"""
Returns a mask array for picking out the tropospheric grid boxes.
Args:
ds: xarray Dataset
Dataset containing certain met field variables (i.e.
Met_TropLev, Met_BXHEIGHT).
Returns:
tropmask: numpy ndarray
Tropospheric mask. False denotes grid boxes that are
in the troposphere and True in the stratosphere
(as per Python masking logic).
"""
# ==================================================================
# Initialization
# ==================================================================
# Make sure ds is an xarray Dataset object
if not isinstance(ds, xr.Dataset):
raise TypeError("The ds argument must be an xarray Dataset!")
# Make sure certain variables are found
if "Met_BXHEIGHT" not in ds.data_vars.keys():
raise ValueError("Met_BXHEIGHT could not be found!")
if "Met_TropLev" not in ds.data_vars.keys():
raise ValueError("Met_TropLev could not be found!")
# Mask of tropospheric grid boxes in the Ref dataset
shape = get_shape_of_data(np.squeeze(ds["Met_BXHEIGHT"]))
# Determine if this is GCHP data
is_gchp = "nf" in ds["Met_BXHEIGHT"].dims
# ==================================================================
# Create the mask arrays for the troposphere
#
# Convert the Met_TropLev DataArray objects to numpy ndarrays of
# integer. Also subtract 1 to convert from Fortran to Python
# array index notation.
# ==================================================================
multi_time_slices = (is_gchp and len(shape) == 5) or \
(not is_gchp and len(shape) == 4)
if multi_time_slices:
# --------------------------------------------------------------
# GCC: There are multiple time slices
# --------------------------------------------------------------
# Create the tropmask array with dims
# (time, lev, nf*lat*lon) for GCHP, or
# (time, lev, lat*lon ) for GCC
tropmask = np.ones((shape[0], shape[1],
np.prod(np.array(shape[2:]))), bool)
# Loop over each time
for t in range(tropmask.shape[0]):
# Pick the tropopause level and make a 1-D array
values = ds["Met_TropLev"].isel(time=t).values
lev = np.int_(np.squeeze(values) - 1)
lev_1d = lev.flatten()
# Create the tropospheric mask array
for x in range(tropmask.shape[2]):
tropmask[t, 0: lev_1d[x], x] = False
else:
# --------------------------------------------------------------
# There is only one time slice
# --------------------------------------------------------------
# Create the tropmask array with dims (lev, lat*lon)
tropmask = np.ones((shape[0], np.prod(np.array(shape[1:]))), bool)
# Pick the tropopause level and make a 1-D array
values = ds["Met_TropLev"].values
lev = np.int_(np.squeeze(values) - 1)
lev_1d = lev.flatten()
# Create the tropospheric mask array
for x in range(tropmask.shape[1]):
tropmask[0: lev_1d[x], x] = False
# Reshape into the same shape as Met_BxHeight
return tropmask.reshape(shape)
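# Sketch of typical use (hedged; the variable and species names below are
# hypothetical, not part of the original module): mask out stratospheric grid
# boxes of a concentration field with the same shape as the mask.
#   tropmask = get_troposphere_mask(ds)
#   trop_only = np.ma.masked_array(ds["SpeciesConc_O3"].values, mask=tropmask)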
def get_input_res(data):
"""
Returns resolution of dataset passed to compare_single_level or compare_zonal_means
Args:
data: xarray Dataset
Input GEOS-Chem dataset
Returns:
res: str or int
Lat/lon res of the form 'latresxlonres' or cubed-sphere resolution
gridtype: str
'll' for lat/lon or 'cs' for cubed-sphere
"""
vdims = data.dims
if "lat" in vdims and "lon" in vdims:
lat = data["lat"].values
lon = data["lon"].values
if lat.size / 6 == lon.size:
return lon.size, "cs"
else:
lat.sort()
lon.sort()
# use increment of second and third coordinates
# to avoid polar mischief
lat_res = np.abs(lat[2] - lat[1])
lon_res = np.abs(lon[2] - lon[1])
return str(lat_res) + "x" + str(lon_res), "ll"
else:
#print("grid is cs: ", vdims)
# GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
if isinstance(data.dims, tuple):
return len(data["Xdim"].values), "cs"
else:
return data.dims["Xdim"], "cs"
def call_make_grid(res, gridtype, in_extent=[-180, 180, -90, 90],
out_extent=[-180, 180, -90, 90], sg_params=[1, 170, -90]):
"""
    Create a horizontal grid (and, for cubed-sphere, a list of grids) for the given resolution and grid type
Args:
res: str or int
Resolution of grid (format 'latxlon' or csres)
gridtype: str
'll' for lat/lon or 'cs' for cubed-sphere
Keyword Args (optional):
in_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of input data
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
out_extent: list[float, float, float, float]
Desired minimum and maximum latitude and longitude of output grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
sg_params: list[float, float, float] (stretch_factor, target_longitude, target_latitude)
Desired stretched-grid parameters in the format
[stretch_factor, target_longitude, target_latitude].
Will trigger stretched-grid creation if not default values.
Default value: [1, 170, -90] (no stretching)
Returns:
[grid, grid_list]: list(dict, list(dict))
Returns the created grid.
grid_list is a list of grids if gridtype is 'cs', else it is None
"""
# call appropriate make_grid function and return new grid
if gridtype == "ll":
return [make_grid_LL(res, in_extent, out_extent), None]
elif sg_params == [1, 170, -90]:
# standard CS
return make_grid_CS(res)
else:
return make_grid_SG(res, *sg_params)
def get_grid_extents(data, edges=True):
"""
Get min and max lat and lon from an input GEOS-Chem xarray dataset or grid dict
Args:
data: xarray Dataset or dict
A GEOS-Chem dataset or a grid dict
edges (optional): bool
Whether grid extents should use cell edges instead of centers
Default value: True
Returns:
minlon: float
Minimum longitude of data grid
maxlon: float
Maximum longitude of data grid
minlat: float
Minimum latitude of data grid
maxlat: float
Maximum latitude of data grid
"""
if isinstance(data, dict):
if "lon_b" in data and edges:
            return (np.min(data["lon_b"]), np.max(data["lon_b"]),
                    np.min(data["lat_b"]), np.max(data["lat_b"]))
        elif not edges:
            return (np.min(data["lon"]), np.max(data["lon"]),
                    np.min(data["lat"]), np.max(data["lat"]))
else:
return -180, 180, -90, 90
elif "lat" in data.dims and "lon" in data.dims:
lat = data["lat"].values
lon = data["lon"].values
if lat.size / 6 == lon.size:
# No extents for CS plots right now
return -180, 180, -90, 90
else:
lat = np.sort(lat)
minlat = np.min(lat)
if abs(abs(lat[1]) - abs(lat[0])
) != abs(abs(lat[2]) - abs(lat[1])):
#pole is cutoff
minlat = minlat - 1
maxlat = np.max(lat)
if abs(abs(lat[-1]) - abs(lat[-2])
) != abs(abs(lat[-2]) - abs(lat[-3])):
maxlat = maxlat + 1
# add longitude res to max longitude
lon = np.sort(lon)
minlon = np.min(lon)
maxlon = np.max(lon) + abs(abs(lon[-1]) - abs(lon[-2]))
return minlon, maxlon, minlat, maxlat
else:
# GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
return -180, 180, -90, 90
def get_vert_grid(dataset, AP=[], BP=[]):
"""
Determine vertical grid of input dataset
Args:
dataset: xarray Dataset
A GEOS-Chem output dataset
Keyword Args (optional):
AP: list-like type
Hybrid grid parameter A in hPa
Default value: []
BP: list-like type
Hybrid grid parameter B (unitless)
Default value: []
Returns:
p_edge: numpy array
Edge pressure values for vertical grid
p_mid: numpy array
Midpoint pressure values for vertical grid
nlev: int
Number of levels in vertical grid
"""
if dataset.sizes["lev"] in (72, 73):
return GEOS_72L_grid.p_edge(), GEOS_72L_grid.p_mid(), 72
elif dataset.sizes["lev"] in (47, 48):
return GEOS_47L_grid.p_edge(), GEOS_47L_grid.p_mid(), 47
elif AP == [] or BP == []:
if dataset.sizes["lev"] == 1:
AP = [1, 1]
BP = [1]
new_grid = vert_grid(AP, BP)
return new_grid.p_edge(), new_grid.p_mid(), np.size(AP)
else:
raise ValueError(
"Only 72/73 or 47/48 level vertical grids are automatically determined" +
"from input dataset by get_vert_grid(), please pass grid parameters AP and BP" +
"as keyword arguments")
else:
new_grid = vert_grid(AP, BP)
return new_grid.p_edge(), new_grid.p_mid(), np.size(AP)
def get_pressure_indices(pedge, pres_range):
"""
Get indices where edge pressure values are within a given pressure range
Args:
pedge: numpy array
A GEOS-Chem output dataset
pres_range: list(float, float)
Contains minimum and maximum pressure
Returns:
numpy array
Indices where edge pressure values are within a given pressure range
"""
return np.where(
(pedge <= np.max(pres_range)) & (
pedge >= np.min(pres_range)))[0]
def pad_pressure_edges(pedge_ind, max_ind, pmid_len):
"""
Add outer indices to edge pressure index list
Args:
pedge_ind: list
List of edge pressure indices
max_ind: int
Maximum index
pmid_len: int
Length of pmid which should not be exceeded by indices
Returns:
pedge_ind: list
List of edge pressure indices, possibly with new minimum and maximum indices
"""
if max_ind > pmid_len:
# don't overstep array bounds for full array
max_ind = max_ind - 1
if min(pedge_ind) != 0:
pedge_ind = np.append(min(pedge_ind) - 1, pedge_ind)
if max(pedge_ind) != max_ind:
pedge_ind = np.append(pedge_ind, max(pedge_ind) + 1)
return pedge_ind
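# Hedged sketch of the typical pressure-range workflow (variable names are
# illustrative, not from the original code):
#
#   pedge, pmid, _ = get_vert_grid(ds)
#   pedge_ind = get_pressure_indices(pedge, [100.0, 1000.0])        # pressure range in hPa
#   pedge_ind = pad_pressure_edges(pedge_ind, max_ind, len(pmid))   # max_ind: largest allowed index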
def get_ind_of_pres(dataset, pres):
"""
Get index of pressure level that contains the requested pressure value.
Args:
dataset: xarray Dataset
GEOS-Chem dataset
pres: int or float
Desired pressure value
Returns:
index: int
Index of level in dataset that corresponds to requested pressure
"""
pedge, pmid, _ = get_vert_grid(dataset)
converted_dataset = convert_lev_to_pres(dataset, pmid, pedge)
return np.argmin(np.abs(converted_dataset['lev'] - pres).values)
def convert_lev_to_pres(dataset, pmid, pedge, lev_type='pmid'):
"""
Convert lev dimension to pressure in a GEOS-Chem dataset
Args:
dataset: xarray Dataset
GEOS-Chem dataset
pmid: np.array
Midpoint pressure values
pedge: np.array
Edge pressure values
lev_type (optional): str
Denote whether lev is 'pedge' or 'pmid' if grid is not 72/73 or 47/48 levels
Default value: 'pmid'
Returns:
dataset: xarray Dataset
Input dataset with "lev" dimension values replaced with pressure values
"""
if dataset.sizes["lev"] in (72, 47):
dataset["lev"] = pmid
elif dataset.sizes["lev"] in (73, 48):
dataset["lev"] = pedge
elif lev_type == 'pmid':
print('Warning: Assuming levels correspond with midpoint pressures')
dataset["lev"] = pmid
else:
dataset["lev"] = pedge
dataset["lev"].attrs["unit"] = "hPa"
dataset["lev"].attrs["long_name"] = "level pressure"
return dataset
class vert_grid:
def __init__(self, AP=None, BP=None, p_sfc=1013.25):
        if (AP is None) or (BP is None) or (len(AP) != len(BP)):
# Throw error?
print('Inconsistent vertical grid specification')
self.AP = np.array(AP)
self.BP = np.array(BP)
self.p_sfc = p_sfc
def p_edge(self):
# Calculate pressure edges using eta coordinate
return self.AP + self.BP * self.p_sfc
def p_mid(self):
p_edge = self.p_edge()
return (p_edge[1:] + p_edge[:-1]) / 2.0
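# Hedged worked example (toy numbers, not a real model grid): with AP = [0.0, 0.01] hPa
# and BP = [1.0, 0.0], the hybrid formula p = AP + BP * p_sfc gives edge pressures
# [1013.25, 0.01] hPa and a single midpoint of ~506.63 hPa:
#
#   toy = vert_grid(AP=[0.0, 0.01], BP=[1.0, 0.0])
#   toy.p_edge()   # array([1013.25, 0.01])
#   toy.p_mid()    # array([506.63])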
# Standard vertical grids
_GEOS_72L_AP = np.array([0.000000e+00,
4.804826e-02,
6.593752e+00,
1.313480e+01,
1.961311e+01,
2.609201e+01,
3.257081e+01,
3.898201e+01,
4.533901e+01,
5.169611e+01,
5.805321e+01,
6.436264e+01,
7.062198e+01,
7.883422e+01,
8.909992e+01,
9.936521e+01,
1.091817e+02,
1.189586e+02,
1.286959e+02,
1.429100e+02,
1.562600e+02,
1.696090e+02,
1.816190e+02,
1.930970e+02,
2.032590e+02,
2.121500e+02,
2.187760e+02,
2.238980e+02,
2.243630e+02,
2.168650e+02,
2.011920e+02,
1.769300e+02,
1.503930e+02,
1.278370e+02,
1.086630e+02,
9.236572e+01,
7.851231e+01,
6.660341e+01,
5.638791e+01,
4.764391e+01,
4.017541e+01,
3.381001e+01,
2.836781e+01,
2.373041e+01,
1.979160e+01,
1.645710e+01,
1.364340e+01,
1.127690e+01,
9.292942e+00,
7.619842e+00,
6.216801e+00,
5.046801e+00,
4.076571e+00,
3.276431e+00,
2.620211e+00,
2.084970e+00,
1.650790e+00,
1.300510e+00,
1.019440e+00,
7.951341e-01,
6.167791e-01,
4.758061e-01,
3.650411e-01,
2.785261e-01,
2.113490e-01,
1.594950e-01,
1.197030e-01,
8.934502e-02,
6.600001e-02,
4.758501e-02,
3.270000e-02,
2.000000e-02,
1.000000e-02])
_GEOS_72L_BP = np.array([1.000000e+00,
9.849520e-01,
9.634060e-01,
9.418650e-01,
9.203870e-01,
8.989080e-01,
8.774290e-01,
8.560180e-01,
8.346609e-01,
8.133039e-01,
7.919469e-01,
7.706375e-01,
7.493782e-01,
7.211660e-01,
6.858999e-01,
6.506349e-01,
6.158184e-01,
5.810415e-01,
5.463042e-01,
4.945902e-01,
4.437402e-01,
3.928911e-01,
3.433811e-01,
2.944031e-01,
2.467411e-01,
2.003501e-01,
1.562241e-01,
1.136021e-01,
6.372006e-02,
2.801004e-02,
6.960025e-03,
8.175413e-09,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00])
GEOS_72L_grid = vert_grid(_GEOS_72L_AP, _GEOS_72L_BP)
# Reduced grid
_GEOS_47L_AP = np.zeros(48)
_GEOS_47L_BP = np.zeros(48)
# Fill in the values for the surface
_GEOS_47L_AP[0] = _GEOS_72L_AP[0]
_GEOS_47L_BP[0] = _GEOS_72L_BP[0]
# Build the GEOS 72-layer to 47-layer mapping matrix at the same time
_xmat_i = np.zeros((72))
_xmat_j = np.zeros((72))
_xmat_s = np.zeros((72))
# Index here is the 1-indexed layer number
for _i_lev in range(1, 37):
# Map from 1-indexing to 0-indexing
_x_lev = _i_lev - 1
# Sparse matrix for regridding
# Below layer 37, it's 1:1
_xct = _x_lev
_xmat_i[_xct] = _x_lev
_xmat_j[_xct] = _x_lev
_xmat_s[_xct] = 1.0
# Copy over the pressure edge for the top of the grid cell
_GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev]
_GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev]
# Now deal with the lumped layers
_skip_size_vec = [2, 4]
_number_lumped = [4, 7]
# Initialize
_i_lev = 36
_i_lev_72 = 36
for _lump_seg in range(2):
_skip_size = _skip_size_vec[_lump_seg]
# 1-indexed starting point in the 47-layer grid
_first_lev_47 = _i_lev + 1
_first_lev_72 = _i_lev_72 + 1
# Loop over the coarse vertical levels (47-layer grid)
for _i_lev_offset in range(_number_lumped[_lump_seg]):
# i_lev is the index for the current level on the 47-level grid
_i_lev = _first_lev_47 + _i_lev_offset
# Map from 1-indexing to 0-indexing
_x_lev = _i_lev - 1
# Get the 1-indexed location of the last layer in the 72-layer grid
# which is below the start of the current lumping region
_i_lev_72_base = _first_lev_72 + (_i_lev_offset * _skip_size) - 1
# Get the 1-indexed location of the uppermost level in the 72-layer
# grid which is within the target layer on the 47-layer grid
_i_lev_72 = _i_lev_72_base + _skip_size
# Do the pressure edges first
# These are the 0-indexed locations of the upper edge for the
# target layers in 47- and 72-layer grids
_GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev_72]
_GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev_72]
# Get the total pressure delta across the layer on the lumped grid
# We are within the fixed pressure levels so don't need to account
# for variations in surface pressure
_dp_total = _GEOS_47L_AP[_i_lev - 1] - _GEOS_47L_AP[_i_lev]
# Now figure out the mapping
for _i_lev_offset_72 in range(_skip_size):
# Source layer in the 72 layer grid (0-indexed)
_x_lev_72 = _i_lev_72_base + _i_lev_offset_72
_xct = _x_lev_72
_xmat_i[_xct] = _x_lev_72
# Target in the 47 layer grid
_xmat_j[_xct] = _x_lev
# Proportion of 72-layer grid cell, by pressure, within expanded
# layer
_xmat_s[_xct] = (_GEOS_72L_AP[_x_lev_72] -
_GEOS_72L_AP[_x_lev_72 + 1]) / _dp_total
_start_pt = _i_lev
# Do last entry separately (no layer to go with it)
_xmat_72to47 = scipy.sparse.coo_matrix(
(_xmat_s, (_xmat_i, _xmat_j)), shape=(72, 47))
GEOS_47L_grid = vert_grid(_GEOS_47L_AP, _GEOS_47L_BP)
# CAM 26-layer grid
_CAM_26L_AP = np.flip(np.array([219.4067, 489.5209, 988.2418, 1805.201,
2983.724, 4462.334, 6160.587, 7851.243,
7731.271, 7590.131, 7424.086, 7228.744,
6998.933, 6728.574, 6410.509, 6036.322,
5596.111, 5078.225, 4468.96, 3752.191,
2908.949, 2084.739, 1334.443, 708.499,
252.136, 0., 0.]), axis=0) * 0.01
_CAM_26L_BP = np.flip(np.array([0., 0., 0., 0.,
0., 0., 0., 0.,
0.01505309, 0.03276228, 0.05359622, 0.07810627,
0.1069411, 0.14086370, 0.180772, 0.227722,
0.2829562, 0.3479364, 0.4243822, 0.5143168,
0.6201202, 0.7235355, 0.8176768, 0.8962153,
0.9534761, 0.9851122, 1.]), axis=0)
CAM_26L_grid = vert_grid(_CAM_26L_AP, _CAM_26L_BP)
def make_grid_LL(llres, in_extent=[-180, 180, -90, 90], out_extent=[]):
"""
Creates a lat/lon grid description.
Args:
llres: str
lat/lon resolution in 'latxlon' format (e.g. '4x5')
Keyword Args (optional):
in_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of initial grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
out_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of target grid
in the format [minlon, maxlon, minlat, maxlat]. Needed when intending
to use grid to trim extent of input data
Default value: [] (assumes value of in_extent)
Returns:
llgrid: dict
dict grid description of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
"""
# get initial bounds of grid
[minlon, maxlon, minlat, maxlat] = in_extent
[dlat, dlon] = list(map(float, llres.split('x')))
lon_b = np.linspace(minlon - dlon / 2, maxlon - dlon /
2, int((maxlon - minlon) / dlon) + 1)
lat_b = np.linspace(minlat - dlat / 2, maxlat + dlat / 2,
int((maxlat - minlat) / dlat) + 2)
if minlat <= -90:
lat_b = lat_b.clip(-90, None)
if maxlat >= 90:
lat_b = lat_b.clip(None, 90)
lat = (lat_b[1:] + lat_b[:-1]) / 2
lon = (lon_b[1:] + lon_b[:-1]) / 2
# trim grid bounds when your desired extent is not the same as your
# initial grid extent
if out_extent == []:
out_extent = in_extent
if out_extent != in_extent:
[minlon, maxlon, minlat, maxlat] = out_extent
minlon_ind = np.nonzero(lon >= minlon)
maxlon_ind = np.nonzero(lon <= maxlon)
lon_inds = np.intersect1d(minlon_ind, maxlon_ind)
lon = lon[lon_inds]
# make sure to get edges of grid correctly
lon_inds = np.append(lon_inds, np.max(lon_inds) + 1)
lon_b = lon_b[lon_inds]
minlat_ind = np.nonzero(lat >= minlat)
maxlat_ind = np.nonzero(lat <= maxlat)
lat_inds = np.intersect1d(minlat_ind, maxlat_ind)
lat = lat[lat_inds]
# make sure to get edges of grid correctly
lat_inds = np.append(lat_inds, np.max(lat_inds) + 1)
lat_b = lat_b[lat_inds]
llgrid = {'lat': lat,
'lon': lon,
'lat_b': lat_b,
'lon_b': lon_b}
return llgrid
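# Hedged usage sketch: a global 4x5 degree grid description, and a regional one
# trimmed to a smaller extent ([minlon, maxlon, minlat, maxlat]; values illustrative):
#
#   global_grid = make_grid_LL("4x5")
#   regional_grid = make_grid_LL("4x5", out_extent=[-130, -60, 20, 60])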
def make_grid_CS(csres):
"""
Creates a cubed-sphere grid description.
Args:
csres: int
cubed-sphere resolution of target grid
Returns:
[csgrid, csgrid_list]: list[dict, list[dict]]
csgrid is a dict of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
where each value has an extra face dimension of length 6.
csgrid_list is a list of dicts separated by face index
"""
csgrid = csgrid_GMAO(csres)
csgrid_list = [None] * 6
for i in range(6):
csgrid_list[i] = {'lat': csgrid['lat'][i],
'lon': csgrid['lon'][i],
'lat_b': csgrid['lat_b'][i],
'lon_b': csgrid['lon_b'][i]}
return [csgrid, csgrid_list]
def make_grid_SG(csres, stretch_factor, target_lon, target_lat):
"""
Creates a stretched-grid grid description.
Args:
csres: int
cubed-sphere resolution of target grid
stretch_factor: float
stretch factor of target grid
target_lon: float
target stretching longitude of target grid
        target_lat: float
target stretching latitude of target grid
Returns:
[csgrid, csgrid_list]: list[dict, list[dict]]
csgrid is a dict of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
where each value has an extra face dimension of length 6.
csgrid_list is a list of dicts separated by face index
"""
csgrid = csgrid_GMAO(csres, offset=0)
csgrid_list = [None] * 6
for i in range(6):
lat = csgrid['lat'][i].flatten()
lon = csgrid['lon'][i].flatten()
lon, lat = scs_transform(
lon, lat, stretch_factor, target_lon, target_lat)
lat = lat.reshape((csres, csres))
lon = lon.reshape((csres, csres))
lat_b = csgrid['lat_b'][i].flatten()
lon_b = csgrid['lon_b'][i].flatten()
lon_b, lat_b = scs_transform(
lon_b, lat_b, stretch_factor, target_lon, target_lat)
lat_b = lat_b.reshape((csres + 1, csres + 1))
lon_b = lon_b.reshape((csres + 1, csres + 1))
csgrid_list[i] = {'lat': lat,
'lon': lon,
'lat_b': lat_b,
'lon_b': lon_b}
for i in range(6):
csgrid['lat'][i] = csgrid_list[i]['lat']
csgrid['lon'][i] = csgrid_list[i]['lon']
csgrid['lat_b'][i] = csgrid_list[i]['lat_b']
csgrid['lon_b'][i] = csgrid_list[i]['lon_b']
return [csgrid, csgrid_list]
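# Hedged usage sketch: cubed-sphere and stretched-grid descriptions are built in the
# same [csgrid, csgrid_list] form (stretch factor and target point below are illustrative):
#
#   csgrid, csgrid_list = make_grid_CS(24)
#   sggrid, sggrid_list = make_grid_SG(24, 2.0, 250.0, 40.0)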
def calc_rectilinear_lon_edge(lon_stride, center_at_180):
""" Compute longitude edge vector for a rectilinear grid.
Parameters
----------
lon_stride: float
Stride length in degrees. For example, for a standard GEOS-Chem Classic
4x5 grid, lon_stride would be 5.
center_at_180: bool
Whether or not the grid should have a cell center at 180 degrees (i.e.
on the date line). If true, the first grid cell is centered on the date
line; if false, the first grid edge is on the date line.
Returns
-------
Longitudes of cell edges in degrees East.
Notes
-----
All values are forced to be between [-180,180]. For a grid with N cells in
each band, N+1 edges will be returned, with the first and last value being
duplicates.
Examples
--------
>>> from gcpy.grid.horiz import calc_rectilinear_lon_edge
    >>> calc_rectilinear_lon_edge(5.0, True)
np.array([177.5,-177.5,-172.5,...,177.5])
See Also
--------
[NONE]
"""
    n_lon = int(np.round(360.0 / lon_stride))
    lon_edge = np.linspace(-180.0, 180.0, num=n_lon + 1)
if center_at_180:
lon_edge = lon_edge - (lon_stride / 2.0)
lon_edge[lon_edge < -180.0] = lon_edge[lon_edge < -180] + 360.0
lon_edge[lon_edge > 180.0] = lon_edge[lon_edge > 180.0] - 360.0
return lon_edge
def calc_rectilinear_lat_edge(lat_stride, half_polar_grid):
""" Compute latitude edge vector for a rectilinear grid.
Parameters
----------
lat_stride: float
Stride length in degrees. For example, for a standard GEOS-Chem Classic
4x5 grid, lat_stride would be 4.
half_polar_grid: bool
Whether or not the grid should be "half-polar" (i.e. bands at poles are
half the size). In either case the grid will start and end at -/+ 90,
but when half_polar_grid is True, the first and last bands will have a
width of 1/2 the normal lat_stride.
Returns
-------
Latitudes of cell edges in degrees North.
Notes
-----
All values are forced to be between [-90,90]. For a grid with N cells in
each band, N+1 edges will be returned, with the first and last value being
duplicates.
Examples
--------
>>> from gcpy.grid.horiz import calc_rectilinear_lat_edge
    >>> calc_rectilinear_lat_edge(4.0, True)
np.array([-90,-88,-84,-80,...,84,88,90])
See Also
--------
[NONE]
"""
if half_polar_grid:
start_pt = 90.0 + (lat_stride / 2.0)
else:
start_pt = 90.0
    lat_edge = np.linspace(-1.0 * start_pt, start_pt,
                           num=1 + int(np.round(2.0 * start_pt / lat_stride)))
# Force back onto +/- 90
lat_edge[lat_edge > 90.0] = 90.0
lat_edge[lat_edge < -90.0] = -90.0
return lat_edge
def calc_rectilinear_grid_area(lon_edge, lat_edge):
""" Compute grid cell areas (in m2) for a rectilinear grid.
Parameters
----------
#TODO
Returns
-------
#TODO
Notes
-----
#TODO
Examples
--------
#TODO
See Also
--------
[NONE]
"""
    # Earth radius in m (R_EARTH_m is already in meters; no unit conversion needed here)
_radius_earth_m = R_EARTH_m
lon_edge = asarray(lon_edge, dtype=float)
lat_edge = asarray(lat_edge, dtype=float)
n_lon = (lon_edge.size) - 1
n_lat = (lat_edge.size) - 1
grid_area = np.zeros((n_lat, n_lon))
sfc_area_const = 2.0 * np.pi * _radius_earth_m * _radius_earth_m
# Longitudes loop, so need to be careful
lon_delta = calc_delta_lon(lon_edge)
# Convert into weights relative to the total circle
lon_delta = lon_delta / 360.0
# Precalculate this
sin_lat_edge = np.sin(np.deg2rad(lat_edge))
for i_lat in range(0, n_lat):
sin_diff = sin_lat_edge[i_lat + 1] - sin_lat_edge[i_lat]
grid_area[i_lat, :] = sin_diff * sfc_area_const * lon_delta
return grid_area
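# Added note: the loop above is the exact spherical zonal-band area formula,
#   A[i_lat, i_lon] = 2 * pi * R^2 * (dlon / 360) * (sin(lat_edge[i_lat + 1]) - sin(lat_edge[i_lat]))
# i.e. each cell gets the fraction dlon/360 of the band between its two latitude edges.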
def calc_delta_lon(lon_edge):
""" Compute grid cell longitude widths from an edge vector.
Parameters
----------
lon_edge: float
Vector of longitude edges, in degrees East.
Returns
-------
Width of each cell, degrees East
Notes
-----
Accounts for looping over the domain.
Examples
--------
#TODO
"""
n_lon = (lon_edge.size) - 1
lon_edge = asarray(lon_edge)
# Set up output array
lon_delta = np.zeros((n_lon))
offset = 0.0
next_lon = lon_edge[0]
for i_lon in range(0, n_lon):
last_lon = next_lon
next_lon = lon_edge[i_lon + 1] + offset
while next_lon < last_lon:
offset = offset + 360.0
next_lon = next_lon + 360.0
lon_delta[i_lon] = next_lon - last_lon
return lon_delta
def csgrid_GMAO(res, offset=-10):
"""
Return cubedsphere coordinates with GMAO face orientation
Parameters
----------
res: cubed-sphere Resolution
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
CS = CSGrid(res, offset=offset)
lon = CS.lon_center.transpose(2, 0, 1)
lon_b = CS.lon_edge.transpose(2, 0, 1)
lat = CS.lat_center.transpose(2, 0, 1)
lat_b = CS.lat_edge.transpose(2, 0, 1)
lon[lon < 0] += 360
lon_b[lon_b < 0] += 360
for a in [lon, lon_b, lat, lat_b]:
for tile in [0, 1, 3, 4]:
a[tile] = a[tile].T
for tile in [3, 4]:
a[tile] = np.flip(a[tile], 1)
for tile in [3, 4, 2, 5]:
a[tile] = np.flip(a[tile], 0)
a[2], a[5] = a[5].copy(), a[2].copy() # swap north&south pole
return {'lon': lon, 'lat': lat, 'lon_b': lon_b, 'lat_b': lat_b}
_INV_SQRT_3 = 1.0 / np.sqrt(3.0)
_ASIN_INV_SQRT_3 = np.arcsin(_INV_SQRT_3)
class CSGrid(object):
"""Generator for cubed-sphere grid geometries.
    CSGrid computes the latitudes and longitudes of cell centers and edges
on a cubed-sphere grid, providing a way to retrieve these geometries
on-the-fly if your model output data does not include them.
Attributes
----------
{lon,lat}_center: np.ndarray
lat/lon coordinates for each cell center along the cubed-sphere mesh
{lon,lat}_edge: np.ndarray
lat/lon coordinates for the midpoint of the edges separating each
element on the cubed-sphere mesh.
xyz_{center,edge}: np.ndarray
As above, except coordinates are projected into a 3D cartesian space
with common origin to the original lat/lon coordinate system, assuming
a unit sphere.
This class was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
def __init__(self, c, offset=None):
"""
Parameters
----------
c: int
Number edges along each cubed-sphere edge.
======= ====================
C Lat/Lon Resolution
------- --------------------
24 4 deg x 5 deg
48,45 2 deg x 2.5 deg
96,90 1 deg x 1.25 deg
192,180 0.5 deg x 0.625 deg
384,360 0.25 deg x 0.3125 deg
        720     0.125 deg x 0.15625 deg
offset: float (optional)
            Degrees to offset the first face's edge in the longitudinal
direction. If not passed, then the western edge of the first face
will align with the prime meridian.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
self.c = c
self.delta_y = 2. * _ASIN_INV_SQRT_3 / c
self.nx = self.ny = c + 1
self.offset = offset
self._initialize()
def _initialize(self):
c = self.c
nx, ny = self.nx, self.ny
lambda_rad = np.zeros((nx, ny))
lambda_rad[0, :] = 3. * np.pi / 4. # West edge
lambda_rad[-1, :] = 5. * np.pi / 4. # East edge
theta_rad = np.zeros((nx, ny))
theta_rad[0, :] = -_ASIN_INV_SQRT_3 + \
(self.delta_y * np.arange(c + 1)) # West edge
theta_rad[-1, :] = theta_rad[0, :] # East edge
# Cache the reflection points - our upper-left and lower-right corners
lonMir1, lonMir2 = lambda_rad[0, 0], lambda_rad[-1, -1]
latMir1, latMir2 = theta_rad[0, 0], theta_rad[-1, -1]
xyzMir1 = latlon_to_cartesian(lonMir1, latMir1)
xyzMir2 = latlon_to_cartesian(lonMir2, latMir2)
xyzCross = np.cross(xyzMir1, xyzMir2)
norm = np.sqrt(np.sum(xyzCross**2))
xyzCross /= norm
for i in range(1, c):
lonRef, latRef = lambda_rad[0, i], theta_rad[0, i]
xyzRef = np.asarray(latlon_to_cartesian(lonRef, latRef, ))
xyzDot = np.sum(xyzCross * xyzRef)
xyzImg = xyzRef - (2. * xyzDot * xyzCross)
xsImg, ysImg, zsImg = xyzImg
lonImg, latImg = cartesian_to_latlon(xsImg, ysImg, zsImg)
lambda_rad[i, 0] = lonImg
lambda_rad[i, -1] = lonImg
theta_rad[i, 0] = latImg
theta_rad[i, -1] = -latImg
pp = np.zeros([3, c + 1, c + 1])
# Set the four corners
# print("CORNERS")
for i, j in product([0, -1], [0, -1]):
# print(i, j)
pp[:, i, j] = latlon_to_cartesian(
lambda_rad[i, j], theta_rad[i, j])
# Map the edges on the sphere back to the cube.
        # Note that all intersections are at x = -1/sqrt(3) (i.e. -_INV_SQRT_3)
# print("EDGES")
for ij in range(1, c + 1):
# print(ij)
pp[:, 0, ij] = latlon_to_cartesian(
lambda_rad[0, ij], theta_rad[0, ij])
pp[1, 0, ij] = -pp[1, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
pp[2, 0, ij] = -pp[2, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
pp[:, ij, 0] = latlon_to_cartesian(
lambda_rad[ij, 0], theta_rad[ij, 0])
pp[1, ij, 0] = -pp[1, ij, 0] * _INV_SQRT_3 / pp[0, ij, 0]
pp[2, ij, 0] = -pp[2, ij, 0] * _INV_SQRT_3 / pp[0, ij, 0]
# # Map interiors
pp[0, :, :] = -_INV_SQRT_3
# print("INTERIOR")
for i in range(1, c + 1):
for j in range(1, c + 1):
# Copy y-z face of the cube along j=1
pp[1, i, j] = pp[1, i, 0]
# Copy along i=1
pp[2, i, j] = pp[2, 0, j]
_pp = pp.copy()
llr, ttr = vec_cartesian_to_latlon(_pp[0], _pp[1], _pp[2])
lambda_rad, theta_rad = llr.copy(), ttr.copy()
# Make grid symmetrical to i = im/2 + 1
for j in range(1, c + 1):
for i in range(1, c + 1):
# print("({}, {}) -> ({}, {})".format(i, 0, i, j))
lambda_rad[i, j] = lambda_rad[i, 0]
for j in range(c + 1):
for i in range(c // 2):
isymm = c - i
# print(isymm)
avgPt = 0.5 * (lambda_rad[i, j] - lambda_rad[isymm, j])
# print(lambda_rad[i, j], lambda_rad[isymm, j], avgPt)
lambda_rad[i, j] = avgPt + np.pi
lambda_rad[isymm, j] = np.pi - avgPt
avgPt = 0.5 * (theta_rad[i, j] + theta_rad[isymm, j])
theta_rad[i, j] = avgPt
theta_rad[isymm, j] = avgPt
# Make grid symmetrical to j = im/2 + 1
for j in range(c // 2):
jsymm = c - j
for i in range(1, c + 1):
avgPt = 0.5 * (lambda_rad[i, j] + lambda_rad[i, jsymm])
lambda_rad[i, j] = avgPt
lambda_rad[i, jsymm] = avgPt
avgPt = 0.5 * (theta_rad[i, j] - theta_rad[i, jsymm])
theta_rad[i, j] = avgPt
theta_rad[i, jsymm] = -avgPt
# Final correction
lambda_rad -= np.pi
llr, ttr = lambda_rad.copy(), theta_rad.copy()
#######################################################################
# MIRROR GRIDS
#######################################################################
new_xgrid = np.zeros((c + 1, c + 1, 6))
new_ygrid = np.zeros((c + 1, c + 1, 6))
xgrid = llr.copy()
ygrid = ttr.copy()
new_xgrid[..., 0] = xgrid.copy()
new_ygrid[..., 0] = ygrid.copy()
# radius = 6370.0e3
radius = 1.
for face in range(1, 6):
for j in range(c + 1):
for i in range(c + 1):
x = xgrid[i, j]
y = ygrid[i, j]
z = radius
if face == 1:
# Rotate about z only
new_xyz = rotate_sphere_3D(x, y, z, -np.pi / 2., 'z')
elif face == 2:
# Rotate about z, then x
temp_xyz = rotate_sphere_3D(x, y, z, -np.pi / 2., 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'x')
elif face == 3:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi, 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'x')
if ((c % 2) != 0) and (j == c // 2 - 1):
print(i, j, face)
new_xyz = (np.pi, *new_xyz)
elif face == 4:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'y')
elif face == 5:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'y')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, 0., 'z')
# print((x, y, z), "\n", new_xyz, "\n" + "--"*40)
new_x, new_y, _ = new_xyz
new_xgrid[i, j, face] = new_x
new_ygrid[i, j, face] = new_y
lon_edge, lat_edge = new_xgrid.copy(), new_ygrid.copy()
#######################################################################
# CLEANUP GRID
#######################################################################
for i, j, f in product(range(c + 1), range(c + 1), range(6)):
new_lon = lon_edge[i, j, f]
if new_lon < 0:
new_lon += 2 * np.pi
if np.abs(new_lon) < 1e-10:
new_lon = 0.
lon_edge[i, j, f] = new_lon
if np.abs(lat_edge[i, j, f]) < 1e-10:
lat_edge[i, j, f] = 0.
lon_edge_deg = np.rad2deg(lon_edge)
lat_edge_deg = np.rad2deg(lat_edge)
#######################################################################
# COMPUTE CELL CENTROIDS
#######################################################################
lon_ctr = np.zeros((c, c, 6))
lat_ctr = np.zeros((c, c, 6))
xyz_ctr = np.zeros((3, c, c, 6))
xyz_edge = np.zeros((3, c + 1, c + 1, 6))
for f in range(6):
for i in range(c):
last_x = (i == (c - 1))
for j in range(c):
last_y = (j == (c - 1))
# Get the four corners
lat_corner = [
lat_edge[i, j, f],
lat_edge[i + 1, j, f],
lat_edge[i + 1, j + 1, f],
lat_edge[i, j + 1, f]]
lon_corner = [
lon_edge[i, j, f],
lon_edge[i + 1, j, f],
lon_edge[i + 1, j + 1, f],
lon_edge[i, j + 1, f]]
# Convert from lat-lon back to cartesian
xyz_corner = np.asarray(
vec_latlon_to_cartesian(
lon_corner, lat_corner))
# Store the edge information
xyz_edge[:, i, j, f] = xyz_corner[:, 0]
if last_x:
xyz_edge[:, i + 1, j, f] = xyz_corner[:, 1]
if last_x or last_y:
xyz_edge[:, i + 1, j + 1, f] = xyz_corner[:, 2]
if last_y:
xyz_edge[:, i, j + 1, f] = xyz_corner[:, 3]
e_mid = np.sum(xyz_corner, axis=1)
e_abs = np.sqrt(np.sum(e_mid * e_mid))
if e_abs > 0:
e_mid = e_mid / e_abs
xyz_ctr[:, i, j, f] = e_mid
_lon, _lat = cartesian_to_latlon(*e_mid)
lon_ctr[i, j, f] = _lon
lat_ctr[i, j, f] = _lat
lon_ctr_deg = np.rad2deg(lon_ctr)
lat_ctr_deg = np.rad2deg(lat_ctr)
if self.offset is not None:
lon_edge_deg += self.offset
lon_ctr_deg += self.offset
#######################################################################
# CACHE
#######################################################################
self.lon_center = lon_ctr_deg
self.lat_center = lat_ctr_deg
self.lon_edge = lon_edge_deg
self.lat_edge = lat_edge_deg
self.xyz_center = xyz_ctr
self.xyz_edge = xyz_edge
def latlon_to_cartesian(lon, lat):
""" Convert latitude/longitude coordinates along the unit sphere to cartesian
coordinates defined by a vector pointing from the sphere's center to its
surface.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
x = np.cos(lat) * np.cos(lon)
y = np.cos(lat) * np.sin(lon)
z = np.sin(lat)
return x, y, z
vec_latlon_to_cartesian = np.vectorize(latlon_to_cartesian)
def cartesian_to_latlon(x, y, z, ret_xyz=False):
""" Convert a cartesian coordinate to latitude/longitude coordinates.
Optionally return the original cartesian coordinate as a tuple.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
xyz = np.array([x, y, z])
vector_length = np.sqrt(np.sum(xyz * xyz, axis=0))
xyz /= vector_length
x, y, z = xyz
if (np.abs(x) + np.abs(y)) < 1e-20:
lon = 0.
else:
lon = np.arctan2(y, x)
if lon < 0.:
lon += 2 * np.pi
lat = np.arcsin(z)
# If not normalizing vector, take lat = np.arcsin(z/vector_length)
if ret_xyz:
return lon, lat, xyz
else:
return lon, lat
vec_cartesian_to_latlon = np.vectorize(cartesian_to_latlon)
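# Hedged round-trip sketch: latlon_to_cartesian and cartesian_to_latlon are inverses
# on the unit sphere (angles in radians):
#
#   x, y, z = latlon_to_cartesian(np.pi / 4, np.pi / 6)
#   lon, lat = cartesian_to_latlon(x, y, z)   # ~ (pi/4, pi/6)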
def spherical_to_cartesian(theta, phi, r=1):
""" Convert spherical coordinates in the form (theta, phi[, r]) to
cartesian, with the origin at the center of the original spherical
coordinate system.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
x = r * np.cos(phi) * np.cos(theta)
y = r * np.cos(phi) * np.sin(theta)
z = r * np.sin(phi)
return x, y, z
vec_spherical_to_cartesian = np.vectorize(spherical_to_cartesian)
def cartesian_to_spherical(x, y, z):
""" Convert cartesian coordinates to spherical in the form
(theta, phi[, r]) with the origin remaining at the center of the
original spherical coordinate system.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
r = np.sqrt(x**2 + y**2 + z**2)
#theta = np.arccos(z / r)
theta = np.arctan2(y, x)
phi = np.arctan2(z, np.sqrt(x**2 + y**2))
# if np.abs(x) < 1e-16:
# phi = np.pi
# else:
# phi = np.arctan(y / x)
return theta, phi, r
vec_cartesian_to_spherical = np.vectorize(cartesian_to_spherical)
def rotate_sphere_3D(theta, phi, r, rot_ang, rot_axis='x'):
""" Rotate a spherical coordinate in the form (theta, phi[, r])
    about the indicated axis, 'rot_axis'.
This method accomplishes the rotation by projecting to a
cartesian coordinate system and performing a solid body rotation
around the requested axis.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
cos_ang = np.cos(rot_ang)
sin_ang = np.sin(rot_ang)
x, y, z = spherical_to_cartesian(theta, phi, r)
if rot_axis == 'x':
x_new = x
y_new = cos_ang * y + sin_ang * z
z_new = -sin_ang * y + cos_ang * z
elif rot_axis == 'y':
x_new = cos_ang * x - sin_ang * z
y_new = y
z_new = sin_ang * x + cos_ang * z
elif rot_axis == 'z':
x_new = cos_ang * x + sin_ang * y
y_new = -sin_ang * x + cos_ang * y
z_new = z
theta_new, phi_new, r_new = cartesian_to_spherical(x_new, y_new, z_new)
return theta_new, phi_new, r_new
|
[
"numpy.sqrt",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"numpy.flip",
"numpy.cross",
"numpy.sort",
"itertools.product",
"numpy.asarray",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.rad2deg",
"numpy.round",
"numpy.abs",
"numpy.size",
"numpy.squeeze",
"numpy.deg2rad",
"numpy.cos",
"numpy.nonzero",
"numpy.vectorize",
"numpy.intersect1d",
"numpy.arcsin",
"numpy.sum",
"numpy.zeros"
] |
[((13666, 14423), 'numpy.array', 'np.array', (['[0.0, 0.04804826, 6.593752, 13.1348, 19.61311, 26.09201, 32.57081, 38.98201,\n 45.33901, 51.69611, 58.05321, 64.36264, 70.62198, 78.83422, 89.09992, \n 99.36521, 109.1817, 118.9586, 128.6959, 142.91, 156.26, 169.609, \n 181.619, 193.097, 203.259, 212.15, 218.776, 223.898, 224.363, 216.865, \n 201.192, 176.93, 150.393, 127.837, 108.663, 92.36572, 78.51231, \n 66.60341, 56.38791, 47.64391, 40.17541, 33.81001, 28.36781, 23.73041, \n 19.7916, 16.4571, 13.6434, 11.2769, 9.292942, 7.619842, 6.216801, \n 5.046801, 4.076571, 3.276431, 2.620211, 2.08497, 1.65079, 1.30051, \n 1.01944, 0.7951341, 0.6167791, 0.4758061, 0.3650411, 0.2785261, \n 0.211349, 0.159495, 0.119703, 0.08934502, 0.06600001, 0.04758501, \n 0.0327, 0.02, 0.01]'], {}), '([0.0, 0.04804826, 6.593752, 13.1348, 19.61311, 26.09201, 32.57081,\n 38.98201, 45.33901, 51.69611, 58.05321, 64.36264, 70.62198, 78.83422, \n 89.09992, 99.36521, 109.1817, 118.9586, 128.6959, 142.91, 156.26, \n 169.609, 181.619, 193.097, 203.259, 212.15, 218.776, 223.898, 224.363, \n 216.865, 201.192, 176.93, 150.393, 127.837, 108.663, 92.36572, 78.51231,\n 66.60341, 56.38791, 47.64391, 40.17541, 33.81001, 28.36781, 23.73041, \n 19.7916, 16.4571, 13.6434, 11.2769, 9.292942, 7.619842, 6.216801, \n 5.046801, 4.076571, 3.276431, 2.620211, 2.08497, 1.65079, 1.30051, \n 1.01944, 0.7951341, 0.6167791, 0.4758061, 0.3650411, 0.2785261, \n 0.211349, 0.159495, 0.119703, 0.08934502, 0.06600001, 0.04758501, \n 0.0327, 0.02, 0.01])\n', (13674, 14423), True, 'import numpy as np\n'), ((16515, 17115), 'numpy.array', 'np.array', (['[1.0, 0.984952, 0.963406, 0.941865, 0.920387, 0.898908, 0.877429, 0.856018,\n 0.8346609, 0.8133039, 0.7919469, 0.7706375, 0.7493782, 0.721166, \n 0.6858999, 0.6506349, 0.6158184, 0.5810415, 0.5463042, 0.4945902, \n 0.4437402, 0.3928911, 0.3433811, 0.2944031, 0.2467411, 0.2003501, \n 0.1562241, 0.1136021, 0.06372006, 0.02801004, 0.006960025, 8.175413e-09,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.984952, 0.963406, 0.941865, 0.920387, 0.898908, 0.877429, \n 0.856018, 0.8346609, 0.8133039, 0.7919469, 0.7706375, 0.7493782, \n 0.721166, 0.6858999, 0.6506349, 0.6158184, 0.5810415, 0.5463042, \n 0.4945902, 0.4437402, 0.3928911, 0.3433811, 0.2944031, 0.2467411, \n 0.2003501, 0.1562241, 0.1136021, 0.06372006, 0.02801004, 0.006960025, \n 8.175413e-09, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0])\n', (16523, 17115), True, 'import numpy as np\n'), ((19434, 19446), 'numpy.zeros', 'np.zeros', (['(48)'], {}), '(48)\n', (19442, 19446), True, 'import numpy as np\n'), ((19462, 19474), 'numpy.zeros', 'np.zeros', (['(48)'], {}), '(48)\n', (19470, 19474), True, 'import numpy as np\n'), ((19662, 19674), 'numpy.zeros', 'np.zeros', (['(72)'], {}), '(72)\n', (19670, 19674), True, 'import numpy as np\n'), ((19687, 19699), 'numpy.zeros', 'np.zeros', (['(72)'], {}), '(72)\n', (19695, 19699), True, 'import numpy as np\n'), ((19712, 19724), 'numpy.zeros', 'np.zeros', (['(72)'], {}), '(72)\n', (19720, 19724), True, 'import numpy as np\n'), ((34976, 34998), 'numpy.arcsin', 'np.arcsin', (['_INV_SQRT_3'], {}), '(_INV_SQRT_3)\n', (34985, 34998), True, 'import numpy as np\n'), ((47239, 
47272), 'numpy.vectorize', 'np.vectorize', (['latlon_to_cartesian'], {}), '(latlon_to_cartesian)\n', (47251, 47272), True, 'import numpy as np\n'), ((48083, 48116), 'numpy.vectorize', 'np.vectorize', (['cartesian_to_latlon'], {}), '(cartesian_to_latlon)\n', (48095, 48116), True, 'import numpy as np\n'), ((48627, 48663), 'numpy.vectorize', 'np.vectorize', (['spherical_to_cartesian'], {}), '(spherical_to_cartesian)\n', (48639, 48663), True, 'import numpy as np\n'), ((49314, 49350), 'numpy.vectorize', 'np.vectorize', (['cartesian_to_spherical'], {}), '(cartesian_to_spherical)\n', (49326, 49350), True, 'import numpy as np\n'), ((23083, 23353), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01505309, 0.03276228, 0.05359622,\n 0.07810627, 0.1069411, 0.1408637, 0.180772, 0.227722, 0.2829562, \n 0.3479364, 0.4243822, 0.5143168, 0.6201202, 0.7235355, 0.8176768, \n 0.8962153, 0.9534761, 0.9851122, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01505309, 0.03276228, \n 0.05359622, 0.07810627, 0.1069411, 0.1408637, 0.180772, 0.227722, \n 0.2829562, 0.3479364, 0.4243822, 0.5143168, 0.6201202, 0.7235355, \n 0.8176768, 0.8962153, 0.9534761, 0.9851122, 1.0])\n', (23091, 23353), True, 'import numpy as np\n'), ((30288, 30316), 'numpy.round', 'np.round', (['(360.0 / lon_stride)'], {}), '(360.0 / lon_stride)\n', (30296, 30316), True, 'import numpy as np\n'), ((30332, 30373), 'numpy.linspace', 'np.linspace', (['(-180.0)', '(180.0)'], {'num': '(n_lon + 1)'}), '(-180.0, 180.0, num=n_lon + 1)\n', (30343, 30373), True, 'import numpy as np\n'), ((32429, 32459), 'numpy.asarray', 'asarray', (['lon_edge'], {'dtype': 'float'}), '(lon_edge, dtype=float)\n', (32436, 32459), False, 'from numpy import asarray\n'), ((32475, 32505), 'numpy.asarray', 'asarray', (['lat_edge'], {'dtype': 'float'}), '(lat_edge, dtype=float)\n', (32482, 32505), False, 'from numpy import asarray\n'), ((32588, 32612), 'numpy.zeros', 'np.zeros', (['(n_lat, n_lon)'], {}), '((n_lat, n_lon))\n', (32596, 32612), True, 'import numpy as np\n'), ((33538, 33555), 'numpy.asarray', 'asarray', (['lon_edge'], {}), '(lon_edge)\n', (33545, 33555), False, 'from numpy import asarray\n'), ((33599, 33614), 'numpy.zeros', 'np.zeros', (['n_lon'], {}), '(n_lon)\n', (33607, 33614), True, 'import numpy as np\n'), ((34944, 34956), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (34951, 34956), True, 'import numpy as np\n'), ((47179, 47190), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (47185, 47190), True, 'import numpy as np\n'), ((47621, 47640), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (47629, 47640), True, 'import numpy as np\n'), ((47891, 47903), 'numpy.arcsin', 'np.arcsin', (['z'], {}), '(z)\n', (47900, 47903), True, 'import numpy as np\n'), ((49029, 49062), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (49036, 49062), True, 'import numpy as np\n'), ((49099, 49115), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (49109, 49115), True, 'import numpy as np\n'), ((49843, 49858), 'numpy.cos', 'np.cos', (['rot_ang'], {}), '(rot_ang)\n', (49849, 49858), True, 'import numpy as np\n'), ((49873, 49888), 'numpy.sin', 'np.sin', (['rot_ang'], {}), '(rot_ang)\n', (49879, 49888), True, 'import numpy as np\n'), ((1392, 1422), 'numpy.squeeze', 'np.squeeze', (["ds['Met_BXHEIGHT']"], {}), "(ds['Met_BXHEIGHT'])\n", (1402, 1422), True, 'import numpy as np\n'), ((13326, 13338), 'numpy.array', 'np.array', (['AP'], {}), '(AP)\n', (13334, 13338), True, 'import 
numpy as np\n'), ((13357, 13369), 'numpy.array', 'np.array', (['BP'], {}), '(BP)\n', (13365, 13369), True, 'import numpy as np\n'), ((22587, 22869), 'numpy.array', 'np.array', (['[219.4067, 489.5209, 988.2418, 1805.201, 2983.724, 4462.334, 6160.587, \n 7851.243, 7731.271, 7590.131, 7424.086, 7228.744, 6998.933, 6728.574, \n 6410.509, 6036.322, 5596.111, 5078.225, 4468.96, 3752.191, 2908.949, \n 2084.739, 1334.443, 708.499, 252.136, 0.0, 0.0]'], {}), '([219.4067, 489.5209, 988.2418, 1805.201, 2983.724, 4462.334, \n 6160.587, 7851.243, 7731.271, 7590.131, 7424.086, 7228.744, 6998.933, \n 6728.574, 6410.509, 6036.322, 5596.111, 5078.225, 4468.96, 3752.191, \n 2908.949, 2084.739, 1334.443, 708.499, 252.136, 0.0, 0.0])\n', (22595, 22869), True, 'import numpy as np\n'), ((25549, 25574), 'numpy.nonzero', 'np.nonzero', (['(lon >= minlon)'], {}), '(lon >= minlon)\n', (25559, 25574), True, 'import numpy as np\n'), ((25596, 25621), 'numpy.nonzero', 'np.nonzero', (['(lon <= maxlon)'], {}), '(lon <= maxlon)\n', (25606, 25621), True, 'import numpy as np\n'), ((25641, 25679), 'numpy.intersect1d', 'np.intersect1d', (['minlon_ind', 'maxlon_ind'], {}), '(minlon_ind, maxlon_ind)\n', (25655, 25679), True, 'import numpy as np\n'), ((25874, 25899), 'numpy.nonzero', 'np.nonzero', (['(lat >= minlat)'], {}), '(lat >= minlat)\n', (25884, 25899), True, 'import numpy as np\n'), ((25921, 25946), 'numpy.nonzero', 'np.nonzero', (['(lat <= maxlat)'], {}), '(lat <= maxlat)\n', (25931, 25946), True, 'import numpy as np\n'), ((25966, 26004), 'numpy.intersect1d', 'np.intersect1d', (['minlat_ind', 'maxlat_ind'], {}), '(minlat_ind, maxlat_ind)\n', (25980, 26004), True, 'import numpy as np\n'), ((32912, 32932), 'numpy.deg2rad', 'np.deg2rad', (['lat_edge'], {}), '(lat_edge)\n', (32922, 32932), True, 'import numpy as np\n'), ((37095, 37113), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (37103, 37113), True, 'import numpy as np\n'), ((37248, 37266), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (37256, 37266), True, 'import numpy as np\n'), ((37769, 37795), 'numpy.cross', 'np.cross', (['xyzMir1', 'xyzMir2'], {}), '(xyzMir1, xyzMir2)\n', (37777, 37795), True, 'import numpy as np\n'), ((38414, 38441), 'numpy.zeros', 'np.zeros', (['[3, c + 1, c + 1]'], {}), '([3, c + 1, c + 1])\n', (38422, 38441), True, 'import numpy as np\n'), ((38521, 38546), 'itertools.product', 'product', (['[0, -1]', '[0, -1]'], {}), '([0, -1], [0, -1])\n', (38528, 38546), False, 'from itertools import product\n'), ((41377, 41404), 'numpy.zeros', 'np.zeros', (['(c + 1, c + 1, 6)'], {}), '((c + 1, c + 1, 6))\n', (41385, 41404), True, 'import numpy as np\n'), ((41425, 41452), 'numpy.zeros', 'np.zeros', (['(c + 1, c + 1, 6)'], {}), '((c + 1, c + 1, 6))\n', (41433, 41452), True, 'import numpy as np\n'), ((44033, 44053), 'numpy.rad2deg', 'np.rad2deg', (['lon_edge'], {}), '(lon_edge)\n', (44043, 44053), True, 'import numpy as np\n'), ((44077, 44097), 'numpy.rad2deg', 'np.rad2deg', (['lat_edge'], {}), '(lat_edge)\n', (44087, 44097), True, 'import numpy as np\n'), ((44311, 44330), 'numpy.zeros', 'np.zeros', (['(c, c, 6)'], {}), '((c, c, 6))\n', (44319, 44330), True, 'import numpy as np\n'), ((44349, 44368), 'numpy.zeros', 'np.zeros', (['(c, c, 6)'], {}), '((c, c, 6))\n', (44357, 44368), True, 'import numpy as np\n'), ((44387, 44409), 'numpy.zeros', 'np.zeros', (['(3, c, c, 6)'], {}), '((3, c, c, 6))\n', (44395, 44409), True, 'import numpy as np\n'), ((44429, 44459), 'numpy.zeros', 'np.zeros', (['(3, c + 1, c + 1, 6)'], {}), '((3, c + 
1, c + 1, 6))\n', (44437, 44459), True, 'import numpy as np\n'), ((46174, 46193), 'numpy.rad2deg', 'np.rad2deg', (['lon_ctr'], {}), '(lon_ctr)\n', (46184, 46193), True, 'import numpy as np\n'), ((46216, 46235), 'numpy.rad2deg', 'np.rad2deg', (['lat_ctr'], {}), '(lat_ctr)\n', (46226, 46235), True, 'import numpy as np\n'), ((47111, 47122), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (47117, 47122), True, 'import numpy as np\n'), ((47125, 47136), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (47131, 47136), True, 'import numpy as np\n'), ((47145, 47156), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (47151, 47156), True, 'import numpy as np\n'), ((47159, 47170), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (47165, 47170), True, 'import numpy as np\n'), ((47669, 47694), 'numpy.sum', 'np.sum', (['(xyz * xyz)'], {'axis': '(0)'}), '(xyz * xyz, axis=0)\n', (47675, 47694), True, 'import numpy as np\n'), ((47821, 47837), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (47831, 47837), True, 'import numpy as np\n'), ((48499, 48512), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (48505, 48512), True, 'import numpy as np\n'), ((48539, 48552), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (48545, 48552), True, 'import numpy as np\n'), ((48565, 48576), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (48571, 48576), True, 'import numpy as np\n'), ((49140, 49164), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (49147, 49164), True, 'import numpy as np\n'), ((4402, 4425), 'numpy.abs', 'np.abs', (['(lat[2] - lat[1])'], {}), '(lat[2] - lat[1])\n', (4408, 4425), True, 'import numpy as np\n'), ((4448, 4471), 'numpy.abs', 'np.abs', (['(lon[2] - lon[1])'], {}), '(lon[2] - lon[1])\n', (4454, 4471), True, 'import numpy as np\n'), ((11987, 12026), 'numpy.abs', 'np.abs', (["(converted_dataset['lev'] - pres)"], {}), "(converted_dataset['lev'] - pres)\n", (11993, 12026), True, 'import numpy as np\n'), ((34685, 34704), 'numpy.flip', 'np.flip', (['a[tile]', '(1)'], {}), '(a[tile], 1)\n', (34692, 34704), True, 'import numpy as np\n'), ((34761, 34780), 'numpy.flip', 'np.flip', (['a[tile]', '(0)'], {}), '(a[tile], 0)\n', (34768, 34780), True, 'import numpy as np\n'), ((37819, 37840), 'numpy.sum', 'np.sum', (['(xyzCross ** 2)'], {}), '(xyzCross ** 2)\n', (37825, 37840), True, 'import numpy as np\n'), ((38053, 38078), 'numpy.sum', 'np.sum', (['(xyzCross * xyzRef)'], {}), '(xyzCross * xyzRef)\n', (38059, 38078), True, 'import numpy as np\n'), ((47748, 47757), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (47754, 47757), True, 'import numpy as np\n'), ((47760, 47769), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (47766, 47769), True, 'import numpy as np\n'), ((48485, 48496), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (48491, 48496), True, 'import numpy as np\n'), ((48525, 48536), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (48531, 48536), True, 'import numpy as np\n'), ((3344, 3362), 'numpy.squeeze', 'np.squeeze', (['values'], {}), '(values)\n', (3354, 3362), True, 'import numpy as np\n'), ((7272, 7293), 'numpy.min', 'np.min', (["data['lon_b']"], {}), "(data['lon_b'])\n", (7278, 7293), True, 'import numpy as np\n'), ((7312, 7333), 'numpy.max', 'np.max', (["data['lon_b']"], {}), "(data['lon_b'])\n", (7318, 7333), True, 'import numpy as np\n'), ((7352, 7373), 'numpy.min', 'np.min', (["data['lat_b']"], {}), "(data['lat_b'])\n", (7358, 7373), True, 'import numpy as np\n'), ((7392, 7413), 'numpy.max', 'np.max', (["data['lat_b']"], {}), 
"(data['lat_b'])\n", (7398, 7413), True, 'import numpy as np\n'), ((7950, 7962), 'numpy.sort', 'np.sort', (['lat'], {}), '(lat)\n', (7957, 7962), True, 'import numpy as np\n'), ((7984, 7995), 'numpy.min', 'np.min', (['lat'], {}), '(lat)\n', (7990, 7995), True, 'import numpy as np\n'), ((8186, 8197), 'numpy.max', 'np.max', (['lat'], {}), '(lat)\n', (8192, 8197), True, 'import numpy as np\n'), ((8406, 8418), 'numpy.sort', 'np.sort', (['lon'], {}), '(lon)\n', (8413, 8418), True, 'import numpy as np\n'), ((8440, 8451), 'numpy.min', 'np.min', (['lon'], {}), '(lon)\n', (8446, 8451), True, 'import numpy as np\n'), ((25798, 25814), 'numpy.max', 'np.max', (['lon_inds'], {}), '(lon_inds)\n', (25804, 25814), True, 'import numpy as np\n'), ((26123, 26139), 'numpy.max', 'np.max', (['lat_inds'], {}), '(lat_inds)\n', (26129, 26139), True, 'import numpy as np\n'), ((31884, 31921), 'numpy.round', 'np.round', (['(2.0 * start_pt / lat_stride)'], {}), '(2.0 * start_pt / lat_stride)\n', (31892, 31921), True, 'import numpy as np\n'), ((37343, 37359), 'numpy.arange', 'np.arange', (['(c + 1)'], {}), '(c + 1)\n', (37352, 37359), True, 'import numpy as np\n'), ((43825, 43840), 'numpy.abs', 'np.abs', (['new_lon'], {}), '(new_lon)\n', (43831, 43840), True, 'import numpy as np\n'), ((43935, 43960), 'numpy.abs', 'np.abs', (['lat_edge[i, j, f]'], {}), '(lat_edge[i, j, f])\n', (43941, 43960), True, 'import numpy as np\n'), ((2430, 2449), 'numpy.array', 'np.array', (['shape[2:]'], {}), '(shape[2:])\n', (2438, 2449), True, 'import numpy as np\n'), ((2680, 2698), 'numpy.squeeze', 'np.squeeze', (['values'], {}), '(values)\n', (2690, 2698), True, 'import numpy as np\n'), ((3193, 3212), 'numpy.array', 'np.array', (['shape[1:]'], {}), '(shape[1:])\n', (3201, 3212), True, 'import numpy as np\n'), ((7474, 7493), 'numpy.min', 'np.min', (["data['lon']"], {}), "(data['lon'])\n", (7480, 7493), True, 'import numpy as np\n'), ((7512, 7531), 'numpy.max', 'np.max', (["data['lon']"], {}), "(data['lon'])\n", (7518, 7531), True, 'import numpy as np\n'), ((7550, 7569), 'numpy.min', 'np.min', (["data['lat']"], {}), "(data['lat'])\n", (7556, 7569), True, 'import numpy as np\n'), ((7588, 7607), 'numpy.max', 'np.max', (["data['lat']"], {}), "(data['lat'])\n", (7594, 7607), True, 'import numpy as np\n'), ((8473, 8484), 'numpy.max', 'np.max', (['lon'], {}), '(lon)\n', (8479, 8484), True, 'import numpy as np\n'), ((10160, 10171), 'numpy.size', 'np.size', (['AP'], {}), '(AP)\n', (10167, 10171), True, 'import numpy as np\n'), ((10633, 10651), 'numpy.max', 'np.max', (['pres_range'], {}), '(pres_range)\n', (10639, 10651), True, 'import numpy as np\n'), ((10678, 10696), 'numpy.min', 'np.min', (['pres_range'], {}), '(pres_range)\n', (10684, 10696), True, 'import numpy as np\n'), ((45787, 45813), 'numpy.sum', 'np.sum', (['xyz_corner'], {'axis': '(1)'}), '(xyz_corner, axis=1)\n', (45793, 45813), True, 'import numpy as np\n'), ((9778, 9789), 'numpy.size', 'np.size', (['AP'], {}), '(AP)\n', (9785, 9789), True, 'import numpy as np\n'), ((45850, 45871), 'numpy.sum', 'np.sum', (['(e_mid * e_mid)'], {}), '(e_mid * e_mid)\n', (45856, 45871), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
"""
InstrumentData Class -- defines data format, wavelength info, mask geometry
Instruments/masks supported:
NIRISS AMI
GPI, VISIR, NIRC2 removed - too much changed for the JWST NIRISS class
"""
# Standard Imports
import numpy as np
from astropy.io import fits
import os, sys, time
import copy
# Module imports
import synphot
# import stsynphot
# mask geometries, GPI, NIRISS, VISIR supported...
from nrm_analysis.misctools.mask_definitions import NRM_mask_definitions
from nrm_analysis.misctools import utils
from nrm_analysis.misctools import lpl_ianc
um = 1.0e-6
# utility routines for InstrumentData classes
def show_cvsupport_threshold(instr):
""" Show threshold for where 'splodge' data in CV space contains signal """
print("InstrumentData: ", "cvsupport_threshold is: ", instr.cvsupport_threshold)
print("InstrumentData: ", instr.cvsupport_threshold)
def set_cvsupport_threshold(instr, k, v):
""" Set threshold for where 'splodge' data in CV space contains signal
Parameters
----------
instr: InstrumentData instance
    k: filter name key into instr.cvsupport_threshold (e.g. "F430M")
    v: threshold for the absolute value of the FT(interferogram).
        Normalize abs(CV = FT(a)) for unity peak, and define the support
        of "good" CV when this is above threshold
"""
instr.cvsupport_threshold[k] = v
print("InstrumentData: ", "New cvsupport_threshold is: ", instr.cvsupport_threshold)
class NIRISS:
def __init__(self, filt,
objname="obj",
src='A0V',
chooseholes=None,
affine2d=None,
bandpass=None,
nbadpix=4,
usebp=True,
firstfew=None,
nspecbin=None,
**kwargs):
"""
Initialize NIRISS class
ARGUMENTS:
kwargs:
UTR
Or just look at the file structure
Either user has webbpsf and filter file can be read, or...
chooseholes: None, or e.g. ['B2', 'B4', 'B5', 'B6'] for a four-hole mask
filt: Filter name string like "F480M"
bandpass: None or [(wt,wlen),(wt,wlen),...]. Monochromatic would be e.g. [(1.0, 4.3e-6)]
Explicit bandpass arg will replace *all* niriss filter-specific variables with
the given bandpass (src, nspecbin, filt), so you can simulate 21cm psfs through
something called "F430M". Can also be synphot.spectrum.SourceSpectrum object.
firstfew: None or the number of slices to truncate input cube to in memory,
            the latter for fast development
nbadpix: Number of good pixels to use when fixing bad pixels DEPRECATED
usebp: Convert to usedq during initialization
            Internally this is changed to self.usedq = usebp immediately for code clarity
True (default) do not use DQ with DO_NOT_USE flag in input MAST data when
fitting data with model. False: Assume no bad pixels in input
noise: standard deviation of noise added to perfect images to enable candid
plots without crashing on np.inf limits! Image assumed to be in (np.float64) dn.
Suggested noise: 1e-6.
src: source spectral type string e.g. "A0V" OR user-defined synphot.spectrum.SourceSpectrum object
nspecbin: Number of wavelength bins to use across the bandpass. Replaces deprecated `usespecbin` which
            set the **number of wavelengths per bin**, not the number of bins.
"""
self.verbose = False
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
self.noise = None
if "noise" in kwargs:
self.noise = kwargs["noise"]
if "usespecbin" in kwargs: # compatability with previous arg
# but not really, usespecbin was binning factor, not number of bins
nspecbin = kwargs["usespecbin"]
# change how many wavelength bins will be used across the bandpass
if nspecbin is None:
nspecbin = 19
self.lam_bin = nspecbin
# src can be either a spectral type string or a user-defined synphot spectrum object
if isinstance(src, synphot.spectrum.SourceSpectrum):
print("Using user-defined synphot SourceSpectrum")
if chooseholes:
print("InstrumentData.NIRISS: ", chooseholes)
self.chooseholes = chooseholes
# USEBP is USEDQ in the rest of code - use
self.usedq = usebp
print("Fitting omits bad pixels (identified by DO_NOT_USE value in the DQ extension)")
self.jwst_dqflags() # creates dicts self.bpval, self.bpgroup
# self.bpexist set True/False if DQ fits image extension exists/doesn't
self.firstfew = firstfew
if firstfew is not None: print("InstrumentData.NIRISS: analysing firstfew={:d} slices".format(firstfew))
self.objname = objname
self.filt = filt
if bandpass is not None:
print("InstrumentData.NIRISS: OVERRIDING BANDPASS WITH USER-SUPPLIED VALUES.")
print("\t src, filt, nspecbin parameters will not be used")
# check type of bandpass. can be synphot spectrum
# if so, get throughput and wavelength arrays
if isinstance(bandpass, synphot.spectrum.SpectralElement):
wl, wt = bandpass._get_arrays(bandpass.waveset)
self.throughput = np.array((wt,wl)).T
else:
self.throughput = np.array(bandpass) # type simplification
else:
filt_spec = utils.get_filt_spec(self.filt)
src_spec = utils.get_src_spec(src)
# **NOTE**: As of WebbPSF version 1.0.0 filter is trimmed to where throughput is 10% of peak
            # For consistency with WebbPSF simulations, use trim=0.1
self.throughput = utils.combine_src_filt(filt_spec,
src_spec,
trim=0.01,
nlambda=nspecbin,
verbose=self.verbose,
plot=False)
self.lam_c, self.lam_w = utils.get_cw_beta(self.throughput)
if self.verbose: print("InstrumentData.NIRISS: ", self.filt,
": central wavelength {:.4e} microns, ".format(self.lam_c/um), end="")
if self.verbose: print("InstrumentData.NIRISS: ", "fractional bandpass {:.3f}".format(self.lam_w))
self.wls = [self.throughput,]
if self.verbose: print("self.throughput:\n", self.throughput)
# Wavelength info for NIRISS bands F277W, F380M, F430M, or F480M
self.wavextension = ([self.lam_c,], [self.lam_w,])
self.nwav=1 # these are 'slices' if the data is pure imaging integrations -
# nwav is old nomenclature from GPI IFU data. Refactor one day...
#############################
# only one NRM on JWST:
self.telname = "JWST"
self.instrument = "NIRISS"
self.arrname = "jwst_g7s6c" # implaneia mask set with this - unify to short form later
self.holeshape="hex"
self.mask = NRM_mask_definitions(maskname=self.arrname, chooseholes=chooseholes,
holeshape=self.holeshape )
# save affine deformation of pupil object or create a no-deformation object.
# We apply this when sampling the PSF, not to the pupil geometry.
# This will set a default Ideal or a measured rotation, for example,
# and include pixel scale changes due to pupil distortion.
# Separating detector tilt pixel scale effects from pupil distortion effects is
# yet to be determined... see comments in Affine class definition.
# AS AZG 2018 08 15 <NAME>
if affine2d is None:
self.affine2d = utils.Affine2d(mx=1.0,my=1.0,
sx=0.0,sy=0.0,
xo=0.0,yo=0.0, name="Ideal")
else:
self.affine2d = affine2d
# finding centroid from phase slope only considered cv_phase data
# when cv_abs data exceeds this cvsupport_threshold.
# Absolute value of cv data normalized to unity maximum
# for the threshold application.
# Data reduction gurus: tweak the threshold value with experience...
# Gurus: tweak cvsupport with use...
self.cvsupport_threshold = {"F277W":0.02, "F380M": 0.02, "F430M": 0.02, "F480M": 0.02}
if self.verbose: show_cvsupport_threshold(self)
self.threshold = self.cvsupport_threshold[filt]
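    # Hedged construction sketch (argument values are placeholders, not from the
    # original code): a monochromatic 4.8-micron bandpass can be forced with
    #
    #   nd = NIRISS("F480M", bandpass=[(1.0, 4.8e-6)], firstfew=5)
    #
    # while the default path builds the bandpass from the filter curve and an A0V
    # source spectrum using `nspecbin` wavelength bins.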
def set_pscale(self, pscalex_deg=None, pscaley_deg=None):
"""
Override pixel scale in header
"""
if pscalex_deg is not None:
self.pscalex_deg = pscalex_deg
if pscaley_deg is not None:
self.pscaley_deg = pscaley_deg
self.pscale_mas = 0.5 * (pscalex_deg + pscaley_deg) * (60*60*1000)
self.pscale_rad = utils.mas2rad(self.pscale_mas)
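    # Hedged usage sketch (numbers illustrative; 1.823e-05 deg/pix is roughly 65.6 mas/pix):
    #
    #   nd.set_pscale(pscalex_deg=1.823e-05, pscaley_deg=1.823e-05)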
def read_data(self, fn, mode="slice"):
# mode options are slice or UTR
# for single slice data, need to read as 3D (1, npix, npix)
# for utr data, need to read as 3D (ngroup, npix, npix)
# fix bad pixels using DQ extension and LPL local averaging,
# but send bad pixel array down to where fringes are fit so they can be ignored.
        # For perfectly noiseless data we add Gaussian zero-mean noise of std dev self.noise
        # to the image data. Then std devs don't cause plot crashes with limits problems.
with fits.open(fn, memmap=False, do_not_scale_image_data=True) as fitsfile:
# use context manager, memmap=False, deepcopy to avoid memory leaks
scidata = copy.deepcopy(fitsfile[1].data)
if self.noise is not None: scidata += np.random.normal(0, self.noise, scidata.shape)
# usually DQ ext in MAST file... make it non-fatal for DQ to be missing
try:
bpdata=copy.deepcopy(fitsfile['DQ'].data).astype(np.uint32) # bad pixel extension, forced to uint32
self.bpexist = True
dqmask = bpdata & self.bpval["DO_NOT_USE"] == self.bpval["DO_NOT_USE"] #
del bpdata # free memory
# True => driver wants to omit using pixels with dqflag raised in fit,
if self.usedq == True:
print('InstrumentData.NIRISS.read_data: will not use flagged DQ pixels in fit')
except Exception as e:
print('InstrumentData.NIRISS.read_data: raised exception', e)
self.bpexist = False
dqmask = np.zeros(scidata.shape, dtype=np.uint32) # so it doesn't break if issues with DQ data
if scidata.ndim == 3: #len(scidata.shape)==3:
print("read_data() input: 3D cube")
            # Truncate all but the first few slices of data and DQ array for rapid development
if self.firstfew is not None:
if scidata.shape[0] > self.firstfew:
scidata = scidata[:self.firstfew, :, :]
dqmask = dqmask[:self.firstfew, :, :]
# 'nwav' name (historical) is actually number of data slices in the 3Dimage cube
self.nwav=scidata.shape[0]
[self.wls.append(self.wls[0]) for f in range(self.nwav-1)]
elif len(scidata.shape)==2: # 'cast' 2d array to 3d with shape[0]=1
print("'InstrumentData.NIRISS.read_data: 2D data array converting to 3D one-slice cube")
scidata = np.array([scidata,])
dqmask = np.array([dqmask,])
else:
sys.exit("InstrumentData.NIRISS.read_data: invalid data dimensions for NIRISS. \nShould have dimensionality of 2 or 3.")
# refpix removal by trimming
scidata = scidata[:,4:, :] # [all slices, imaxis[0], imaxis[1]]
print('\tRefpix-trimmed scidata:', scidata.shape)
#### fix pix using bad pixel map - runs now. Need to sanity-check.
if self.bpexist:
# refpix removal by trimming to match image trim
dqmask = dqmask[:,4:, :] # dqmask bool array to match image trimmed shape
print('\tRefpix-trimmed dqmask: ', dqmask.shape)
prihdr=fitsfile[0].header
scihdr=fitsfile[1].header
# MAST header or similar kwds info for oifits writer:
self.updatewithheaderinfo(prihdr, scihdr)
# Directory name into which to write txt observables & optional fits diagnostic files
# The input fits image or cube of images file rootname is used to create the output
# text&fits dir, using the data file's root name as the directory name: for example,
# /abc/.../imdir/xyz_calints.fits results in a directory /abc/.../imdir/xyz_calints/
self.rootfn = fn.split('/')[-1].replace('.fits', '')
return prihdr, scihdr, scidata, dqmask
def cdmatrix_to_sky(self, vec, cd11, cd12, cd21, cd22):
""" use the global header values explicitly, for clarity
vec is 2d, units of pixels
cdij 4 scalars, conceptually 2x2 array in units degrees/pixel
"""
return np.array((cd11*vec[0] + cd12*vec[1], cd21*vec[0] + cd22*vec[1]))
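    # Added note: cdmatrix_to_sky is the 2x2 CD-matrix product
    #   [d(RA-tan), d(Dec-tan)] = [[cd11, cd12], [cd21, cd22]] @ [dx_pix, dy_pix]
    # so feeding it the unit pixel steps (1,0) and (0,1), as degrees_per_pixel does,
    # recovers the sky displacement per pixel along each detector axis.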
def degrees_per_pixel(self, hdr):
"""
input: hdr: fits data file's header with or without CDELT1, CDELT2 (degrees per pixel)
returns: cdelt1, cdelt2: tuple, degrees per pixel along axes 1, 2
EITHER: read from header CDELT[12] keywords
OR: calculated using CD matrix (Jacobian of RA-TAN, DEC-TAN degrees
to pixel directions 1,2. No deformation included in this routine,
        but the CD matrix includes non-linear field distortion.
No pupil distortion or rotation here.
MISSING: If keywords are missing default hardcoded cdelts are returned.
The exact algorithm may substitute this later.
Below seems good to ~5th significant figure when compared to
cdelts header values prior to replacement by cd matrix approach.
N.D. at stsci 11 Mar 20212
We start in Level 1 with the PC matrix and CDELT.
CDELTs come from the SIAF.
The PC matrix is computed from the roll angle, V3YANG and the parity.
The code is here
https://github.com/spacetelescope/jwst/blob/master/jwst/assign_wcs/util.py#L153
In the level 2 imaging pipeline, assign_wcs adds the distortion to the files.
At the end it computes an approximation of the entire distortion transformation
by fitting a polynomial. This approximated distortion is represented as SIP
polynomials in the FITS headers.
Because SIP, by definition, uses a CD matrix, the PC + CDELT are replaced by CD.
How to get CDELTs back?
I think once the rotation, skew and scale are in the CD matrix it's very hard to
disentangle them. The best way IMO is to calculate the local scale using three
point difference. There is a function in jwst that does this.
Using a NIRISS image as an example:
from jwst.assign_wcs import util
from jwst import datamodels
im=datamodels.open('niriss_image_assign_wcs.fits')
util.compute_scale(im.meta.wcs, (im.meta.wcsinfo.ra_ref, im.meta.wcsinfo.dec_ref))
1.823336635353374e-05
The function returns a constant scale. Is this sufficient for what you need or
do you need scales and sheer along each axis? The code in util.compute_scale can
help with figuring out how to get scales along each axis.
I hope this answers your question.
"""
if 'CD1_1' in hdr.keys() and 'CD1_2' in hdr.keys() and \
'CD2_1' in hdr.keys() and 'CD2_2' in hdr.keys():
cd11 = hdr['CD1_1']
cd12 = hdr['CD1_2']
cd21 = hdr['CD2_1']
cd22 = hdr['CD2_2']
# Create unit vectors in detector pixel X and Y directions, units: detector pixels
dxpix = np.array((1.0, 0.0)) # axis 1 step
dypix = np.array((0.0, 1.0)) # axis 2 step
# transform pixel x and y steps to RA-tan, Dec-tan degrees
dxsky = self.cdmatrix_to_sky(dxpix, cd11, cd12, cd21, cd22)
dysky = self.cdmatrix_to_sky(dypix, cd11, cd12, cd21, cd22)
print("Used CD matrix for pixel scales")
return np.linalg.norm(dxsky, ord=2), np.linalg.norm(dysky, ord=2)
        elif 'CDELT1' in hdr.keys() and 'CDELT2' in hdr.keys():
            print("Used CDELT[12] for pixel scales")
            return hdr['CDELT1'], hdr['CDELT2']
else:
print('InstrumentData.NIRISS: Warning: NIRISS pixel scales not in header. Using 65.6 mas in deg/pix')
return 65.6/(60.0*60.0*1000), 65.6/(60.0*60.0*1000)
def updatewithheaderinfo(self, ph, sh):
""" input: primary header, science header MAST"""
# The info4oif_dict will get pickled to disk when we write txt files of results.
# That way we don't drag in objects like InstrumentData into code that reads text results
# and writes oifits files - a simple built-in dictionary is the only object used in this transfer.
info4oif_dict = {}
info4oif_dict['telname'] = self.telname
info4oif_dict['filt'] = self.filt
info4oif_dict['lam_c'] = self.lam_c
info4oif_dict['lam_w'] = self.lam_w
info4oif_dict['lam_bin'] = self.lam_bin
# Target information - 5/21 targname UNKNOWN in nis019 rehearsal data
# Name in the proposal always non-trivial, targname still UNKNOWN...:
if ph["TARGNAME"] == 'UNKNOWN': objname = ph['TARGPROP']
else: objname = ph['TARGNAME'] # allegedly apt name for archive, standard form
#
# if target name has confusing-to-astroquery dash
self.objname = objname.replace('-', ' '); info4oif_dict['objname'] = self.objname
# AB Dor, ab dor, AB DOR, ab dor are all acceptable.
#
self.ra = ph["TARG_RA"]; info4oif_dict['ra'] = self.ra
self.dec = ph["TARG_DEC"]; info4oif_dict['dec'] = self.dec
# / axis 1 DS9 coordinate of the reference pixel (always POS1)
# / axis 2 DS9 coordinate of the reference pixel (always POS1)
self.crpix1 = sh["CRPIX1"]; info4oif_dict['crpix1'] = self.crpix1
self.crpix2 = sh["CRPIX2"]; info4oif_dict['crpix2'] = self.crpix2
# need <NAME>'s table for actual crval[1,2] for true pointing to detector pixel coords (DS9)
self.instrument = ph["INSTRUME"]; info4oif_dict['instrument'] = self.instrument
self.pupil = ph["PUPIL"]; info4oif_dict['pupil'] = self.pupil
# "ImPlaneIA internal mask name" - oifwriter looks for 'mask'...
self.arrname = "jwst_g7s6c" # implaneia internal name - historical
info4oif_dict['arrname'] = 'g7s6' # for oif
info4oif_dict['mask'] = info4oif_dict['arrname'] # Soulain mask goes into oif arrname
# if data was generated on the average pixel scale of the header
# then this is the right value that gets read in, and used in fringe fitting
pscalex_deg, pscaley_deg = self.degrees_per_pixel(sh)
#
info4oif_dict['pscalex_deg'] = pscalex_deg
info4oif_dict['pscaley_deg'] = pscaley_deg
# Whatever we did set is averaged for isotropic pixel scale here
self.pscale_mas = 0.5 * (pscalex_deg + pscaley_deg) * (60*60*1000); \
info4oif_dict['pscale_mas'] = self.pscale_mas
self.pscale_rad = utils.mas2rad(self.pscale_mas); info4oif_dict['pscale_rad'] = self.pscale_rad
self.mask = NRM_mask_definitions(maskname=self.arrname, chooseholes=self.chooseholes,
holeshape=self.holeshape) # for STAtions x y in oifs
self.date = ph["DATE-OBS"] + "T" + ph["TIME-OBS"]; info4oif_dict['date'] = self.date
datestr = ph["DATE-OBS"]
self.year = datestr[:4]; info4oif_dict['year'] = self.year
self.month = datestr[5:7]; info4oif_dict['month'] = self.month
self.day = datestr[8:10]; info4oif_dict['day'] = self.day
self.parangh= sh["ROLL_REF"]; info4oif_dict['parangh'] = self.parangh
self.pa = sh["PA_V3"]; info4oif_dict['pa'] = self.pa
self.vparity = sh["VPARITY"]; info4oif_dict['vparity'] = self.vparity
# An INTegration is NGROUPS "frames", not relevant here but context info.
# 2d => "cal" file combines all INTegrations (ramps)
# 3d=> "calints" file is a cube of all INTegrations (ramps)
if sh["NAXIS"] == 2:
# all INTegrations or 'ramps'
self.itime = ph["EFFINTTM"] * ph["NINTS"]; info4oif_dict['itime'] = self.itime
elif sh["NAXIS"] == 3:
# each slice is one INTegration or 'ramp'
self.itime = ph["EFFINTTM"]; info4oif_dict['itime'] = self.itime
np.set_printoptions(precision=5, suppress=True, linewidth=160,
formatter={'float': lambda x: "%10.5f," % x})
self.v3i_yang = sh['V3I_YANG'] # Angle from V3 axis to Ideal y axis (deg)
# rotate mask hole center coords by PAV3 # RAC 2021
ctrs_sky = self.mast2sky()
oifctrs = np.zeros(self.mask.ctrs.shape)
oifctrs[:,0] = ctrs_sky[:,1].copy() * -1
oifctrs[:,1] = ctrs_sky[:,0].copy() * -1
info4oif_dict['ctrs_eqt'] = oifctrs # mask centers rotated by PAV3 (equatorial coords)
info4oif_dict['ctrs_inst'] = self.mask.ctrs # as-built instrument mask centers
info4oif_dict['hdia'] = self.mask.hdia
info4oif_dict['nslices'] = self.nwav # nwav: number of image slices or IFU cube slices - AMI is imager
self.info4oif_dict = info4oif_dict # save it when writing extracted observables txt
# rather than calling InstrumentData in the niriss example just to reset just call this routine
def reset_nwav(self, nwav):
print("InstrumentData.NIRISS: ", "Resetting InstrumentData instantiation's nwave to", nwav)
self.nwav = nwav
def jwst_dqflags(self):
"""
dqdata is a 2d (32-bit U?)INT array from the DQ extension of the input file.
We ignore all data with a non-zero DQ flag. I copied all values from a 7.5 build jwst...
but we ignore any non-zero flag meaning, and ignore the pixel in fringe-fitting
The refpix are non-zero DQ, btw...
I changed "pixel" to self.pbval and "group" to self.bpgroup. We may use these later,
so here they are but initially we just discriminate between good (zero value) and non-good.
"""
""" JWST Data Quality Flags
The definitions are documented in the JWST RTD:
https://jwst-pipeline.readthedocs.io/en/latest/jwst/references_general/references_general.html#data-quality-flags
"""
""" JWST Data Quality Flags
The definitions are documented in the JWST RTD:
https://jwst-pipeline.readthedocs.io/en/latest/jwst/references_general/references_general.html#data-quality-flags
Implementation
-------------
The flags are implemented as "bit flags": Each flag is assigned a bit position
in a byte, or multi-byte word, of memory. If that bit is set, the flag assigned
to that bit is interpreted as being set or active.
The data structure that stores bit flags is just the standard Python `int`,
which provides 32 bits. Bits of an integer are most easily referred to using
the formula `2**bit_number` where `bit_number` is the 0-index bit of interest.
2**n is gauche but not everyone loves 1<<n
Rachel uses:
from jwst.datamodels import dqflags
DO_NOT_USE = dqflags.pixel["DO_NOT_USE"]
dqmask = pxdq0 & DO_NOT_USE == DO_NOT_USE
pxdq = np.where(dqmask, pxdq0, 0)
"""
# Pixel-specific flags
self.bpval = {
'GOOD': 0, # No bits set, all is good
'DO_NOT_USE': 2**0, # Bad pixel. Do not use.
'SATURATED': 2**1, # Pixel saturated during exposure
'JUMP_DET': 2**2, # Jump detected during exposure
'DROPOUT': 2**3, # Data lost in transmission
'OUTLIER': 2**4, # Flagged by outlier detection. Was RESERVED_1
'RESERVED_2': 2**5, #
'RESERVED_3': 2**6, #
'RESERVED_4': 2**7, #
'UNRELIABLE_ERROR': 2**8, # Uncertainty exceeds quoted error
'NON_SCIENCE': 2**9, # Pixel not on science portion of detector
'DEAD': 2**10, # Dead pixel
'HOT': 2**11, # Hot pixel
'WARM': 2**12, # Warm pixel
'LOW_QE': 2**13, # Low quantum efficiency
'RC': 2**14, # RC pixel
'TELEGRAPH': 2**15, # Telegraph pixel
'NONLINEAR': 2**16, # Pixel highly nonlinear
'BAD_REF_PIXEL': 2**17, # Reference pixel cannot be used
'NO_FLAT_FIELD': 2**18, # Flat field cannot be measured
'NO_GAIN_VALUE': 2**19, # Gain cannot be measured
'NO_LIN_CORR': 2**20, # Linearity correction not available
'NO_SAT_CHECK': 2**21, # Saturation check not available
'UNRELIABLE_BIAS': 2**22, # Bias variance large
'UNRELIABLE_DARK': 2**23, # Dark variance large
'UNRELIABLE_SLOPE': 2**24, # Slope variance large (i.e., noisy pixel)
'UNRELIABLE_FLAT': 2**25, # Flat variance large
'OPEN': 2**26, # Open pixel (counts move to adjacent pixels)
'ADJ_OPEN': 2**27, # Adjacent to open pixel
'UNRELIABLE_RESET': 2**28, # Sensitive to reset anomaly
'MSA_FAILED_OPEN': 2**29, # Pixel sees light from failed-open shutter
'OTHER_BAD_PIXEL': 2**30, # A catch-all flag
'REFERENCE_PIXEL': 2**31, # Pixel is a reference pixel
}
# Group-specific flags. Once groups are combined, these flags
# are equivalent to the pixel-specific flags.
self.bpgroup = {
'GOOD': self.bpval['GOOD'],
'DO_NOT_USE': self.bpval['DO_NOT_USE'],
'SATURATED': self.bpval['SATURATED'],
'JUMP_DET': self.bpval['JUMP_DET'],
'DROPOUT': self.bpval['DROPOUT'],
}
def mast2sky(self):
"""
Rotate hole center coordinates:
Clockwise by the V3 position angle - V3I_YANG from north in degrees if VPARITY = -1
Counterclockwise by the V3 position angle - V3I_YANG from north in degrees if VPARITY = 1
Hole center coords are in the V2, V3 plane in meters.
Return rotated coordinates to be put in info4oif_dict.
implane2oifits.ObservablesFromText uses these to calculate baselines.
"""
pa = self.pa
mask_ctrs = copy.deepcopy(self.mask.ctrs)
# rotate by an extra 90 degrees (RAC 9/21)
# these coords are just used to orient output in OIFITS files
# NOT used for the fringe fitting itself
mask_ctrs = utils.rotate2dccw(mask_ctrs,np.pi/2.)
vpar = self.vparity # Relative sense of rotation between Ideal xy and V2V3
v3iyang = self.v3i_yang
rot_ang = pa - v3iyang # subject to change!
if pa != 0.0:
            # Using rotate2dccw, which rotates **vectors** CCW in a fixed coordinate system,
# so to rotate coord system CW instead of the vector, reverse sign of rotation angle. Double-check comment
if vpar == -1:
# rotate clockwise <rotate coords clockwise?>
ctrs_rot = utils.rotate2dccw(mask_ctrs, np.deg2rad(-rot_ang))
print(f'InstrumentData.mast2sky: Rotating mask hole centers clockwise by {rot_ang:.3f} degrees')
else:
# counterclockwise <rotate coords counterclockwise?>
ctrs_rot = utils.rotate2dccw(mask_ctrs, np.deg2rad(rot_ang))
                print(f'InstrumentData.mast2sky: Rotating mask hole centers counterclockwise by {rot_ang:.3f} degrees')
else:
ctrs_rot = mask_ctrs
return ctrs_rot
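# --- Illustrative sketches (not part of the class above; all values are hypothetical) ---
# 1) degrees_per_pixel() recovers per-axis pixel scales from a CD matrix by transforming
#    unit pixel steps to sky coordinates and taking their norms. A standalone version:
def _cd_matrix_scale_demo():
    import numpy as np
    cd = np.array([[1.82e-5, 1.0e-7],      # hypothetical CD matrix, degrees/pixel
                   [-1.0e-7, 1.82e-5]])
    dxsky = cd @ np.array([1.0, 0.0])     # unit step along detector axis 1, mapped to sky
    dysky = cd @ np.array([0.0, 1.0])     # unit step along detector axis 2
    return np.linalg.norm(dxsky), np.linalg.norm(dysky)  # deg/pixel along each axis

# 2) jwst_dqflags() documents DQ values as bit flags tested with 2**bit_number. A minimal
#    version of the DO_NOT_USE masking done in read_data():
def _dq_mask_demo():
    import numpy as np
    DO_NOT_USE = 2**0                                  # bit 0, as in bpval above
    dq = np.array([[0, 1], [2, 3]], dtype=np.uint32)  # hypothetical DQ extension values
    return (dq & DO_NOT_USE) == DO_NOT_USE             # True where bit 0 is set (values 1 and 3)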
|
[
"numpy.random.normal",
"nrm_analysis.misctools.utils.get_src_spec",
"nrm_analysis.misctools.mask_definitions.NRM_mask_definitions",
"nrm_analysis.misctools.utils.Affine2d",
"nrm_analysis.misctools.utils.get_filt_spec",
"sys.exit",
"nrm_analysis.misctools.utils.combine_src_filt",
"numpy.linalg.norm",
"nrm_analysis.misctools.utils.get_cw_beta",
"astropy.io.fits.open",
"numpy.array",
"numpy.zeros",
"numpy.deg2rad",
"nrm_analysis.misctools.utils.rotate2dccw",
"copy.deepcopy",
"nrm_analysis.misctools.utils.mas2rad",
"numpy.set_printoptions"
] |
[((6375, 6409), 'nrm_analysis.misctools.utils.get_cw_beta', 'utils.get_cw_beta', (['self.throughput'], {}), '(self.throughput)\n', (6392, 6409), False, 'from nrm_analysis.misctools import utils\n'), ((7371, 7469), 'nrm_analysis.misctools.mask_definitions.NRM_mask_definitions', 'NRM_mask_definitions', ([], {'maskname': 'self.arrname', 'chooseholes': 'chooseholes', 'holeshape': 'self.holeshape'}), '(maskname=self.arrname, chooseholes=chooseholes,\n holeshape=self.holeshape)\n', (7391, 7469), False, 'from nrm_analysis.misctools.mask_definitions import NRM_mask_definitions\n'), ((9243, 9273), 'nrm_analysis.misctools.utils.mas2rad', 'utils.mas2rad', (['self.pscale_mas'], {}), '(self.pscale_mas)\n', (9256, 9273), False, 'from nrm_analysis.misctools import utils\n'), ((13596, 13668), 'numpy.array', 'np.array', (['(cd11 * vec[0] + cd12 * vec[1], cd21 * vec[0] + cd22 * vec[1])'], {}), '((cd11 * vec[0] + cd12 * vec[1], cd21 * vec[0] + cd22 * vec[1]))\n', (13604, 13668), True, 'import numpy as np\n'), ((20287, 20317), 'nrm_analysis.misctools.utils.mas2rad', 'utils.mas2rad', (['self.pscale_mas'], {}), '(self.pscale_mas)\n', (20300, 20317), False, 'from nrm_analysis.misctools import utils\n'), ((20386, 20489), 'nrm_analysis.misctools.mask_definitions.NRM_mask_definitions', 'NRM_mask_definitions', ([], {'maskname': 'self.arrname', 'chooseholes': 'self.chooseholes', 'holeshape': 'self.holeshape'}), '(maskname=self.arrname, chooseholes=self.chooseholes,\n holeshape=self.holeshape)\n', (20406, 20489), False, 'from nrm_analysis.misctools.mask_definitions import NRM_mask_definitions\n'), ((21648, 21761), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'suppress': '(True)', 'linewidth': '(160)', 'formatter': "{'float': lambda x: '%10.5f,' % x}"}), "(precision=5, suppress=True, linewidth=160, formatter={\n 'float': lambda x: '%10.5f,' % x})\n", (21667, 21761), True, 'import numpy as np\n'), ((21982, 22012), 'numpy.zeros', 'np.zeros', (['self.mask.ctrs.shape'], {}), '(self.mask.ctrs.shape)\n', (21990, 22012), True, 'import numpy as np\n'), ((28017, 28046), 'copy.deepcopy', 'copy.deepcopy', (['self.mask.ctrs'], {}), '(self.mask.ctrs)\n', (28030, 28046), False, 'import copy\n'), ((28237, 28278), 'nrm_analysis.misctools.utils.rotate2dccw', 'utils.rotate2dccw', (['mask_ctrs', '(np.pi / 2.0)'], {}), '(mask_ctrs, np.pi / 2.0)\n', (28254, 28278), False, 'from nrm_analysis.misctools import utils\n'), ((5739, 5769), 'nrm_analysis.misctools.utils.get_filt_spec', 'utils.get_filt_spec', (['self.filt'], {}), '(self.filt)\n', (5758, 5769), False, 'from nrm_analysis.misctools import utils\n'), ((5793, 5816), 'nrm_analysis.misctools.utils.get_src_spec', 'utils.get_src_spec', (['src'], {}), '(src)\n', (5811, 5816), False, 'from nrm_analysis.misctools import utils\n'), ((6020, 6130), 'nrm_analysis.misctools.utils.combine_src_filt', 'utils.combine_src_filt', (['filt_spec', 'src_spec'], {'trim': '(0.01)', 'nlambda': 'nspecbin', 'verbose': 'self.verbose', 'plot': '(False)'}), '(filt_spec, src_spec, trim=0.01, nlambda=nspecbin,\n verbose=self.verbose, plot=False)\n', (6042, 6130), False, 'from nrm_analysis.misctools import utils\n'), ((8070, 8146), 'nrm_analysis.misctools.utils.Affine2d', 'utils.Affine2d', ([], {'mx': '(1.0)', 'my': '(1.0)', 'sx': '(0.0)', 'sy': '(0.0)', 'xo': '(0.0)', 'yo': '(0.0)', 'name': '"""Ideal"""'}), "(mx=1.0, my=1.0, sx=0.0, sy=0.0, xo=0.0, yo=0.0, name='Ideal')\n", (8084, 8146), False, 'from nrm_analysis.misctools import utils\n'), ((9844, 9901), 'astropy.io.fits.open', 
'fits.open', (['fn'], {'memmap': '(False)', 'do_not_scale_image_data': '(True)'}), '(fn, memmap=False, do_not_scale_image_data=True)\n', (9853, 9901), False, 'from astropy.io import fits\n'), ((10017, 10048), 'copy.deepcopy', 'copy.deepcopy', (['fitsfile[1].data'], {}), '(fitsfile[1].data)\n', (10030, 10048), False, 'import copy\n'), ((16749, 16769), 'numpy.array', 'np.array', (['(1.0, 0.0)'], {}), '((1.0, 0.0))\n', (16757, 16769), True, 'import numpy as np\n'), ((16806, 16826), 'numpy.array', 'np.array', (['(0.0, 1.0)'], {}), '((0.0, 1.0))\n', (16814, 16826), True, 'import numpy as np\n'), ((5659, 5677), 'numpy.array', 'np.array', (['bandpass'], {}), '(bandpass)\n', (5667, 5677), True, 'import numpy as np\n'), ((10099, 10145), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.noise', 'scidata.shape'], {}), '(0, self.noise, scidata.shape)\n', (10115, 10145), True, 'import numpy as np\n'), ((17128, 17156), 'numpy.linalg.norm', 'np.linalg.norm', (['dxsky'], {'ord': '(2)'}), '(dxsky, ord=2)\n', (17142, 17156), True, 'import numpy as np\n'), ((17158, 17186), 'numpy.linalg.norm', 'np.linalg.norm', (['dysky'], {'ord': '(2)'}), '(dysky, ord=2)\n', (17172, 17186), True, 'import numpy as np\n'), ((5587, 5605), 'numpy.array', 'np.array', (['(wt, wl)'], {}), '((wt, wl))\n', (5595, 5605), True, 'import numpy as np\n'), ((10940, 10980), 'numpy.zeros', 'np.zeros', (['scidata.shape'], {'dtype': 'np.uint32'}), '(scidata.shape, dtype=np.uint32)\n', (10948, 10980), True, 'import numpy as np\n'), ((11894, 11913), 'numpy.array', 'np.array', (['[scidata]'], {}), '([scidata])\n', (11902, 11913), True, 'import numpy as np\n'), ((11940, 11958), 'numpy.array', 'np.array', (['[dqmask]'], {}), '([dqmask])\n', (11948, 11958), True, 'import numpy as np\n'), ((11994, 12127), 'sys.exit', 'sys.exit', (['"""InstrumentData.NIRISS.read_data: invalid data dimensions for NIRISS. \nShould have dimensionality of 2 or 3."""'], {}), '(\n """InstrumentData.NIRISS.read_data: invalid data dimensions for NIRISS. \nShould have dimensionality of 2 or 3."""\n )\n', (12002, 12127), False, 'import os, sys, time\n'), ((28824, 28844), 'numpy.deg2rad', 'np.deg2rad', (['(-rot_ang)'], {}), '(-rot_ang)\n', (28834, 28844), True, 'import numpy as np\n'), ((29103, 29122), 'numpy.deg2rad', 'np.deg2rad', (['rot_ang'], {}), '(rot_ang)\n', (29113, 29122), True, 'import numpy as np\n'), ((10279, 10313), 'copy.deepcopy', 'copy.deepcopy', (["fitsfile['DQ'].data"], {}), "(fitsfile['DQ'].data)\n", (10292, 10313), False, 'import copy\n')]
|
# load .t7 file and save as .pkl data
import torchfile
import cv2
import numpy as np
import scipy.io as sio
import pickle
import time
data_path = './data/test_PC/'
# panoContext
#img_tr = torchfile.load('./data/panoContext_img_train.t7')
#print(img_tr.shape)
#lne_tr = torchfile.load('./data/panoContext_line_train.t7')
#print(lne_tr.shape)
#edg_tr = torchfile.load('./data/panoContext_edge_train.t7')
#print(edg_tr.shape)
#junc_tr = torchfile.load('./data/panoContext_cor_train.t7')
#print(junc_tr.shape)
#print('done')
#img_tr = torchfile.load('./data/panoContext_img_val.t7')
#print(img_tr.shape)
#lne_tr = torchfile.load('./data/panoContext_line_val.t7')
#print(lne_tr.shape)
#edg_tr = torchfile.load('./data/panoContext_edge_val.t7')
#print(edg_tr.shape)
#junc_tr = torchfile.load('./data/panoContext_cor_val.t7')
#print(junc_tr.shape)
#print('done')
img_tr = torchfile.load('./data/panoContext_img_test.t7')
print(img_tr.shape)
lne_tr = torchfile.load('./data/panoContext_line_test.t7')
print(lne_tr.shape)
edg_tr = torchfile.load('./data/panoContext_edge_test.t7')
print(edg_tr.shape)
junc_tr = torchfile.load('./data/panoContext_cor_test.t7')
print(junc_tr.shape)
print('done')
# stanford
#img_tr = torchfile.load('./data/stanford2d-3d_img_area_5.t7')
#print(img_tr.shape)
#lne_tr = torchfile.load('./data/stanford2d-3d_line_area_5.t7')
#print(lne_tr.shape)
#edg_tr = torchfile.load('./data/stanford2d-3d_edge_area_5.t7')
#print(edg_tr.shape)
#junc_tr = torchfile.load('./data/stanford2d-3d_cor_area_5.t7')
#print(junc_tr.shape)
#print('done')
gt_txt_path = './data/panoContext_testmap.txt'
gt_path = './data/layoutnet_dataset/test/label_cor/'
# Load data
namelist = []
id_num = []
with open(gt_txt_path, 'r') as f:
while(True):
line = f.readline().strip()
if not line:
break
id_num0 = line.split()
id_num0 = int(id_num0[1])
id_num.append(id_num0)
namelist.append(line)
id_num = np.array(id_num)
cnt = 0
for num in range(img_tr.shape[0]):
print(num)
image = img_tr[num]
image = np.transpose(image, (1,2,0))#*255.0
line = lne_tr[num]
line = np.transpose(line, (1,2,0))
edge = edg_tr[num]
edge = np.transpose(edge, (1,2,0))
junc = junc_tr[num]
junc = np.transpose(junc, (1,2,0))
# corner gt
idn = np.where(id_num == num)
idn = idn[0][0]
filename = namelist[idn]
filename = filename.split()
filename = gt_path+filename[0][:-4]+'.txt'#'.mat'
cnt+=1
cor = np.loadtxt(filename)
cor_sum = 0
for cor_num in range(cor.shape[0]):
cor_sum+=junc[int(cor[cor_num,1]),int(cor[cor_num,0]),0]
#print(cor_sum)
#time.sleep(0.5)
# pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'PC_'+"{:04d}".format(num)+'.pkl', "wb" ) )
pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'PCts_'+"{:04d}".format(num)+'.pkl', "wb" ) )
# pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'PCval_'+"{:04d}".format(num)+'.pkl', "wb" ) )
# pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'area5_'+"{:04d}".format(num)+'.pkl', "wb" ) )
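# --- Illustrative sketch (not part of the conversion loop above) ---
# A minimal helper for reading one of the pickled samples back and checking the stored
# arrays. It assumes the loop above has already written the corresponding .pkl file;
# the index and key names mirror the dict dumped above.
def _load_one_pkl(idx=0):
    with open(data_path + 'PCts_' + "{:04d}".format(idx) + '.pkl', 'rb') as f:
        sample = pickle.load(f)
    print(sample['filename'])
    print(sample['image'].shape, sample['line'].shape, sample['edge'].shape, sample['junc'].shape)
    return sample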
|
[
"numpy.where",
"torchfile.load",
"numpy.array",
"numpy.loadtxt",
"numpy.transpose"
] |
[((870, 918), 'torchfile.load', 'torchfile.load', (['"""./data/panoContext_img_test.t7"""'], {}), "('./data/panoContext_img_test.t7')\n", (884, 918), False, 'import torchfile\n'), ((948, 997), 'torchfile.load', 'torchfile.load', (['"""./data/panoContext_line_test.t7"""'], {}), "('./data/panoContext_line_test.t7')\n", (962, 997), False, 'import torchfile\n'), ((1027, 1076), 'torchfile.load', 'torchfile.load', (['"""./data/panoContext_edge_test.t7"""'], {}), "('./data/panoContext_edge_test.t7')\n", (1041, 1076), False, 'import torchfile\n'), ((1107, 1155), 'torchfile.load', 'torchfile.load', (['"""./data/panoContext_cor_test.t7"""'], {}), "('./data/panoContext_cor_test.t7')\n", (1121, 1155), False, 'import torchfile\n'), ((1959, 1975), 'numpy.array', 'np.array', (['id_num'], {}), '(id_num)\n', (1967, 1975), True, 'import numpy as np\n'), ((2079, 2109), 'numpy.transpose', 'np.transpose', (['image', '(1, 2, 0)'], {}), '(image, (1, 2, 0))\n', (2091, 2109), True, 'import numpy as np\n'), ((2149, 2178), 'numpy.transpose', 'np.transpose', (['line', '(1, 2, 0)'], {}), '(line, (1, 2, 0))\n', (2161, 2178), True, 'import numpy as np\n'), ((2211, 2240), 'numpy.transpose', 'np.transpose', (['edge', '(1, 2, 0)'], {}), '(edge, (1, 2, 0))\n', (2223, 2240), True, 'import numpy as np\n'), ((2274, 2303), 'numpy.transpose', 'np.transpose', (['junc', '(1, 2, 0)'], {}), '(junc, (1, 2, 0))\n', (2286, 2303), True, 'import numpy as np\n'), ((2328, 2351), 'numpy.where', 'np.where', (['(id_num == num)'], {}), '(id_num == num)\n', (2336, 2351), True, 'import numpy as np\n'), ((2511, 2531), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (2521, 2531), True, 'import numpy as np\n')]
|
#
# BSD 3-Clause License
#
# Copyright (c) 2022 University of Wisconsin - Madison
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.#
import rclpy
from rclpy.node import Node
from art_msgs.msg import VehicleState
from art_perception_msgs.msg import ObjectArray, Object
from sensor_msgs.msg import Image
from ament_index_python.packages import get_package_share_directory
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Path
from rclpy.qos import QoSHistoryPolicy
from rclpy.qos import QoSProfile
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.interpolate import interp1d,splev,splprep
import os
import json
class PathPlanningNode(Node):
def __init__(self):
super().__init__('path_planning_node')
#update frequency of this node
self.freq = 10.0
# READ IN SHARE DIRECTORY LOCATION
package_share_directory = get_package_share_directory('path_planning')
# READ IN PARAMETERS
self.declare_parameter('vis', False)
self.vis = self.get_parameter('vis').get_parameter_value().bool_value
self.declare_parameter('lookahead', 2.0)
self.lookahead = self.get_parameter('lookahead').get_parameter_value().double_value
#data that will be used by this class
self.state = VehicleState()
self.path = Path()
# self.objects = ObjectArray()
self.green_cones = np.array([])
self.red_cones = np.array([])
self.go = False
#subscribers
qos_profile = QoSProfile(depth=1)
qos_profile.history = QoSHistoryPolicy.KEEP_LAST
self.sub_state = self.create_subscription(VehicleState, '~/input/vehicle_state', self.state_callback, qos_profile)
self.sub_objects = self.create_subscription(ObjectArray, '~/input/objects', self.objects_callback, qos_profile)
if self.vis:
matplotlib.use("TKAgg")
self.fig, self.ax = plt.subplots()
plt.title("Path Planning")
self.patches = []
self.ax.set_xlim((-1,11))
self.ax.set_ylim((-6,6))
self.left_boundary = None
self.right_boundary = None
#publishers
self.pub_path = self.create_publisher(Path, '~/output/path', 10)
self.timer = self.create_timer(1/self.freq, self.pub_callback)
#function to process data this class subscribes to
def state_callback(self, msg):
# self.get_logger().info("Received '%s'" % msg)
self.state = msg
def objects_callback(self, msg):
# self.get_logger().info("Received '%s'" % msg)
# self.objects = msg
self.go = True
self.green_cones = []
self.red_cones = []
# self.get_logger().info("Detected cones: %s" % (str(len(msg.objects))))
for obj in msg.objects:
pos = [obj.pose.position.x,obj.pose.position.y,obj.pose.position.z]
id = obj.classification.classification
#calculate position from camera parameters, rect, and distance
if(id == 1):
self.red_cones.append(pos)
elif(id == 2):
self.green_cones.append(pos)
else:
self.get_logger().info("Object with unknown label detected {}".format(id))
def order_cones(self,cones,start):
ordered_cones = [start]
ego = start
for i in range(len(cones)):
dist_2 = np.sum((cones - ego)**2, axis=1)
id = np.argmin(dist_2)
ordered_cones.append(cones[id,:])
cones = np.delete(cones,id,axis=0)
ordered_cones = np.asarray(ordered_cones)
total_dist = 0
for i in range(len(ordered_cones)-1):
total_dist += np.linalg.norm(ordered_cones[i,:] - ordered_cones[i+1,:])
return ordered_cones, total_dist
def plan_path(self):
self.red_cones = np.asarray(self.red_cones)
self.green_cones = np.asarray(self.green_cones)
if(len(self.red_cones) == 0):
self.red_cones = np.asarray([1,-1.5,0]) #phantom cone to right if none are seen
if(len(self.green_cones) == 0):
            self.green_cones = np.asarray([1,1.5,0]) #phantom cone to left if none are seen
self.red_cones = self.red_cones.reshape((-1,3))
self.green_cones = self.green_cones.reshape((-1,3))
left, l_dist = self.order_cones(self.green_cones,np.array([0.0,.5,0]))
right, r_dist = self.order_cones(self.red_cones,np.array([0.0,-.5,0]))
max_dist = 4
left_spline,u = splprep(left[:,0:2].transpose(),k=max(1,min(int(len(left)/2),5)))
left_samples = np.linspace(0, max_dist / l_dist, 100)
b_left = splev(left_samples,left_spline)
right_spline,u = splprep(right[:,0:2].transpose(),k=max(1,min(int(len(right)/2),5)))
right_samples = np.linspace(0, max_dist / r_dist, 100)
b_right = splev(right_samples,right_spline)
center_line = np.array([(b_left[0] + b_right[0]) / 2, (b_left[1] + b_right[1]) / 2])
# center_line = center_line[:,min(len(b_right[0]),len(b_left[0]))]
distances = np.sum((center_line)**2, axis=0)
id = np.argmin(np.abs(distances - self.lookahead**2))
target_pt = center_line[:,id]
# self.get_logger().info("B Left Spline: %s" % (str(len(b_left))))
if(self.vis):
[p.remove() for p in self.patches]
self.patches.clear()
if(self.left_boundary == None):
self.left_boundary, = self.ax.plot(b_left[0],b_left[1],c='g')
else:
self.left_boundary.set_data(b_left[0],b_left[1])
if(self.right_boundary == None):
self.right_boundary, = self.ax.plot(b_right[0],b_right[1],c='r')
else:
self.right_boundary.set_data(b_right[0],b_right[1])
for pos in right:
circ = patches.Circle(pos[0:2],radius=.1,color='r')
self.ax.add_patch(circ)
self.patches.append(circ)
for pos in left:
circ = patches.Circle(pos[0:2],radius=.1,color='g')
self.ax.add_patch(circ)
self.patches.append(circ)
circ = patches.Circle(target_pt,radius=.1,color='b')
self.ax.add_patch(circ)
self.patches.append(circ)
return target_pt
#callback to run a loop and publish data this class generates
def pub_callback(self):
if(not self.go):
return
msg = Path()
target_pt = self.plan_path()
#calculate path from current cone locations
if(self.vis):
plt.draw()
plt.pause(0.0001)
pt = PoseStamped()
pt.pose.position.x = target_pt[0]
pt.pose.position.y = target_pt[1]
msg.poses.append(pt)
self.pub_path.publish(msg)
def main(args=None):
# print("=== Starting Path Planning Node ===")
rclpy.init(args=args)
planner = PathPlanningNode()
rclpy.spin(planner)
planner.destroy_node()
rclpy.shutdown()
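# --- Illustrative sketch (not used by the node above; cone positions are hypothetical) ---
# order_cones() greedily picks the detection closest to the start point, removes it, and
# repeats; this standalone numpy version shows the same idea without ROS. As in the method
# above, distances are measured from the fixed start point.
def _order_cones_demo():
    import numpy as np
    cones = np.array([[3.0, 0.4], [1.0, 0.6], [2.0, 0.5]])  # hypothetical detections (x, y)
    start = np.array([0.0, 0.5])
    ordered = [start]
    while len(cones) > 0:
        d2 = np.sum((cones - start)**2, axis=1)   # squared distance of each cone from start
        i = np.argmin(d2)
        ordered.append(cones[i])
        cones = np.delete(cones, i, axis=0)
    return np.asarray(ordered)                    # start, then cones by distance from it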
if __name__ == '__main__':
main()
|
[
"numpy.array",
"numpy.linalg.norm",
"rclpy.init",
"numpy.delete",
"numpy.asarray",
"ament_index_python.packages.get_package_share_directory",
"numpy.linspace",
"scipy.interpolate.splev",
"numpy.argmin",
"rclpy.shutdown",
"matplotlib.patches.Circle",
"numpy.abs",
"matplotlib.use",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.title",
"matplotlib.pyplot.draw",
"rclpy.qos.QoSProfile",
"rclpy.spin",
"nav_msgs.msg.Path",
"art_msgs.msg.VehicleState",
"numpy.sum",
"geometry_msgs.msg.PoseStamped",
"matplotlib.pyplot.subplots"
] |
[((8571, 8592), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (8581, 8592), False, 'import rclpy\n'), ((8630, 8649), 'rclpy.spin', 'rclpy.spin', (['planner'], {}), '(planner)\n', (8640, 8649), False, 'import rclpy\n'), ((8681, 8697), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (8695, 8697), False, 'import rclpy\n'), ((2396, 2440), 'ament_index_python.packages.get_package_share_directory', 'get_package_share_directory', (['"""path_planning"""'], {}), "('path_planning')\n", (2423, 2440), False, 'from ament_index_python.packages import get_package_share_directory\n'), ((2812, 2826), 'art_msgs.msg.VehicleState', 'VehicleState', ([], {}), '()\n', (2824, 2826), False, 'from art_msgs.msg import VehicleState\n'), ((2847, 2853), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (2851, 2853), False, 'from nav_msgs.msg import Path\n'), ((2921, 2933), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2929, 2933), True, 'import numpy as np\n'), ((2959, 2971), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2967, 2971), True, 'import numpy as np\n'), ((3041, 3060), 'rclpy.qos.QoSProfile', 'QoSProfile', ([], {'depth': '(1)'}), '(depth=1)\n', (3051, 3060), False, 'from rclpy.qos import QoSProfile\n'), ((5170, 5195), 'numpy.asarray', 'np.asarray', (['ordered_cones'], {}), '(ordered_cones)\n', (5180, 5195), True, 'import numpy as np\n'), ((5452, 5478), 'numpy.asarray', 'np.asarray', (['self.red_cones'], {}), '(self.red_cones)\n', (5462, 5478), True, 'import numpy as np\n'), ((5506, 5534), 'numpy.asarray', 'np.asarray', (['self.green_cones'], {}), '(self.green_cones)\n', (5516, 5534), True, 'import numpy as np\n'), ((6219, 6257), 'numpy.linspace', 'np.linspace', (['(0)', '(max_dist / l_dist)', '(100)'], {}), '(0, max_dist / l_dist, 100)\n', (6230, 6257), True, 'import numpy as np\n'), ((6275, 6307), 'scipy.interpolate.splev', 'splev', (['left_samples', 'left_spline'], {}), '(left_samples, left_spline)\n', (6280, 6307), False, 'from scipy.interpolate import interp1d, splev, splprep\n'), ((6425, 6463), 'numpy.linspace', 'np.linspace', (['(0)', '(max_dist / r_dist)', '(100)'], {}), '(0, max_dist / r_dist, 100)\n', (6436, 6463), True, 'import numpy as np\n'), ((6482, 6516), 'scipy.interpolate.splev', 'splev', (['right_samples', 'right_spline'], {}), '(right_samples, right_spline)\n', (6487, 6516), False, 'from scipy.interpolate import interp1d, splev, splprep\n'), ((6539, 6609), 'numpy.array', 'np.array', (['[(b_left[0] + b_right[0]) / 2, (b_left[1] + b_right[1]) / 2]'], {}), '([(b_left[0] + b_right[0]) / 2, (b_left[1] + b_right[1]) / 2])\n', (6547, 6609), True, 'import numpy as np\n'), ((6714, 6746), 'numpy.sum', 'np.sum', (['(center_line ** 2)'], {'axis': '(0)'}), '(center_line ** 2, axis=0)\n', (6720, 6746), True, 'import numpy as np\n'), ((8133, 8139), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (8137, 8139), False, 'from nav_msgs.msg import Path\n'), ((8332, 8345), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (8343, 8345), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3395, 3418), 'matplotlib.use', 'matplotlib.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (3409, 3418), False, 'import matplotlib\n'), ((3451, 3465), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3463, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3504), 'matplotlib.pyplot.title', 'plt.title', (['"""Path Planning"""'], {}), "('Path Planning')\n", (3487, 3504), True, 'import matplotlib.pyplot as plt\n'), ((4976, 5010), 'numpy.sum', 'np.sum', 
(['((cones - ego) ** 2)'], {'axis': '(1)'}), '((cones - ego) ** 2, axis=1)\n', (4982, 5010), True, 'import numpy as np\n'), ((5026, 5043), 'numpy.argmin', 'np.argmin', (['dist_2'], {}), '(dist_2)\n', (5035, 5043), True, 'import numpy as np\n'), ((5110, 5138), 'numpy.delete', 'np.delete', (['cones', 'id'], {'axis': '(0)'}), '(cones, id, axis=0)\n', (5119, 5138), True, 'import numpy as np\n'), ((5291, 5352), 'numpy.linalg.norm', 'np.linalg.norm', (['(ordered_cones[i, :] - ordered_cones[i + 1, :])'], {}), '(ordered_cones[i, :] - ordered_cones[i + 1, :])\n', (5305, 5352), True, 'import numpy as np\n'), ((5603, 5627), 'numpy.asarray', 'np.asarray', (['[1, -1.5, 0]'], {}), '([1, -1.5, 0])\n', (5613, 5627), True, 'import numpy as np\n'), ((5737, 5760), 'numpy.asarray', 'np.asarray', (['[1, 1.5, 0]'], {}), '([1, 1.5, 0])\n', (5747, 5760), True, 'import numpy as np\n'), ((5974, 5997), 'numpy.array', 'np.array', (['[0.0, 0.5, 0]'], {}), '([0.0, 0.5, 0])\n', (5982, 5997), True, 'import numpy as np\n'), ((6052, 6076), 'numpy.array', 'np.array', (['[0.0, -0.5, 0]'], {}), '([0.0, -0.5, 0])\n', (6060, 6076), True, 'import numpy as np\n'), ((6770, 6809), 'numpy.abs', 'np.abs', (['(distances - self.lookahead ** 2)'], {}), '(distances - self.lookahead ** 2)\n', (6776, 6809), True, 'import numpy as np\n'), ((7834, 7882), 'matplotlib.patches.Circle', 'patches.Circle', (['target_pt'], {'radius': '(0.1)', 'color': '"""b"""'}), "(target_pt, radius=0.1, color='b')\n", (7848, 7882), True, 'import matplotlib.patches as patches\n'), ((8277, 8287), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (8285, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8300, 8317), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (8309, 8317), True, 'import matplotlib.pyplot as plt\n'), ((7507, 7554), 'matplotlib.patches.Circle', 'patches.Circle', (['pos[0:2]'], {'radius': '(0.1)', 'color': '"""r"""'}), "(pos[0:2], radius=0.1, color='r')\n", (7521, 7554), True, 'import matplotlib.patches as patches\n'), ((7687, 7734), 'matplotlib.patches.Circle', 'patches.Circle', (['pos[0:2]'], {'radius': '(0.1)', 'color': '"""g"""'}), "(pos[0:2], radius=0.1, color='g')\n", (7701, 7734), True, 'import matplotlib.patches as patches\n')]
|
from __future__ import absolute_import
import os.path
import argparse
import logging
import json
from six import iteritems
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from keras.models import load_model
from tensorflow.python.client import device_lib
from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity
from features import catboost_features
from preprocessing import clean_text, convert_text2seq, split_data, parse_seq
from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions
from train import train
from metrics import get_metrics, print_metrics
def get_kwargs(kwargs):
parser = argparse.ArgumentParser(description='-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]')
parser.add_argument('-f', '--train', dest='train', action='store', help='/path/to/trian_file', type=str)
parser.add_argument('-t', '--test', dest='test', action='store', help='/path/to/test_file', type=str)
parser.add_argument('-o', '--output', dest='output', action='store', help='/path/to/output_file', type=str)
parser.add_argument('-we', '--word_embeds', dest='word_embeds', action='store', help='/path/to/embeds_file', type=str)
parser.add_argument('-ce', '--char_embeds', dest='char_embeds', action='store', help='/path/to/embeds_file', type=str)
parser.add_argument('-c','--config', dest='config', action='store', help='/path/to/config.json', type=str)
parser.add_argument('-l', '--logger', dest='logger', action='store', help='/path/to/log_file', type=str, default=None)
parser.add_argument('--mode', dest='mode', action='store', help='preprocess / train / validate / all', type=str, default='all')
parser.add_argument('--max-words', dest='max_words', action='store', type=int, default=300000)
parser.add_argument('--use-only-exists-words', dest='use_only_exists_words', action='store_true')
parser.add_argument('--swear-words', dest='swear_words', action='store', help='/path/to/swear_words_file', type=str, default=None)
parser.add_argument('--wrong-words', dest='wrong_words', action='store', help='/path/to/wrong_words_file', type=str, default=None)
parser.add_argument('--format-embeds', dest='format_embeds', action='store', help='file | json | pickle | binary', type=str, default='raw')
parser.add_argument('--output-dir', dest='output_dir', action='store', help='/path/to/dir', type=str, default='.')
parser.add_argument('--norm-prob', dest='norm_prob', action='store_true')
parser.add_argument('--norm-prob-koef', dest='norm_prob_koef', action='store', type=float, default=1)
parser.add_argument('--gpus', dest='gpus', action='store', help='count GPUs', type=int, default=0)
for key, value in iteritems(parser.parse_args().__dict__):
kwargs[key] = value
def main(*kargs, **kwargs):
get_kwargs(kwargs)
train_fname = kwargs['train']
test_fname = kwargs['test']
result_fname = kwargs['output']
word_embeds_fname = kwargs['word_embeds']
char_embeds_fname = kwargs['char_embeds']
logger_fname = kwargs['logger']
mode = kwargs['mode']
max_words = kwargs['max_words']
use_only_exists_words = kwargs['use_only_exists_words']
swear_words_fname = kwargs['swear_words']
wrong_words_fname = kwargs['wrong_words']
embeds_format = kwargs['format_embeds']
config = kwargs['config']
output_dir = kwargs['output_dir']
norm_prob = kwargs['norm_prob']
norm_prob_koef = kwargs['norm_prob_koef']
gpus = kwargs['gpus']
seq_col_name_words = 'comment_seq_lw_use_exist{}_{}k'.format(int(use_only_exists_words), int(max_words/1000))
seq_col_name_ll3 = 'comment_seq_ll3_use_exist{}_{}k'.format(int(use_only_exists_words), int(max_words/1000))
model_file = {
'dense': os.path.join(output_dir, 'dense.h5'),
'cnn': os.path.join(output_dir, 'cnn.h5'),
'lstm': os.path.join(output_dir, 'lstm.h5'),
'lr': os.path.join(output_dir, '{}_logreg.bin'),
'catboost': os.path.join(output_dir, '{}_catboost.bin')
}
# ====Create logger====
logger = Logger(logging.getLogger(), logger_fname)
# ====Detect GPUs====
logger.debug(device_lib.list_local_devices())
# ====Load data====
logger.info('Loading data...')
train_df = load_data(train_fname)
test_df = load_data(test_fname)
target_labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
num_classes = len(target_labels)
# ====Load additional data====
logger.info('Loading additional data...')
swear_words = load_data(swear_words_fname, func=lambda x: set(x.T[0]), header=None)
wrong_words_dict = load_data(wrong_words_fname, func=lambda x: {val[0] : val[1] for val in x})
# ====Load word vectors====
logger.info('Loading embeddings...')
embeds_word = Embeds().load(word_embeds_fname, embeds_format)
embeds_ll3 = Embeds().load(char_embeds_fname, embeds_format)
# ====Clean texts====
if mode in ('preprocess', 'all'):
logger.info('Cleaning text...')
train_df['comment_text_clear'] = clean_text(train_df['comment_text'], wrong_words_dict, autocorrect=True)
test_df['comment_text_clear'] = clean_text(test_df['comment_text'], wrong_words_dict, autocorrect=True)
train_df.to_csv(os.path.join(output_dir, 'train_clear.csv'), index=False)
test_df.to_csv(os.path.join(output_dir, 'test_clear.csv'), index=False)
# ====Calculate maximum seq length====
logger.info('Calc text length...')
train_df.fillna('__NA__', inplace=True)
test_df.fillna('__NA__', inplace=True)
train_df['text_len'] = train_df['comment_text_clear'].apply(lambda words: len(words.split()))
test_df['text_len'] = test_df['comment_text_clear'].apply(lambda words: len(words.split()))
max_seq_len = np.round(train_df['text_len'].mean() + 3*train_df['text_len'].std()).astype(int)
max_char_seq_len = 2000 # empirical
logger.debug('Max seq length = {}'.format(max_seq_len))
# ====Prepare data to NN====
logger.info('Converting texts to sequences...')
if mode in ('preprocess', 'all'):
train_df[seq_col_name_words], test_df[seq_col_name_words], word_index, train_df[seq_col_name_ll3], test_df[seq_col_name_ll3], ll3_index = convert_text2seq(
train_df['comment_text_clear'].tolist(),
test_df['comment_text_clear'].tolist(),
max_words,
max_seq_len,
max_char_seq_len,
embeds_word,
lower=True,
oov_token='__<PASSWORD>',
uniq=False,
use_only_exists_words=use_only_exists_words)
logger.debug('Dictionary size use_exist{} = {}'.format(int(use_only_exists_words), len(word_index)))
logger.debug('Char dict size use_exist{} = {}'.format(int(use_only_exists_words), len(ll3_index)))
logger.info('Preparing embedding matrix...')
words_not_found = embeds_word.set_matrix(max_words, word_index)
embeds_ll3.matrix = np.random.normal(size=(len(ll3_index), embeds_word.shape[1]))
embeds_ll3.word_index = ll3_index
embeds_ll3.word_index_reverse = {val: key for key, val in ll3_index.items()}
embeds_ll3.shape = np.shape(embeds_ll3.matrix)
embeds_word.save(os.path.join(output_dir, 'wiki.embeds_lw.{}k'.format(int(max_words/1000))))
embeds_ll3.save(os.path.join(output_dir, 'wiki.embeds_ll3.{}k'.format(int(max_words/1000))))
# ====Get text vector====
pooling = {
'max': {'func': np.max},
'avg': {'func': np.sum, 'normalize': True},
'sum': {'func': np.sum, 'normalize': False}
}
for p in ['max', 'avg', 'sum']:
train_df['comment_vec_{}'.format(p)] = train_df[seq_col_name_words].apply(lambda x: embed_aggregate(x, embeds_word, **pooling[p]))
test_df['comment_vec_{}'.format(p)] = test_df[seq_col_name_words].apply(lambda x: embed_aggregate(x, embeds_word, **pooling[p]))
train_df.to_csv(os.path.join(output_dir, 'train_clear1.csv'), index=False)
test_df.to_csv(os.path.join(output_dir, 'test_clear1.csv'), index=False)
else:
for col in train_df.columns:
if col.startswith('comment_seq'):
train_df[col] = train_df[col].apply(lambda x: parse_seq(x, int))
test_df[col] = test_df[col].apply(lambda x: parse_seq(x, int))
elif col.startswith('comment_vec'):
train_df[col] = train_df[col].apply(lambda x: parse_seq(x, float))
test_df[col] = test_df[col].apply(lambda x: parse_seq(x, float))
logger.debug('Embedding matrix shape = {}'.format(embeds_word.shape))
logger.debug('Number of null word embeddings = {}'.format(np.sum(np.sum(embeds_word.matrix, axis=1) == 0)))
# ====END OF `PREPROCESS`====
if mode == 'preprocess':
return True
# ====Train/test split data====
x = np.array(train_df[seq_col_name_words].values.tolist())
y = np.array(train_df[target_labels].values.tolist())
x_train_nn, x_val_nn, y_train, y_val, train_idxs, val_idxs = split_data(x, y, test_size=0.2, shuffle=True, random_state=42)
x_test_nn = np.array(test_df[seq_col_name_words].values.tolist())
x_char = np.array(train_df[seq_col_name_ll3].values.tolist())
x_char_train_nn = x_char[train_idxs]
x_char_val_nn = x_char[val_idxs]
x_char_test_nn = np.array(test_df[seq_col_name_ll3].values.tolist())
x_train_tfidf = train_df['comment_text_clear'].values[train_idxs]
x_val_tfidf = train_df['comment_text_clear'].values[val_idxs]
x_test_tfidf = test_df['comment_text_clear'].values
catboost_cols = catboost_features(train_df, test_df)
x_train_cb = train_df[catboost_cols].values[train_idxs].T
x_val_cb = train_df[catboost_cols].values[val_idxs].T
x_test_cb = test_df[catboost_cols].values.T
# ====Train models====
nn_models = {
'cnn': cnn,
'dense': dense,
'rnn': rnn
}
params = Params(config)
metrics = {}
predictions = {}
for param in params['models']:
for model_label, model_params in param.items():
if model_params.get('common', {}).get('warm_start', False) and os.path.exists(model_params.get('common', {}).get('model_file', '')):
logger.info('{} warm starting...'.format(model_label))
model = load_model(model_params.get('common', {}).get('model_file', None))
elif model_label in nn_models:
model = nn_models[model_label](
embeds_word.matrix,
embeds_ll3.matrix,
num_classes,
max_seq_len,
max_char_seq_len,
gpus=gpus,
**model_params['init'])
model_alias = model_params.get('common', {}).get('alias', None)
if model_alias is None or not model_alias:
model_alias = '{}_{}'.format(model_label, i)
logger.info("training {} ...".format(model_label))
if model_label == 'dense':
x_tr = [x_train_nn, x_char_train_nn]
x_val = [x_val_nn, x_char_val_nn]
x_test = [x_test_nn, x_char_test_nn]
else:
x_tr = x_train_nn
x_val = x_val_nn
x_test = x_test_nn
hist = train(x_tr,
y_train,
model,
logger=logger,
**model_params['train'])
predictions[model_alias] = model.predict(x_val)
save_predictions(test_df, model.predict(x_test), target_labels, model_alias)
elif model_label == 'tfidf':
model = TFIDF(target_labels, **model_params['init'])
model.fit(x_train_tfidf, y_train, **model_params['train'])
predictions[model_alias] = model.predict(x_val_tfidf)
save_predictions(test_df, model.predict(x_test_tfidf), target_labels, model_alias)
elif model_label == 'catboost':
model = CatBoost(target_labels, **model_params['init'])
model.fit(x_train_cb, y_train, eval_set=(x_val_cb, y_val), use_best_model=True)
predictions[model_alias] = model.predict_proba(x_val_cb)
save_predictions(test_df, model.predict_proba(x_test_cb), target_labels, model_alias)
metrics[model_alias] = get_metrics(y_val, predictions[model_alias], target_labels)
logger.debug('{} params:\n{}'.format(model_alias, model_params))
logger.debug('{} metrics:\n{}'.format(model_alias, print_metrics(metrics[model_alias])))
model.save(os.path.join(output_dir, model_params['common']['model_file']))
logger.info('Saving metrics...')
with open(os.path.join(output_dir, 'metrics.json'), 'w') as f:
f.write(json.dumps(metrics))
# ====END OF `VALIDATE`====
if mode == 'validate':
return True
# Meta catboost
logger.info('training catboost as metamodel...')
x_meta = [predictions[model_alias] for model_alias in sorted(predictions.keys())]
    x_meta = np.array(x_meta).T
x_train_meta, x_val_meta, y_train_meta, y_val_meta = train_test_split(x_meta, y_val, test_size=0.20, random_state=42)
meta_model = CatBoost(target_labels,
loss_function='Logloss',
iterations=1000,
depth=6,
learning_rate=0.03,
rsm=1
)
meta_model.fit(x_train_meta, y_train_meta, eval_set=(x_val_meta, y_val_meta), use_best_model=True)
y_hat_meta = meta_model.predict_proba(x_val_meta)
metrics_meta = get_metrics(y_val_meta, y_hat_meta, target_labels)
#model.save(os.path.join(output_dir, 'meta.catboost')
logger.debug('{} metrics:\n{}'.format('META', print_metrics(metrics_meta)))
# ====Predict====
logger.info('Applying models...')
test_cols = []
for model_alias in sorted(predictions.keys()):
for label in target_labels:
test_cols.append('{}_{}'.format(model_alias, label))
x_test = test_df[test_cols].values
preds = meta_model.predict_proba(x_test)
for i, label in enumerate(target_labels):
test_df[label] = preds[:, i]
# ====Normalize probabilities====
if norm_prob:
for label in target_labels:
test_df[label] = norm_prob_koef * test_df[label]
# ====Save results====
logger.info('Saving results...')
test_df[['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].to_csv(result_fname, index=False, header=True)
test_df.to_csv('{}_tmp'.format(result_fname), index=False, header=True)
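# --- Illustrative sketch (not part of the pipeline above) ---
# The pooling dict in main() turns a token-id sequence into a single comment vector by
# aggregating word embeddings with max / normalized sum / plain sum. This is a minimal
# numpy version of that aggregation; embed_aggregate's real signature and behavior are
# an assumption, and the embedding values below are hypothetical.
def _embed_aggregate_demo():
    import numpy as np
    emb_matrix = np.array([[0.0, 0.0],      # row 0: padding / OOV
                           [1.0, 2.0],      # hypothetical word vectors
                           [3.0, 0.0]])
    seq = [1, 2, 2]                          # token ids of one comment
    vecs = emb_matrix[seq]
    return {
        'max': vecs.max(axis=0),             # element-wise max pooling
        'sum': vecs.sum(axis=0),             # plain sum
        'avg': vecs.sum(axis=0) / len(seq),  # sum normalized by sequence length
    }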
if __name__=='__main__':
main()
|
[
"preprocessing.split_data",
"logging.getLogger",
"tensorflow.python.client.device_lib.list_local_devices",
"utils.load_data",
"numpy.array",
"models.CatBoost",
"preprocessing.clean_text",
"argparse.ArgumentParser",
"json.dumps",
"metrics.get_metrics",
"features.catboost_features",
"utils.embed_aggregate",
"sklearn.model_selection.train_test_split",
"models.TFIDF",
"utils.Params",
"numpy.shape",
"train.train",
"metrics.print_metrics",
"preprocessing.parse_seq",
"numpy.sum",
"utils.Embeds"
] |
[((697, 903), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]"""'}), "(description=\n '-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]'\n )\n", (720, 903), False, 'import argparse\n'), ((4432, 4454), 'utils.load_data', 'load_data', (['train_fname'], {}), '(train_fname)\n', (4441, 4454), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((4469, 4490), 'utils.load_data', 'load_data', (['test_fname'], {}), '(test_fname)\n', (4478, 4490), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((4817, 4891), 'utils.load_data', 'load_data', (['wrong_words_fname'], {'func': '(lambda x: {val[0]: val[1] for val in x})'}), '(wrong_words_fname, func=lambda x: {val[0]: val[1] for val in x})\n', (4826, 4891), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((9974, 10036), 'preprocessing.split_data', 'split_data', (['x', 'y'], {'test_size': '(0.2)', 'shuffle': '(True)', 'random_state': '(42)'}), '(x, y, test_size=0.2, shuffle=True, random_state=42)\n', (9984, 10036), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((10539, 10575), 'features.catboost_features', 'catboost_features', (['train_df', 'test_df'], {}), '(train_df, test_df)\n', (10556, 10575), False, 'from features import catboost_features\n'), ((10873, 10887), 'utils.Params', 'Params', (['config'], {}), '(config)\n', (10879, 10887), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((14293, 14356), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_meta', 'y_val'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(x_meta, y_val, test_size=0.2, random_state=42)\n', (14309, 14356), False, 'from sklearn.model_selection import train_test_split\n'), ((14375, 14480), 'models.CatBoost', 'CatBoost', (['target_labels'], {'loss_function': '"""Logloss"""', 'iterations': '(1000)', 'depth': '(6)', 'learning_rate': '(0.03)', 'rsm': '(1)'}), "(target_labels, loss_function='Logloss', iterations=1000, depth=6,\n learning_rate=0.03, rsm=1)\n", (14383, 14480), False, 'from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions\n'), ((14783, 14833), 'metrics.get_metrics', 'get_metrics', (['y_val_meta', 'y_hat_meta', 'target_labels'], {}), '(y_val_meta, y_hat_meta, target_labels)\n', (14794, 14833), False, 'from metrics import get_metrics, print_metrics\n'), ((4245, 4264), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4262, 4264), False, 'import logging\n'), ((4324, 4355), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (4353, 4355), False, 'from tensorflow.python.client import device_lib\n'), ((5244, 5316), 'preprocessing.clean_text', 'clean_text', (["train_df['comment_text']", 'wrong_words_dict'], {'autocorrect': '(True)'}), "(train_df['comment_text'], wrong_words_dict, autocorrect=True)\n", (5254, 5316), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((5357, 5428), 'preprocessing.clean_text', 'clean_text', (["test_df['comment_text']", 'wrong_words_dict'], {'autocorrect': '(True)'}), "(test_df['comment_text'], 
wrong_words_dict, autocorrect=True)\n", (5367, 5428), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((8070, 8097), 'numpy.shape', 'np.shape', (['embeds_ll3.matrix'], {}), '(embeds_ll3.matrix)\n', (8078, 8097), True, 'import numpy as np\n'), ((14210, 14232), 'numpy.array', 'np.array', (['x_train_meta'], {}), '(x_train_meta)\n', (14218, 14232), True, 'import numpy as np\n'), ((4985, 4993), 'utils.Embeds', 'Embeds', ([], {}), '()\n', (4991, 4993), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((5050, 5058), 'utils.Embeds', 'Embeds', ([], {}), '()\n', (5056, 5058), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((13488, 13547), 'metrics.get_metrics', 'get_metrics', (['y_val', 'predictions[model_alias]', 'target_labels'], {}), '(y_val, predictions[model_alias], target_labels)\n', (13499, 13547), False, 'from metrics import get_metrics, print_metrics\n'), ((13934, 13953), 'json.dumps', 'json.dumps', (['metrics'], {}), '(metrics)\n', (13944, 13953), False, 'import json\n'), ((14942, 14969), 'metrics.print_metrics', 'print_metrics', (['metrics_meta'], {}), '(metrics_meta)\n', (14955, 14969), False, 'from metrics import get_metrics, print_metrics\n'), ((8662, 8707), 'utils.embed_aggregate', 'embed_aggregate', (['x', 'embeds_word'], {}), '(x, embeds_word, **pooling[p])\n', (8677, 8707), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((8803, 8848), 'utils.embed_aggregate', 'embed_aggregate', (['x', 'embeds_word'], {}), '(x, embeds_word, **pooling[p])\n', (8818, 8848), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((9624, 9658), 'numpy.sum', 'np.sum', (['embeds_word.matrix'], {'axis': '(1)'}), '(embeds_word.matrix, axis=1)\n', (9630, 9658), True, 'import numpy as np\n'), ((12371, 12438), 'train.train', 'train', (['x_tr', 'y_train', 'model'], {'logger': 'logger'}), "(x_tr, y_train, model, logger=logger, **model_params['train'])\n", (12376, 12438), False, 'from train import train\n'), ((13688, 13723), 'metrics.print_metrics', 'print_metrics', (['metrics[model_alias]'], {}), '(metrics[model_alias])\n', (13701, 13723), False, 'from metrics import get_metrics, print_metrics\n'), ((9169, 9186), 'preprocessing.parse_seq', 'parse_seq', (['x', 'int'], {}), '(x, int)\n', (9178, 9186), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((9248, 9265), 'preprocessing.parse_seq', 'parse_seq', (['x', 'int'], {}), '(x, int)\n', (9257, 9265), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((12777, 12821), 'models.TFIDF', 'TFIDF', (['target_labels'], {}), "(target_labels, **model_params['init'])\n", (12782, 12821), False, 'from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions\n'), ((9377, 9396), 'preprocessing.parse_seq', 'parse_seq', (['x', 'float'], {}), '(x, float)\n', (9386, 9396), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((9458, 9477), 'preprocessing.parse_seq', 'parse_seq', (['x', 'float'], {}), '(x, float)\n', (9467, 9477), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((13134, 13181), 'models.CatBoost', 'CatBoost', (['target_labels'], {}), "(target_labels, **model_params['init'])\n", (13142, 13181), False, 'from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions\n')]
|
#!/usr/bin/env python
#import standard libraries
import obspy.imaging.beachball
import datetime
import os
import csv
import pandas as pd
import numpy as np
import fnmatch
import argparse
import sys
from geopy.distance import geodesic
from math import *
# Polygon below is assumed to come from shapely.geometry; it is used in rectangleIntersectsPolygon
from shapely.geometry import Polygon
#from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import path
class NewFile:
'''Creates a file object with associated uncertainty and event type'''
def __init__(self, filename, unc, event_type, source):
self.filename = filename
self.event_type = event_type
self.unc = unc
self.name = source
def maketime(timestring):
'''Used in argument parser below. Makes a datetime object from a timestring.'''
TIMEFMT = '%Y-%m-%dT%H:%M:%S'
DATEFMT = '%Y-%m-%d'
TIMEFMT2 = '%m-%d-%YT%H:%M:%S.%f'
outtime = None
    try:
        outtime = datetime.datetime.strptime(timestring, TIMEFMT)
    except:
        try:
            outtime = datetime.datetime.strptime(timestring, DATEFMT)
        except:
            try:
                outtime = datetime.datetime.strptime(timestring, TIMEFMT2)
            except:
                print('Could not parse time or date from %s' % timestring)
print (outtime)
return outtime
def infile(s):
'''Stores filename, event type, and uncertainty where provided from comma separated string.'''
default_uncertainty = 15
try:
infile,unc,etype = s.split(',')
unc = float(unc)
return (infile, unc, etype)
except:
try:
s = s.split(',')
infile, unc, etype = s[0], default_uncertainty, s[1]
return (infile, unc, etype)
except:
raise argparse.ArgumentTypeError('Input file information must be \
given as infile,unc,etype or as infile,etype')
def datelinecross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a positive longitude. Stays the same if the input was positive,
is changed to positive if the input was negative '''
if x<0:
return x+360
else:
return x
###############################################
### 9 ###
###############################################
## Written GLM
def meridiancross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x>180:
return x-360
else:
return x
def northcross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x<90:
return x+360
else:
return x
def unnorthcross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x>360:
return x-360
else:
return x
def zerothreesixty(data):
data['lon']=data.apply(lambda row: datelinecross(row['lon']),axis=1)
return data
def oneeighty(data):
data['lon']=data.apply(lambda row: meridiancross(row['lon']),axis=1)
return data
def northernaz(data):
data['az']=data.apply(lambda row: northcross(row['az']),axis=1)
return data
def notnorthanymore(data):
data['az']=data.apply(lambda row: unnorthcross(row['az']),axis=1)
return data
def writetofile(input_file, output_file, event_type, uncertainty, args, catalogs, file_no, seismo_thick, slabname, name):
''' Writes an input file object to the given output file.
Acquires the necessary columns from the file, calculates moment tensor information.
Eliminates rows of data that do not fall within the specified bounds
(date, magnitude, & location).
If the event type is an earthquake, the catalog is compared to all previously
entered catalogs. Duplicate events are removed from the subsequent entries
(prioritization is determined by the order in which catalogs are entered).
Writes filtered dataframe to output file and prints progress to console.
Arguments: input_file - input file from input or slab2database
output_file - file where new dataset will be written
event_type - two letter ID that indicates the type of data (AS, EQ, BA, etc)
uncertainty - the default uncertainty associated with this file or event type
args - arguments provided from command line (bounds, magnitude limits, etc)
catalogs - a list of EQ catalogs that are being written to this file
file_no - file number, used for making event IDs '''
in_file = open(input_file)
fcsv = (input_file[:-4]+'.csv')
# Reading .csv file into dataframe - all files must be in .csv format
try:
if input_file.endswith('.csv'):
data = pd.read_csv(input_file, low_memory=False)
else:
print ('Input file %s was not written to file. MUST BE IN .CSV FORMAT' % input_file)
pass
except:
        print ('Could not read file %s. A header line of column labels \
followed by a delimited dataset is expected. Check file format to ensure this \
is such. All files must be in .csv format.' % input_file)
if 'ID' in data.columns:
pass
elif 'id_no' in data.columns:
data['ID'] = data['id_no'].values
else:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['ID'] = ID
data = makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname)
data = inbounds(args, data, slabname)
#If option is chosen at command line, removes duplicate entries for the same event
#alternate preference for global or regional catalogues depending upon input arguments
try:
regional_pref
except NameError:
pass
else:
try:
tup = (data, fcsv)
if len(catalogs) > 0:
for idx, row in enumerate(catalogs):
if fnmatch.fnmatch(row, '*global*'):
position = idx
name_of_file = row
if regional_pref == 0 and position != 0:
first_file = catalogs[0]
catalogs[position] = first_file
catalogs[0] = name_of_file
                elif regional_pref == 1 and position != (len(catalogs)-1):
                    last_file = catalogs[(len(catalogs)-1)]
                    catalogs[position] = last_file
                    catalogs[(len(catalogs)-1)] = name_of_file
else:
pass
for cat in catalogs:
data = rid_matches(cat[0], data, cat[1], fcsv)
elif len(catalogs) == 0:
catalogs.append(tup)
except:
print ('If file contains earthquake information (event-type = EQ), \
required columns include: lat,lon,depth,mag,time. The columns of the current \
file: %s. Check file format to ensure these columns are present and properly \
labeled.' % data.columns)
#MF 8.9.16 add source to output file
try:
listints = data['ID'].values.astype(int)
except:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['id_no'] = data['ID'].values
data['ID'] = ID
data['src'] = name
write_data(data, output_file)
print ('The file: %s was written to %s' % (input_file, output_file))
print ('---------------------------------------------------------------------------------')
def castfloats(data):
'''Casts all numerical and nan values to floats to avoid error in calculations'''
data[['lat']] = data[['lat']].astype(float)
data[['lon']] = data[['lon']].astype(float)
data[['depth']] = data[['depth']].astype(float)
data[['unc']] = data[['unc']].astype(float)
if 'mag' in data.columns:
data[['mag']] = data[['mag']].astype(float)
if 'mrr' in data.columns:
data[['mrr']] = data[['mrr']].astype(float)
data[['mtt']] = data[['mtt']].astype(float)
data[['mpp']] = data[['mpp']].astype(float)
data[['mrt']] = data[['mrt']].astype(float)
data[['mrp']] = data[['mrp']].astype(float)
data[['mtp']] = data[['mtp']].astype(float)
if 'Paz' in data.columns and 'Ppl' in data.columns:
data[['Paz']] = data[['Paz']].astype(float)
data[['Ppl']] = data[['Ppl']].astype(float)
data[['Taz']] = data[['Taz']].astype(float)
data[['Tpl']] = data[['Tpl']].astype(float)
data[['S1']] = data[['S1']].astype(float)
data[['D1']] = data[['D1']].astype(float)
data[['R1']] = data[['R1']].astype(float)
data[['S2']] = data[['S2']].astype(float)
data[['D2']] = data[['D2']].astype(float)
data[['R2']] = data[['R2']].astype(float)
return data
def rid_nans(df):
'''Removes points where lat,lon,depth, or uncertainty values are not provided.'''
df = df[np.isfinite(df['lat'])]
df = df[np.isfinite(df['lon'])]
df = df[np.isfinite(df['depth'])]
df = df[np.isfinite(df['unc'])]
return df
def write_data(df, output_file):
''' Arguments: df - filtered dataframe to be written to file
output_file - output file where data is to be written '''
# If file name does not exist, creates file and writes filtered dataframe to it
df = castfloats(df)
df = rid_nans(df)
if not os.path.isfile(output_file):
with open(output_file, 'w') as f:
df.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
# If the output file already exists, new filtered data points are appended to
# existing information
else:
old = pd.read_csv(output_file)
all = pd.concat([old,df],sort=True)
all = castfloats(all)
all = rid_nans(all)
if len(df.columns) > len(old.columns):
all = all[df.columns]
else:
all = all[old.columns]
# Writes desired columns of a filtered dataframe to the output file
with open(output_file, 'w') as f:
all.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
def inbounds(args, data, slab):
''' Originally written by Ginvera, modified by MAF July 2016 '''
''' Arguments: args - input arguments provided from command line arguments
data - dataframe to be filtered based on bounds
Returns: data - filtered dataframe based on bounds '''
# Eliminates data points that are not within specified bounds where provided
if 'time' in data.columns:
try:
data['time'] = pd.to_datetime(data['time'])
except:
try:
data['time'] = pd.to_datetime(data['time'],format='%m-%d-%YT%H:%M:%S')
except:
try:
data['time'] = pd.to_datetime(data['time'],format='%m-%d-%YT%H:%M:%S.%f')
except:
data = data[data.time != '9-14-2012T29:54:59.53']
data = data.reset_index(drop=True)
for index,row in data.iterrows():
print (row['time'])
try:
row['time'] = pd.to_datetime(row['time'],format='%m-%d-%YT%H:%M:%S')
except:
try:
row['time'] = pd.to_datetime(row['time'],format='%m-%d-%YT%H:%M:%S.%f')
except:
print ('this row could not be added, invalid time')
print ('lon,lat,depth,mag,time')
print (row['lon'],row['lat'],row['depth'],row['mag'],row['time'])
data.drop(index, inplace=True)
stime = datetime.datetime(1900,1,1)
etime = datetime.datetime.utcnow()
if args.startTime and args.endTime and args.startTime >= args.endTime:
print ('End time must be greater than start time. Your inputs: Start %s \
End %s' % (args.startTime, args.endTime))
sys.exit(1)
if args.bounds is not None:
lonmin = args.bounds[0]
lonmax = args.bounds[1]
latmin = args.bounds[2]
latmax = args.bounds[3]
minwest = lonmin > 0 and lonmin < 180
maxeast = lonmax < 0 and lonmax > -180
if minwest and maxeast:
data = data[(data.lon >= lonmin) | (data.lon <= lonmax)]
else:
data = data[(data.lon >= lonmin) & (data.lon <= lonmax)]
data = data[(data.lat >= latmin) & (data.lat <= latmax)]
else:
#first filter data within the slab outline (just gets locations though - doesn't filter by rest of info!)
#also, original data was a dataframe
data = getDataInRect(slab,data)
if len(data) > 0:
data_lon = data['lon']
data_lat = data['lat']
data_coords = list(zip(data_lon,data_lat))
indexes_of_bad_data = getDataInPolygon(slab,data_coords)
data_to_keep = data.drop(data.index[indexes_of_bad_data])
data = data_to_keep
else:
return data
if args.startTime is not None and 'time' in data.columns:
stime = args.startTime
data = data[data.time >= stime]
if args.endTime is not None and 'time' in data.columns:
etime = args.endTime
data = data[data.time <= etime]
if args.magRange is not None and 'mag' in data.columns:
magmin = args.magRange[0]
magmax = args.magRange[1]
data = data[(data.mag >= magmin) & (data.mag <= magmax)]
return data
def slabpolygon(slabname):
#####################################
#written by <NAME>, 7/19/2016#
#####################################
'''
inputting the slabname (3 character code) will return the polygon boundaries
'''
#load file with slab polygon boundaries
slabfile = 'library/misc/slab_polygons.txt'
filerows = []
with open(slabfile) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
filerows.append(row)
csvfile.close()
#iterate through list to match the slabname and retrieve coordinates
slabbounds = []
for i in range(len(filerows)):
if slabname == filerows[i][0]:
slabbounds = filerows[i][1:]
slabbounds.append(slabbounds)
return slabbounds
def determine_polygon_extrema(slabname):
#####################################
#written by <NAME>, 7/18/2016#
#####################################
'''
inputs: slabname to be referenced against stored slab coordinates
outputs: the maximum and minimum latitude and longitude values for the input slab
'''
#calls slabpolygon function to get bounds for this slab region
slabbounds = slabpolygon(slabname)
#slabbbounds come in lon1,lat1,lon2,lat2... format
#even numbers are then longitudes while odds are latitudes
coords = np.size(slabbounds)
#simple even/odd function
def is_odd(num):
return num & 0x1
lons = []
lats = []
for i in range(coords):
val = slabbounds[i]
if is_odd(i):
lats.append(val)
else:
lons.append(val)
x1 = int(min(lons))
x2 = int(max(lons))
y1 = int(min(lats))
y2 = int(max(lats))
return x1,x2,y1,y2
def create_grid_nodes(grd_space,slabname):
#####################################
#written by <NAME>, 7/18/2016#
#####################################
'''
inputs: grid spacing between nodes of regular grid (must be an integer), slab code
outputs: coordinates of each node (corner/intersection) within the regular grid (numpy array)
'''
xmin,xmax,ymin,ymax = determine_polygon_extrema(slabname)
total_degrees_lon = xmax-xmin
total_degrees_lat = ymax-ymin
#max_iter represents max number of iterations in the y direction (longitude direction)
max_iter = total_degrees_lon/grd_space
#define a grid to divide the area
#accounts for a non-even division
q1, r1 = divmod(total_degrees_lat, grd_space)
q2, r2 = divmod(total_degrees_lon, grd_space)
if r1 > 0:
grid_y = total_degrees_lat/grd_space
else:
grid_y = total_degrees_lat/grd_space + 1
if r2 > 0:
grid_x = total_degrees_lon/grd_space
else:
grid_x = total_degrees_lon/grd_space + 1
#the total number of grids
    boxes = int(grid_y*grid_x)
#initialize array to save time
boundaries = np.zeros([boxes,4])
'''
count keeps track of iterations of longitude
holds latmin/latmax steady while lonmin/lonmax changes across
when max iterations in longitude have completed (gone across area)
the latmin/latmix will adjust and lonmin/lonmax will also be reset.
This process will continue until the number of boxes has been reached.
'''
count = 0
for i in range(boxes):
if count == max_iter-1:
lonmax = xmax + grd_space*count
lonmin = xmin + grd_space*count
count = 0
latmax = ymax
latmin = ymin
boundaries[i,0] = lonmin
boundaries[i,1] = lonmax
boundaries[i,2] = latmin
boundaries[i,3] = latmax
ymax = ymax - grd_space
ymin = ymin - grd_space
else:
lonmax = xmax + grd_space*count
lonmin = xmin + grd_space*count
count = count+1
latmax = ymax
latmin = ymin
boundaries[i,0] = lonmin
boundaries[i,1] = lonmax
boundaries[i,2] = latmin
boundaries[i,3] = latmax
return boundaries
def getDataInPolygon(slabname,data):
#####################################
#written by <NAME>, 7/20/2016#
#####################################
''' creates a grid of 1 or nan based on if they are within a clipping mask or not. DEP.6.29.16 '''
''' modified to fit this script by MAF 7/18/16 '''
### Input:
# slabname: a 3 digit character code identifying a slab region
#data: the input data which may or may not be within the polygon
### Output:
#contained_data: an array of coordinate pairs (lon,lat) that reside within the polygon region
#check if slabbounds are already defined. If not, acquire them
slabbounds = slabpolygon(slabname)
#slabbbounds come in lon1,lat1,lon2,lat2... format
#even numbers are then longitudes while odds are latitudes
coords = np.size(slabbounds)
#simple even/odd function
def is_odd(num):
return num & 0x1
lons = []
lats = []
for i in range(coords):
val = slabbounds[i][1:]
if is_odd(i):
lats.append(val)
else:
lons.append(val)
#create tuple of locations (with zip) to use in contains_points
xy = list(zip(lons,lats))
poly = path.Path(xy)
temp = poly.contains_points(data[:])
mask = np.zeros(len(temp),)*np.nan
mask[temp] = 1
keepers = []
for i in range(len(data)):
points_in_poly = np.dot(mask[i],data[i])
if i > 0:
keepers = np.vstack((keepers,points_in_poly))
else:
keepers = points_in_poly
rows_to_drop = []
for i in range(len(keepers)):
if np.isnan(keepers[i][0]) == True:
rows_to_drop.append(i)
return rows_to_drop
def getDataInRect(slabname,data1):
#####################################
#written by <NAME>, 7/20/2016#
#####################################
''' creates a grid of 1 or nan based on if they are within a clipping mask or not. DEP.6.29.16 '''
''' modified to fit this script by MAF 7/18/16 '''
### Input:
# slabname: a 3 digit character code identifying a slab region
#data: the input data which may or may not be within the polygon
### Output:
#contained_data: an array of coordinate pairs (lon,lat) that reside within the polygon region
#check if slabbounds are already defined. If not, acquire them
slabbounds = slabpolygon(slabname)
#slabbbounds come in lon1,lat1,lon2,lat2... format
#even numbers are then longitudes while odds are latitudes
coords = np.size(slabbounds)
#simple even/odd function
def is_odd(num):
return num & 0x1
lons = []
lats = []
for i in range(coords):
val = slabbounds[i][1:]
try:
val = float(val)
except:
break
if is_odd(i):
lats.append(val)
else:
lons.append(val)
lonmin = min(lons)
lonmax = max(lons)
latmin = min(lats)
latmax = max(lats)
if lonmin < 0 and lonmax < 0:
data1 = oneeighty(data1)
else:
data1 = zerothreesixty(data1)
data1 = data1[(data1.lon > lonmin) & (data1.lon < lonmax) &(data1.lat > latmin) &(data1.lat < latmax)]
return data1
def cmtfilter(data,seismo_thick):
''' Arguments: data - data with all shallow/nonshallow and thrust/nonthrust earthquake
        Returns: filtered - filtered dataframe which DEPENDS ON WHAT YOU DO/DONT COMMENT OUT
(1) filters only shallow earthquakes that have MT criteria which are non thrust
all other shallow earthquakes WITHOUT MT info are NOT filtered
OR
(2) filters ALL shallow earthquakes UNLESS they have MT info and that
MT info has the criteria of a thrust event. '''
# Removes non-thrust events from depths shallower than seismogenic zone
deep_data = data[data.depth >= seismo_thick]
# Includes shallow data without MT info (1) - comment out next two lines for (2)
dfn = data[np.isnan(data['Paz'])]
    dfn = dfn[dfn.depth < seismo_thick]
data = data[np.isfinite(data['Paz'])]
shallow_data = data[data.depth < seismo_thick]
# Depending on which MT info are provided, filters non-thrust, shallow events
if 'Ndip' in shallow_data.columns:
thrust_rake = (shallow_data.Tpl>50) & (shallow_data.Ndip<=30)
else:
thrust_rake = ((shallow_data.R1>30) & (shallow_data.R2>30)
& (shallow_data.R1<150) & (shallow_data.R2<150))
shallow_data = shallow_data[thrust_rake]
# Includes shallow data without MT info (1) - comment out next line for (2)
filtered = pd.concat([deep_data, shallow_data, dfn],sort=True)
# Only includes shallow thrust events (2) - uncomment line below for (2) and comment necessary lines above
# filtered = pd.concat([deep_data, shallow_data],sort=True)
    # Rearranges columns / filters out unnecessary columns
filtered=filtered[['lat','lon','depth','unc','ID','etype','mag','time',
'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
return filtered
def make_moment_tensor(mrr,mtt,mpp,mrt,mrp,mtp): #r,t,p = x,y,z
'''Used in m_to_planes below. Makes a moment tensor object from moment tensor components'''
return obspy.imaging.beachball.MomentTensor(mrr,mtt,mpp,mrt,mrp,mtp,1)
def m_to_planes(mrr,mtt,mpp,mrt,mrp,mtp,n):
'''Takes a moment tensor and calculates the P, N, and T axes and nodal plane information.
Used in moment_calc below. Returns one of these values as specified by input (n).
The integer input specifies which index of the array of outputs to return. '''
mt = make_moment_tensor(mrr,mtt,mpp,mrt,mrp,mtp)
#axes = obspy.imaging.beachball.MT2Axes(mt) #returns T, N, P
#fplane = obspy.imaging.beachball.MT2Plane(mt)#returns strike, dip, rake
#aplane = obspy.imaging.beachball.AuxPlane(fplane.strike, fplane.dip, fplane.rake)
#MAF changed because functions use lowercase, and aux_plane name includes underscore
axes = obspy.imaging.beachball.mt2axes(mt) #returns T, N, P
fplane = obspy.imaging.beachball.mt2plane(mt)#returns strike, dip, rake
aplane = obspy.imaging.beachball.aux_plane(fplane.strike, fplane.dip, fplane.rake)
Tstrike = axes[0].strike
Tdip = axes[0].dip
Pstrike = axes[2].strike
Pdip = axes[2].dip
S1 = fplane.strike
D1 = fplane.dip
R1 = fplane.rake
S2 = aplane[0]
D2 = aplane[1]
R2 = aplane[2]
mplanes = [Pstrike,Pdip,Tstrike,Tdip,S1,D1,R1,S2,D2,R2]
return mplanes[n]
def moment_calc(df, args, seismo_thick,slabname):
''' Creates and appends columns with Principal Axis and Nodal Plane information.
Used in makeframe below. Takes moment tensor information from input dataframe
columns and creates 11 new columns with information used to distinguish between thrust
and non-thrust earthquakes.
Arguments: df - dataframe with mt information in the form mrr,mtt,mpp,mrt,mrp,mtp
args - input arguments provided from command line arguments
Returns: df - dataframe with mt information in the form Paz,Ppl,Taz,Tpl,S1,D1,R1,S2,D2,R2
'''
#try:
# Only calculates MT info where it exists in EQ datasets
df = inbounds(args, df, slabname)
dfm = df[np.isfinite(df['mrr'])]
dfn = df[df['mrr'].isnull()]
#except:
# raise Exception,'If file contains earthquake information (event-type = EQ), \
# required columns include: lat,lon,depth,mag,time. The columns of the current \
# file: %s. Check file format to ensure these columns are present and properly \
# labeled.' % df.columns
# Calculates each new column of MT info
try:
dfm['Paz']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],0),axis=1)
dfm['Ppl']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],1),axis=1)
dfm['Taz']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],2),axis=1)
dfm['Tpl']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],3),axis=1)
dfm['S1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],4),axis=1)
dfm['D1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],5),axis=1)
dfm['R1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],6),axis=1)
dfm['S2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],7),axis=1)
dfm['D2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],8),axis=1)
dfm['R2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
row['mrt'], row['mrp'], row['mtp'],9),axis=1)
# Concatenates events with and without MT info
#dfm = cmtfilter(dfm,seismo_thick)
df = pd.concat([dfm,dfn],sort=True)
# Rearranges columns and returns
if 'mlon' in df.columns:
df = df[['lat','lon','depth','unc','ID','etype','mag','time',
'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
else:
df = df[['lat','lon','depth','unc','ID','etype','mag','time',
'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2']]
df['mlon'] = df['lon'].values*1.0
df['mlat'] = df['lat'].values*1.0
df['mdep'] = df['depth'].values*1.0
return df
except:
# if exception is caught, try to return only events without MT info
try:
if len(dfm) == 0:
return dfn
except:
print('Where moment tensor information is available, columns \
must be labeled: mrr,mpp,mtt,mrp,mrt,mtp')
def ymdhmsparse(input_file):
'''Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns.
Used in makeframe below. Returns a new dataframe with parsed datetimes. '''
ymdhms = {'time':['year','month','day','hour','min','sec']}
dparse = lambda x: pd.datetime.strptime(x, '%Y %m %d %H %M %S')
cols = ['year','month','day','hour','min','sec','lat','lon','depth','mag']
data = pd.read_csv(input_file, parse_dates=ymdhms, usecols=cols, date_parser=dparse)
return data
def raiseUnc(x):
''' Raises unreasonably low uncertainties for earthquakes to a value greater
than that of average active source data points (which is 5 km). '''
if x < 6:
return 6
else:
return x
def makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname):
''' Arguments: data - semi-filtered data frame to be filtered more and written to file
fcsv - filename of output file
event_type - kind of data i.e. BA, EQ, ER, TO etc
uncertainty - unc value provided in command line or set by default for etype
args - input arguments provided from command line arguments
Returns: data - fully filtered dataset to be written to output file '''
# Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns
if 'year' in data.columns and 'sec' in data.columns and 'mag' in data.columns:
data = ymdhmsparse(fcsv)
# If ISC-GEM data is provided, high quality, low uncertainties are included in place of
# the default values assigned in s2d.py main method.
    if 'unc' in data.columns and 'uq' in data.columns:
try:
data = data[(data.uq != 'C') & (data.unc < uncertainty)]
except:
print ('When adding a file with uncertainty quality, the column \
representing that quality must be labeled as uq')
# uses OG uncertainties where provided. Raises them if they are unreasonably low
elif 'unc' in data.columns:
uncert = data['unc'].values
try:
if isnan(uncert[1]):
data['unc'] = uncertainty
elif event_type == 'EQ':
data['unc'] = data.apply(lambda row: raiseUnc(row['unc']),axis=1)
else:
pass
except:
data['unc'] = uncertainty
# If no uncertainty column is included, the one provided in command line arguments is
# used to add a new column to the data, alternatively, the default value assigned in s2d.py is used
else:
data['unc'] = uncertainty
pd.options.mode.chained_assignment = None
# A new column marking the event type is added to the data. Everything is cast as a float
data['etype'] = event_type
data = castfloats(data)
# Calculates moment tensor info where applicable and removes shallow, non-thrust events
if 'mrr' in data.columns:
data = moment_calc(data, args, seismo_thick,slabname)
elif 'time' in data.columns and 'mag' in data.columns:
data = data[['lat','lon','depth','unc','ID','etype','mag','time']]
else:
pass
return data
##########################################################################################################
#The following serves to create a rough plot of the data types compiled with s2d.py.
##########################################################################################################
def plot_map(lons, lats, c, legend_label, projection='mill',
llcrnrlat=-80, urcrnrlat=90, llcrnrlon=-180, urcrnrlon=180, resolution='i'):
''' Optional Arguments: projection - map projection, default set as 'mill'
llcrnrlat - lower left corner latitude value, default is -80
urcrnrlat - upper right corner latitude value, default is 90
llcrnrlon - lower left corner longitude value, default is -180
urcrnrlon - upper right corner longitude value, default is 180
resolution - the resolution of the plot, default is 'i'
Required Arguments: lons - list of longitude values to be plotted
lats - list of latitude values to be plotted
c - the color of the points to be plotted
legend_label - how this set of points will be labeled on the legend
Returns: m - a basemap object defined by input bounds with input points included '''
# Creates a basic plot of a series of lat,lon points over a defined region
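    # Note: Basemap comes from mpl_toolkits.basemap, whose import is commented out
    # near the top of this file; it must be re-enabled for plot_map to run.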
m = Basemap(projection=projection, llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, resolution=resolution)
m.drawcoastlines()
m.drawmapboundary()
m.drawcountries()
m.etopo()
m.drawmeridians(np.arange(llcrnrlon, urcrnrlon, 5), labels=[0,0,0,1], fontsize=10)
m.drawparallels(np.arange(llcrnrlat, urcrnrlat, 5), labels=[1,0,0,0], fontsize=10)
x,y = m(lons, lats)
m.scatter(x, y, color=c, label=legend_label, marker='o', edgecolor='none', s=10)
return m
def datelinecross(x):
'''Converts negative longitudes to their positive equivalent for the sake of plotting.'''
if x<0:
return x+360
else:
return x
##############################################################################################
#Everything below this point serves the purpose of identifying and
#eliminating duplicate events between multiple earthquake catalog entries.
##############################################################################################
class Earthquake:
'''Creates an earthquake object from which event information can be extracted'''
def __init__(self,time,coords,depth,lat,lon,mag,catalog):
self.time = time
self.coords = coords
self.depth = depth
self.lat = lat
self.lon = lon
self.mag = mag
self.catalog = catalog
def getvals(row):
'''Gathers time, lat, lon, depth, mag, information from row in dataframe.'''
time = row['time']
lat = row['lat']
lon = row['lon']
depth = row['depth']
mag = row['mag']
ep = (lat,lon)
return time,ep,depth,lat,lon,mag
def boundtrim(cat1, cat2):
''' Arguments: cat1 - an earthquake catalog to be compared with cat2
cat2 - an earthquake catalog to be compared to cat1
Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across bounds
where they both exist. Reduces processing time
'''
# Trims two earthquake catalogs to fit over the same region
lonmin1, lonmin2 = cat1['lon'].min(), cat2['lon'].min()
latmin1, latmin2 = cat1['lat'].min(), cat2['lat'].min()
lonmax1, lonmax2 = cat1['lon'].max(), cat2['lon'].max()
latmax1, latmax2 = cat1['lat'].max(), cat2['lat'].max()
minwest = (lonmax1 > 0 and lonmax1 < 180) or (lonmax2 > 0 and lonmax2 < 180)
maxeast = (lonmin1 < 0 and lonmin1 > -180) or (lonmin2 < 0 and lonmin2 > -180)
difference = abs(lonmin1-lonmax1)>180 or abs(lonmin2-lonmax2)>180
if minwest and maxeast and difference:
pass
else:
cat1 = cat1[(cat1.lon >= lonmin2) & (cat1.lon <= lonmax2)]
cat2 = cat2[(cat2.lon >= lonmin1) & (cat2.lon <= lonmax1)]
cat1 = cat1[(cat1.lat >= latmin2) & (cat1.lat <= latmax2)]
cat2 = cat2[(cat2.lat >= latmin1) & (cat2.lat <= latmax1)]
return cat1, cat2
def timetrim(cat1, cat2):
''' Arguments: cat1 - an earthquake catalog to be compared with cat2
cat2 - an earthquake catalog to be compared to cat1
Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across time
frames where they both exist. Reduces processing time
'''
# Trims two earthquake catalogs to fit over the same time range
cat1['time'] = pd.to_datetime(cat1['time'])
cat2['time'] = pd.to_datetime(cat2['time'])
cat1min, cat1max = cat1['time'].min(), cat1['time'].max()
cat2min, cat2max = cat2['time'].min(), cat2['time'].max()
cat1 = cat1[(cat1.time >= cat2min) & (cat1.time <= cat2max)]
cat2 = cat2[(cat2.time >= cat1min) & (cat2.time <= cat1max)]
return cat1, cat2
def earthquake_string(eqo):
''' Puts earthquake information into a string to be written or printed
Arguments: eqo - earthquake object
Returns: eqos - a string of information stored in earthquake object input argument '''
eqos = (str(eqo.lat) + ',' + str(eqo.lon) + ',' + str(eqo.depth) + ','
+ str(eqo.mag) + ',' + str(eqo.time) + ',' + eqo.catalog)
return eqos
def find_closest(eqo, eqm1, eqm2):
'''Determines which of two potential matches in one catalog is closer to an event in another.
Arguments: eqo - earthquake event in first catalog that matches two events in the second
eqm1 - the first event in the second catalog that matches eqo
eqm2 - the second event in the second catalog that matches eqo
Returns: closest - the closest event weighting time first, then distance, then magnitude '''
# Prints information to console to make user aware of more than one match
    print ('-------------------------------------- lat %s lon %s depth %s mag %s time \
%s catalog' % (',',',',',',',',','))
print ('There is more than one match for event: %s' % earthquake_string(eqo))
print ('event1: %s' % earthquake_string(eqm1))
print ('event2: %s' % earthquake_string(eqm2))
# Gets distance between either event and the common match eqo
darc1 = geodesic(eqo.coords, eqm1.coords).meters/1000
darc2 = geodesic(eqo.coords, eqm2.coords).meters/1000
dh1 = abs(eqo.depth - eqm1.depth)
dh2 = abs(eqo.depth - eqm2.depth)
dist1 = sqrt(darc1*darc1 + dh1*dh1)
dist2 = sqrt(darc2*darc2 + dh2*dh2)
# Gets magnitude and time differences between each event and the common match
dtime1 = abs(eqo.time - eqm1.time)
dtime2 = abs(eqo.time - eqm2.time)
dmag1 = abs(eqo.mag - eqm1.mag)
dmag2 = abs(eqo.mag - eqm2.mag)
# Finds the closest match to eqo by checking time first, then distance, then magnitude
if dtime1 < dtime2:
closest = eqm1
elif dtime2 < dtime1:
closest = eqm2
elif dtime1 == dtime2 and dist1 < dist2:
closest = eqm1
    elif dtime1 == dtime2 and dist2 < dist1:
        closest = eqm2
    elif dtime1 == dtime2 and dist1 == dist2 and dmag1 < dmag2:
        closest = eqm1
    elif dtime1 == dtime2 and dist1 == dist2 and dmag2 < dmag1:
        closest = eqm2
# If all things are equal, the first event is chosen as a match by default
else:
print ('The two events are equidistant to the match in time, space, and magnitude.\
The second event was therefore determined independent.')
closest = eqm1
return closest
print ('>>>>closest event: %s' % earthquake_string(closest))
return closest
def removematches(dfo, dfm):
'''Eliminates events in dfo (dataframe) that are found in dfm (dataframe) '''
ind = (dfo.time.isin(dfm.time) & dfo.lat.isin(dfm.lat) & dfo.lon.isin(dfm.lon)
& dfo.mag.isin(dfm.mag) & dfo.depth.isin(dfm.depth))
dfo = dfo[~ind]
return dfo
def rid_matches(cat1, cat2, name1, name2):
''' Compares two catalogs, identifies and removes matching events from cat2.
Arguments: cat1 - the first catalog (dataframe), no events are removed from this catalog
cat2 - the second catalog (dataframe), events in this catalog that are close
in space, time, and magnitude to those in cat1 are filtered out
                    name1 - the name of the first catalog, used for printing/bookkeeping purposes
                    name2 - the name of the second catalog, used for printing/bookkeeping purposes
Returns: df - a filtered version of cat2 without events that match those in cat1 '''
# Setting constants that define matching criteria
tdelta = 30
distdelta = 100
magdelta = 0.5
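    # (tdelta is interpreted in seconds, distdelta in kilometers, and magdelta in
    #  magnitude units below; events must fall within all three tolerances to match)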
    # Ensuring that all times are in datetime object format & trimming catalogs to only extend
    # across the bounds and time constraints of the other
cat1['time'] = pd.to_datetime(cat1['time'])
cat2['time'] = pd.to_datetime(cat2['time'])
cat1c,cat2c = timetrim(cat1, cat2)
cat1c,cat2c = boundtrim(cat1c, cat2c)
    # Making dataframe/filename to store matching events for bookkeeping
try:
name1w = name1[-10:] #this doesn't make sense, and seems to chop the file name inappropriately - will have to resolve this later.
name2w = name2[-10:]
except:
name1w = name1[:-4]
name2w = name2[:-4]
matches = pd.DataFrame(columns = ['lat','lon','depth','mag','time','catalog'])
count = 0
# Compares each event in cat2 to each event in cat1
for index,row in cat1c.iterrows():
n = 0
# Getting earthquake info from event and storing it in an Earthquake object (cat1)
time1, ep1, depth1, lat1, lon1, mag1 = getvals(row)
eq1 = Earthquake(time1, ep1, depth1, lat1, lon1, mag1, name1w)
for index, r in cat2c.iterrows():
# Getting earthquake info from event and storing it in an Earthquake object (cat1)
time2, ep2, depth2, lat2, lon2, mag2 = getvals(r)
eq2 = Earthquake(time2, ep2, depth2, lat2, lon2, mag2, name2w)
# If events are close in time, space, and magnitude add event from cat2 to match list
if abs(time1-time2) < datetime.timedelta(seconds = tdelta):
                if geodesic(ep1,ep2).meters/1000 <= distdelta:
if abs(mag1-mag2) < magdelta:
# If there is already a match for this event, find the closest
# The closest is stored and compared to third, fourth matches etc if they exist
if n >= 1:
match = find_closest(eq1, match, eq2)
n += 1
if n == 0:
match = eq2
n += 1
else:
pass
else:
pass
else:
pass
# Add matching events to match dataframe
        if n > 0:
            matches.loc[len(matches)+1] = [lat1, lon1, depth1, mag1, time1, name1w]
matches.loc[len(matches)+1] = [match.lat, match.lon, match.depth, match.mag,
match.time, name2w]
count += 1
# Write matches to matching file
matchfile = name1w + name2w + '-matches.csv'
# Remove matches from cat2
df = removematches(cat2, matches)
# Print general results to console
print ('%i matches were found between the catalogs: %s and %s.' % (count, name1, name2))
if count > 0:
with open(matchfile,'w') as f:
matches.to_csv(f, header=True, index=False, float_format='%0.4f')
print ('The pairs can be found in the file: ** %s **, which has been written added to the current directory.' % (name1w + name2w + '-matches.csv'))
print ('Based on the order of entry, in the instance of duplicate events, the entries in ** %s ** were added to the slab file while the entries in ** %s ** were not added.' % (name1, name2))
# Return filtered catalog to be written to output file
return df
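# A minimal usage sketch of the duplicate-removal step (hypothetical file names;
# both CSVs are assumed to contain at least lat, lon, depth, mag and time columns):
#   cat1 = pd.read_csv('catalog_global.csv')
#   cat2 = pd.read_csv('catalog_regional.csv')
#   cat2_filtered = rid_matches(cat1, cat2, 'catalog_global.csv', 'catalog_regional.csv')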
def rectangleIntersectsPolygon(x1,x2,y1,y2):
####################################
#written by <NAME>, 8/4/2016#
####################################
def is_odd(num):
return num & 0x1
#create polygon from input rectangle
rect = Polygon([(x1,y2),(x2,y2),(x2,y1),(x1,y1)])
#read in slab boundaries
slabfile = 'slab_polygons.txt'
filerows = []
with open(slabfile) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
filerows.append(row)
csvfile.close()
#loop through the slabnames and slabboundaries by row to define each slab polygon
#then verify whether the input rectangle overlaps any of the defined slabs
slabbounds = []
slabname = []
slab = []
for i in range(len(filerows)-1):
lats =[]
lons = []
slabname = filerows[i][0]
slabbounds = filerows[i][1:]
slabbounds.append(slabbounds)
for j in range(1,(len(filerows[i][:]))):
val = float(filerows[i][j])
if is_odd(j):
lons.append(val)
else:
lats.append(val)
        poly = Polygon(list(zip(lons,lats)))
        if rect.overlaps(poly):
slab.append(slabname)
else:
continue
#if the input rectangle does not overlap with just one slab, let the user know
if len(slab) == 0:
print ('The input boundaries do not overlap any slabs. Please try again.')
elif len(slab) > 1:
        response = input('You have selected multiple slabs. Which slab would you like to model?: ' + str(slab) + ' Please enter a string: ')
        slab = response
return slab
|
[
"pandas.read_csv",
"pandas.datetime.strptime",
"numpy.isfinite",
"datetime.timedelta",
"numpy.arange",
"pandas.to_datetime",
"datetime.datetime",
"matplotlib.path.Path",
"numpy.dot",
"fnmatch.fnmatch",
"numpy.vstack",
"pandas.DataFrame",
"csv.reader",
"numpy.size",
"os.path.isfile",
"numpy.isnan",
"datetime.strptime",
"geopy.distance.geodesic",
"datetime.datetime.utcnow",
"numpy.zeros",
"pandas.concat"
] |
[((12234, 12263), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (12251, 12263), False, 'import datetime\n'), ((12274, 12300), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12298, 12300), False, 'import datetime\n'), ((15450, 15469), 'numpy.size', 'np.size', (['slabbounds'], {}), '(slabbounds)\n', (15457, 15469), True, 'import numpy as np\n'), ((17029, 17049), 'numpy.zeros', 'np.zeros', (['[boxes, 4]'], {}), '([boxes, 4])\n', (17037, 17049), True, 'import numpy as np\n'), ((19042, 19061), 'numpy.size', 'np.size', (['slabbounds'], {}), '(slabbounds)\n', (19049, 19061), True, 'import numpy as np\n'), ((19440, 19453), 'matplotlib.path.Path', 'path.Path', (['xy'], {}), '(xy)\n', (19449, 19453), False, 'from matplotlib import path\n'), ((20769, 20788), 'numpy.size', 'np.size', (['slabbounds'], {}), '(slabbounds)\n', (20776, 20788), True, 'import numpy as np\n'), ((23054, 23106), 'pandas.concat', 'pd.concat', (['[deep_data, shallow_data, dfn]'], {'sort': '(True)'}), '([deep_data, shallow_data, dfn], sort=True)\n', (23063, 23106), True, 'import pandas as pd\n'), ((29568, 29645), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'parse_dates': 'ymdhms', 'usecols': 'cols', 'date_parser': 'dparse'}), '(input_file, parse_dates=ymdhms, usecols=cols, date_parser=dparse)\n', (29579, 29645), True, 'import pandas as pd\n'), ((37348, 37376), 'pandas.to_datetime', 'pd.to_datetime', (["cat1['time']"], {}), "(cat1['time'])\n", (37362, 37376), True, 'import pandas as pd\n'), ((37396, 37424), 'pandas.to_datetime', 'pd.to_datetime', (["cat2['time']"], {}), "(cat2['time'])\n", (37410, 37424), True, 'import pandas as pd\n'), ((41804, 41832), 'pandas.to_datetime', 'pd.to_datetime', (["cat1['time']"], {}), "(cat1['time'])\n", (41818, 41832), True, 'import pandas as pd\n'), ((41852, 41880), 'pandas.to_datetime', 'pd.to_datetime', (["cat2['time']"], {}), "(cat2['time'])\n", (41866, 41880), True, 'import pandas as pd\n'), ((42294, 42365), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['lat', 'lon', 'depth', 'mag', 'time', 'catalog']"}), "(columns=['lat', 'lon', 'depth', 'mag', 'time', 'catalog'])\n", (42306, 42365), True, 'import pandas as pd\n'), ((866, 904), 'datetime.strptime', 'datetime.strptime', (['timestring', 'TIMEFMT'], {}), '(timestring, TIMEFMT)\n', (883, 904), False, 'import datetime\n'), ((9305, 9327), 'numpy.isfinite', 'np.isfinite', (["df['lat']"], {}), "(df['lat'])\n", (9316, 9327), True, 'import numpy as np\n'), ((9341, 9363), 'numpy.isfinite', 'np.isfinite', (["df['lon']"], {}), "(df['lon'])\n", (9352, 9363), True, 'import numpy as np\n'), ((9377, 9401), 'numpy.isfinite', 'np.isfinite', (["df['depth']"], {}), "(df['depth'])\n", (9388, 9401), True, 'import numpy as np\n'), ((9415, 9437), 'numpy.isfinite', 'np.isfinite', (["df['unc']"], {}), "(df['unc'])\n", (9426, 9437), True, 'import numpy as np\n'), ((9775, 9802), 'os.path.isfile', 'os.path.isfile', (['output_file'], {}), '(output_file)\n', (9789, 9802), False, 'import os\n'), ((10077, 10101), 'pandas.read_csv', 'pd.read_csv', (['output_file'], {}), '(output_file)\n', (10088, 10101), True, 'import pandas as pd\n'), ((10116, 10147), 'pandas.concat', 'pd.concat', (['[old, df]'], {'sort': '(True)'}), '([old, df], sort=True)\n', (10125, 10147), True, 'import pandas as pd\n'), ((14487, 14521), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (14497, 14521), False, 'import csv\n'), ((19627, 19651), 'numpy.dot', 'np.dot', 
(['mask[i]', 'data[i]'], {}), '(mask[i], data[i])\n', (19633, 19651), True, 'import numpy as np\n'), ((22414, 22435), 'numpy.isnan', 'np.isnan', (["data['Paz']"], {}), "(data['Paz'])\n", (22422, 22435), True, 'import numpy as np\n'), ((22495, 22519), 'numpy.isfinite', 'np.isfinite', (["data['Paz']"], {}), "(data['Paz'])\n", (22506, 22519), True, 'import numpy as np\n'), ((25820, 25842), 'numpy.isfinite', 'np.isfinite', (["df['mrr']"], {}), "(df['mrr'])\n", (25831, 25842), True, 'import numpy as np\n'), ((28219, 28251), 'pandas.concat', 'pd.concat', (['[dfm, dfn]'], {'sort': '(True)'}), '([dfm, dfn], sort=True)\n', (28228, 28251), True, 'import pandas as pd\n'), ((29433, 29477), 'pandas.datetime.strptime', 'pd.datetime.strptime', (['x', '"""%Y %m %d %H %M %S"""'], {}), "(x, '%Y %m %d %H %M %S')\n", (29453, 29477), True, 'import pandas as pd\n'), ((34188, 34222), 'numpy.arange', 'np.arange', (['llcrnrlon', 'urcrnrlon', '(5)'], {}), '(llcrnrlon, urcrnrlon, 5)\n', (34197, 34222), True, 'import numpy as np\n'), ((34275, 34309), 'numpy.arange', 'np.arange', (['llcrnrlat', 'urcrnrlat', '(5)'], {}), '(llcrnrlat, urcrnrlat, 5)\n', (34284, 34309), True, 'import numpy as np\n'), ((45554, 45588), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (45564, 45588), False, 'import csv\n'), ((4939, 4980), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'low_memory': '(False)'}), '(input_file, low_memory=False)\n', (4950, 4980), True, 'import pandas as pd\n'), ((5565, 5596), 'numpy.arange', 'np.arange', (['start_ID', 'stop_ID', '(1)'], {}), '(start_ID, stop_ID, 1)\n', (5574, 5596), True, 'import numpy as np\n'), ((7548, 7579), 'numpy.arange', 'np.arange', (['start_ID', 'stop_ID', '(1)'], {}), '(start_ID, stop_ID, 1)\n', (7557, 7579), True, 'import numpy as np\n'), ((11049, 11077), 'pandas.to_datetime', 'pd.to_datetime', (["data['time']"], {}), "(data['time'])\n", (11063, 11077), True, 'import pandas as pd\n'), ((19691, 19727), 'numpy.vstack', 'np.vstack', (['(keepers, points_in_poly)'], {}), '((keepers, points_in_poly))\n', (19700, 19727), True, 'import numpy as np\n'), ((19847, 19870), 'numpy.isnan', 'np.isnan', (['keepers[i][0]'], {}), '(keepers[i][0])\n', (19855, 19870), True, 'import numpy as np\n'), ((39126, 39159), 'geopy.distance.geodesic', 'geodesic', (['eqo.coords', 'eqm1.coords'], {}), '(eqo.coords, eqm1.coords)\n', (39134, 39159), False, 'from geopy.distance import geodesic\n'), ((39184, 39217), 'geopy.distance.geodesic', 'geodesic', (['eqo.coords', 'eqm2.coords'], {}), '(eqo.coords, eqm2.coords)\n', (39192, 39217), False, 'from geopy.distance import geodesic\n'), ((952, 990), 'datetime.strptime', 'datetime.strptime', (['timestring', 'DATEFMT'], {}), '(timestring, DATEFMT)\n', (969, 990), False, 'import datetime\n'), ((43130, 43164), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'tdelta'}), '(seconds=tdelta)\n', (43148, 43164), False, 'import datetime\n'), ((6160, 6192), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['row', '"""*global*"""'], {}), "(row, '*global*')\n", (6175, 6192), False, 'import fnmatch\n'), ((11142, 11198), 'pandas.to_datetime', 'pd.to_datetime', (["data['time']"], {'format': '"""%m-%d-%YT%H:%M:%S"""'}), "(data['time'], format='%m-%d-%YT%H:%M:%S')\n", (11156, 11198), True, 'import pandas as pd\n'), ((1050, 1089), 'datetime.strptime', 'datetime.strptime', (['timestring', 'TIMEFMT2'], {}), '(timestring, TIMEFMT2)\n', (1067, 1089), False, 'import datetime\n'), ((11274, 11333), 'pandas.to_datetime', 'pd.to_datetime', 
(["data['time']"], {'format': '"""%m-%d-%YT%H:%M:%S.%f"""'}), "(data['time'], format='%m-%d-%YT%H:%M:%S.%f')\n", (11288, 11333), True, 'import pandas as pd\n'), ((11651, 11706), 'pandas.to_datetime', 'pd.to_datetime', (["row['time']"], {'format': '"""%m-%d-%YT%H:%M:%S"""'}), "(row['time'], format='%m-%d-%YT%H:%M:%S')\n", (11665, 11706), True, 'import pandas as pd\n'), ((11817, 11875), 'pandas.to_datetime', 'pd.to_datetime', (["row['time']"], {'format': '"""%m-%d-%YT%H:%M:%S.%f"""'}), "(row['time'], format='%m-%d-%YT%H:%M:%S.%f')\n", (11831, 11875), True, 'import pandas as pd\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.optimize import linprog
import cvxpy
from cvxpy import *
class CuttingPlaneModel:
def __init__(self, dim, bounds):
self.dim = dim
self.bounds = bounds
self.coefficients = np.empty((0,dim+1))
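        # Each row of self.coefficients stores one cutting plane as [c, g_1, ..., g_dim],
        # so the model value at x is max_i (c_i + <g_i, x>): a piecewise-linear
        # approximation of the objective built from subgradient information.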
def __call__(self, x):#REMOVE
y = [np.sum(np.multiply(coefficients_i, np.hstack((1,x)))) for coefficients_i in self.coefficients]
return np.max(y), 0
def get_constraints(self):
A_ub_x = np.asarray([[c[1],c[2]] for c in self.coefficients])
A_ub_y = np.asarray([-1 for i in range(self.coefficients.shape[0])])
b_ub = np.asarray([-c[0] for c in self.coefficients])
return A_ub_x, A_ub_y, b_ub
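    # add_plane linearizes the objective at x: f(x) + <g, z - x> = c + <g, z>
    # with c = f - <g, x>, and appends [c, g] as a new row of the model.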
def add_plane(self, f, g, x):
c = f - np.sum(np.multiply(g,x))
new_plane = np.append(c,g)
self.coefficients = np.append(self.coefficients, [new_plane], axis=0)
def solve(self, lb, ub):
A_ub_x, A_ub_y, b_ub = self.get_constraints()
x = Variable(self.dim)
        y = Variable() # TODO: use yp, yn instead if this does not work for a negative minimum
constraints = []
constraints.append(A_ub_x @ x + A_ub_y * y <= b_ub)
objective = Minimize(y)
problem = Problem(objective, constraints)
problem.solve(verbose=False)
#print("Problem status: ", problem.status)
        on_border = problem.status in ['unbounded', 'infeasible'] # TODO: handle the infeasible case properly if possible
if problem.status == 'infeasible':
print("Warning: Infeasible problem")
if on_border:
            lb_constraint = [lb]*self.dim # Rewrite as two variables
            ub_constraint = [ub]*self.dim
            constraints.append(lb_constraint <= x)
            constraints.append(x <= ub_constraint)
problem = Problem(objective, constraints)
problem.solve(verbose=False)
#print("Problem status: ", problem.status)
#print("MODEL min: ", x.value, y.value)
return x.value, y.value, on_border
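    # project_point finds the point of the current level set closest to x0 by solving
    # the QP: minimize ||x - x0||^2 subject to the cutting-plane constraints and y == level.
    # (The objective below uses quad_form(x, I) + (-2*x0)^T x, which differs from
    # ||x - x0||^2 only by the constant ||x0||^2.)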
def project_point(self, x0, level, max_distance_error=1e-2, verbose=False):
n = len(x0)
P = np.eye(n)
q = np.multiply(x0, -2)
x = cvxpy.Variable(n)
y = cvxpy.Variable()
A_ub_x, A_ub_y, b_ub = self.get_constraints()
objective = cvxpy.quad_form(x, P) + q.T @ x
constraints = []
constraints.append(A_ub_x @ x + A_ub_y * y <= b_ub)
constraints.append(y == level)
prob = cvxpy.Problem(cvxpy.Minimize(objective), constraints)
prob.solve(verbose=verbose, max_iter=10**7, time_limit=5)
#print("Solution = ", x.value, y.value)
if np.abs(level-y.value) > max_distance_error:
print("Warning, projection error above threshold: ", np.abs(level-y.value))
return x.value
def plot(self, points=[], temp_points=[]):
plt.figure()
xaxis = np.linspace(-self.bounds, self.bounds, num=100)
yaxis = np.linspace(-self.bounds, self.bounds, num=100)
result = np.zeros((len(xaxis),len(yaxis)))
for i, x in enumerate(xaxis):
for j, y in enumerate(yaxis):
result[j,i], _ = self.__call__([x,y])
c = plt.contour(xaxis, yaxis, result, 50)
plt.colorbar()
for p in temp_points:
plt.plot(p[0], p[1], 'o', color='red');
for i, p in enumerate(points):
plt.plot(p[0], p[1], 'o', color='black');
plt.text(p[0], p[1], str(i))
plt.show()
class LevelMethod2d:
def __init__(self, bounds=10, lambda_=0.29289, epsilon=0.001, max_iter=1000):
self.bounds = bounds
self.lambda_ = lambda_
self.epsilon = epsilon
self.max_iter = 1000
self.function = None
self.dim = None
self.function_points = None
self.current_iter = None
# Algorithm data
self.f_upstar = None
self.f_substar = None
self.x_upstar = None
self.x_substar = None
self.x = None
def cache_points(self, xaxis=None, yaxis=None):# Todo, for x of dim n
if xaxis is None:
xaxis = np.linspace(-self.bounds, self.bounds, num=100)
if yaxis is None:
yaxis = np.linspace(-self.bounds, self.bounds, num=100)
result = np.zeros((len(xaxis),len(yaxis)))
for i, x in enumerate(xaxis):
for j, y in enumerate(yaxis):
result[j,i], _ = self.function([x,y])
self.function_points = result
def plot(self, points=[], temp_points=[]):
plt.figure()
xaxis = np.linspace(-self.bounds, self.bounds, num=100)
yaxis = np.linspace(-self.bounds, self.bounds, num=100)
if self.function_points is None:
self.cache_points(xaxis, yaxis)
c = plt.contour(xaxis, yaxis, self.function_points, 50)
plt.colorbar()
for p in temp_points:
plt.plot(p[0], p[1], 'o', color='red');
for i, p in enumerate(points):
plt.plot(p[0], p[1], 'o', color='black');
plt.text(p[0], p[1], str(i))
plt.show()
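    # solve() implements the level-method iteration: query the oracle for f and a
    # subgradient g at the current x, add the resulting cutting plane, take the model
    # minimum f_substar as a lower bound and the best observed value f_upstar as an
    # upper bound, stop once the gap f_upstar - f_substar falls below epsilon, and
    # otherwise project x onto the level set {model <= f_substar + lambda_ * gap}.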
def solve(self, function, x, verbose=False, plot=False):
self.function = function
self.dim = len(x)
self.x = x
# Build the cutting plane model
self.model = CuttingPlaneModel(self.dim, self.bounds)
plot_points = [x]
self.f_upstar = math.inf
self.x_upstar = None
gap = math.inf
self.current_iter = 0
print(f"Iteration\tf*\t\tModel Min\t\tGap\t\tLevel\t Is on boder?")
while gap > self.epsilon:
# Oracle computes f and g
current_f, current_g = function(self.x)
# Update model
self.model.add_plane(current_f, current_g, self.x)
# Compute f_substar, f_upstar, x_upstar
self.x_substar, self.f_substar, is_on_border = self.model.solve(-self.bounds,self.bounds)
if self.f_upstar > current_f:
self.f_upstar = current_f
self.x_upstar = self.x
# Project x onto level set
gap = self.f_upstar - self.f_substar
level = self.f_substar + self.lambda_ * gap
if gap < -0.1:
print("Warning: Negative gap ", gap)
break
if is_on_border: # Project x on the border, as target level is infinite.
self.x = self.x_substar
else: # Project x on the target level
self.x = self.model.project_point(self.x, level, verbose=verbose)
print(f"{self.current_iter}\t\t{self.f_upstar:.6f}\t{self.f_substar:.6f}\t\t{gap:.6f}\t{level:.6f} {is_on_border}")
if plot:
plot_points.append(self.x)
self.model.plot(plot_points)
self.plot(plot_points)
self.current_iter += 1
if self.current_iter > self.max_iter:
print("Warning: Maximum number of iterations reached.")
break
if __name__ == "__main__":
from test_function import TestFunction
f = LevelMethod2d(bounds = 20)
f.solve(TestFunction(), [-1,-3], plot=False)
|
[
"numpy.abs",
"numpy.eye",
"numpy.multiply",
"numpy.hstack",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.append",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.empty",
"test_function.TestFunction",
"matplotlib.pyplot.show"
] |
[((261, 283), 'numpy.empty', 'np.empty', (['(0, dim + 1)'], {}), '((0, dim + 1))\n', (269, 283), True, 'import numpy as np\n'), ((501, 554), 'numpy.asarray', 'np.asarray', (['[[c[1], c[2]] for c in self.coefficients]'], {}), '([[c[1], c[2]] for c in self.coefficients])\n', (511, 554), True, 'import numpy as np\n'), ((646, 694), 'numpy.asarray', 'np.asarray', (['[(-c[0]) for c in self.coefficients]'], {}), '([(-c[0]) for c in self.coefficients])\n', (656, 694), True, 'import numpy as np\n'), ((825, 840), 'numpy.append', 'np.append', (['c', 'g'], {}), '(c, g)\n', (834, 840), True, 'import numpy as np\n'), ((868, 917), 'numpy.append', 'np.append', (['self.coefficients', '[new_plane]'], {'axis': '(0)'}), '(self.coefficients, [new_plane], axis=0)\n', (877, 917), True, 'import numpy as np\n'), ((2142, 2151), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2148, 2151), True, 'import numpy as np\n'), ((2164, 2183), 'numpy.multiply', 'np.multiply', (['x0', '(-2)'], {}), '(x0, -2)\n', (2175, 2183), True, 'import numpy as np\n'), ((2884, 2896), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2894, 2896), True, 'import matplotlib.pyplot as plt\n'), ((2914, 2961), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (2925, 2961), True, 'import numpy as np\n'), ((2978, 3025), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (2989, 3025), True, 'import numpy as np\n'), ((3226, 3263), 'matplotlib.pyplot.contour', 'plt.contour', (['xaxis', 'yaxis', 'result', '(50)'], {}), '(xaxis, yaxis, result, 50)\n', (3237, 3263), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3286), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3284, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3521, 3523), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4597), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4595, 4597), True, 'import matplotlib.pyplot as plt\n'), ((4619, 4666), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4630, 4666), True, 'import numpy as np\n'), ((4687, 4734), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4698, 4734), True, 'import numpy as np\n'), ((4846, 4897), 'matplotlib.pyplot.contour', 'plt.contour', (['xaxis', 'yaxis', 'self.function_points', '(50)'], {}), '(xaxis, yaxis, self.function_points, 50)\n', (4857, 4897), True, 'import matplotlib.pyplot as plt\n'), ((4910, 4924), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4922, 4924), True, 'import matplotlib.pyplot as plt\n'), ((5175, 5185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5183, 5185), True, 'import matplotlib.pyplot as plt\n'), ((7238, 7252), 'test_function.TestFunction', 'TestFunction', ([], {}), '()\n', (7250, 7252), False, 'from test_function import TestFunction\n'), ((439, 448), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (445, 448), True, 'import numpy as np\n'), ((2673, 2696), 'numpy.abs', 'np.abs', (['(level - y.value)'], {}), '(level - y.value)\n', (2679, 2696), True, 'import numpy as np\n'), ((3330, 3368), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""red"""'}), "(p[0], p[1], 'o', color='red')\n", (3338, 3368), True, 
'import matplotlib.pyplot as plt\n'), ((3421, 3461), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""black"""'}), "(p[0], p[1], 'o', color='black')\n", (3429, 3461), True, 'import matplotlib.pyplot as plt\n'), ((4158, 4205), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4169, 4205), True, 'import numpy as np\n'), ((4252, 4299), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4263, 4299), True, 'import numpy as np\n'), ((4976, 5014), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""red"""'}), "(p[0], p[1], 'o', color='red')\n", (4984, 5014), True, 'import matplotlib.pyplot as plt\n'), ((5075, 5115), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""black"""'}), "(p[0], p[1], 'o', color='black')\n", (5083, 5115), True, 'import matplotlib.pyplot as plt\n'), ((787, 804), 'numpy.multiply', 'np.multiply', (['g', 'x'], {}), '(g, x)\n', (798, 804), True, 'import numpy as np\n'), ((2782, 2805), 'numpy.abs', 'np.abs', (['(level - y.value)'], {}), '(level - y.value)\n', (2788, 2805), True, 'import numpy as np\n'), ((364, 381), 'numpy.hstack', 'np.hstack', (['(1, x)'], {}), '((1, x))\n', (373, 381), True, 'import numpy as np\n')]
|
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
# generate training data
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# option for fitting function
select = True # True / False
if select:
# Size with cosine function
nin = 1 # inputs
n1 = 1 # hidden layer 1 (linear)
n2 = 1 # hidden layer 2 (nonlinear)
n3 = 1 # hidden layer 3 (linear)
nout = 1 # outputs
else:
# Size with hyperbolic tangent function
nin = 1 # inputs
n1 = 2 # hidden layer 1 (linear)
n2 = 2 # hidden layer 2 (nonlinear)
n3 = 2 # hidden layer 3 (linear)
nout = 1 # outputs
# Initialize gekko
train = GEKKO()
test = GEKKO()
model = [train,test]
for m in model:
# input(s)
m.inpt = m.Param()
# layer 1
m.w1 = m.Array(m.FV, (nin,n1))
m.l1 = [m.Intermediate(m.w1[0,i]*m.inpt) for i in range(n1)]
# layer 2
m.w2a = m.Array(m.FV, (n1,n2))
m.w2b = m.Array(m.FV, (n1,n2))
if select:
m.l2 = [m.Intermediate(sum([m.cos(m.w2a[j,i]+m.w2b[j,i]*m.l1[j]) \
for j in range(n1)])) for i in range(n2)]
else:
m.l2 = [m.Intermediate(sum([m.tanh(m.w2a[j,i]+m.w2b[j,i]*m.l1[j]) \
for j in range(n1)])) for i in range(n2)]
# layer 3
m.w3 = m.Array(m.FV, (n2,n3))
m.l3 = [m.Intermediate(sum([m.w3[j,i]*m.l2[j] \
for j in range(n2)])) for i in range(n3)]
# output(s)
m.outpt = m.CV()
m.Equation(m.outpt==sum([m.l3[i] for i in range(n3)]))
# flatten matrices
m.w1 = m.w1.flatten()
m.w2a = m.w2a.flatten()
m.w2b = m.w2b.flatten()
m.w3 = m.w3.flatten()
# Fit parameter weights
m = train
m.inpt.value=x
m.outpt.value=y
m.outpt.FSTATUS = 1
for i in range(len(m.w1)):
m.w1[i].FSTATUS=1
m.w1[i].STATUS=1
m.w1[i].MEAS=1.0
for i in range(len(m.w2a)):
m.w2a[i].STATUS=1
m.w2b[i].STATUS=1
m.w2a[i].FSTATUS=1
m.w2b[i].FSTATUS=1
m.w2a[i].MEAS=1.0
m.w2b[i].MEAS=0.5
for i in range(len(m.w3)):
m.w3[i].FSTATUS=1
m.w3[i].STATUS=1
m.w3[i].MEAS=1.0
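# IMODE=2: steady-state regression (fit the weights to the measured x/y data); EV_TYPE=2: squared-error objective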
m.options.IMODE = 2
m.options.SOLVER = 3
m.options.EV_TYPE = 2
m.solve(disp=False)
# Test sample points
m = test
for i in range(len(m.w1)):
m.w1[i].MEAS=train.w1[i].NEWVAL
m.w1[i].FSTATUS = 1
print('w1['+str(i)+']: '+str(m.w1[i].MEAS))
for i in range(len(m.w2a)):
m.w2a[i].MEAS=train.w2a[i].NEWVAL
m.w2b[i].MEAS=train.w2b[i].NEWVAL
m.w2a[i].FSTATUS = 1
m.w2b[i].FSTATUS = 1
print('w2a['+str(i)+']: '+str(m.w2a[i].MEAS))
print('w2b['+str(i)+']: '+str(m.w2b[i].MEAS))
for i in range(len(m.w3)):
m.w3[i].MEAS=train.w3[i].NEWVAL
m.w3[i].FSTATUS = 1
print('w3['+str(i)+']: '+str(m.w3[i].MEAS))
m.inpt.value=np.linspace(-2*np.pi,4*np.pi,100)
m.options.IMODE = 2
m.options.SOLVER = 3
m.solve(disp=False)
plt.figure()
plt.plot(x,y,'bo',label='data')
plt.plot(test.inpt.value,test.outpt.value,'r-',label='predict')
plt.legend(loc='best')
plt.ylabel('y')
plt.xlabel('x')
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"gekko.GEKKO",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((107, 138), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', '(20)'], {}), '(0.0, 2 * np.pi, 20)\n', (118, 138), True, 'import numpy as np\n'), ((139, 148), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (145, 148), True, 'import numpy as np\n'), ((660, 667), 'gekko.GEKKO', 'GEKKO', ([], {}), '()\n', (665, 667), False, 'from gekko import GEKKO\n'), ((676, 683), 'gekko.GEKKO', 'GEKKO', ([], {}), '()\n', (681, 683), False, 'from gekko import GEKKO\n'), ((2750, 2789), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(4 * np.pi)', '(100)'], {}), '(-2 * np.pi, 4 * np.pi, 100)\n', (2761, 2789), True, 'import numpy as np\n'), ((2846, 2858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2856, 2858), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2893), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""bo"""'], {'label': '"""data"""'}), "(x, y, 'bo', label='data')\n", (2867, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2891, 2957), 'matplotlib.pyplot.plot', 'plt.plot', (['test.inpt.value', 'test.outpt.value', '"""r-"""'], {'label': '"""predict"""'}), "(test.inpt.value, test.outpt.value, 'r-', label='predict')\n", (2899, 2957), True, 'import matplotlib.pyplot as plt\n'), ((2955, 2977), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2965, 2977), True, 'import matplotlib.pyplot as plt\n'), ((2978, 2993), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2988, 2993), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3009), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3004, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3018, 3020), True, 'import matplotlib.pyplot as plt\n')]
|
import argparse
import os
import sys
import numpy as np
import pdb
from tqdm import tqdm
import cv2
import glob
from numpy import *
import matplotlib
#matplotlib.use("Agg")
#matplotlib.use("wx")
#matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import scipy
from scipy.special import softmax
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch.nn as nn
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from PIL import Image
# class load_data(Dataset):
# def __init__(self,args,img_path):
# super().__init__()
# self.args = args
# self.img_path = img_path
# def __getitem__(self,img_path):
# image = Image.open(self.img_path).convert('RGB')
# image = np.array(image).astype(np.float32).transpose((2, 0, 1))
# image = torch.from_numpy(image).float()
# return image
def get_model(nclass,args):
model = DeepLab(num_classes=nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn)
# Using cuda
if args.cuda:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
patch_replication_callback(model)
model = model.cuda()
checkpoint = torch.load(args.resume)
if args.cuda:
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
return model
def get_pred(img_path,model,args):
model.eval()
image = Image.open(img_path).convert('RGB')
#image = image.resize((512,512), Image.ANTIALIAS)
image = np.array(image).astype(np.float32).transpose((2, 0, 1))
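    # add a batch dimension so the network input has shape (1, C, H, W)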
image = np.expand_dims(image, axis=0)
image = torch.from_numpy(image).float()
if args.cuda:
image = image.cuda()
with torch.no_grad():
output = model(image)
#pdb.set_trace()
# normalize = nn.Softmax(dim=1)
# output = normalize(output)
pred = output.data.cpu().numpy()
return pred
def F1_loss(pred,target):
    N = np.logical_or(pred,target)   # union of predicted and ground-truth foreground pixels
    Tp = np.logical_and(pred,target) # true positives
    Fn = np.logical_xor(target,Tp)   # false negatives (boolean-safe element-wise difference)
    #Fn = np.bitwise_xor(target,Tp)
    Fp = np.logical_xor(pred,Tp)     # false positives
    Tn = np.logical_xor(N,np.logical_or(np.logical_or(Tp,Fp),Fn))
#pdb.set_trace()
precision = np.sum(Tp)/(np.sum(Tp)+np.sum(Fp))
recall = np.sum(Tp)/(np.sum(Tp)+np.sum(Fn))
F1 = (2*np.sum(Tp))/(2*np.sum(Tp)+np.sum(Fn)+np.sum(Fp))
#F1 = np.true_divide(np.add(2*Tp,Fn,Fp),2*Tp)
#F1 = np.true_divide(np.sum(np.multiply(2,Tp),Fn,Fp),np.multiply(2,Tp))
#F1 = np.true_divide(np.multiply(2,Tp),np.multiply(np.sum(Tp,Fn),np.sum(Tp,Fn)))
#accuracy = np.true_divide(np.add(Tp,Tn),np.add(Tp,Tn,Fp,Fn))
accuracy = np.sum(Tp+Tn)/np.sum(N)
return F1 , accuracy, precision, recall
def F1_rwi(pred,target):
#pred = pred[:,:,0] # using only the red channel
#target = target[:,:,0]
N = np.logical_or(pred, target) # logical
Tp = np.logical_and(pred, target)
    Fn = np.bitwise_xor(target, Tp) # false negatives: target pixels missed by the prediction
Fp = np.bitwise_xor(pred, Tp)
xx= np.logical_or(np.logical_or(Tp,Fp), Fn)
Tn = np.bitwise_xor(N, xx)
precision = Tp.sum()/(Tp.sum()+ Fp.sum() )
recall = Tp.sum()/(Tp.sum()+ Fn.sum())
F1 = 2*Tp.sum() /(2*Tp.sum()+ Fn.sum()+ Fp.sum())
accuracy = (Tp.sum()+Tn.sum())/N.sum()
return F1, accuracy, precision, recall
if __name__=='__main__':
#### Parameters and paths:
nclass = 2
save_rrc_res_path = "/path/to/deepLabV3Plus/deeplabv3plus_pixelWise/results/validation_images/B_260/"
model_path = "/path/to/deepLabV3Plus/deeplabv3plus_pixelWise/results/icdar_models/run/icdar/deeplab-resnet/model_best.pth.tar"
alphabet="#abcdefghijklmnopqrstuvwxyz1234567890@"
img_path = "/path/to/GAN_text/data/text_segmentation/test/A/"
gt_path = "/path/to/GAN_text/data/text_segmentation/test/B_gt_1chanel/"
### args
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Heatmap Prediction")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 16)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
##checking point
parser.add_argument('--resume', type=str, default= model_path,
help='put the path to resuming file if needed')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
image_files = sorted(glob.glob(img_path+'*.png')) #'*.jpg'))
trained_model = get_model(nclass,args)
f1_all = []
accuracy_all = []
f1_all_rwi = []
accuracy_all_rwi = []
#for img_path in sys.argv[1:]:
#for i in range(0,10):
for i in range(0,len(image_files)):
img_path = image_files[i]
print("image path is: {}".format(img_path))
img_name = img_path.split('/')[-1].split('.')[0]
gt = asarray(Image.open(gt_path+img_name+'.png'))
#trained_model = get_model(nclass,args)
#pdb.set_trace()
# load_test_data = load_data(args,img_path)
# dataloader = DataLoader(load_test_data)
# for ii, img_test in enumerate(dataloader):
pred = get_pred(img_path,trained_model,args)
pred = softmax(pred, axis=1)
#image_source = cv2.imread(img_path)
#image_source = cv2.resize(image_source, (512, 512))
#pdb.set_trace()
#fig = plt.figure()
# plt.imshow(pred.squeeze()[1,:,:])
# plt.show()
# res = pred.squeeze()[1,:,:]>0.3
#res = np.argmax(pred.squeeze(), axis=0)
#pdb.set_trace()
# plt.imshow(res)
# plt.show()
#ret,pred_bin = cv2.threshold(pred.squeeze()[1,:,:],0.2,255,cv2.THRESH_BINARY)
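        # per-pixel class index from the argmax over the softmax class probabilities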
pred_bin = np.argmax(pred.squeeze(), axis=0)
#pdb.set_trace()
f1, acc, prc, rcl = F1_loss(pred_bin>5,gt>5)
print("F1 is {}, accuracy is {}, precision is {}, recall is {}".format(f1,acc,prc,rcl))
#pdb.set_trace()
pred_bin_8 = pred_bin.astype(np.uint8)
f1_rwi, acc_rwi, prc_rwi, rcl_rwi = F1_rwi(pred_bin_8>5,gt>5)
print("F1_rwi is {}, accuracy_rwi is {}, precision_rwi is {}, recall_rwi is {}".format(f1_rwi,acc_rwi,prc_rwi,rcl_rwi))
f1_all.append(f1)
accuracy_all.append(acc)
f1_all_rwi.append(f1_rwi)
accuracy_all_rwi.append(acc_rwi)
print("the average of F1 is {}".format(np.mean(f1_all)))
print("the average accuracy is {}".format(np.mean(accuracy_all)))
print("the average of F1_rwi is {}".format(np.mean(f1_all_rwi)))
print("the average accuracy_rwi is {}".format(np.mean(accuracy_all_rwi)))
|
[
"modeling.sync_batchnorm.replicate.patch_replication_callback",
"numpy.mean",
"PIL.Image.open",
"numpy.logical_and",
"argparse.ArgumentParser",
"torch.load",
"numpy.bitwise_xor",
"numpy.logical_or",
"numpy.subtract",
"torch.nn.DataParallel",
"torch.from_numpy",
"numpy.sum",
"numpy.array",
"torch.cuda.is_available",
"numpy.expand_dims",
"torch.no_grad",
"glob.glob",
"scipy.special.softmax"
] |
[((1440, 1463), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (1450, 1463), False, 'import torch\n'), ((1961, 1990), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1975, 1990), True, 'import numpy as np\n'), ((2333, 2360), 'numpy.logical_or', 'np.logical_or', (['pred', 'target'], {}), '(pred, target)\n', (2346, 2360), True, 'import numpy as np\n'), ((2380, 2408), 'numpy.logical_and', 'np.logical_and', (['pred', 'target'], {}), '(pred, target)\n', (2394, 2408), True, 'import numpy as np\n'), ((2417, 2440), 'numpy.subtract', 'np.subtract', (['target', 'Tp'], {}), '(target, Tp)\n', (2428, 2440), True, 'import numpy as np\n'), ((2524, 2545), 'numpy.subtract', 'np.subtract', (['pred', 'Tp'], {}), '(pred, Tp)\n', (2535, 2545), True, 'import numpy as np\n'), ((3258, 3285), 'numpy.logical_or', 'np.logical_or', (['pred', 'target'], {}), '(pred, target)\n', (3271, 3285), True, 'import numpy as np\n'), ((3305, 3333), 'numpy.logical_and', 'np.logical_and', (['pred', 'target'], {}), '(pred, target)\n', (3319, 3333), True, 'import numpy as np\n'), ((3343, 3369), 'numpy.bitwise_xor', 'np.bitwise_xor', (['target', 'Tp'], {}), '(target, Tp)\n', (3357, 3369), True, 'import numpy as np\n'), ((3417, 3441), 'numpy.bitwise_xor', 'np.bitwise_xor', (['pred', 'Tp'], {}), '(pred, Tp)\n', (3431, 3441), True, 'import numpy as np\n'), ((3499, 3520), 'numpy.bitwise_xor', 'np.bitwise_xor', (['N', 'xx'], {}), '(N, xx)\n', (3513, 3520), True, 'import numpy as np\n'), ((4288, 4367), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch DeeplabV3Plus Heatmap Prediction"""'}), "(description='PyTorch DeeplabV3Plus Heatmap Prediction')\n", (4311, 4367), False, 'import argparse\n'), ((1297, 1350), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'args.gpu_ids'}), '(model, device_ids=args.gpu_ids)\n', (1318, 1350), False, 'import torch\n'), ((1359, 1392), 'modeling.sync_batchnorm.replicate.patch_replication_callback', 'patch_replication_callback', (['model'], {}), '(model)\n', (1385, 1392), False, 'from modeling.sync_batchnorm.replicate import patch_replication_callback\n'), ((2092, 2107), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2105, 2107), False, 'import torch\n'), ((2573, 2598), 'numpy.logical_or', 'np.logical_or', (['Tp', 'Fp', 'Fn'], {}), '(Tp, Fp, Fn)\n', (2586, 2598), True, 'import numpy as np\n'), ((2636, 2646), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2642, 2646), True, 'import numpy as np\n'), ((2684, 2694), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2690, 2694), True, 'import numpy as np\n'), ((3073, 3088), 'numpy.sum', 'np.sum', (['(Tp + Tn)'], {}), '(Tp + Tn)\n', (3079, 3088), True, 'import numpy as np\n'), ((3087, 3096), 'numpy.sum', 'np.sum', (['N'], {}), '(N)\n', (3093, 3096), True, 'import numpy as np\n'), ((3464, 3485), 'numpy.logical_or', 'np.logical_or', (['Tp', 'Fp'], {}), '(Tp, Fp)\n', (3477, 3485), True, 'import numpy as np\n'), ((5542, 5567), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5565, 5567), False, 'import torch\n'), ((6002, 6031), 'glob.glob', 'glob.glob', (["(img_path + '*.png')"], {}), "(img_path + '*.png')\n", (6011, 6031), False, 'import glob\n'), ((6794, 6815), 'scipy.special.softmax', 'softmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (6801, 6815), False, 'from scipy.special import softmax\n'), ((1791, 1811), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1801, 1811), False, 'from 
PIL import Image\n'), ((2003, 2026), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2019, 2026), False, 'import torch\n'), ((2648, 2658), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2654, 2658), True, 'import numpy as np\n'), ((2659, 2669), 'numpy.sum', 'np.sum', (['Fp'], {}), '(Fp)\n', (2665, 2669), True, 'import numpy as np\n'), ((2696, 2706), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2702, 2706), True, 'import numpy as np\n'), ((2707, 2717), 'numpy.sum', 'np.sum', (['Fn'], {}), '(Fn)\n', (2713, 2717), True, 'import numpy as np\n'), ((2732, 2742), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2738, 2742), True, 'import numpy as np\n'), ((2769, 2779), 'numpy.sum', 'np.sum', (['Fp'], {}), '(Fp)\n', (2775, 2779), True, 'import numpy as np\n'), ((6442, 6481), 'PIL.Image.open', 'Image.open', (["(gt_path + img_name + '.png')"], {}), "(gt_path + img_name + '.png')\n", (6452, 6481), False, 'from PIL import Image\n'), ((7978, 7993), 'numpy.mean', 'np.mean', (['f1_all'], {}), '(f1_all)\n', (7985, 7993), True, 'import numpy as np\n'), ((8046, 8067), 'numpy.mean', 'np.mean', (['accuracy_all'], {}), '(accuracy_all)\n', (8053, 8067), True, 'import numpy as np\n'), ((8122, 8141), 'numpy.mean', 'np.mean', (['f1_all_rwi'], {}), '(f1_all_rwi)\n', (8129, 8141), True, 'import numpy as np\n'), ((8198, 8223), 'numpy.mean', 'np.mean', (['accuracy_all_rwi'], {}), '(accuracy_all_rwi)\n', (8205, 8223), True, 'import numpy as np\n'), ((2758, 2768), 'numpy.sum', 'np.sum', (['Fn'], {}), '(Fn)\n', (2764, 2768), True, 'import numpy as np\n'), ((1893, 1908), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1901, 1908), True, 'import numpy as np\n'), ((2747, 2757), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2753, 2757), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.integrate import odeint
class MorrisLecar:
"""
Creates a MorrisLecar model.
"""
def __init__(self, C=20, VL=-60, VCa=120, VK=-84, gL=2, gCa=4, gK=8,
V1=-1.2, V2=18, V3=12, V4=17.4, phi=0.06):
"""
Initializes the model.
Args:
C (int, float): Capacitance of the membrane.
VL (int, float): Potential L.
VCa (int, float): Potential Ca.
VK (int, float): Potential K.
gL (int, float): Conductance L.
gCa (int, float): Conductance Ca.
gK (int, float): Conductance K.
V1 (int, float): Potential at which Mss converges.
V2 (int, float): Reciprocal of slope of Mss.
V3 (int, float): Potential at which Nss converges.
V4 (int, float): Reciprocal of slope of Nss.
phi (int, float): Time scale recovery.
"""
self.C = C
self.VL = VL
self.VCa = VCa
self.VK = VK
self.gL = gL
self.gCa = gCa
self.gK = gK
self.V1 = V1
self.V2 = V2
self.V3 = V3
self.V4 = V4
self.phi = phi
self.t = None
self.dt = None
self.tvec = None
self.V = None
self.N = None
def __repr__(self):
"""
Visualize model parameters when printing.
"""
return (f'MorrisLecar(C={self.C}, VL={self.VL}, VCa={self.VCa}, VK={self.VK}, '
f'gL={self.gL}, gCa={self.gCa}, gK={self.gK}, V1={self.V1}, V2={self.V2}, '
f'V3={self.V3}, V4={self.V4}, phi={self.phi})')
def _system_equations(self, X, t, current):
"""
Defines the equations of the dynamical system for integration.
"""
Mss = (1 + np.tanh((X[0] - self.V1) / self.V2)) / 2
Nss = (1 + np.tanh((X[0] - self.V3) / self.V4)) / 2
tau = 1 / self.phi * (np.cosh((X[0] - self.V3) / (2 * self.V4)))
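        # dV/dt from the membrane current balance; dN/dt relaxes the K+ gating variable N toward Nss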
return [(1 / self.C) * (current - self.gL * (X[0] - self.VL) - self.gCa * Mss * (X[0] - self.VCa) - self.gK * X[1] * (X[0] - self.VK)),
(Nss - X[1]) / tau]
def run(self, X0=[0, 0], current=1, t=100, dt=0.01):
"""
Runs the model.
Args:
X0 (list, optional): Initial values of V and N. Defaults to [0, 0].
current (int, optional): External current. Defaults to 1.
t (int, optional): Total time for the simulation. Defaults to 100.
dt (float, optional): Simulation step. Defaults to 0.01.
"""
self.current = current
self.t = t
self.dt = dt
self.tvec = np.arange(0, self.t, self.dt)
X = odeint(self._system_equations, X0, self.tvec, (current,))
self.V, self.N = X[:, 0], X[:, 1]
|
[
"scipy.integrate.odeint",
"numpy.tanh",
"numpy.cosh",
"numpy.arange"
] |
[((2678, 2707), 'numpy.arange', 'np.arange', (['(0)', 'self.t', 'self.dt'], {}), '(0, self.t, self.dt)\n', (2687, 2707), True, 'import numpy as np\n'), ((2720, 2777), 'scipy.integrate.odeint', 'odeint', (['self._system_equations', 'X0', 'self.tvec', '(current,)'], {}), '(self._system_equations, X0, self.tvec, (current,))\n', (2726, 2777), False, 'from scipy.integrate import odeint\n'), ((1944, 1985), 'numpy.cosh', 'np.cosh', (['((X[0] - self.V3) / (2 * self.V4))'], {}), '((X[0] - self.V3) / (2 * self.V4))\n', (1951, 1985), True, 'import numpy as np\n'), ((1813, 1848), 'numpy.tanh', 'np.tanh', (['((X[0] - self.V1) / self.V2)'], {}), '((X[0] - self.V1) / self.V2)\n', (1820, 1848), True, 'import numpy as np\n'), ((1873, 1908), 'numpy.tanh', 'np.tanh', (['((X[0] - self.V3) / self.V4)'], {}), '((X[0] - self.V3) / self.V4)\n', (1880, 1908), True, 'import numpy as np\n')]
|
import logging
import numpy as np
from .transformer import Transformer, FFTTransformer
logger = logging.getLogger(__name__)
class MapScaler:
def __init__(self, xmap, scattering='xray'):
self.xmap = xmap
self.scattering = scattering
self._model_map = xmap.zeros_like(xmap)
def subtract(self, structure):
if self.xmap.hkl is not None:
hkl = self.xmap.hkl
transformer = FFTTransformer(
structure, self._model_map, hkl=hkl, scattering=self.scattering)
else:
transformer = Transformer(
structure, self._model_map, simple=True,
rmax=3, scattering=self.scattering)
logger.info("Subtracting density.")
transformer.density()
self.xmap.array -= self._model_map.array
def scale(self, structure, radius=1):
if self.xmap.hkl is not None:
hkl = self.xmap.hkl
transformer = FFTTransformer(structure, self._model_map,
hkl=hkl, scattering=self.scattering)
else:
transformer = Transformer(structure, self._model_map, simple=True,
rmax=3, scattering=self.scattering)
# Get all map coordinates of interest:
transformer.mask(radius)
mask = self._model_map.array > 0
# Calculate map based on structure:
transformer.reset(full=True)
transformer.density()
# Get all map values of interest
xmap_masked = self.xmap.array[mask]
model_masked = self._model_map.array[mask]
# Get the mean of masked observed and masked calculated map values
xmap_masked_mean = xmap_masked.mean()
model_masked_mean = model_masked.mean()
# Get optimal scaling factor and mean-difference.
xmap_masked -= xmap_masked_mean
model_masked -= model_masked_mean
s2 = np.dot(model_masked, xmap_masked)
s1 = np.dot(xmap_masked, xmap_masked)
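        # least-squares slope (s2/s1) and offset k that map the observed density onto the calculated density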
scaling_factor = s2 / s1
k = model_masked_mean - scaling_factor * xmap_masked_mean
logger.info(f"L2 scaling: S = {scaling_factor:.2f}\tk = {k:.2f}")
# Scale the observed map to the calculated map
self.xmap.array = scaling_factor * self.xmap.array + k
transformer.reset(full=True)
def cutoff(self, cutoff_value, value=-1):
cutoff_mask = self.xmap.array < cutoff_value
self.xmap.array[cutoff_mask] = value
logger.info(f"Map absolute cutoff value: {cutoff_value:.2f}")
|
[
"logging.getLogger",
"numpy.dot"
] |
[((99, 126), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (116, 126), False, 'import logging\n'), ((1942, 1975), 'numpy.dot', 'np.dot', (['model_masked', 'xmap_masked'], {}), '(model_masked, xmap_masked)\n', (1948, 1975), True, 'import numpy as np\n'), ((1989, 2021), 'numpy.dot', 'np.dot', (['xmap_masked', 'xmap_masked'], {}), '(xmap_masked, xmap_masked)\n', (1995, 2021), True, 'import numpy as np\n')]
|
from robot.thymio_robot import ThymioII
from robot.vrep_robot import VrepRobot
from aseba.aseba import Aseba
from utility.util_functions import normalize
import numpy as np
T_SEN_MIN = 0
T_SEN_MAX = 4500
class EvolvedRobot(VrepRobot, ThymioII):
def __init__(self, name, client_id, id, op_mode, chromosome, robot_type):
VrepRobot.__init__(self, client_id, id, op_mode, robot_type)
ThymioII.__init__(self, name)
self.chromosome = chromosome
self.n_t_sensor_activation = np.array([])
self.t_sensor_activation = np.array([])
def t_read_prox(self):
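        # read the raw proximity sensor values and normalize the activations to [0, 1]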
self.t_sensor_activation = np.array(
super(EvolvedRobot, self).t_read_prox())
self.n_t_sensor_activation = np.array(
[normalize(xi, T_SEN_MIN, T_SEN_MAX, 0.0, 1.0) for xi in self.t_sensor_activation])
return self.n_t_sensor_activation
|
[
"numpy.array",
"robot.vrep_robot.VrepRobot.__init__",
"utility.util_functions.normalize",
"robot.thymio_robot.ThymioII.__init__"
] |
[((335, 395), 'robot.vrep_robot.VrepRobot.__init__', 'VrepRobot.__init__', (['self', 'client_id', 'id', 'op_mode', 'robot_type'], {}), '(self, client_id, id, op_mode, robot_type)\n', (353, 395), False, 'from robot.vrep_robot import VrepRobot\n'), ((404, 433), 'robot.thymio_robot.ThymioII.__init__', 'ThymioII.__init__', (['self', 'name'], {}), '(self, name)\n', (421, 433), False, 'from robot.thymio_robot import ThymioII\n'), ((509, 521), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (517, 521), True, 'import numpy as np\n'), ((557, 569), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (565, 569), True, 'import numpy as np\n'), ((756, 801), 'utility.util_functions.normalize', 'normalize', (['xi', 'T_SEN_MIN', 'T_SEN_MAX', '(0.0)', '(1.0)'], {}), '(xi, T_SEN_MIN, T_SEN_MAX, 0.0, 1.0)\n', (765, 801), False, 'from utility.util_functions import normalize\n')]
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import numpy as np
import ase.io
from os import path
from nomad.datamodel import EntryArchive
from nomad.units import ureg as units
from nomad.datamodel.metainfo.simulation.run import Run, Program, TimeRun
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms)
from nomad.datamodel.metainfo.simulation.method import (
Method, Electronic, BasisSet)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, Charges)
from nomad.parsing.file_parser import TextParser, Quantity
from .metainfo.lobster import x_lobster_section_cohp, x_lobster_section_coop
'''
This is a LOBSTER code parser.
'''
e = (1 * units.e).to_base_units().magnitude
eV = (1 * units.eV).to_base_units().magnitude
def parse_ICOXPLIST(fname, scc, method):
def icoxp_line_split(string):
tmp = string.split()
# LOBSTER version 3 and above
if len(tmp) == 8:
return [tmp[1], tmp[2], float(tmp[3]), [int(tmp[4]),
int(tmp[5]), int(tmp[6])], float(tmp[7])]
# LOBSTER versions below 3
elif len(tmp) == 6:
return [tmp[1], tmp[2], float(tmp[3]), float(tmp[4]), int(tmp[5])]
icoxplist_parser = TextParser(quantities=[
Quantity('icoxpslist_for_spin', r'\s*CO[OH]P.*spin\s*\d\s*([^#]+[-\d\.]+)',
repeats=True,
sub_parser=TextParser(quantities=[
Quantity('line',
# LOBSTER version 3 and above
r'(\s*\d+\s+\w+\s+\w+\s+[\.\d]+\s+[-\d]+\s+[-\d]+\s+[-\d]+\s+[-\.\d]+\s*)|'
# LOBSTER versions below 3
r'(\s*\d+\s+\w+\s+\w+\s+[\.\d]+\s+[-\.\d]+\s+[\d]+\s*)',
repeats=True, str_operation=icoxp_line_split)])
)
])
if not path.isfile(fname):
return
icoxplist_parser.mainfile = fname
icoxplist_parser.parse()
icoxp = []
for spin, icoxplist in enumerate(icoxplist_parser.get('icoxpslist_for_spin')):
lines = icoxplist.get('line')
if lines is None:
break
if type(lines[0][4]) is int:
a1, a2, distances, tmp, bonds = zip(*lines)
else:
a1, a2, distances, v, tmp = zip(*lines)
icoxp.append(0)
icoxp[-1] = list(tmp)
if spin == 0:
if method == 'o':
section = scc.m_create(x_lobster_section_coop)
elif method == 'h':
section = scc.m_create(x_lobster_section_cohp)
setattr(section, "x_lobster_number_of_co{}p_pairs".format(
method), len(list(a1)))
setattr(section, "x_lobster_co{}p_atom1_labels".format(
method), list(a1))
setattr(section, "x_lobster_co{}p_atom2_labels".format(
method), list(a2))
setattr(section, "x_lobster_co{}p_distances".format(
method), np.array(distances) * units.angstrom)
# version specific entries
if 'v' in locals():
setattr(section, "x_lobster_co{}p_translations".format(
method), list(v))
if 'bonds' in locals():
setattr(section, "x_lobster_co{}p_number_of_bonds".format(
method), list(bonds))
if len(icoxp) > 0:
setattr(section, "x_lobster_integrated_co{}p_at_fermi_level".format(
method), np.array(icoxp) * units.eV)
def parse_COXPCAR(fname, scc, method, logger):
coxpcar_parser = TextParser(quantities=[
Quantity('coxp_pairs', r'No\.\d+:(\w{1,2}\d+)->(\w{1,2}\d+)\(([\d\.]+)\)\s*?',
repeats=True),
Quantity('coxp_lines', r'\n\s*(-*\d+\.\d+(?:[ \t]+-*\d+\.\d+)+)',
repeats=True)
])
if not path.isfile(fname):
return
coxpcar_parser.mainfile = fname
coxpcar_parser.parse()
if method == 'o':
if not scc.x_lobster_section_coop:
section = scc.m_create(x_lobster_section_coop)
else:
section = scc.x_lobster_section_coop
elif method == 'h':
if not scc.x_lobster_section_cohp:
section = scc.m_create(x_lobster_section_cohp)
else:
section = scc.x_lobster_section_cohp
pairs = coxpcar_parser.get('coxp_pairs')
if pairs is None:
logger.warning('No CO{}P values detected in CO{}PCAR.lobster.'.format(
method.upper(), method.upper()))
return
a1, a2, distances = zip(*pairs)
number_of_pairs = len(list(a1))
setattr(section, "x_lobster_number_of_co{}p_pairs".format(
method), number_of_pairs)
setattr(section, "x_lobster_co{}p_atom1_labels".format(
method), list(a1))
setattr(section, "x_lobster_co{}p_atom2_labels".format(
method), list(a2))
setattr(section, "x_lobster_co{}p_distances".format(
method), np.array(distances) * units.angstrom)
coxp_lines = coxpcar_parser.get('coxp_lines')
if coxp_lines is None:
logger.warning('No CO{}P values detected in CO{}PCAR.lobster.'
'The file is likely incomplete'.format(
method.upper(), method.upper()))
return
coxp_lines = list(zip(*coxp_lines))
setattr(section, "x_lobster_number_of_co{}p_values".format(
method), len(coxp_lines[0]))
setattr(section, "x_lobster_co{}p_energies".format(
method), np.array(coxp_lines[0]) * units.eV)
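    # 2*n_pairs+3 columns => one spin channel; 4*n_pairs+5 columns => spin-polarized (two channels)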
if len(coxp_lines) == 2 * number_of_pairs + 3:
coxp = [[x] for x in coxp_lines[3::2]]
icoxp = [[x] for x in coxp_lines[4::2]]
acoxp = [coxp_lines[1]]
aicoxp = [coxp_lines[2]]
elif len(coxp_lines) == 4 * number_of_pairs + 5:
coxp = [x for x in zip(coxp_lines[5:number_of_pairs * 2 + 4:2],
coxp_lines[number_of_pairs * 2 + 5: 4 * number_of_pairs + 4:2])]
icoxp = [x for x in zip(coxp_lines[6:number_of_pairs * 2 + 5:2],
coxp_lines[number_of_pairs * 2 + 6: 4 * number_of_pairs + 5:2])]
acoxp = [coxp_lines[1], coxp_lines[3]]
aicoxp = [coxp_lines[2], coxp_lines[4]]
else:
logger.warning('Unexpected number of columns {} '
'in CO{}PCAR.lobster.'.format(len(coxp_lines),
method.upper()))
return
# FIXME: correct magnitude?
setattr(section, "x_lobster_co{}p_values".format(
method), np.array(coxp))
setattr(section, "x_lobster_average_co{}p_values".format(
method), np.array(acoxp))
setattr(section, "x_lobster_integrated_co{}p_values".format(
method), np.array(icoxp) * units.eV)
setattr(section, "x_lobster_average_integrated_co{}p_values".format(
method), np.array(aicoxp) * units.eV)
setattr(section, "x_lobster_integrated_co{}p_values".format(
method), np.array(icoxp) * units.eV)
def parse_CHARGE(fname, scc):
charge_parser = TextParser(quantities=[
Quantity(
'charges', r'\s*\d+\s+[A-Za-z]{1,2}\s+([-\d\.]+)\s+([-\d\.]+)\s*', repeats=True)
])
if not path.isfile(fname):
return
charge_parser.mainfile = fname
charge_parser.parse()
charges = charge_parser.get('charges')
if charges is not None:
sec_charges = scc.m_create(Charges)
sec_charges.analysis_method = "mulliken"
sec_charges.kind = "integrated"
sec_charges.value = np.array(list(zip(*charges))[0]) * units.elementary_charge
sec_charges = scc.m_create(Charges)
sec_charges.analysis_method = "loewdin"
sec_charges.kind = "integrated"
sec_charges.value = np.array(list(zip(*charges))[1]) * units.elementary_charge
def parse_DOSCAR(fname, run, logger):
def parse_species(run, atomic_numbers):
"""
If we don't have any structure from the underlying DFT code, we can
at least figure out what atoms we have in the structure. The best place
to get this info from is the DOSCAR.lobster
"""
if not run.system:
system = run.m_create(System)
system.atoms = Atoms(species=atomic_numbers, periodic=[True, True, True])
def translate_lm(lm):
lm_dictionary = {
's': [0, 0],
'p_z': [1, 0],
'p_x': [1, 1],
'p_y': [1, 2],
'd_z^2': [2, 0],
'd_xz': [2, 1],
'd_yz': [2, 2],
'd_xy': [2, 3],
'd_x^2-y^2': [2, 4],
'z^3': [3, 0],
'xz^2': [3, 1],
'yz^2': [3, 2],
'xyz': [3, 3],
'z(x^2-y^2)': [3, 4],
'x(x^2-3y^2)': [3, 5],
'y(3x^2-y^2)': [3, 6],
}
return lm_dictionary.get(lm[1:])
if not path.isfile(fname):
return
energies = []
dos_values = []
integral_dos = []
atom_projected_dos_values = []
atom_index = 0
n_atoms = 0
n_dos = 0
atomic_numbers = []
lms = []
with open(fname) as f:
for i, line in enumerate(f):
if i == 0:
n_atoms = int(line.split()[0])
if i == 1:
_ = float(line.split()[0]) * units.angstrom**3
if i == 5:
n_dos = int(line.split()[2])
if 'Z=' in line:
atom_index += 1
atom_projected_dos_values.append([])
lms.append((line.split(';')[-1]).split())
atomic_numbers.append(int(line.split(';')[-2].split('=')[1]))
continue
if i > 5:
line = [float(x) for x in line.split()]
if atom_index == 0:
energies.append(line[0])
if len(line) == 3:
dos_values.append([line[1]])
integral_dos.append([line[2]])
elif len(line) == 5:
dos_values.append([line[1], line[2]])
integral_dos.append([line[3], line[4]])
else:
atom_projected_dos_values[-1].append(line[1:])
if len(atomic_numbers) > 0 and len(atomic_numbers) == n_atoms:
parse_species(run, atomic_numbers)
if n_dos == 0:
return
if len(dos_values) == n_dos:
dos = run.calculation[0].m_create(Dos, Calculation.dos_electronic)
dos.n_energies = n_dos
dos.energies = energies * units.eV
value = list(zip(*dos_values))
n_electrons = sum(atomic_numbers)
index = (np.abs(energies)).argmin()
# integrated dos at the Fermi level should be the number of electrons
n_valence_electrons = int(round(sum(integral_dos[index])))
n_core_electrons = n_electrons - n_valence_electrons
value_integrated = np.array(list(zip(*integral_dos))) + n_core_electrons / len(integral_dos[0])
for spin_i in range(len(value)):
dos_total = dos.m_create(DosValues, Dos.total)
dos_total.spin = spin_i
dos_total.value = value[spin_i] * (1 / units.eV)
dos_total.value_integrated = value_integrated[spin_i]
else:
logger.warning('Unable to parse total dos from DOSCAR.lobster, \
it doesn\'t contain enough dos values')
return
for atom_i, pdos in enumerate(atom_projected_dos_values):
if len(pdos) != n_dos:
logger.warning('Unable to parse atom lm-projected dos from DOSCAR.lobster, \
it doesn\'t contain enough dos values')
continue
if len(lms[atom_i]) == len(pdos[0]):
# we have the same lm-projections for spin up and dn
dos_values = np.array([[lmdos] for lmdos in zip(*pdos)]) / eV
elif len(lms[atom_i]) * 2 == len(pdos[0]):
pdos_up = list(zip(*pdos))[0::2]
pdos_dn = list(zip(*pdos))[1::2]
dos_values = np.array([[a, b] for a, b in zip(pdos_up, pdos_dn)]) / eV
else:
logger.warning('Unexpected number of columns in DOSCAR.lobster')
return
for lm_i, lm in enumerate(lms[atom_i]):
for spin_i in range(len(dos_values[lm_i])):
section_pdos = dos.m_create(DosValues, Dos.atom_projected)
section_pdos.atom_index = atom_i
section_pdos.spin = spin_i
section_pdos.m_kind = 'real_orbital'
section_pdos.lm = translate_lm(lm)
section_pdos.value = dos_values[lm_i][spin_i]
mainfile_parser = TextParser(quantities=[
Quantity('program_version', r'^LOBSTER\s*v([\d\.]+)\s*', repeats=False),
Quantity('datetime', r'starting on host \S* on (\d{4}-\d\d-\d\d\sat\s\d\d:\d\d:\d\d)\s[A-Z]{3,4}',
repeats=False),
Quantity('x_lobster_code',
r'detecting used PAW program... (.*)', repeats=False),
Quantity('x_lobster_basis',
r'setting up local basis functions...\s*((?:[a-zA-Z]{1,2}\s+\(.+\)(?:\s+\d\S+)+\s+)+)',
repeats=False,
sub_parser=TextParser(quantities=[
Quantity('x_lobster_basis_species',
r'([a-zA-Z]+){1,2}\s+\((.+)\)((?:\s+\d\S+)+)\s+', repeats=True)
])),
Quantity('spilling', r'((?:spillings|abs. tot)[\s\S]*?charge\s*spilling:\s*\d+\.\d+%)',
repeats=True,
sub_parser=TextParser(quantities=[
Quantity('abs_total_spilling',
r'abs.\s*total\s*spilling:\s*(\d+\.\d+)%', repeats=False),
Quantity('abs_charge_spilling',
r'abs.\s*charge\s*spilling:\s*(\d+\.\d+)%', repeats=False)
])),
Quantity('finished', r'finished in (\d)', repeats=False),
])
class LobsterParser:
def __init__(self):
pass
def parse(self, mainfile: str, archive: EntryArchive, logger=None):
mainfile_parser.mainfile = mainfile
mainfile_path = path.dirname(mainfile)
mainfile_parser.parse()
run = archive.m_create(Run)
run.program = Program(
name='LOBSTER',
version=str(mainfile_parser.get('program_version')))
# FIXME: There is a timezone info present as well, but datetime support for timezones
# is bad and it doesn't support some timezones (for example CEST).
# That leads to test failures, so ignore it for now.
date = datetime.datetime.strptime(' '.join(mainfile_parser.get('datetime')),
'%Y-%m-%d at %H:%M:%S') - datetime.datetime(1970, 1, 1)
run.time_run = TimeRun(wall_start=date.total_seconds())
code = mainfile_parser.get('x_lobster_code')
# parse structure
if code is not None:
if code == 'VASP':
try:
structure = ase.io.read(mainfile_path + '/CONTCAR', format="vasp")
except FileNotFoundError:
logger.warning('Unable to parse structure info, no CONTCAR detected')
else:
logger.warning('Parsing of {} structure is not supported'.format(code))
if 'structure' in locals():
system = run.m_create(System)
system.atoms = Atoms(
lattice_vectors=structure.get_cell() * units.angstrom,
labels=structure.get_chemical_symbols(),
periodic=structure.get_pbc(),
positions=structure.get_positions() * units.angstrom)
if mainfile_parser.get('finished') is not None:
run.clean_end = True
else:
run.clean_end = False
scc = run.m_create(Calculation)
method = run.m_create(Method)
scc.method_ref = method
spilling = mainfile_parser.get('spilling')
if spilling is not None:
method.electronic = Electronic(n_spin_channels=len(spilling))
total_spilling = []
charge_spilling = []
for s in spilling:
total_spilling.append(s.get('abs_total_spilling'))
charge_spilling.append(s.get('abs_charge_spilling'))
scc.x_lobster_abs_total_spilling = np.array(total_spilling)
scc.x_lobster_abs_charge_spilling = np.array(charge_spilling)
method_keys = [
'x_lobster_code'
]
for key in method_keys:
val = mainfile_parser.get(key)
if val is not None:
setattr(method, key, val)
basis = mainfile_parser.get('x_lobster_basis')
if basis is not None:
species = basis.get('x_lobster_basis_species')
if species is not None:
method.basis_set.append(BasisSet(name=species[0][1]))
parse_ICOXPLIST(mainfile_path + '/ICOHPLIST.lobster', scc, 'h')
parse_ICOXPLIST(mainfile_path + '/ICOOPLIST.lobster', scc, 'o')
parse_COXPCAR(mainfile_path + '/COHPCAR.lobster', scc, 'h', logger)
parse_COXPCAR(mainfile_path + '/COOPCAR.lobster', scc, 'o', logger)
parse_CHARGE(mainfile_path + '/CHARGE.lobster', scc)
parse_DOSCAR(mainfile_path + '/DOSCAR.lobster', run, logger)
if run.system:
scc.system_ref = run.system[0]
|
[
"datetime.datetime",
"numpy.abs",
"nomad.datamodel.metainfo.simulation.method.BasisSet",
"os.path.isfile",
"numpy.array",
"os.path.dirname",
"nomad.datamodel.metainfo.simulation.system.Atoms",
"nomad.parsing.file_parser.Quantity"
] |
[((2539, 2557), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (2550, 2557), False, 'from os import path\n'), ((4514, 4532), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (4525, 4532), False, 'from os import path\n'), ((7185, 7199), 'numpy.array', 'np.array', (['coxp'], {}), '(coxp)\n', (7193, 7199), True, 'import numpy as np\n'), ((7280, 7295), 'numpy.array', 'np.array', (['acoxp'], {}), '(acoxp)\n', (7288, 7295), True, 'import numpy as np\n'), ((7842, 7860), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (7853, 7860), False, 'from os import path\n'), ((9504, 9522), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (9515, 9522), False, 'from os import path\n'), ((14707, 14729), 'os.path.dirname', 'path.dirname', (['mainfile'], {}), '(mainfile)\n', (14719, 14729), False, 'from os import path\n'), ((5614, 5633), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (5622, 5633), True, 'import numpy as np\n'), ((6154, 6177), 'numpy.array', 'np.array', (['coxp_lines[0]'], {}), '(coxp_lines[0])\n', (6162, 6177), True, 'import numpy as np\n'), ((7379, 7394), 'numpy.array', 'np.array', (['icoxp'], {}), '(icoxp)\n', (7387, 7394), True, 'import numpy as np\n'), ((7497, 7513), 'numpy.array', 'np.array', (['aicoxp'], {}), '(aicoxp)\n', (7505, 7513), True, 'import numpy as np\n'), ((7608, 7623), 'numpy.array', 'np.array', (['icoxp'], {}), '(icoxp)\n', (7616, 7623), True, 'import numpy as np\n'), ((8863, 8921), 'nomad.datamodel.metainfo.simulation.system.Atoms', 'Atoms', ([], {'species': 'atomic_numbers', 'periodic': '[True, True, True]'}), '(species=atomic_numbers, periodic=[True, True, True])\n', (8868, 8921), False, 'from nomad.datamodel.metainfo.simulation.system import System, Atoms\n'), ((13315, 13389), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""program_version"""', '"""^LOBSTER\\\\s*v([\\\\d\\\\.]+)\\\\s*"""'], {'repeats': '(False)'}), "('program_version', '^LOBSTER\\\\s*v([\\\\d\\\\.]+)\\\\s*', repeats=False)\n", (13323, 13389), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((13392, 13528), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""datetime"""', '"""starting on host \\\\S* on (\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d\\\\sat\\\\s\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d)\\\\s[A-Z]{3,4}"""'], {'repeats': '(False)'}), "('datetime',\n 'starting on host \\\\S* on (\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d\\\\sat\\\\s\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d)\\\\s[A-Z]{3,4}'\n , repeats=False)\n", (13400, 13528), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((13524, 13603), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""x_lobster_code"""', '"""detecting used PAW program... (.*)"""'], {'repeats': '(False)'}), "('x_lobster_code', 'detecting used PAW program... 
(.*)', repeats=False)\n", (13532, 13603), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((14445, 14501), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""finished"""', '"""finished in (\\\\d)"""'], {'repeats': '(False)'}), "('finished', 'finished in (\\\\d)', repeats=False)\n", (14453, 14501), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((15307, 15336), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (15324, 15336), False, 'import datetime\n'), ((16930, 16954), 'numpy.array', 'np.array', (['total_spilling'], {}), '(total_spilling)\n', (16938, 16954), True, 'import numpy as np\n'), ((17003, 17028), 'numpy.array', 'np.array', (['charge_spilling'], {}), '(charge_spilling)\n', (17011, 17028), True, 'import numpy as np\n'), ((4149, 4164), 'numpy.array', 'np.array', (['icoxp'], {}), '(icoxp)\n', (4157, 4164), True, 'import numpy as np\n'), ((4279, 4389), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""coxp_pairs"""', '"""No\\\\.\\\\d+:(\\\\w{1,2}\\\\d+)->(\\\\w{1,2}\\\\d+)\\\\(([\\\\d\\\\.]+)\\\\)\\\\s*?"""'], {'repeats': '(True)'}), "('coxp_pairs',\n 'No\\\\.\\\\d+:(\\\\w{1,2}\\\\d+)->(\\\\w{1,2}\\\\d+)\\\\(([\\\\d\\\\.]+)\\\\)\\\\s*?',\n repeats=True)\n", (4287, 4389), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((4398, 4489), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""coxp_lines"""', '"""\\\\n\\\\s*(-*\\\\d+\\\\.\\\\d+(?:[ \\\\t]+-*\\\\d+\\\\.\\\\d+)+)"""'], {'repeats': '(True)'}), "('coxp_lines', '\\\\n\\\\s*(-*\\\\d+\\\\.\\\\d+(?:[ \\\\t]+-*\\\\d+\\\\.\\\\d+)+)',\n repeats=True)\n", (4406, 4489), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((7720, 7826), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""charges"""', '"""\\\\s*\\\\d+\\\\s+[A-Za-z]{1,2}\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s*"""'], {'repeats': '(True)'}), "('charges',\n '\\\\s*\\\\d+\\\\s+[A-Za-z]{1,2}\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s*',\n repeats=True)\n", (7728, 7826), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((11273, 11289), 'numpy.abs', 'np.abs', (['energies'], {}), '(energies)\n', (11279, 11289), True, 'import numpy as np\n'), ((3654, 3673), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (3662, 3673), True, 'import numpy as np\n'), ((17464, 17492), 'nomad.datamodel.metainfo.simulation.method.BasisSet', 'BasisSet', ([], {'name': 'species[0][1]'}), '(name=species[0][1])\n', (17472, 17492), False, 'from nomad.datamodel.metainfo.simulation.method import Method, Electronic, BasisSet\n'), ((13845, 13954), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""x_lobster_basis_species"""', '"""([a-zA-Z]+){1,2}\\\\s+\\\\((.+)\\\\)((?:\\\\s+\\\\d\\\\S+)+)\\\\s+"""'], {'repeats': '(True)'}), "('x_lobster_basis_species',\n '([a-zA-Z]+){1,2}\\\\s+\\\\((.+)\\\\)((?:\\\\s+\\\\d\\\\S+)+)\\\\s+', repeats=True)\n", (13853, 13954), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((14173, 14270), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""abs_total_spilling"""', '"""abs.\\\\s*total\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%"""'], {'repeats': '(False)'}), "('abs_total_spilling',\n 'abs.\\\\s*total\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%', repeats=False)\n", (14181, 14270), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((14306, 14405), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""abs_charge_spilling"""', 
'"""abs.\\\\s*charge\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%"""'], {'repeats': '(False)'}), "('abs_charge_spilling',\n 'abs.\\\\s*charge\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%', repeats=False)\n", (14314, 14405), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((2096, 2329), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""line"""', '"""(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s*)|(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s+[\\\\d]+\\\\s*)"""'], {'repeats': '(True)', 'str_operation': 'icoxp_line_split'}), "('line',\n '(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s*)|(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s+[\\\\d]+\\\\s*)'\n , repeats=True, str_operation=icoxp_line_split)\n", (2104, 2329), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n')]
|
import numpy as np
MAX = 10000
matrix = np.full((MAX, MAX), False)
def pretty_print(matrix):
print_matrix = np.full(matrix.shape, ".")
print_matrix[matrix] = "#"
for row in print_matrix:
for symb in row:
print(symb, end="")
print()
def fold_once(matrix, axis, value):
if axis == "y": # top fold
matrix = matrix.T
assert matrix[:, value].sum() == 0
iter = matrix.shape[1] - value
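    # mirror every column beyond the fold line onto the kept half of the grid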
for i in range(iter):
matrix[:, value-i] = np.logical_or(matrix[:, value-i], matrix[:, value+i])
matrix = matrix[:, :value]
return matrix if axis == "x" else matrix.T
max_x, max_y = float("-inf"), float("-inf")
while inp := input(): # walrus
y, x = [int(num) for num in inp.split(",")]
matrix[x, y] = True
max_x = max(x, max_x)
max_y = max(y, max_y)
if max_x % 2 != 0:
max_x += 1
if max_y % 2 != 0:
max_y += 1
first = False
matrix = matrix[:max_x+1, :max_y+1]
while inp := input():
axis, value = inp.split(" ")[-1].split("=")
value = int(value)
matrix = fold_once(matrix, axis, value)
if not first:
print("Part 1:", matrix.sum())
first = True
print()
print("PART 2")
print("======")
pretty_print(matrix)
|
[
"numpy.full",
"numpy.logical_or"
] |
[((41, 67), 'numpy.full', 'np.full', (['(MAX, MAX)', '(False)'], {}), '((MAX, MAX), False)\n', (48, 67), True, 'import numpy as np\n'), ((115, 141), 'numpy.full', 'np.full', (['matrix.shape', '"""."""'], {}), "(matrix.shape, '.')\n", (122, 141), True, 'import numpy as np\n'), ((502, 559), 'numpy.logical_or', 'np.logical_or', (['matrix[:, value - i]', 'matrix[:, value + i]'], {}), '(matrix[:, value - i], matrix[:, value + i])\n', (515, 559), True, 'import numpy as np\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str)
args = parser.parse_args()
im = cv2.imread(args.filename)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY |
cv2.THRESH_OTSU)
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
# hsv_blurred = cv2.GaussianBlur(hsv, (5, 5), 0)
hsv_blurred = hsv
ret, thresh_h = cv2.threshold(hsv_blurred[:, :, 0], 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
ret, thresh_s = cv2.threshold(hsv_blurred[:, :, 1], 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
ret, thresh_v = cv2.threshold(hsv_blurred[:, :, 2], 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# threshold on each of the channels, see what happens
img = gray.copy()
cimg = im.copy()
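# circle detection via the Hough transform on the (unblurred) grayscale copy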
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,
param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.imshow('detected circles',cimg)
cv2.imshow("Image", im)
cv2.imshow("thresh bw", thresh)
cv2.imshow("thresh hue", thresh_h)
cv2.imshow("thresh sat", thresh_s)
cv2.imshow("thresh val", thresh_v)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"argparse.ArgumentParser",
"cv2.threshold",
"cv2.HoughCircles",
"cv2.imshow",
"cv2.waitKey",
"cv2.circle",
"cv2.destroyAllWindows",
"numpy.around",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.imread"
] |
[((632, 657), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (655, 657), False, 'import argparse\n'), ((733, 758), 'cv2.imread', 'cv2.imread', (['args.filename'], {}), '(args.filename)\n', (743, 758), False, 'import cv2\n'), ((766, 802), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (778, 802), False, 'import cv2\n'), ((813, 846), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (829, 846), False, 'import cv2\n'), ((861, 928), 'cv2.threshold', 'cv2.threshold', (['blurred', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (874, 928), False, 'import cv2\n'), ((944, 979), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2HSV'], {}), '(im, cv2.COLOR_BGR2HSV)\n', (956, 979), False, 'import cv2\n'), ((1064, 1149), 'cv2.threshold', 'cv2.threshold', (['hsv_blurred[:, :, 0]', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(hsv_blurred[:, :, 0], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n )\n', (1077, 1149), False, 'import cv2\n'), ((1169, 1254), 'cv2.threshold', 'cv2.threshold', (['hsv_blurred[:, :, 1]', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(hsv_blurred[:, :, 1], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n )\n', (1182, 1254), False, 'import cv2\n'), ((1274, 1359), 'cv2.threshold', 'cv2.threshold', (['hsv_blurred[:, :, 2]', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(hsv_blurred[:, :, 2], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n )\n', (1287, 1359), False, 'import cv2\n'), ((1463, 1563), 'cv2.HoughCircles', 'cv2.HoughCircles', (['img', 'cv2.HOUGH_GRADIENT', '(1)', '(20)'], {'param1': '(50)', 'param2': '(30)', 'minRadius': '(0)', 'maxRadius': '(0)'}), '(img, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30,\n minRadius=0, maxRadius=0)\n', (1479, 1563), False, 'import cv2\n'), ((1807, 1843), 'cv2.imshow', 'cv2.imshow', (['"""detected circles"""', 'cimg'], {}), "('detected circles', cimg)\n", (1817, 1843), False, 'import cv2\n'), ((1847, 1870), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'im'], {}), "('Image', im)\n", (1857, 1870), False, 'import cv2\n'), ((1871, 1902), 'cv2.imshow', 'cv2.imshow', (['"""thresh bw"""', 'thresh'], {}), "('thresh bw', thresh)\n", (1881, 1902), False, 'import cv2\n'), ((1903, 1937), 'cv2.imshow', 'cv2.imshow', (['"""thresh hue"""', 'thresh_h'], {}), "('thresh hue', thresh_h)\n", (1913, 1937), False, 'import cv2\n'), ((1938, 1972), 'cv2.imshow', 'cv2.imshow', (['"""thresh sat"""', 'thresh_s'], {}), "('thresh sat', thresh_s)\n", (1948, 1972), False, 'import cv2\n'), ((1973, 2007), 'cv2.imshow', 'cv2.imshow', (['"""thresh val"""', 'thresh_v'], {}), "('thresh val', thresh_v)\n", (1983, 2007), False, 'import cv2\n'), ((2009, 2023), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2020, 2023), False, 'import cv2\n'), ((2024, 2047), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2045, 2047), False, 'import cv2\n'), ((1602, 1620), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (1611, 1620), True, 'import numpy as np\n'), ((1678, 1730), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (1688, 1730), False, 'import cv2\n'), ((1764, 1813), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (1774, 
1813), False, 'import cv2\n')]
|
from itertools import combinations
import numpy as np
from PlanningCore.core.constants import State
from PlanningCore.core.physics import (
ball_ball_collision,
ball_cushion_collision,
cue_strike,
evolve_ball_motion,
get_ball_ball_collision_time,
get_ball_cushion_collision_time,
get_roll_time,
get_spin_time,
get_slide_time,
)
from PlanningCore.core.utils import get_rel_velocity
def evolve(pockets, balls, dt):
for ball in balls:
rvw, state = evolve_ball_motion(
pockets=pockets,
state=ball.state,
rvw=ball.rvw,
t=dt,
)
ball.set_rvw(rvw)
ball.set_state(state)
def resolve(collision, table):
if collision['type'] == 'ball_ball':
ball_id1, ball_id2 = collision['agents']
rvw1 = table.balls[ball_id1].rvw
rvw2 = table.balls[ball_id2].rvw
rvw1, rvw2 = ball_ball_collision(rvw1, rvw2)
s1, s2 = State.sliding, State.sliding
table.balls[ball_id1].set_rvw(rvw1)
table.balls[ball_id1].set_state(s1)
table.balls[ball_id2].set_rvw(rvw2)
table.balls[ball_id2].set_state(s2)
elif collision['type'] == 'ball_cushion':
ball_id, cushion_id = collision['agents']
rvw = table.balls[ball_id].rvw
normal = table.cushions[cushion_id]['normal']
rvw = ball_cushion_collision(rvw, normal)
s = State.sliding
table.balls[ball_id].set_rvw(rvw)
table.balls[ball_id].set_state(s)
def detect_collisions(table):
collisions = []
for i, ball1 in enumerate(table.balls):
for j, ball2 in enumerate(table.balls):
if i >= j:
continue
if ball1.state == State.stationary and ball2.state == State.stationary:
continue
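            # a collision occurs when the distance between ball centers is at most the sum of their radii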
if np.linalg.norm(ball1.rvw[0] - ball2.rvw[0]) <= (ball1.radius + ball2.radius):
collisions.append({
'type': 'ball_ball',
'agents': (i, j),
})
for i, ball in enumerate(table.balls):
ball_x, ball_y = ball.pos
if ball_x <= table.left + ball.radius:
collisions.append({
'type': 'ball_cushion',
'agents': (i, 'L'),
})
elif ball_x >= table.right - ball.radius:
collisions.append({
'type': 'ball_cushion',
'agents': (i, 'R'),
})
elif ball_y <= table.bottom + ball.radius:
collisions.append({
'type': 'ball_cushion',
'agents': (i, 'B'),
})
elif ball_y >= table.top - ball.radius:
collisions.append({
'type': 'ball_cushion',
'agents': (i, 'T'),
})
return collisions
def get_min_motion_event_time(balls):
t_min = np.inf
ball_id = None
motion_type = None
for i, ball in enumerate(balls):
if ball.state == State.rolling:
t = get_roll_time(ball.rvw)
tau_spin = get_spin_time(ball.rvw)
event_type = 'rolling_spinning' if tau_spin > t else 'rolling_stationary'
elif ball.state == State.sliding:
t = get_slide_time(ball.rvw)
event_type = 'sliding_rolling'
elif ball.state == State.spinning:
t = get_spin_time(ball.rvw)
event_type = 'spinning_stationary'
else:
continue
if t < t_min:
t_min = t
ball_id = i
motion_type = event_type
return t_min, (ball_id,), motion_type
def get_min_ball_ball_event_time(balls):
t_min = np.inf
ball_ids = (None, None)
for (i, ball1), (j, ball2) in combinations(enumerate(balls), 2):
if ball1.state == State.pocketed or ball2.state == State.pocketed:
continue
if ball1.state == State.stationary and ball2.state == State.stationary:
continue
t = get_ball_ball_collision_time(
rvw1=ball1.rvw,
rvw2=ball2.rvw,
s1=ball1.state,
s2=ball2.state,
)
if t < t_min:
ball_ids = (i, j)
t_min = t
return t_min, ball_ids
def get_min_ball_cushion_event_time(balls, cushions):
"""Returns minimum time until next ball-rail collision"""
t_min = np.inf
agents = (None, None)
for ball_id, ball in enumerate(balls):
if ball.state == State.stationary or ball.state == State.pocketed:
continue
for cushion_id, cushion in cushions.items():
t = get_ball_cushion_collision_time(
rvw=ball.rvw,
s=ball.state,
lx=cushion['lx'],
ly=cushion['ly'],
l0=cushion['l0'],
)
if t < t_min:
agents = (ball_id, cushion_id)
t_min = t
return t_min, agents
def get_next_event(table):
t_min = np.inf
agents = tuple()
event_type = None
t, ids, e = get_min_motion_event_time(table.balls)
if t < t_min:
t_min = t
event_type = e
agents = ids
t, ids = get_min_ball_ball_event_time(table.balls)
if t < t_min:
t_min = t
event_type = 'ball_ball'
agents = ids
t, ids = get_min_ball_cushion_event_time(table.balls, table.cushions)
if t < t_min:
t_min = t
event_type = 'ball_cushion'
agents = ids
return Event(event_type=event_type, event_time=t_min, agents=agents)
def simulate(table, dt=0.033, log=False, no_ball_cushion=False, return_once_pocket=False):
while True:
if return_once_pocket:
for ball in table.balls:
if ball.state == State.pocketed:
return True
if np.all([(ball.state == State.stationary or ball.state == State.pocketed)
for ball in table.balls]):
break
evolve(table.pockets, table.balls, dt)
if log:
table.snapshot(dt)
collisions = detect_collisions(table)
for collision in collisions:
if no_ball_cushion and collision['type'] == 'ball_cushion':
return False
resolve(collision, table)
if log:
table.snapshot(dt)
return True
def simulate_event_based(table, log=False, return_once_pocket=False):
event = Event()
while event.event_time < np.inf:
event = get_next_event(table)
if return_once_pocket:
for ball in table.balls:
if ball.state == State.pocketed:
return True
if np.all([(ball.state == State.stationary or ball.state == State.pocketed)
for ball in table.balls]):
break
evolve(table.pockets, table.balls, dt=event.event_time)
resolve(event.as_dict(), table)
if log:
table.snapshot(event.event_time)
return True
def shot(table, v_cue, phi, ball_index=0, theta=0, a=0, b=0):
v, w = cue_strike(v_cue, phi, theta, a, b)
rvw = table.balls[ball_index].rvw
rvw[1] = v
rvw[2] = w
state = State.rolling if np.abs(np.sum(get_rel_velocity(rvw))) <= 1e-10 else State.sliding
table.balls[ball_index].set_rvw(rvw)
table.balls[ball_index].set_state(state)
class Event(object):
def __init__(self, event_type=None, event_time=0, agents=None):
self.event_type = event_type
self.event_time = event_time
self.agents = agents
def as_dict(self):
return {
'type': self.event_type,
'time': self.event_time,
'agents': self.agents,
}
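# --- Illustrative sketch (not part of PlanningCore) --------------------------
# The two drivers above differ in how they advance time: `simulate` steps with a
# fixed `dt`, while `simulate_event_based` jumps straight to the time of the next
# event returned by `get_next_event`. The self-contained toy below shows that
# event-jumping idea in one dimension for a single uniformly decelerating ball;
# every name and number in it is an assumption made for illustration only.
def _demo_event_stepping(x=0.0, v=2.0, decel=0.5, wall=10.0):
    """Return where a decelerating 1-D ball either stops or first hits a wall."""
    t_stop = v / decel                                   # time until friction stops the ball
    disc = v * v - 2.0 * decel * (wall - x)              # discriminant of x + v*t - decel*t^2/2 = wall
    t_wall = (v - disc ** 0.5) / decel if disc >= 0 else np.inf
    t_event = min(t_stop, t_wall)                        # next event: whichever happens first
    return x + v * t_event - 0.5 * decel * t_event ** 2  # jump directly to the event time
if __name__ == '__main__':
    print(_demo_event_stepping())  # -> 4.0, the ball stops before reaching the wall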
|
[
"PlanningCore.core.utils.get_rel_velocity",
"numpy.all",
"PlanningCore.core.physics.get_ball_cushion_collision_time",
"PlanningCore.core.physics.get_ball_ball_collision_time",
"PlanningCore.core.physics.get_roll_time",
"PlanningCore.core.physics.evolve_ball_motion",
"PlanningCore.core.physics.get_slide_time",
"PlanningCore.core.physics.cue_strike",
"PlanningCore.core.physics.get_spin_time",
"numpy.linalg.norm",
"PlanningCore.core.physics.ball_ball_collision",
"PlanningCore.core.physics.ball_cushion_collision"
] |
[((7095, 7130), 'PlanningCore.core.physics.cue_strike', 'cue_strike', (['v_cue', 'phi', 'theta', 'a', 'b'], {}), '(v_cue, phi, theta, a, b)\n', (7105, 7130), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((497, 570), 'PlanningCore.core.physics.evolve_ball_motion', 'evolve_ball_motion', ([], {'pockets': 'pockets', 'state': 'ball.state', 'rvw': 'ball.rvw', 't': 'dt'}), '(pockets=pockets, state=ball.state, rvw=ball.rvw, t=dt)\n', (515, 570), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((914, 945), 'PlanningCore.core.physics.ball_ball_collision', 'ball_ball_collision', (['rvw1', 'rvw2'], {}), '(rvw1, rvw2)\n', (933, 945), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((4003, 4099), 'PlanningCore.core.physics.get_ball_ball_collision_time', 'get_ball_ball_collision_time', ([], {'rvw1': 'ball1.rvw', 'rvw2': 'ball2.rvw', 's1': 'ball1.state', 's2': 'ball2.state'}), '(rvw1=ball1.rvw, rvw2=ball2.rvw, s1=ball1.state,\n s2=ball2.state)\n', (4031, 4099), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((5850, 5952), 'numpy.all', 'np.all', (['[(ball.state == State.stationary or ball.state == State.pocketed) for ball in\n table.balls]'], {}), '([(ball.state == State.stationary or ball.state == State.pocketed) for\n ball in table.balls])\n', (5856, 5952), True, 'import numpy as np\n'), ((6702, 6804), 'numpy.all', 'np.all', (['[(ball.state == State.stationary or ball.state == State.pocketed) for ball in\n table.balls]'], {}), '([(ball.state == State.stationary or ball.state == State.pocketed) for\n ball in table.balls])\n', (6708, 6804), True, 'import numpy as np\n'), ((1375, 1410), 'PlanningCore.core.physics.ball_cushion_collision', 'ball_cushion_collision', (['rvw', 'normal'], {}), '(rvw, normal)\n', (1397, 1410), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((3035, 3058), 'PlanningCore.core.physics.get_roll_time', 'get_roll_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3048, 3058), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((3082, 3105), 'PlanningCore.core.physics.get_spin_time', 'get_spin_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3095, 3105), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((4632, 4750), 'PlanningCore.core.physics.get_ball_cushion_collision_time', 
'get_ball_cushion_collision_time', ([], {'rvw': 'ball.rvw', 's': 'ball.state', 'lx': "cushion['lx']", 'ly': "cushion['ly']", 'l0': "cushion['l0']"}), "(rvw=ball.rvw, s=ball.state, lx=cushion['lx'\n ], ly=cushion['ly'], l0=cushion['l0'])\n", (4663, 4750), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((1840, 1883), 'numpy.linalg.norm', 'np.linalg.norm', (['(ball1.rvw[0] - ball2.rvw[0])'], {}), '(ball1.rvw[0] - ball2.rvw[0])\n', (1854, 1883), True, 'import numpy as np\n'), ((3250, 3274), 'PlanningCore.core.physics.get_slide_time', 'get_slide_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3264, 3274), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((3377, 3400), 'PlanningCore.core.physics.get_spin_time', 'get_spin_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3390, 3400), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((7242, 7263), 'PlanningCore.core.utils.get_rel_velocity', 'get_rel_velocity', (['rvw'], {}), '(rvw)\n', (7258, 7263), False, 'from PlanningCore.core.utils import get_rel_velocity\n')]
|
"""
The :mod:`fatf.utils.models.models` module holds custom models.
The models implemented in this module are mainly used for
FAT Forensics package testing and the examples in the documentation.
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
import abc
from typing import Optional
import numpy as np
import fatf.utils.array.tools as fuat
import fatf.utils.array.validation as fuav
import fatf.utils.distances as fud
from fatf.exceptions import (IncorrectShapeError, PrefittedModelError,
UnfittedModelError)
__all__ = ['KNN']
class Model(abc.ABC):
"""
An abstract class used to implement predictive models.
This abstract class requires ``fit`` and ``predict`` methods and defines
an optional ``predict_proba`` method.
    This is a scikit-learn-inspired model specification and it is relied
    on throughout this package.
Raises
------
NotImplementedError
Any of the required methods -- ``fit`` or ``predict`` -- is not
implemented.
"""
# pylint: disable=invalid-name
@abc.abstractmethod
def __init__(self) -> None:
"""
Initialises the abstract model class.
"""
@abc.abstractmethod
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
"""
Fits this predictive model.
Parameters
----------
X : numpy.ndarray
A 2-dimensional numpy data array used to fit the model.
y : numpy.ndarray
A 1-dimensional numpy labels array used to fit the model.
"""
@abc.abstractmethod
def predict(self, X: np.ndarray) -> None:
"""
Predicts labels of new data points using this model.
Parameters
----------
X : numpy.ndarray
A 2-dimensional numpy data array for which labels are predicted.
"""
def predict_proba(self, X: np.ndarray) -> None:
"""
Predicts probabilities of labels for new data points using this model.
Parameters
----------
X : numpy.ndarray
            A 2-dimensional numpy data array for which label probabilities are
            predicted.
Raises
------
NotImplementedError
By default this method is not required, hence it raises a
``NotImplementedError``.
"""
raise NotImplementedError
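# --- Hedged sketch (not part of the fatf code base) ---------------------------
# The abstract class above only demands ``fit`` and ``predict`` (``predict_proba``
# stays optional), so any object implementing those two methods can be plugged in
# wherever the package expects a model. The toy subclass below shows that minimal
# contract; its name and behaviour are assumptions made purely for illustration.
class _ConstantModel(Model):
    """Toy model that always predicts the most frequent training label."""
    def __init__(self) -> None:
        """Initialises the toy model."""
        super().__init__()
        self._label = None
    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """Memorises the majority label of ``y``."""
        values, counts = np.unique(y, return_counts=True)
        self._label = values[np.argmax(counts)]
    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predicts the memorised majority label for every row of ``X``."""
        return np.array(X.shape[0] * [self._label])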
class KNN(Model):
"""
A K-Nearest Neighbours model based on Euclidean distance.
When the ``k`` parameter is set to 0 the model works as a majority class
    classifier. If the neighbour count (within ``k``) results in a tie, the
    overall majority class for the whole training data is returned.
Finally, when the training data contains categorical (i.e. non-numerical,
e.g. strings) columns the distance for these columns is 0 when the value
matches and 1 otherwise.
This model can operate in two modes: *classifier* or *regressor*. The first
one works for categorical and numerical targets and provides two predictive
methods: ``predict`` -- for predicting labels and ``predict_proba`` for
predicting probabilities of labels. The regressor mode, on the other hand,
requires the target to be numerical and it only supports the ``predict``
    method, which returns the average of the target values of the ``k``
    neighbours for the queried data point.
Parameters
----------
k : integer, optional (default=3)
The number of neighbours used to make a prediction. Defaults to 3.
mode : string, optional (default='classifier')
The mode in which the model will operate. Either ``'classifier'``
(``'c'``) or ``'regressor'`` (``'r'``). In the latter case
``predict_proba`` method is disabled.
Raises
------
PrefittedModelError
Raised when trying to fit a model that has already been fitted. Usually
raised when calling the ``fit`` method for the second time. Try using
the ``clear`` method to reset the model before fitting it again.
TypeError
The ``k`` parameter is not an integer.
UnfittedModelError
Raised when trying to predict data with a model that has not been
fitted yet. Try using the ``fit`` method to fit the model first.
ValueError
The ``k`` parameter is a negative number or the ``mode`` parameter does
not have one of the allowed values: ``'c'``, ``'classifier'``, ``'r'``
or ``'regressor'``.
Attributes
----------
_MODES : Set[string]
Possible modes of the KNN model: ``'classifier'`` (``'c'``) or
``'regressor'`` (``'r'``).
_k : integer
The number of neighbours used to make a prediction.
_is_classifier : boolean
True when the model is initialised (and operates) as a classifier.
False when it acts as a regressor.
_is_fitted : boolean
A Boolean variable indicating whether the model is fitted.
_X : numpy.ndarray
The KNN model training data.
_y : numpy.ndarray
The KNN model training labels.
_X_n : integer
The number of data points in the training set.
_unique_y : numpy.ndarray
An array with unique labels in the training labels set ordered
lexicographically.
_unique_y_counts : numpy.ndarray
An array with counts of the unique labels in the training labels set.
_unique_y_probabilities : numpy.ndarray
Probabilities of labels calculated using their frequencies in the
training data.
_majority_label : Union[string, integer, float]
The most common label in the training set.
_is_structured : boolean
A Boolean variable indicating whether the model has been fitted on a
structured numpy array.
_categorical_indices : numpy.ndarray
An array with categorical indices in the training array.
_numerical_indices : numpy.ndarray
An array with numerical indices in the training array.
"""
# pylint: disable=too-many-instance-attributes
_MODES = set(['classifier', 'c', 'regressor', 'r'])
def __init__(self, k: int = 3, mode: Optional[str] = None) -> None:
"""
Initialises the KNN model with the selected ``k`` parameter.
"""
super().__init__()
if not isinstance(k, int):
raise TypeError('The k parameter has to be an integer.')
if k < 0:
            raise ValueError('The k parameter has to be a non-negative integer.')
if mode is None:
self._is_classifier = True
else:
if mode in self._MODES:
self._is_classifier = mode[0] == 'c'
else:
raise ValueError(('The mode parameter has to have one of the '
'following values {}.').format(self._MODES))
self._k = k
self._is_fitted = False
self._X = np.ndarray((0, 0)) # pylint: disable=invalid-name
self._y = np.ndarray((0, ))
self._X_n = int() # pylint: disable=invalid-name
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._majority_label = None
self._is_structured = False
self._categorical_indices = np.ndarray((0, ))
self._numerical_indices = np.ndarray((0, ))
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
"""
Fits the model.
Parameters
----------
X : numpy.ndarray
The KNN training data.
y : numpy.ndarray
The KNN training labels.
Raises
------
IncorrectShapeError
Either the ``X`` array is not 2-dimensional, the ``y`` array is not
1-dimensional, the number of rows in ``X`` is not the same as the
number of elements in ``y`` or the ``X`` array has 0 rows or 0
columns.
PrefittedModelError
Trying to fit the model when it has already been fitted. Usually
raised when calling the ``fit`` method for the second time without
clearing the model first.
TypeError
Trying to fit a KNN predictor in a regressor mode with
non-numerical target variable.
"""
if self._is_fitted:
raise PrefittedModelError('This model has already been fitted.')
if not fuav.is_2d_array(X):
raise IncorrectShapeError('The training data must be a 2-'
'dimensional array.')
if not fuav.is_1d_array(y):
raise IncorrectShapeError('The training data labels must be a 1-'
'dimensional array.')
if X.shape[0] == 0:
raise IncorrectShapeError('The data array has to have at least '
'one data point.')
# If the array is structured the fuav.is_2d_array function takes care
# of checking whether there is at least one column
if not fuav.is_structured_array(X) and X.shape[1] == 0:
raise IncorrectShapeError('The data array has to have at least '
'one feature.')
if X.shape[0] != y.shape[0]:
raise IncorrectShapeError('The number of samples in X must be the '
'same as the number of labels in y.')
if not self._is_classifier and not fuav.is_numerical_array(y):
raise TypeError('Regressor can only be fitted for a numerical '
'target vector.')
numerical_indices, categorical_indices = fuat.indices_by_type(X)
self._numerical_indices = numerical_indices
self._categorical_indices = categorical_indices
self._is_structured = fuav.is_structured_array(X)
self._X = X
self._y = y
if self._is_classifier:
unique_y, unique_y_counts = np.unique(self._y, return_counts=True)
# Order labels lexicographically.
unique_y_sort_index = np.argsort(unique_y)
self._unique_y = unique_y[unique_y_sort_index]
self._unique_y_counts = unique_y_counts[unique_y_sort_index]
# How many other labels have the same count.
top_y_index = self._unique_y_counts == np.max(
self._unique_y_counts)
top_y_unique_sorted = np.sort(self._unique_y[top_y_index])
self._majority_label = top_y_unique_sorted[0]
self._unique_y_probabilities = (
self._unique_y_counts / self._y.shape[0])
else:
self._majority_label = self._y.mean()
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._X_n = self._X.shape[0]
self._is_fitted = True
def clear(self) -> None:
"""
Clears (unfits) the model.
Raises
------
UnfittedModelError
Raised when trying to clear a model that has not been fitted yet.
            Try using the ``fit`` method to fit the model first.
"""
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
self._is_fitted = False
self._X = np.ndarray((0, 0))
self._y = np.ndarray((0, ))
self._X_n = int()
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._majority_label = None
self._is_structured = False
self._categorical_indices = np.ndarray((0, ))
self._numerical_indices = np.ndarray((0, ))
def _get_distances(self, X: np.ndarray) -> np.ndarray:
"""
Gets distances for a mixture of numerical and categorical features.
For numerical columns the distance is calculated as the Euclidean
distance. For categorical columns (i.e. non-numerical, e.g. strings)
the distance is 0 when the value matches and 1 otherwise.
Parameters
----------
X : numpy.ndarray
A data array for which distances to the training data will be
calculated.
Raises
------
AssertionError
Raised when the model is not fitted, X is not a 2-dimensional
array or X's dtype is different than training data's dtype. It is
also raised when the distances matrix is not 2-dimensional.
Returns
-------
distances : numpy.ndarray
An array of distances between X and the training data.
"""
# pylint: disable=invalid-name
assert self._is_fitted, 'Cannot calculate distances on unfitted model.'
assert fuav.is_2d_array(X), 'X must be a 2-dimensional array.'
assert fuav.are_similar_dtype_arrays(X, self._X), \
'X must have the same dtype as the training data.'
distances_shape = (self._X.shape[0], X.shape[0])
categorical_distances = np.zeros(distances_shape)
numerical_distances = np.zeros(distances_shape)
if self._is_structured:
if self._categorical_indices.size:
categorical_distances = fud.binary_array_distance(
self._X[self._categorical_indices],
X[self._categorical_indices])
if self._numerical_indices.size:
numerical_distances = fud.euclidean_array_distance(
self._X[self._numerical_indices],
X[self._numerical_indices])
else:
if self._categorical_indices.size:
categorical_distances = fud.binary_array_distance(
self._X[:, self._categorical_indices],
X[:, self._categorical_indices])
if self._numerical_indices.size:
numerical_distances = fud.euclidean_array_distance(
self._X[:, self._numerical_indices],
X[:, self._numerical_indices])
assert categorical_distances.shape == numerical_distances.shape, \
'Different number of point-wise distances for these feature types.'
distances = categorical_distances + numerical_distances
assert fuav.is_2d_array(distances), 'Distances matrix must be 2D.'
return distances
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Predicts labels of new instances with the fitted model.
Parameters
----------
X : numpy.ndarray
The data for which labels will be predicted.
Raises
------
IncorrectShapeError
X is not a 2-dimensional array, it has 0 rows or it has a different
number of columns than the training data.
UnfittedModelError
Raised when trying to predict data when the model has not been
fitted yet. Try using the ``fit`` method to fit the model first.
ValueError
X has a different dtype than the data used to fit the model.
Returns
-------
predictions : numpy.ndarray
Predicted class labels for each data point.
"""
# pylint: disable=too-many-locals,too-many-branches
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
if not fuav.is_2d_array(X):
raise IncorrectShapeError('X must be a 2-dimensional array. If '
'you want to predict a single data '
'point please format it as a single row '
'in a 2-dimensional array.')
if not fuav.are_similar_dtype_arrays(X, self._X):
raise ValueError('X must have the same dtype as the training '
'data.')
if not X.shape[0]:
raise IncorrectShapeError('X must have at least one row.')
# No need to check for columns in a structured array -> this is handled
# by the dtype checker.
if not fuav.is_structured_array(X):
if X.shape[1] != self._X.shape[1]:
raise IncorrectShapeError(('X must have the same number of '
'columns as the training data '
'({}).').format(self._X.shape[1]))
predictions = np.empty((X.shape[0], ))
if self._k < self._X_n:
distances = self._get_distances(X)
# If there are 3 nearest neighbours within distances 1, 2 and 2 and
# k is set to 2, then argpartition will always take the first
# within distance 2.
knn = np.argpartition(distances, self._k, axis=0)
predictions = []
for column in knn.T:
close_labels = self._y[column[:self._k]]
if self._is_classifier:
values, counts = np.unique(
close_labels, return_counts=True)
# If there is a tie in the counts take into consideration
# the overall label count in the training data to resolve
# it.
top_label_index = counts == counts.max()
top_label_unique_sorted = np.sort(values[top_label_index])
assert len(top_label_unique_sorted.shape) == 1, \
'This should be a flat array.'
if top_label_unique_sorted.shape[0] > 1:
# Resolve the tie.
# Get count of these label for the training data.
labels_filter = np.array(
self._unique_y.shape[0] * [False])
for top_prediction in top_label_unique_sorted:
unique_y_filter = self._unique_y == top_prediction
np.logical_or(
labels_filter,
unique_y_filter,
out=labels_filter)
g_top_label = self._unique_y[labels_filter]
g_top_label_counts = (
self._unique_y_counts[labels_filter])
# What if any of the global labels have the same count?
g_top_label_index = g_top_label_counts == np.max(
g_top_label_counts)
g_top_label_sorted = np.sort(
g_top_label[g_top_label_index])
prediction = g_top_label_sorted[0]
else:
prediction = top_label_unique_sorted[0]
else:
prediction = close_labels.mean()
predictions.append(prediction)
predictions = np.array(predictions)
else:
predictions = np.array(X.shape[0] * [self._majority_label])
return predictions
def predict_proba(self, X: np.ndarray) -> np.ndarray:
"""
Calculates label probabilities for new instances with the fitted model.
Parameters
----------
X : numpy.ndarray
            The data for which label probabilities will be predicted.
Raises
------
IncorrectShapeError
X is not a 2-dimensional array, it has 0 rows or it has a different
number of columns than the training data.
UnfittedModelError
Raised when trying to predict data when the model has not been
fitted yet. Try using the ``fit`` method to fit the model first.
RuntimeError
Raised when trying to use this method when the predictor is
initialised as a regressor.
ValueError
X has a different dtype than the data used to fit the model.
Returns
-------
probabilities : numpy.ndarray
Probabilities of each instance belonging to every class. The labels
in the return array are ordered by lexicographic order.
"""
if not self._is_classifier:
raise RuntimeError('This functionality is not available for a '
'regressor.')
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
if not fuav.is_2d_array(X):
raise IncorrectShapeError('X must be a 2-dimensional array. If '
'you want to predict a single data '
'point please format it as a single row '
'in a 2-dimensional array.')
if not fuav.are_similar_dtype_arrays(X, self._X):
raise ValueError('X must have the same dtype as the training '
'data.')
if not X.shape[0]:
raise IncorrectShapeError('X must have at least one row.')
# No need to check for columns in a structured array -> this is handled
# by the dtype checker.
if not fuav.is_structured_array(X):
if X.shape[1] != self._X.shape[1]:
raise IncorrectShapeError(('X must have the same number of '
'columns as the training data '
'({}).').format(self._X.shape[1]))
probabilities = np.empty((X.shape[0], self._unique_y.shape[0]))
if self._k < self._X_n:
distances = self._get_distances(X)
knn = np.argpartition(distances, self._k, axis=0)
probabilities = []
for column in knn.T:
close_labels = self._y[column[:self._k]]
values, counts = np.unique(close_labels, return_counts=True)
total_counts = np.sum(counts)
probs = np.zeros((self._unique_y.shape[0], ))
for i in range(values.shape[0]):
ind = np.where(self._unique_y == values[i])[0]
probs[ind] = counts[i] / total_counts
probabilities.append(probs)
probabilities = np.array(probabilities)
else:
probabilities = np.tile(self._unique_y_probabilities,
(X.shape[0], 1))
return probabilities
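# --- Hedged usage sketch (not part of the fatf code base) ---------------------
# A minimal end-to-end run of the KNN class defined above, assuming the fatf
# dependencies imported at the top of this module are installed. The toy data
# and the k value below are assumptions made purely for illustration.
if __name__ == '__main__':
    train_X = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
    train_y = np.array(['a', 'a', 'b', 'b'])
    clf = KNN(k=3)
    clf.fit(train_X, train_y)
    print(clf.predict(np.array([[0.05, 0.1], [5.1, 5.0]])))   # expected: ['a' 'b']
    print(clf.predict_proba(np.array([[0.05, 0.1]])))       # class probabilities ordered ['a', 'b']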
|
[
"fatf.exceptions.IncorrectShapeError",
"fatf.exceptions.UnfittedModelError",
"fatf.utils.array.validation.is_structured_array",
"numpy.argsort",
"fatf.utils.array.validation.is_1d_array",
"numpy.array",
"fatf.utils.array.validation.is_2d_array",
"fatf.exceptions.PrefittedModelError",
"numpy.where",
"numpy.sort",
"fatf.utils.array.validation.is_numerical_array",
"numpy.max",
"numpy.empty",
"numpy.tile",
"fatf.utils.distances.binary_array_distance",
"fatf.utils.array.validation.are_similar_dtype_arrays",
"numpy.unique",
"numpy.argpartition",
"numpy.logical_or",
"fatf.utils.distances.euclidean_array_distance",
"numpy.zeros",
"numpy.sum",
"fatf.utils.array.tools.indices_by_type",
"numpy.ndarray"
] |
[((6935, 6953), 'numpy.ndarray', 'np.ndarray', (['(0, 0)'], {}), '((0, 0))\n', (6945, 6953), True, 'import numpy as np\n'), ((7004, 7020), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7014, 7020), True, 'import numpy as np\n'), ((7105, 7121), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7115, 7121), True, 'import numpy as np\n'), ((7155, 7171), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7165, 7171), True, 'import numpy as np\n'), ((7212, 7228), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7222, 7228), True, 'import numpy as np\n'), ((7338, 7354), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7348, 7354), True, 'import numpy as np\n'), ((7390, 7406), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7400, 7406), True, 'import numpy as np\n'), ((9716, 9739), 'fatf.utils.array.tools.indices_by_type', 'fuat.indices_by_type', (['X'], {}), '(X)\n', (9736, 9739), True, 'import fatf.utils.array.tools as fuat\n'), ((9879, 9906), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (9903, 9906), True, 'import fatf.utils.array.validation as fuav\n'), ((11425, 11443), 'numpy.ndarray', 'np.ndarray', (['(0, 0)'], {}), '((0, 0))\n', (11435, 11443), True, 'import numpy as np\n'), ((11462, 11478), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11472, 11478), True, 'import numpy as np\n'), ((11531, 11547), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11541, 11547), True, 'import numpy as np\n'), ((11581, 11597), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11591, 11597), True, 'import numpy as np\n'), ((11638, 11654), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11648, 11654), True, 'import numpy as np\n'), ((11764, 11780), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11774, 11780), True, 'import numpy as np\n'), ((11816, 11832), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11826, 11832), True, 'import numpy as np\n'), ((12921, 12940), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (12937, 12940), True, 'import fatf.utils.array.validation as fuav\n'), ((12992, 13033), 'fatf.utils.array.validation.are_similar_dtype_arrays', 'fuav.are_similar_dtype_arrays', (['X', 'self._X'], {}), '(X, self._X)\n', (13021, 13033), True, 'import fatf.utils.array.validation as fuav\n'), ((13190, 13215), 'numpy.zeros', 'np.zeros', (['distances_shape'], {}), '(distances_shape)\n', (13198, 13215), True, 'import numpy as np\n'), ((13246, 13271), 'numpy.zeros', 'np.zeros', (['distances_shape'], {}), '(distances_shape)\n', (13254, 13271), True, 'import numpy as np\n'), ((14436, 14463), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['distances'], {}), '(distances)\n', (14452, 14463), True, 'import fatf.utils.array.validation as fuav\n'), ((16602, 16625), 'numpy.empty', 'np.empty', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (16610, 16625), True, 'import numpy as np\n'), ((21675, 21722), 'numpy.empty', 'np.empty', (['(X.shape[0], self._unique_y.shape[0])'], {}), '((X.shape[0], self._unique_y.shape[0]))\n', (21683, 21722), True, 'import numpy as np\n'), ((8386, 8444), 'fatf.exceptions.PrefittedModelError', 'PrefittedModelError', (['"""This model has already been fitted."""'], {}), "('This model has already been fitted.')\n", (8405, 8444), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((8460, 8479), 
'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (8476, 8479), True, 'import fatf.utils.array.validation as fuav\n'), ((8499, 8570), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The training data must be a 2-dimensional array."""'], {}), "('The training data must be a 2-dimensional array.')\n", (8518, 8570), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((8627, 8646), 'fatf.utils.array.validation.is_1d_array', 'fuav.is_1d_array', (['y'], {}), '(y)\n', (8643, 8646), True, 'import fatf.utils.array.validation as fuav\n'), ((8666, 8744), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The training data labels must be a 1-dimensional array."""'], {}), "('The training data labels must be a 1-dimensional array.')\n", (8685, 8744), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((8832, 8906), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The data array has to have at least one data point."""'], {}), "('The data array has to have at least one data point.')\n", (8851, 8906), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((9167, 9238), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The data array has to have at least one feature."""'], {}), "('The data array has to have at least one feature.')\n", (9186, 9238), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((9335, 9441), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The number of samples in X must be the same as the number of labels in y."""'], {}), "(\n 'The number of samples in X must be the same as the number of labels in y.'\n )\n", (9354, 9441), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((10020, 10058), 'numpy.unique', 'np.unique', (['self._y'], {'return_counts': '(True)'}), '(self._y, return_counts=True)\n', (10029, 10058), True, 'import numpy as np\n'), ((10139, 10159), 'numpy.argsort', 'np.argsort', (['unique_y'], {}), '(unique_y)\n', (10149, 10159), True, 'import numpy as np\n'), ((10482, 10518), 'numpy.sort', 'np.sort', (['self._unique_y[top_y_index]'], {}), '(self._unique_y[top_y_index])\n', (10489, 10518), True, 'import numpy as np\n'), ((10774, 10790), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (10784, 10790), True, 'import numpy as np\n'), ((10828, 10844), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (10838, 10844), True, 'import numpy as np\n'), ((10889, 10905), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (10899, 10905), True, 'import numpy as np\n'), ((11316, 11373), 'fatf.exceptions.UnfittedModelError', 'UnfittedModelError', (['"""This model has not been fitted yet."""'], {}), "('This model has not been fitted yet.')\n", (11334, 11373), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((15484, 15541), 'fatf.exceptions.UnfittedModelError', 'UnfittedModelError', (['"""This model has not been fitted yet."""'], {}), "('This model has not been fitted yet.')\n", (15502, 15541), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((15557, 15576), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (15573, 15576), True, 'import 
fatf.utils.array.validation as fuav\n'), ((15596, 15763), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must be a 2-dimensional array. If you want to predict a single data point please format it as a single row in a 2-dimensional array."""'], {}), "(\n 'X must be a 2-dimensional array. If you want to predict a single data point please format it as a single row in a 2-dimensional array.'\n )\n", (15615, 15763), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((15892, 15933), 'fatf.utils.array.validation.are_similar_dtype_arrays', 'fuav.are_similar_dtype_arrays', (['X', 'self._X'], {}), '(X, self._X)\n', (15921, 15933), True, 'import fatf.utils.array.validation as fuav\n'), ((16093, 16145), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must have at least one row."""'], {}), "('X must have at least one row.')\n", (16112, 16145), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((16273, 16300), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (16297, 16300), True, 'import fatf.utils.array.validation as fuav\n'), ((16912, 16955), 'numpy.argpartition', 'np.argpartition', (['distances', 'self._k'], {'axis': '(0)'}), '(distances, self._k, axis=0)\n', (16927, 16955), True, 'import numpy as np\n'), ((19096, 19117), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (19104, 19117), True, 'import numpy as np\n'), ((19158, 19203), 'numpy.array', 'np.array', (['(X.shape[0] * [self._majority_label])'], {}), '(X.shape[0] * [self._majority_label])\n', (19166, 19203), True, 'import numpy as np\n'), ((20555, 20612), 'fatf.exceptions.UnfittedModelError', 'UnfittedModelError', (['"""This model has not been fitted yet."""'], {}), "('This model has not been fitted yet.')\n", (20573, 20612), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((20628, 20647), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (20644, 20647), True, 'import fatf.utils.array.validation as fuav\n'), ((20667, 20834), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must be a 2-dimensional array. If you want to predict a single data point please format it as a single row in a 2-dimensional array."""'], {}), "(\n 'X must be a 2-dimensional array. 
If you want to predict a single data point please format it as a single row in a 2-dimensional array.'\n )\n", (20686, 20834), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((20963, 21004), 'fatf.utils.array.validation.are_similar_dtype_arrays', 'fuav.are_similar_dtype_arrays', (['X', 'self._X'], {}), '(X, self._X)\n', (20992, 21004), True, 'import fatf.utils.array.validation as fuav\n'), ((21164, 21216), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must have at least one row."""'], {}), "('X must have at least one row.')\n", (21183, 21216), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((21344, 21371), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (21368, 21371), True, 'import fatf.utils.array.validation as fuav\n'), ((21821, 21864), 'numpy.argpartition', 'np.argpartition', (['distances', 'self._k'], {'axis': '(0)'}), '(distances, self._k, axis=0)\n', (21836, 21864), True, 'import numpy as np\n'), ((22417, 22440), 'numpy.array', 'np.array', (['probabilities'], {}), '(probabilities)\n', (22425, 22440), True, 'import numpy as np\n'), ((22483, 22537), 'numpy.tile', 'np.tile', (['self._unique_y_probabilities', '(X.shape[0], 1)'], {}), '(self._unique_y_probabilities, (X.shape[0], 1))\n', (22490, 22537), True, 'import numpy as np\n'), ((9100, 9127), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (9124, 9127), True, 'import fatf.utils.array.validation as fuav\n'), ((9516, 9542), 'fatf.utils.array.validation.is_numerical_array', 'fuav.is_numerical_array', (['y'], {}), '(y)\n', (9539, 9542), True, 'import fatf.utils.array.validation as fuav\n'), ((10401, 10430), 'numpy.max', 'np.max', (['self._unique_y_counts'], {}), '(self._unique_y_counts)\n', (10407, 10430), True, 'import numpy as np\n'), ((13392, 13488), 'fatf.utils.distances.binary_array_distance', 'fud.binary_array_distance', (['self._X[self._categorical_indices]', 'X[self._categorical_indices]'], {}), '(self._X[self._categorical_indices], X[self.\n _categorical_indices])\n', (13417, 13488), True, 'import fatf.utils.distances as fud\n'), ((13608, 13703), 'fatf.utils.distances.euclidean_array_distance', 'fud.euclidean_array_distance', (['self._X[self._numerical_indices]', 'X[self._numerical_indices]'], {}), '(self._X[self._numerical_indices], X[self.\n _numerical_indices])\n', (13636, 13703), True, 'import fatf.utils.distances as fud\n'), ((13841, 13943), 'fatf.utils.distances.binary_array_distance', 'fud.binary_array_distance', (['self._X[:, self._categorical_indices]', 'X[:, self._categorical_indices]'], {}), '(self._X[:, self._categorical_indices], X[:, self.\n _categorical_indices])\n', (13866, 13943), True, 'import fatf.utils.distances as fud\n'), ((14063, 14164), 'fatf.utils.distances.euclidean_array_distance', 'fud.euclidean_array_distance', (['self._X[:, self._numerical_indices]', 'X[:, self._numerical_indices]'], {}), '(self._X[:, self._numerical_indices], X[:, self\n ._numerical_indices])\n', (14091, 14164), True, 'import fatf.utils.distances as fud\n'), ((22019, 22062), 'numpy.unique', 'np.unique', (['close_labels'], {'return_counts': '(True)'}), '(close_labels, return_counts=True)\n', (22028, 22062), True, 'import numpy as np\n'), ((22094, 22108), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (22100, 22108), True, 'import numpy as np\n'), ((22133, 22169), 'numpy.zeros', 
'np.zeros', (['(self._unique_y.shape[0],)'], {}), '((self._unique_y.shape[0],))\n', (22141, 22169), True, 'import numpy as np\n'), ((17152, 17195), 'numpy.unique', 'np.unique', (['close_labels'], {'return_counts': '(True)'}), '(close_labels, return_counts=True)\n', (17161, 17195), True, 'import numpy as np\n'), ((17510, 17542), 'numpy.sort', 'np.sort', (['values[top_label_index]'], {}), '(values[top_label_index])\n', (17517, 17542), True, 'import numpy as np\n'), ((17886, 17929), 'numpy.array', 'np.array', (['(self._unique_y.shape[0] * [False])'], {}), '(self._unique_y.shape[0] * [False])\n', (17894, 17929), True, 'import numpy as np\n'), ((18728, 18767), 'numpy.sort', 'np.sort', (['g_top_label[g_top_label_index]'], {}), '(g_top_label[g_top_label_index])\n', (18735, 18767), True, 'import numpy as np\n'), ((22246, 22283), 'numpy.where', 'np.where', (['(self._unique_y == values[i])'], {}), '(self._unique_y == values[i])\n', (22254, 22283), True, 'import numpy as np\n'), ((18137, 18201), 'numpy.logical_or', 'np.logical_or', (['labels_filter', 'unique_y_filter'], {'out': 'labels_filter'}), '(labels_filter, unique_y_filter, out=labels_filter)\n', (18150, 18201), True, 'import numpy as np\n'), ((18627, 18653), 'numpy.max', 'np.max', (['g_top_label_counts'], {}), '(g_top_label_counts)\n', (18633, 18653), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import sys
import haar_cascade as cascade
from datetime import datetime
import os.path
output_dir = "../images"
class SmileDetectStatus:
def __init__(self):
self.begin_take_photo = False
self.face_found = False
self.smile_detected = False
self.restart = False
self.completed = False
self.photo_taken = False
self.splash_screen = 0
self.no_smile_detect = 0
self.smile_detect = 0
class Image:
def __init__(self, cap):
self.cap = cap
def capture_image(self):
ret, img = self.cap.read()
self.captured = cv2.flip(img, 1)
self.annotated = np.copy(self.captured)
class Detector:
def __init__(self, image, status):
self.image = image
self.status = status
def detect_smiles(self):
faces = cascade.detect_faces(self.image.captured)
eyes_detected = False
mouth_detected = False
now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
for (x,y,w,h) in faces:
eyes = cascade.detect_eyes(self.image.captured, (x,y,w,h))
if len(eyes) == 2:
eyes_detected = True
mouth = cascade.detect_mouth(self.image.captured, (x,y,w,h))
if len(mouth) == 1:
mouth_detected = True
if self.status.smile_detected:
color = (0, 255, 0)
elif self.status.face_found:
color = (0, 255, 255)
else:
color = (0,0,255)
face = self.image.annotated[y:y+h, x:x+w]
cv2.rectangle(self.image.annotated, (x, y), (x+w,y+h), color, 2)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(face, (ex,ey), (ex+ew, ey+eh), color)
for (ex, ey, ew, eh) in mouth:
cv2.rectangle(face, (ex,ey), (ex+ew, ey+eh), color)
if self.status.begin_take_photo and not self.status.photo_taken:
print('Taking image')
cv2.imwrite(f'{output_dir}/img_{now_str}.jpg', self.image.captured)
self.status.photo_taken = True
if self.status.photo_taken:
self.image.annotated[:] = 255
self.status.splash_screen += 1
if self.status.splash_screen > 5:
self.status.completed = True
self.status.restart = True
if eyes_detected and mouth_detected and not self.status.photo_taken:
self.status.smile_detect += 1
self.status.no_smile_detect = 0
if self.status.smile_detect >= 25:
self.status.face_found = True
if self.status.smile_detect >= 50:
self.status.smile_detected = True
if self.status.smile_detect >= 100:
print("Smile detected")
self.status.begin_take_photo = True
else:
self.status.no_smile_detect += 1
if self.status.no_smile_detect == 20:
print("No smile was detected")
if self.status.no_smile_detect > 50:
self.status.restart = True
if not self.status.begin_take_photo or len(faces) == 0 or self.status.photo_taken:
cv2.imshow('Smile detector :)', self.image.annotated)
if cv2.waitKey(1) & 0xFF == ord('q'):
self.image.cap.release()
cv2.destroyAllWindows()
sys.exit()
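# --- Illustrative sketch (not part of this script) ----------------------------
# detect_smiles() above debounces noisy per-frame detections with a simple
# counter: roughly 25 consecutive hits confirm a face, 50 confirm a smile and
# 100 trigger the photo. The helper below restates those thresholds in
# isolation; its name is an assumption made purely for illustration.
def _debounce_state(consecutive_smile_frames):
    """Map a consecutive-detection count to the stage used by the loop above."""
    if consecutive_smile_frames >= 100:
        return 'take_photo'
    if consecutive_smile_frames >= 50:
        return 'smile_detected'
    if consecutive_smile_frames >= 25:
        return 'face_found'
    return 'searching'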
def main():
if not os.path.exists(output_dir):
os.makedirs(output_dir)
cap = cv2.VideoCapture(0)
while True:
status = SmileDetectStatus()
while not status.begin_take_photo:
status = SmileDetectStatus()
image = Image(cap)
detector = Detector(image, status)
while not status.smile_detected:
image.capture_image()
detector.detect_smiles()
if status.restart:
print("Restarting...")
break
while status.smile_detected and not status.begin_take_photo:
image.capture_image()
detector.detect_smiles()
if status.restart:
print("Restarting...")
break
while not status.completed:
image.capture_image()
detector.detect_smiles()
if status.restart:
print("Restarting...")
break
if __name__ == '__main__':
main()
|
[
"cv2.rectangle",
"numpy.copy",
"cv2.imwrite",
"cv2.flip",
"cv2.imshow",
"haar_cascade.detect_faces",
"datetime.datetime.now",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"sys.exit",
"haar_cascade.detect_mouth",
"cv2.waitKey",
"haar_cascade.detect_eyes"
] |
[((3573, 3592), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3589, 3592), False, 'import cv2\n'), ((645, 661), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (653, 661), False, 'import cv2\n'), ((687, 709), 'numpy.copy', 'np.copy', (['self.captured'], {}), '(self.captured)\n', (694, 709), True, 'import numpy as np\n'), ((872, 913), 'haar_cascade.detect_faces', 'cascade.detect_faces', (['self.image.captured'], {}), '(self.image.captured)\n', (892, 913), True, 'import haar_cascade as cascade\n'), ((1091, 1145), 'haar_cascade.detect_eyes', 'cascade.detect_eyes', (['self.image.captured', '(x, y, w, h)'], {}), '(self.image.captured, (x, y, w, h))\n', (1110, 1145), True, 'import haar_cascade as cascade\n'), ((1232, 1287), 'haar_cascade.detect_mouth', 'cascade.detect_mouth', (['self.image.captured', '(x, y, w, h)'], {}), '(self.image.captured, (x, y, w, h))\n', (1252, 1287), True, 'import haar_cascade as cascade\n'), ((1633, 1702), 'cv2.rectangle', 'cv2.rectangle', (['self.image.annotated', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(self.image.annotated, (x, y), (x + w, y + h), color, 2)\n', (1646, 1702), False, 'import cv2\n'), ((2039, 2106), 'cv2.imwrite', 'cv2.imwrite', (['f"""{output_dir}/img_{now_str}.jpg"""', 'self.image.captured'], {}), "(f'{output_dir}/img_{now_str}.jpg', self.image.captured)\n", (2050, 2106), False, 'import cv2\n'), ((3263, 3316), 'cv2.imshow', 'cv2.imshow', (['"""Smile detector :)"""', 'self.image.annotated'], {}), "('Smile detector :)', self.image.annotated)\n", (3273, 3316), False, 'import cv2\n'), ((994, 1008), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1006, 1008), False, 'from datetime import datetime\n'), ((1756, 1812), 'cv2.rectangle', 'cv2.rectangle', (['face', '(ex, ey)', '(ex + ew, ey + eh)', 'color'], {}), '(face, (ex, ey), (ex + ew, ey + eh), color)\n', (1769, 1812), False, 'import cv2\n'), ((1867, 1923), 'cv2.rectangle', 'cv2.rectangle', (['face', '(ex, ey)', '(ex + ew, ey + eh)', 'color'], {}), '(face, (ex, ey), (ex + ew, ey + eh), color)\n', (1880, 1923), False, 'import cv2\n'), ((3424, 3447), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3445, 3447), False, 'import cv2\n'), ((3464, 3474), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3472, 3474), False, 'import sys\n'), ((3332, 3346), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3343, 3346), False, 'import cv2\n')]
|
import logging
import time
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch
import torch.nn.functional as functional
import numpy as np
from DTI import models, dataset, cli, utils, analyse
import os
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def main():
utils.setup_seed(18)
start_time = time.time()
    # Load the training and validation datasets
train_set = dataset.CreateDataset(dataset.get_hcp_s1200(), usage='train')
val_set = dataset.CreateDataset(dataset.get_hcp_s1200(), usage='val')
train_loader = DataLoader(train_set, batch_size=args.batch_size, drop_last=False, shuffle=True, pin_memory=True,
num_workers=args.num_workers)
val_loader = DataLoader(val_set, batch_size=args.batch_size, drop_last=True, shuffle=True, pin_memory=True,
num_workers=args.num_workers)
    # Load the network
# if args.MODEL == '1D-CNN':
if args.INPUT_FEATURES != '4' and args.INPUT_FEATURES != 'all':
c = 1
else:
c = 4
if args.MODEL == '1D-CNN':
model = models.HARmodel(c, args.NUM_CLASSES).to(device)
elif args.MODEL == 'CAM-CNN':
model = models.CAM_CNN(c, args.NUM_CLASSES).to(device)
else:
model = None
if os.path.exists(args.LOAD_PATH):
model.load_state_dict(torch.load(args.LOAD_PATH))
optimizer = torch.optim.SGD(model.parameters(), lr=args.LR)
    # Training
val_loss = []
train_loss = []
val_acc = []
train_acc = []
val_precision = []
val_recall = []
    # Print training info
LOG.info("Args:{}".format(args))
for epoch in range(args.epochs):
train_results = train(train_loader, model, optimizer, epoch)
val_results = validation(model, val_loader)
        # Record training results
train_loss.append(train_results['train_loss'])
train_acc.append(train_results['train_acc'])
val_loss.append(val_results['val_loss'])
val_acc.append(val_results['val_acc'])
val_precision.append(val_results['val_precision'])
val_recall.append(val_results['val_recall'])
    # Save training results
torch.save(model.state_dict(), '.\\LOG\\{}.pkl'.format(time.strftime("%Y%m%d-%H%M%S", time.localtime())))
f = open(args.RECORD_PATH, 'a+')
f.writelines('args'+str(args)+'\n')
f.writelines('train_loss'+str(train_loss)+'\n')
f.writelines('train_acc' + str(train_acc)+'\n')
f.writelines('val_loss' + str(val_loss)+'\n')
f.writelines('val_acc' + str(val_acc)+'\n')
f.writelines('val_precision' + str(val_precision)+'\n')
f.writelines('val_recall' + str(val_recall)+'\n')
f.close()
LOG.info("--- main.py finish in %s seconds ---" % (time.time() - start_time))
def train(dataloader, model, optimizer, epoch):
model.train()
train_loss = 0
train_correct = 0
start_time = time.time()
for batch_index, batch_samples in enumerate(dataloader):
# 1.load data to CUDA
x, y = batch_samples['x'].to(device), batch_samples['y'].to(device)
if args.INPUT_FEATURES != '4' and args.INPUT_FEATURES != 'all':
x = x.unsqueeze(1)
else:
x = x.transpose(1, 2)
# 2.forward
output = model(x)
criteria = nn.CrossEntropyLoss()
loss = criteria(output, y.long())
# 3.backward
        optimizer.zero_grad()  # zero the gradients of all parameters
        loss.backward()  # backpropagate to compute gradients
        optimizer.step()  # once all gradients are computed, update every parameter (optimisation step)
# 6.result
pred = output.argmax(dim=1, keepdim=True)
train_correct += pred.eq(y.long().view_as(pred)).sum().item()
train_loss += loss
if batch_index % args.display_batch == 0:
LOG.info("--- training progress rate {}/{} ---".format(batch_index, len(dataloader)))
LOG.info("--- training epoch {} finish in {} seconds ---".format(epoch, (time.time() - start_time)))
LOG.info('\tLoss:{}\tCorrect:{}/{}({})'
.format(train_loss, train_correct, len(dataloader.dataset),
train_correct / len(dataloader.dataset)))
return {
'train_loss': round(train_loss.tolist(), 4),
'train_acc': round(train_correct / len(dataloader.dataset), 4)
}
def validation(model, val_loader):
model.eval()
test_loss = 0
correct = 0
start_time = time.time()
with torch.no_grad():
pred_list = []
target_list = []
for batch_index, batch_samples in enumerate(val_loader):
# 1.load data to CUDA
x, y = batch_samples['x'].to('cuda'), batch_samples['y'].to('cuda')
if args.INPUT_FEATURES != '4' and args.INPUT_FEATURES != 'all':
x = x.unsqueeze(1)
else:
x = x.transpose(2, 1)
# 2.forward
output = model(x)
pred = output.argmax(dim=1, keepdim=True)
# 3.result
criteria = nn.CrossEntropyLoss()
test_loss += criteria(output, y)
correct += pred.eq(y.view_as(pred)).sum().item()
y = y.cpu().numpy()
pred_list = np.append(pred_list, pred.cpu().numpy())
target_list = np.append(target_list, y)
LOG.info("--- validation epoch finish in {} seconds --- Loss:{}\tCorrect:{}/{}({})"
.format(time.time() - start_time, test_loss, correct, len(val_loader.dataset), correct / len(val_loader.dataset)))
val_result = analyse.analyse_3class(target_list, pred_list)
return {
'val_loss': round(test_loss.cpu().numpy().tolist(), 4),
'val_acc': round(correct / len(val_loader.dataset), 4),
'val_precision': val_result['precision'],
'val_recall': val_result['recall'],
}
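# --- Illustrative sketch (not part of this training script) -------------------
# Both loops above pair nn.CrossEntropyLoss with an argmax-based accuracy count.
# The minimal snippet below reproduces that bookkeeping on dummy tensors so the
# shapes are easy to follow; the logits and targets are assumptions made purely
# for illustration.
def _loss_and_accuracy_demo():
    logits = torch.tensor([[2.0, 0.1, 0.3], [0.2, 0.1, 3.0]])  # (batch, num_classes)
    targets = torch.tensor([0, 2])                             # ground-truth class indices
    loss = nn.CrossEntropyLoss()(logits, targets)
    pred = logits.argmax(dim=1, keepdim=True)                  # predicted class per sample
    correct = pred.eq(targets.view_as(pred)).sum().item()
    return loss.item(), correct / targets.shape[0]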
if __name__ == '__main__':
LOG = logging.getLogger('main')
logging.basicConfig(level=logging.INFO)
args = cli.create_parser().parse_args()
main()
|
[
"logging.getLogger",
"os.path.exists",
"logging.basicConfig",
"DTI.dataset.get_hcp_s1200",
"DTI.models.HARmodel",
"torch.nn.CrossEntropyLoss",
"DTI.utils.setup_seed",
"torch.load",
"numpy.append",
"torch.cuda.is_available",
"DTI.analyse.analyse_3class",
"DTI.cli.create_parser",
"torch.utils.data.DataLoader",
"DTI.models.CAM_CNN",
"torch.no_grad",
"time.localtime",
"time.time"
] |
[((381, 401), 'DTI.utils.setup_seed', 'utils.setup_seed', (['(18)'], {}), '(18)\n', (397, 401), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((419, 430), 'time.time', 'time.time', ([], {}), '()\n', (428, 430), False, 'import time\n'), ((615, 747), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'drop_last': '(False)', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.num_workers'}), '(train_set, batch_size=args.batch_size, drop_last=False, shuffle=\n True, pin_memory=True, num_workers=args.num_workers)\n', (625, 747), False, 'from torch.utils.data import DataLoader\n'), ((790, 919), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'drop_last': '(True)', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.num_workers'}), '(val_set, batch_size=args.batch_size, drop_last=True, shuffle=\n True, pin_memory=True, num_workers=args.num_workers)\n', (800, 919), False, 'from torch.utils.data import DataLoader\n'), ((1326, 1356), 'os.path.exists', 'os.path.exists', (['args.LOAD_PATH'], {}), '(args.LOAD_PATH)\n', (1340, 1356), False, 'import os\n'), ((2890, 2901), 'time.time', 'time.time', ([], {}), '()\n', (2899, 2901), False, 'import time\n'), ((4388, 4399), 'time.time', 'time.time', ([], {}), '()\n', (4397, 4399), False, 'import time\n'), ((5490, 5536), 'DTI.analyse.analyse_3class', 'analyse.analyse_3class', (['target_list', 'pred_list'], {}), '(target_list, pred_list)\n', (5512, 5536), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((5818, 5843), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (5835, 5843), False, 'import logging\n'), ((5848, 5887), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (5867, 5887), False, 'import logging\n'), ((325, 350), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (348, 350), False, 'import torch\n'), ((482, 505), 'DTI.dataset.get_hcp_s1200', 'dataset.get_hcp_s1200', ([], {}), '()\n', (503, 505), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((558, 581), 'DTI.dataset.get_hcp_s1200', 'dataset.get_hcp_s1200', ([], {}), '()\n', (579, 581), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((3286, 3307), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3305, 3307), True, 'import torch.nn as nn\n'), ((4409, 4424), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4422, 4424), False, 'import torch\n'), ((1388, 1414), 'torch.load', 'torch.load', (['args.LOAD_PATH'], {}), '(args.LOAD_PATH)\n', (1398, 1414), False, 'import torch\n'), ((4978, 4999), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4997, 4999), True, 'import torch.nn as nn\n'), ((5230, 5255), 'numpy.append', 'np.append', (['target_list', 'y'], {}), '(target_list, y)\n', (5239, 5255), True, 'import numpy as np\n'), ((5899, 5918), 'DTI.cli.create_parser', 'cli.create_parser', ([], {}), '()\n', (5916, 5918), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((1142, 1178), 'DTI.models.HARmodel', 'models.HARmodel', (['c', 'args.NUM_CLASSES'], {}), '(c, args.NUM_CLASSES)\n', (1157, 1178), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((2255, 2271), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2269, 2271), False, 'import time\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import 
time\n'), ((3931, 3942), 'time.time', 'time.time', ([], {}), '()\n', (3940, 3942), False, 'import time\n'), ((5366, 5377), 'time.time', 'time.time', ([], {}), '()\n', (5375, 5377), False, 'import time\n'), ((1240, 1275), 'DTI.models.CAM_CNN', 'models.CAM_CNN', (['c', 'args.NUM_CLASSES'], {}), '(c, args.NUM_CLASSES)\n', (1254, 1275), False, 'from DTI import models, dataset, cli, utils, analyse\n')]
|
import numpy as np
from io import BytesIO
import wave
import struct
from dcase_models.util.gui import encode_audio
#from .utils import save_model_weights,save_model_json, get_data_train, get_data_test
#from .utils import init_model, evaluate_model, load_scaler, save, load
#from .model import debugg_model, prototype_loss,prototypeCNN_maxpool
#from .prototypes import Prototypes
import os
from keras.callbacks import ModelCheckpoint,CSVLogger
from keras.optimizers import Adam
import keras.backend as K
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
import dash_audio_components
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import librosa
import sys
from dcase_models.util.files import save_pickle, load_pickle
from dcase_models.util.data import get_fold_val
#from dcase_models.model.model import debugg_model, modelAPNet
colors = ['#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf']
class_names = (['air conditioner', 'car horn', 'children playing',
'dog bark', 'drilling', 'engine idling', 'gun shot',
                'jack-hammer', 'siren', 'street music'])
class_names2 = (['air<br>conditioner', 'car<br>horn', 'children<br>playing',
'dog<br>bark', 'drilling', 'engine<br>idling', 'gun<br>shot',
'jack-<br>hammer', 'siren', 'street<br>music'])
class_names_av = ['AC', 'CH', 'CP', 'DB', 'DR', 'EI', 'GS', 'JA', 'SI', 'SM']
#from audio_prototypes.utils import load_training_log
from shutil import copyfile
import matplotlib.pyplot as plt
cm = plt.get_cmap('viridis')
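# Module-level helper: builds the 2D scatter (PCA projection) with prototypes
# drawn as crosses and data instances as dots, one trace pair per class.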
def generate_figure2D(model_container, selectedpoints=[],
x_select=0, y_select=1, samples_per_class=10,
label_list=[]):
prototypes_feat,prototypes_mel,protoypes2D,prototypes_classes,_ = model_container.prototypes.get_all_instances()
n_classes = len(label_list) if len(label_list) > 0 else 10
prototype_ixs = np.arange(0,len(prototypes_feat))
x = []
y = []
classes = []
classes_ix = []
prototypes_ixs = []
for class_ix in range(n_classes):
prototypes_class_ix = protoypes2D[prototypes_classes == class_ix]
prototype_ixs_class = prototype_ixs[prototypes_classes == class_ix]
#print(prototypes_class_ix.shape)
xj = []
yj = []
classesj = []
for j in range(len(prototypes_class_ix)):
xj.append(prototypes_class_ix[j, x_select])
yj.append(prototypes_class_ix[j, y_select])
classesj.append('prototype'+str(prototype_ixs_class[j]))
# classes_ix.append(int(prototypes_classes[j]))
x.append(xj)
y.append(yj)
classes.append(classesj)
prototypes_ixs.append(prototype_ixs_class)
centers_feat,centers_mel,centers2D,centers_classes,centers_audio,centers_file_names = model_container.data_instances.get_all_instances()
centers_ixs = np.arange(0,len(centers2D))
x_centers = []
y_centers = []
classes_centers = []
classes_ix_centers = []
### Add this to tests. Delete!!!
#centers2D = self.model_containers[self.fold_test].data_instances.X_feat_2D['X']#self.X_2D[self.fold_test]['X']
#centers_classes = self.model_containers[self.fold_test].data_instances.X_feat_2D['Y']
#centers_ixs = np.arange(0,len(centers2D))
for class_ix in range(n_classes):
centers_class_ix = centers2D[centers_classes == class_ix]
centers_ixs_class = centers_ixs[centers_classes == class_ix]
xj = []
yj = []
classesj = []
for j in range(len(centers_class_ix)):
xj.append(centers_class_ix[j, x_select])
yj.append(centers_class_ix[j, y_select])
classesj.append('center'+str(centers_ixs_class[j]))
# classes_ix.append(int(prototypes_classes[j]))
x_centers.append(xj)
y_centers.append(yj)
classes_centers.append(classesj)
fig = make_subplots(rows=1, cols=1)#, column_widths=[0.8, 0.2])
size = 10
proto_list = []
for label in label_list:
proto_list.append(label + ' (protos.)')
for j in range(n_classes):
s = min(samples_per_class,len(x_centers[j]))
selectedpoints_j = None
if len(selectedpoints) > 0:
proto_ixs = prototypes_ixs[j]
selectedpoints_j = []
for point in selectedpoints:
if point in proto_ixs:
point_i = [i for i,x in enumerate(proto_ixs) if point == x][0]
selectedpoints_j.append(point_i)
fig.add_trace(
go.Scatter(
x=x[j], y=y[j], text=classes[j], name=proto_list[j],
mode='markers',selectedpoints=selectedpoints_j,
marker={'size': size, 'symbol':'cross', 'color':colors[j%10]}),
row=1, col=1
)
fig.add_trace(
go.Scatter(
x=x_centers[j][:s], y=y_centers[j][:s],
text=classes_centers[j][:s], name=label_list[j],
selectedpoints=None,mode='markers',
marker={'size': 5, 'color':colors[j%10], 'opacity':0.6}),
row=1, col=1
)
# if len(selectedpoints) == 0:
# fig.add_trace(go.Scatter(x=x[j], y=y[j],text=classes[j], name= label_list[j],mode='markers',marker={'size': size, 'symbol':'cross', 'color':colors[j]}), row=1, col=1)
# fig.add_trace(go.Scatter(x=x_centers[j][:s], y=y_centers[j][:s],text=classes_centers[j][:s], name= label_list[j],mode='markers',marker={'size': 6,'color':colors[j],'opacity':0.7}), row=1, col=1)
# else:
# proto_ixs = prototypes_ixs[j]
# selectedpoints_j = []
# for point in selectedpoints:
# if point in proto_ixs:
# point_i = [i for i,x in enumerate(proto_ixs) if point == x][0]
# selectedpoints_j.append(point_i)
# fig.add_trace(go.Scatter(x=x[j], y=y[j],text=classes[j], name=label_list[j],mode='markers',selectedpoints=selectedpoints_j,marker={'size': size, 'symbol':'cross', 'color':colors[j]}), row=1, col=1)
# fig.add_trace(go.Scatter(x=x_centers[j][:s], y=y_centers[j][:s],text=classes_centers[j][:s], name=label_list[j],selectedpoints=[],mode='markers',marker={'size': 6,'color':colors[j],'opacity':0.7}), row=1, col=1)
fig.update_layout()
components_dict = {0: 'First', 1: 'Second', 2: 'Third', 3: 'Fourth'}
fig.update_layout(
title="Prototypes and data instances in the 2D space (PCA)",
xaxis_title=components_dict[x_select] + " principal component (x)",
yaxis_title=components_dict[y_select] + " principal component (y)",
clickmode='event+select',
uirevision=True,
width=1000,
height=600,
)
return fig
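# Module-level helper: heatmap of the last dense layer's weights, mapping each
# prototype (x axis) to each class (y axis).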
def generate_figure_weights(model_container, selected=None, label_list=class_names):
fig_weights = go.Figure(
px.imshow(model_container.prototypes.W_dense.T,origin='lower'),
layout=go.Layout(title=go.layout.Title(text="A Bar Chart"))
)
fig_weights.update_traces(dict( showscale=False, colorbar_len=0.1,
coloraxis=None), selector={'type':'heatmap'})
#fig_weights.update_traces(showscale=False)
fig_weights.update_layout(clickmode='event+select')
if selected is not None:
fig_weights.add_trace(go.Scatter(x=[selected],y=[1]))
_,_,_,prototypes_classes,_ = model_container.prototypes.get_all_instances()
xticks = []
for j in range(len(label_list)):
tickj = np.mean(np.argwhere(np.array(prototypes_classes) == j))
xticks.append(tickj)
fig_weights.update_layout(
title="Weights of the last fully-connected layer",
xaxis_title="Prototypes",
yaxis_title="Classes",
#margin = {'l': 10, 'b': 10, 't': 10, 'r': 10},
xaxis = dict(
tickmode = 'array',
tickvals = xticks,
ticktext = label_list #class_names2
),
yaxis = dict(
tickmode = 'array',
tickvals = [i for i in range(len(class_names))],
ticktext = label_list #class_names
),
width=1000,
height=300,
)
return fig_weights
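# Module-level helper: renders a mel-spectrogram as a plotly heatmap.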
def generate_figure_mel(mel_spec):
figure = go.Figure(px.imshow(mel_spec.T,origin='lower'),layout=go.Layout(title=go.layout.Title(text="A Bar Chart")))
figure.update_traces(dict( showscale=False, colorbar_len=0.1,
coloraxis=None), selector={'type':'heatmap'})
figure.update_layout(
title="Mel-spectrogram",
xaxis_title="Time (hops)",
yaxis_title="Mel filter index",
#margin = {'l': 0, 'b': 0, 't': 40, 'r': 10}
)
#figure.layout.coloraxis.showscale = False
return figure
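# Dash GUI that ties together the 2D prototype/instance view, the
# mel-spectrogram and audio preview, the weight heatmap, and the controls
# for deleting prototypes, retraining and evaluating the model.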
class GUI():
def __init__(self, model_containers, data, folds_files, exp_folder_input, exp_folder_output, label_list, params, plot_label_list=None,graph=None):
self.model_containers = model_containers
self.data = data
self.folds_files = folds_files
self.exp_folder_input = exp_folder_input
self.exp_folder_output = exp_folder_output
self.label_list = label_list
self.params = params
if plot_label_list is None:
self.plot_label_list = label_list
else:
self.plot_label_list = plot_label_list
self.graph = graph
self.fold_list = list(model_containers.keys())
self.fold_test = self.fold_list[0]
self.fold_val = get_fold_val(self.fold_test, self.fold_list)
self.samples_per_class = 10
self.x_select = 0
self.y_select = 1
self.click_timestamps = [0,0,0]
self.generate_figure2D()
self.generate_figure_weights()
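    # Builds the Dash layout: 2D scatter, mel-spectrogram + audio player,
    # weight heatmap, and the dropdowns/sliders/buttons that drive the callbacks.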
def generate_layout(self, app):
import tensorflow as tf
external_stylesheets = [
'https://codepen.io/chriddyp/pen/bWLwgP.css',
{
'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',
'rel': 'stylesheet',
'integrity': '<KEY>',
'crossorigin': 'anonymous'
} ]
self.app = app
self.graph = tf.get_default_graph()
self.generate_figure2D()
self.generate_figure_weights()
plot2D = dcc.Graph(id='plot2D', figure=self.figure,
style={"height" : "100%", "width" : "100%"})
_,center_mel_blank,_,_,_,_ = self.model_containers[self.fold_test].data_instances.get_instance_by_index(0)
plot_mel = dcc.Graph(id="plot_mel",
figure = self.generate_figure_mel(center_mel_blank),
style={"width": "70%", "display": "inline-block",'float':'left'}
)
plot_weights = dcc.Graph(id="plot_weights",
figure = self.fig_weigths,
style={"width": "100%", "display": "inline-block"}
)
audio = dash_audio_components.DashAudioComponents(id='audio-player', src="",
autoPlay=False, controls=True)
button_delete = html.Button('Delete prototype',id='delete_and_convert', className='button',n_clicks_timestamp=0,style={'display':'none','width':'70%'})
button_eval = html.Button('Evaluate model',id='eval', className='button',n_clicks_timestamp=0,style={'width':'70%'})
button_load = html.Button('Load best weigths',id='load_weigths', className='button',n_clicks_timestamp=0,style={'width':'70%'})
button_train = html.Button('Train model',id='train', className='button',n_clicks_timestamp=0,style={'width':'70%'})
button_reset = html.Button('Reset model',id='reset', className='button',n_clicks_timestamp=0,style={'width':'70%'})
output_eval = html.Div(id='output_eval',style={'width':'20%'})
output_text = html.Div(id='output_text')
output_interval = html.Div(id='output_interval')
input_epochs = dcc.Input(id="input_epochs", type="number", placeholder="epochs",min=1, max=100, step=1,style={'width':'33%'})#,value=10)
input_lr = dcc.Input(id="learning_rate", type="number", placeholder="learning_rate",min=0.0000001, max=1,style={'width':'33%'})
input_bs = dcc.Input(id="batch_size", type="number", placeholder="batch_size",min=32, max=512, step=32,style={'width':'33%'})#,value=64)
slider_samples = html.Div(dcc.Slider(id='samples_per_class',min=1,max=500, step=1,value=10,vertical=False),style={'width':'100%'})
interval = dcc.Interval(id='interval-component', interval=1*1000, # in milliseconds
n_intervals=0)
options = []
for fold in self.fold_list:
option = {'value': fold, 'label': fold}
options.append(option)
fold_select = dcc.Dropdown(id='fold_select',options=options,value=self.fold_test,style={'width':'85%'})
#model_select = dcc.Dropdown(id='model_select',options=available_models,value=model_input_name,style={'width':'85%'})
#input_model_output = dcc.Input(id="input_model_output", type="text", placeholder="model output",style={'width':'70%'},value=model_output_name)#,value=64)
options = []
for j in range(4):
options.append({'label':'component '+str(j+1),'value':j})
x_select = dcc.Dropdown(id='x_select',options=options,value=0,style={'width': '80%'})
y_select = dcc.Dropdown(id='y_select',options=options,value=1,style={'width': '80%'})
eval_div = html.Div([button_eval,output_eval],style={'columnCount': 2,'width':'50%'})
train_div = html.Div([input_epochs,input_lr,input_bs],style={'width':'70%'})#,style={'columnCount': 4,'width':'80%'})
model_div = html.Div([fold_select],style={'columnCount': 3,'width':'80%'})
model_prop_div = html.Div([button_load,button_reset],style={'columnCount': 2,'width':'50%'})
#self.app.layout = html.Div([ html.Div([plot_mel, graph2,plot_weights ], className="nine columns",style={'height':'80vh'}) ])
self.app.layout = html.Div([
html.Div([
html.Div([x_select], className="two columns",style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),
html.Div([y_select], className="two columns",style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),
html.Div([slider_samples], className="three columns",style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),
], className="row", style={'height':'10vh'}),
html.Div([
#html.Div([slider_samples], className="one column"),
html.Div([plot2D], className="nine columns",style={'height':'80vh'}),
html.Div([plot_mel,html.Br(),audio,html.Br(),button_delete,html.Br(),button_eval,html.Br(),output_eval,html.Br(),button_load,html.Br(),
button_reset,html.Br(),button_train,html.Br(),train_div,html.Br(),
html.Br(),fold_select, #model_select,input_model_output
html.Br(),html.Br(),output_text,interval], className="three columns"),
], className="row", style={'height':'80vh'}),
# html.Div([
# html.Div([slider_samples], className="nine columns", style={'align':'center'})
# ], className="row"),
html.Div([
# html.Div([], className="two columns"),
html.Div([plot_weights], className="six columns"),
#html.Div([graph_log], className="six columns")
], className="row", style={'height':'30vh'})
])
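    # Instance-method version of the module-level generate_figure2D, bound to
    # the current test fold and the currently selected PCA components.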
def generate_figure2D(self,selectedpoints=[]):
prototypes_feat,prototypes_mel,protoypes2D,prototypes_classes,_ = self.model_containers[self.fold_test].prototypes.get_all_instances()
prototype_ixs = np.arange(0,len(prototypes_feat))
x = []
y = []
classes = []
classes_ix = []
prototypes_ixs = []
for class_ix in range(10):
prototypes_class_ix = protoypes2D[prototypes_classes == class_ix]
prototype_ixs_class = prototype_ixs[prototypes_classes == class_ix]
xj = []
yj = []
classesj = []
for j in range(len(prototypes_class_ix)):
xj.append(prototypes_class_ix[j,self.x_select])
yj.append(prototypes_class_ix[j,self.y_select])
classesj.append('prototype'+str(prototype_ixs_class[j]))
# classes_ix.append(int(prototypes_classes[j]))
x.append(xj)
y.append(yj)
classes.append(classesj)
prototypes_ixs.append(prototype_ixs_class)
centers_feat,centers_mel,centers2D,centers_classes,centers_audio,centers_file_names = self.model_containers[self.fold_test].data_instances.get_all_instances()
centers_ixs = np.arange(0,len(centers2D))
x_centers = []
y_centers = []
classes_centers = []
classes_ix_centers = []
### Add this to tests. Delete!!!
#centers2D = self.model_containers[self.fold_test].data_instances.X_feat_2D['X']#self.X_2D[self.fold_test]['X']
#centers_classes = self.model_containers[self.fold_test].data_instances.X_feat_2D['Y']
#centers_ixs = np.arange(0,len(centers2D))
for class_ix in range(10):
centers_class_ix = centers2D[centers_classes == class_ix]
centers_ixs_class = centers_ixs[centers_classes == class_ix]
xj = []
yj = []
classesj = []
for j in range(len(centers_class_ix)):
xj.append(centers_class_ix[j,self.x_select])
yj.append(centers_class_ix[j,self.y_select])
classesj.append('center'+str(centers_ixs_class[j]))
# classes_ix.append(int(prototypes_classes[j]))
x_centers.append(xj)
y_centers.append(yj)
classes_centers.append(classesj)
fig = make_subplots(rows=1, cols=1)#, column_widths=[0.8, 0.2])
size = 12
for j in range(10):
s = min(self.samples_per_class,len(x_centers[j]))
print(s,self.samples_per_class,len(x_centers[j]))
if len(selectedpoints) == 0:
fig.add_trace(go.Scatter(x=x[j], y=y[j],text=classes[j], name=self.label_list[j],mode='markers',marker={'size': size, 'symbol':'cross', 'color':colors[j]}), row=1, col=1)
fig.add_trace(go.Scatter(x=x_centers[j][:s], y=y_centers[j][:s],text=classes_centers[j][:s], name=self.label_list[j],mode='markers',marker={'size': 6,'color':colors[j],'opacity':0.7}), row=1, col=1)
else:
proto_ixs = prototypes_ixs[j]
selectedpoints_j = []
for point in selectedpoints:
if point in proto_ixs:
print(point,proto_ixs)
point_i = [i for i,x in enumerate(proto_ixs) if point == x][0]
selectedpoints_j.append(point_i)
fig.add_trace(go.Scatter(x=x[j], y=y[j],text=classes[j], name=self.label_list[j],mode='markers',selectedpoints=selectedpoints_j,marker={'size': size, 'symbol':'cross', 'color':colors[j]}), row=1, col=1)
fig.add_trace(go.Scatter(x=x_centers[j][:s], y=y_centers[j][:s],text=classes_centers[j][:s], name=self.label_list[j],selectedpoints=[],mode='markers',marker={'size': 6,'color':colors[j],'opacity':0.7}), row=1, col=1)
fig.update_layout()
components_dict = {0: 'First', 1: 'Second', 2: 'Third', 3: 'Fourth'}
fig.update_layout(
title="Prototypes and k-means centers in the 2D space (PCA)",
xaxis_title=components_dict[self.x_select] + " principal component (x)",
yaxis_title=components_dict[self.y_select] + " principal component (y)",
clickmode='event+select',uirevision=True
)
self.figure = fig
return fig
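    # Plots validation accuracy per epoch for the logged training runs.
    # NOTE: relies on load_training_log (import commented out near the top) and
    # on self.weights_folder / self.model_output_name / self.training_logs,
    # which are not set in __init__ as shown.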
def generate_figure_training(self):
data = []
weights_folder = os.path.join(self.weights_folder, self.model_output_name)
if len(self.training_logs) > 0:
for j,training_log in enumerate(self.training_logs[self.fold_test]):
#print(training_log)
epochs,val_acc,name = training_log['epochs'],training_log['val_acc'],training_log['name']
if training_log['training'] == True:
epochs, val_acc = load_training_log(weights_folder,self.fold_test,row_ix=11)
if len(epochs) > 0:
best = 0
for val in val_acc:
if float(val)>best:
best = float(val)
data.append({'x':epochs,'y': val_acc,'name': name,'mode': 'markers','marker': {'size': 8, 'color': colors[j]}}) #'val_acc_'+
data.append({'x':[epochs[0],epochs[-1]],'y': [best,best],'name': 'best_'+name,'mode': 'lines','marker': {'color': colors[j]}}) #'best_val_acc_'+
self.figure_training = go.Figure(data=data)
self.figure_training.update_layout(
title="Accuracy on the validation set",
xaxis_title="Accuracy",
yaxis_title="Number of epochs",
clickmode= 'event+select',uirevision=True
)
else:
self.figure_training = go.Figure(data={'x':[0],'y': [0]})
self.figure_training.update_layout(
title="Accuracy on the validation set",
xaxis_title="Accuracy",
yaxis_title="Number of epochs",
clickmode= 'event+select',uirevision=True
)
return self.figure_training
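    # Instance-method version of the module-level generate_figure_weights,
    # bound to the current test fold.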
def generate_figure_weights(self,selected=None):
fig_weigths = go.Figure(px.imshow(self.model_containers[self.fold_test].prototypes.W_dense.T,origin='lower'),layout=go.Layout(title=go.layout.Title(text="A Bar Chart")))
fig_weigths.update_layout(clickmode='event+select')
if selected is not None:
fig_weigths.add_trace(go.Scatter(x=[selected],y=[1]))
_,_,_,prototypes_classes,_ = self.model_containers[self.fold_test].prototypes.get_all_instances()
xticks = []
for j in range(10):
tickj = np.mean(np.argwhere(np.array(prototypes_classes) == j))
xticks.append(tickj)
self.fig_weigths = fig_weigths
self.fig_weigths.update_layout(
title="Weights of the last fully-connected layer",
xaxis_title="Prototypes",
yaxis_title="Classes",
#margin = {'l': 0, 'b': 0, 't': 40, 'r': 10}
xaxis = dict(
tickmode = 'array',
tickvals = xticks,
ticktext = class_names2
),
yaxis = dict(
tickmode = 'array',
tickvals = [i for i in range(len(class_names))],
ticktext = class_names
)
)
return fig_weigths
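    # Instance-method duplicate of the module-level generate_figure_mel above.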
def generate_figure_mel(self,mel_spec):
figure = go.Figure(px.imshow(mel_spec.T,origin='lower'),layout=go.Layout(title=go.layout.Title(text="A Bar Chart")))
figure.update_layout(
title="Mel-spectrogram",
xaxis_title="Time (hops)",
yaxis_title="Mel filter index",
#margin = {'l': 0, 'b': 0, 't': 40, 'r': 10}
)
#figure.layout.coloraxis.showscale = False
return figure
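    # Overlays the mel-spectrogram of the data instance nearest to the hovered
    # point as an image on the 2D scatter figure.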
def add_mel_to_figure(self,hoverData):
point = np.array([hoverData['points'][0]['x'],hoverData['points'][0]['y']])
dist_protos = self.model_containers[self.fold_test].prototypes.get_distances(point,components=(self.x_select,self.y_select))
dist_data = self.model_containers[self.fold_test].data_instances.get_distances(point,components=(self.x_select,self.y_select))
#print(np.amin(dist_data),np.amin(dist_protos))
        if np.amin(dist_data) <= np.amin(dist_protos):  # closer to a data instance (k-means center) than to any prototype
arg_dist = np.argmin(dist_data)
# print(arg_dist)
(center_mel,center_feat,center_2D,
center_class,center_file,center_audio)=self.model_containers[self.fold_test].data_instances.get_center(arg_dist)
from PIL import Image
            image_array = np.random.randint(0, 255, size=(100, 100)).astype('uint8')  # unused placeholder; the overlay image is built from center_mel below
center_mel = cm(center_mel.T)
#center_mel = 255*(center_mel-np.amin(center_mel))/(np.amax(center_mel)-np.amin(center_mel))
image = Image.fromarray((center_mel[:, :, :3] * 255).astype('uint8'))
layout= go.Layout(images= [dict(
source= image,
xref= "x",
yref= "y",
x= hoverData['points'][0]['x']-0.5,
y= hoverData['points'][0]['y']+2,
sizex= 2,
sizey= 2,
#sizing= "stretch",
opacity= 1.0#,layer= "below"
)])
self.figure.update_layout(layout)
else:
arg_dist = np.argmin(dist_protos)
(proto_feat,proto_mel,
proto_2D,proto_class,proto_audio) = self.model_containers[self.fold_test].prototypes.get_prototype_by_index(arg_dist)
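    # Click handler: finds the nearest prototype or data instance to the clicked
    # point and returns [mel figure, audio source, button label, button style].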
def display_plot(self,clickData):
temp_mel = np.ones((64,128))
if isinstance(clickData,dict):
point = np.array([clickData['points'][0]['x'],clickData['points'][0]['y']])
print(self.fold_test)
dist_protos = self.model_containers[self.fold_test].prototypes.get_distances(point,components=(self.x_select,self.y_select))
dist_data = self.model_containers[self.fold_test].data_instances.get_distances(point,components=(self.x_select,self.y_select))
            if np.amin(dist_data) <= np.amin(dist_protos):  # closer to a data instance (k-means center) than to any prototype
arg_dist = np.argmin(dist_data)
(center_feat,center_mel,center_2D,
center_class,center_audio,center_file)=self.model_containers[self.fold_test].data_instances.get_instance_by_index(arg_dist)
self.selected = {'type': 'center', 'id': arg_dist}
figure = self.generate_figure_mel(center_mel)
data, sr = librosa.core.load(center_file)
return [figure,
{'autoPlay': True, 'src': encode_audio(data,sr)}, #encode_audio(center_audio['data'],center_audio['sr'])
"Convert center to Prototype", {'display':'inline-block','width':'70%'}]
else:
arg_dist = np.argmin(dist_protos)
(proto_feat,proto_mel,
proto_2D,proto_class,proto_audio) = self.model_containers[self.fold_test].prototypes.get_instance_by_index(arg_dist)
self.selected = {'type': 'prototype', 'id': arg_dist}
figure = self.generate_figure_mel(proto_mel)
return [figure,
{'autoPlay': True, 'src': encode_audio(proto_audio['data'],proto_audio['sr'])},
"Delete Prototype", {'display':'inline-block','width':'70%'}]
else:
return [self.generate_figure_mel(temp_mel), {'autoPlay': False, 'src': ''},
"Select a point", {'display':'none','width':'70%'}]
def buttons_and_others(self,btn1,btn2,btn3,fold_selected,clickData,samples_per_class,x_select,y_select,epochs,learning_rate,batch_size,selectedData,selectedData_w):
if x_select != self.x_select:
self.x_select = x_select
self.generate_figure2D()
return [self.figure, self.fig_weigths]
if y_select != self.y_select:
self.y_select = y_select
self.generate_figure2D()
return [self.figure, self.fig_weigths]
if samples_per_class != self.samples_per_class:
#print(samples_per_class,self.samples_per_class)
self.samples_per_class = samples_per_class
self.generate_figure2D()
#print('new figure')
return [self.figure, self.fig_weigths]
# if model_select != self.model_input_name:
# print(model_select,self.model_input_name)
# self.model_input_name = model_select
# scaler_path = os.path.join(scaler_folder, 'base')
# self.load_model_prototypes_centers(folds_data_test,folds_files,scaler_path)
# return [self.figure, self.fig_weigths]
#print(clickData,selectedData_w)
#self.generate_figure2D(selectedpoints=clickData)
#print(fold_selected,self.fold_test)
if fold_selected != self.fold_test:
self.change_fold(fold_selected)
return [self.figure, self.fig_weigths]
#print(clickData)
if clickData is not None:
selected_prototype = clickData['points'][0]['x']
#print(selected_prototype,self.selected_prototype)
if selected_prototype != self.selected_prototype:
self.selected_prototype = selected_prototype
self.generate_figure2D([selected_prototype])
#print(clickData)
return [self.figure, self.fig_weigths]
#print(btn1,btn2,btn3,self.click_timestamps[0],self.click_timestamps[1],self.click_timestamps[2])
if int(btn1) > self.click_timestamps[0]:
self.click_timestamps[0] = int(btn1)
self.click_delete(selectedData)
if int(btn2) > self.click_timestamps[1]:
self.click_timestamps[1] = int(btn2)
self.click_reset()
if int(btn3) > self.click_timestamps[2]:
self.click_timestamps[2] = int(btn3)
msg = 'Button 3 was most recently clicked'
if epochs is not None:
self.params['train']['epochs'] = int(epochs)
if learning_rate is not None:
self.params['train']['learning_rate'] = learning_rate
if batch_size is not None:
self.params['train']['batch_size'] = int(batch_size)
print(epochs,learning_rate,batch_size)
self.train_model()
#self.model_output_name = model_output_name
#scaler_path = os.path.join(scaler_folder, 'base')
#weights_folder_debug_manual = os.path.join(weights_folder, 'debug_manual2')
#last_training_log = self.get_training_log()[-1]
#initial_epoch = int(last_training_log['epochs'][-1])+1
#self.train_model(folds_data=folds_data,folds_data_test=folds_data_test,folds_files=folds_files,scaler_path=scaler_path,
# epochs=epochs,learning_rate=learning_rate,batch_size=batch_size,fit_verbose=1,convert_audio_dict=convert_audio_dict,graph=graph,initial_epoch=initial_epoch)
return [self.figure, self.fig_weigths]
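    # Load-weights / evaluate callback: whichever button was clicked last wins;
    # evaluation reports accuracy on the validation fold.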
def btn_load(self,n_clicks_timestamp,n_clicks_timestamp2):
if n_clicks_timestamp > n_clicks_timestamp2:
#self.load_weights(weights_folder_debug_manual)
return "TODO"#"Weights loaded from " + weights_folder_debug_manual
elif n_clicks_timestamp2 > n_clicks_timestamp:
acc = self.eval_model()
return "Accuracy in fold {:s}: {:f}".format(self.fold_val, acc)
else:
return ""
def click_delete(self,selectedData):
#msg = 'Button 1 was most recently clicked'
point = np.array([selectedData['points'][0]['x'],selectedData['points'][0]['y']])
dist_protos = self.model_containers[self.fold_test].prototypes.get_distances(point,components=(self.x_select,self.y_select))
dist_data = self.model_containers[self.fold_test].data_instances.get_distances(point,components=(self.x_select,self.y_select))
#print(np.amin(dist_data),np.amin(dist_protos))
        if np.amin(dist_data) <= np.amin(dist_protos):  # closer to a data instance (k-means center) than to any prototype
arg_dist = np.argmin(dist_data)
(center_feat,center_mel,center_2D,
center_class,_,center_file) = self.model_containers[self.fold_test].data_instances.remove_instance(arg_dist)
data, sr = librosa.core.load(center_file)
center_audio = {'data':data, 'sr': sr}
self.model_containers[self.fold_test].prototypes.add_instance(int(center_class),
center_mel,center_feat,
embedding2D=center_2D,audio=center_audio)
else:
arg_dist = np.argmin(dist_protos)
self.model_containers[self.fold_test].prototypes.remove_instance(arg_dist)
self.generate_figure2D()
self.generate_figure_weights()
return self.figure
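    # Restores the original prototypes and data instances for the current fold.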
def click_reset(self):
self.model_containers[self.fold_test].data_instances.reset()
self.model_containers[self.fold_test].prototypes.reset()
self.generate_figure2D()
self.generate_figure_weights()
return self.figure
def change_fold(self,fold_selected):
print('fold_selected', fold_selected)
self.fold_test = fold_selected
self.fold_val = get_fold_val(self.fold_test, self.fold_list)
print(self.fold_val)
self.generate_figure2D()
#self.generate_figure_training()
self.generate_figure_weights()
return self.figure
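    # Rebuilds the model from the (possibly edited) prototypes and evaluates it
    # on the validation fold using the stored scaler.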
def eval_model(self):
with self.graph.as_default():
self.update_model_to_prototypes()
scaler_path = os.path.join(self.exp_folder_input, self.fold_test, 'scaler.pickle')
scaler = load_pickle(scaler_path)
acc,_,_ = self.model_containers[self.fold_test].evaluate(self.data[self.fold_val]['X'],self.data[self.fold_val]['Y'], scaler)
return acc
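    # Re-instantiates the APNet model with the current number of prototypes and
    # copies the edited prototype embeddings and weights into the new layers.
    # NOTE: modelAPNet comes from the import commented out near the top of the file.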
def update_model_to_prototypes(self):
N_protos = self.model_containers[self.fold_test].prototypes.get_number_of_instances()
n_classes = len(self.label_list)
#self.model_containers[self.fold_test].model = debugg_model(self.model_containers[self.fold_test].model,N_protos,n_classes)
#self.model_containers[self.fold_test].model.get_layer('prototype_distances').set_weights([self.model_containers[self.fold_test].prototypes.embeddings])
#self.model_containers[self.fold_test].model.get_layer('mean').set_weights([self.model_containers[self.fold_test].prototypes.W_mean])
#self.model_containers[self.fold_test].model.get_layer('logits').set_weights([self.model_containers[self.fold_test].prototypes.W_dense])
n_frames_cnn,n_freq_cnn = self.model_containers[self.fold_test].prototypes.mel_spectrograms[0].shape
N_filters_last = self.model_containers[self.fold_test].model.get_layer('features').output_shape[-1]
model = modelAPNet(n_prototypes=N_protos, n_frames_cnn=n_frames_cnn, n_freq_cnn=n_freq_cnn, N_filters=[16,16,N_filters_last])
for layer in model.layers:
if len(layer.get_weights()) > 0:
if layer.name == 'prototype_distances':
model.get_layer(layer.name).set_weights([self.model_containers[self.fold_test].prototypes.embeddings])
elif layer.name == 'mean':
model.get_layer(layer.name).set_weights([self.model_containers[self.fold_test].prototypes.W_mean])
elif layer.name == 'logits':
model.get_layer(layer.name).set_weights([self.model_containers[self.fold_test].prototypes.W_dense])
elif layer.name == 'input':
continue
else:
model.get_layer(layer.name).set_weights(self.model_containers[self.fold_test].model.get_layer(layer.name).get_weights())
self.model_containers[self.fold_test].model = model
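    # Fine-tunes the rebuilt model, reloads the best weights, recomputes the
    # prototypes and data instances, and refreshes the figures.
    # NOTE: get_data_train / get_data_test come from the commented-out utils
    # import near the top of the file.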
def train_model(self):
with self.graph.as_default():
self.update_model_to_prototypes()
# paths
dataset = self.exp_folder_output.split("/")[-1] #TODO fix this
exp_folder_fold = os.path.join(self.exp_folder_output, self.fold_test)
weights_path = exp_folder_fold#os.path.join(exp_folder_fold, 'best_weights.hdf5')
log_path = os.path.join(exp_folder_fold, 'training.log')
scaler_path = os.path.join(self.exp_folder_input, self.fold_test, 'scaler.pickle')
params_model = self.params["models"]['APNet']
params_dataset = self.params["datasets"][dataset]
kwargs = self.params["train"]
if 'train_arguments' in params_model:
kwargs.update(params_model['train_arguments'])
kwargs.update({'init_last_layer': False})
# save model as json
self.model_containers[self.fold_test].save_model_json(exp_folder_fold)
X_train, Y_train, X_val, Y_val = get_data_train(self.data, self.fold_test, params_dataset["evaluation_mode"])
# HERE HAS TO BE DATA FOR TRAINING
#print(X_train.shape,Y_train.shape,X_val.shape,Y_val.shape)
scaler = load_pickle(scaler_path)
X_train = scaler.transform(X_train)
X_val = scaler.transform(X_val)
self.model_containers[self.fold_test].train(X_train, Y_train, X_val, Y_val,
weights_path=weights_path, log_path=log_path, **kwargs)
# load best_weights after training
self.model_containers[self.fold_test].model.load_weights(os.path.join(exp_folder_fold, 'best_weights.hdf5'))
# val_out_acc = history.history['val_out_acc']
# epochs = [i for i in range(initial_epoch,epochs+initial_epoch)]
# self.training_logs[self.fold_test][-1]['epochs'] = epochs
# self.training_logs[self.fold_test][-1]['val_acc'] = val_out_acc
# self.training_logs[self.fold_test][-1]['training'] = False
# #print(self.training_logs[self.fold_test][-1])
# #print(history.history)
print('Reloading the plot')
data_instances_path = os.path.join(exp_folder_fold, 'data_instances.pickle')
prototypes_path = os.path.join(exp_folder_fold, 'prototypes.pickle')
X_feat,X_train,Y_train,Files_names_train = get_data_test(self.model_containers[self.fold_test].model,self.data,self.fold_test,self.folds_files,scaler)
# TODO: data_centers[fold_test] = Data_centers(X_feat,X_train,Y_train,Files_names_train,n_classes=10,n_clusters=5)
mel_basis = np.load(os.path.join(params_dataset['feature_folder'], 'mel_basis.npy'))
convert_audio_params = {'sr': self.params['features']['sr'],
'scaler' : scaler,
'mel_basis' : mel_basis,
'audio_hop' : self.params['features']['audio_hop'],
'audio_win' : self.params['features']['audio_win']}
projection2D = self.model_containers[self.fold_test].data_instances.projection2D
self.model_containers[self.fold_test].get_prototypes(X_train,
convert_audio_params=convert_audio_params,
projection2D=projection2D)
data_instances_path = os.path.join(self.exp_folder_output, 'data_instances.pickle')
prototypes_path = os.path.join(self.exp_folder_output, 'prototypes.pickle')
save_pickle(self.model_containers[self.fold_test].data_instances, data_instances_path)
save_pickle(self.model_containers[self.fold_test].prototypes, prototypes_path)
self.generate_figure2D()
self.generate_figure_weights()
return self.figure
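    # Loads per-fold weights ('fold<fold_test>.hdf5') from weights_folder into
    # the current model.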
def load_weights(self, weights_folder=''):
weights_file = 'fold' + str(self.fold_test) + '.hdf5' #_{epoch:02d}
weights_path = os.path.join(weights_folder, weights_file)
self.model_containers[self.fold_test].model.load_weights(weights_path)
#scaler = load_scaler(scaler_path,self.fold_test)
def get_training_log(self):
return self.training_logs[self.fold_test]
def append_training_log(self,training_log_new):
self.training_logs[self.fold_test].append(training_log_new)
|
[
"dash_html_components.Button",
"dcase_models.util.files.save_pickle",
"numpy.array",
"plotly.graph_objects.layout.Title",
"dash_audio_components.DashAudioComponents",
"dash_html_components.Div",
"librosa.core.load",
"dash_html_components.Br",
"plotly.graph_objects.Scatter",
"numpy.argmin",
"plotly.express.imshow",
"tensorflow.get_default_graph",
"plotly.subplots.make_subplots",
"numpy.ones",
"numpy.amin",
"dash_core_components.Slider",
"matplotlib.pyplot.get_cmap",
"dash_core_components.Interval",
"dcase_models.util.gui.encode_audio",
"dcase_models.util.data.get_fold_val",
"os.path.join",
"plotly.graph_objects.Figure",
"dash_core_components.Dropdown",
"numpy.random.randint",
"dash_core_components.Graph",
"dash_core_components.Input",
"dcase_models.util.files.load_pickle"
] |
[((1746, 1769), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (1758, 1769), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4181), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(1)'}), '(rows=1, cols=1)\n', (4165, 4181), False, 'from plotly.subplots import make_subplots\n'), ((7168, 7231), 'plotly.express.imshow', 'px.imshow', (['model_container.prototypes.W_dense.T'], {'origin': '"""lower"""'}), "(model_container.prototypes.W_dense.T, origin='lower')\n", (7177, 7231), True, 'import plotly.express as px\n'), ((8536, 8573), 'plotly.express.imshow', 'px.imshow', (['mel_spec.T'], {'origin': '"""lower"""'}), "(mel_spec.T, origin='lower')\n", (8545, 8573), True, 'import plotly.express as px\n'), ((9779, 9823), 'dcase_models.util.data.get_fold_val', 'get_fold_val', (['self.fold_test', 'self.fold_list'], {}), '(self.fold_test, self.fold_list)\n', (9791, 9823), False, 'from dcase_models.util.data import get_fold_val\n'), ((10490, 10512), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10510, 10512), True, 'import tensorflow as tf\n'), ((10603, 10692), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot2D"""', 'figure': 'self.figure', 'style': "{'height': '100%', 'width': '100%'}"}), "(id='plot2D', figure=self.figure, style={'height': '100%', 'width':\n '100%'})\n", (10612, 10692), True, 'import dash_core_components as dcc\n'), ((11083, 11192), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot_weights"""', 'figure': 'self.fig_weigths', 'style': "{'width': '100%', 'display': 'inline-block'}"}), "(id='plot_weights', figure=self.fig_weigths, style={'width':\n '100%', 'display': 'inline-block'})\n", (11092, 11192), True, 'import dash_core_components as dcc\n'), ((11286, 11389), 'dash_audio_components.DashAudioComponents', 'dash_audio_components.DashAudioComponents', ([], {'id': '"""audio-player"""', 'src': '""""""', 'autoPlay': '(False)', 'controls': '(True)'}), "(id='audio-player', src='',\n autoPlay=False, controls=True)\n", (11327, 11389), False, 'import dash_audio_components\n'), ((11469, 11614), 'dash_html_components.Button', 'html.Button', (['"""Delete prototype"""'], {'id': '"""delete_and_convert"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'display': 'none', 'width': '70%'}"}), "('Delete prototype', id='delete_and_convert', className='button',\n n_clicks_timestamp=0, style={'display': 'none', 'width': '70%'})\n", (11480, 11614), True, 'import dash_html_components as html\n'), ((11627, 11737), 'dash_html_components.Button', 'html.Button', (['"""Evaluate model"""'], {'id': '"""eval"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Evaluate model', id='eval', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (11638, 11737), True, 'import dash_html_components as html\n'), ((11752, 11873), 'dash_html_components.Button', 'html.Button', (['"""Load best weigths"""'], {'id': '"""load_weigths"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Load best weigths', id='load_weigths', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (11763, 11873), True, 'import dash_html_components as html\n'), ((11889, 11997), 'dash_html_components.Button', 'html.Button', (['"""Train model"""'], {'id': '"""train"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Train model', 
id='train', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (11900, 11997), True, 'import dash_html_components as html\n'), ((12013, 12121), 'dash_html_components.Button', 'html.Button', (['"""Reset model"""'], {'id': '"""reset"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Reset model', id='reset', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (12024, 12121), True, 'import dash_html_components as html\n'), ((12136, 12186), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output_eval"""', 'style': "{'width': '20%'}"}), "(id='output_eval', style={'width': '20%'})\n", (12144, 12186), True, 'import dash_html_components as html\n'), ((12207, 12233), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output_text"""'}), "(id='output_text')\n", (12215, 12233), True, 'import dash_html_components as html\n'), ((12260, 12290), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output_interval"""'}), "(id='output_interval')\n", (12268, 12290), True, 'import dash_html_components as html\n'), ((12316, 12433), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input_epochs"""', 'type': '"""number"""', 'placeholder': '"""epochs"""', 'min': '(1)', 'max': '(100)', 'step': '(1)', 'style': "{'width': '33%'}"}), "(id='input_epochs', type='number', placeholder='epochs', min=1,\n max=100, step=1, style={'width': '33%'})\n", (12325, 12433), True, 'import dash_core_components as dcc\n'), ((12458, 12577), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""learning_rate"""', 'type': '"""number"""', 'placeholder': '"""learning_rate"""', 'min': '(1e-07)', 'max': '(1)', 'style': "{'width': '33%'}"}), "(id='learning_rate', type='number', placeholder='learning_rate',\n min=1e-07, max=1, style={'width': '33%'})\n", (12467, 12577), True, 'import dash_core_components as dcc\n'), ((12595, 12716), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""batch_size"""', 'type': '"""number"""', 'placeholder': '"""batch_size"""', 'min': '(32)', 'max': '(512)', 'step': '(32)', 'style': "{'width': '33%'}"}), "(id='batch_size', type='number', placeholder='batch_size', min=32,\n max=512, step=32, style={'width': '33%'})\n", (12604, 12716), True, 'import dash_core_components as dcc\n'), ((12881, 12952), 'dash_core_components.Interval', 'dcc.Interval', ([], {'id': '"""interval-component"""', 'interval': '(1 * 1000)', 'n_intervals': '(0)'}), "(id='interval-component', interval=1 * 1000, n_intervals=0)\n", (12893, 12952), True, 'import dash_core_components as dcc\n'), ((13169, 13267), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""fold_select"""', 'options': 'options', 'value': 'self.fold_test', 'style': "{'width': '85%'}"}), "(id='fold_select', options=options, value=self.fold_test, style\n ={'width': '85%'})\n", (13181, 13267), True, 'import dash_core_components as dcc\n'), ((13687, 13764), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""x_select"""', 'options': 'options', 'value': '(0)', 'style': "{'width': '80%'}"}), "(id='x_select', options=options, value=0, style={'width': '80%'})\n", (13699, 13764), True, 'import dash_core_components as dcc\n'), ((13781, 13858), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""y_select"""', 'options': 'options', 'value': '(1)', 'style': "{'width': '80%'}"}), "(id='y_select', options=options, value=1, style={'width': '80%'})\n", (13793, 13858), True, 'import dash_core_components as dcc\n'), ((13876, 13954), 
'dash_html_components.Div', 'html.Div', (['[button_eval, output_eval]'], {'style': "{'columnCount': 2, 'width': '50%'}"}), "([button_eval, output_eval], style={'columnCount': 2, 'width': '50%'})\n", (13884, 13954), True, 'import dash_html_components as html\n'), ((13971, 14039), 'dash_html_components.Div', 'html.Div', (['[input_epochs, input_lr, input_bs]'], {'style': "{'width': '70%'}"}), "([input_epochs, input_lr, input_bs], style={'width': '70%'})\n", (13979, 14039), True, 'import dash_html_components as html\n'), ((14097, 14162), 'dash_html_components.Div', 'html.Div', (['[fold_select]'], {'style': "{'columnCount': 3, 'width': '80%'}"}), "([fold_select], style={'columnCount': 3, 'width': '80%'})\n", (14105, 14162), True, 'import dash_html_components as html\n'), ((14185, 14264), 'dash_html_components.Div', 'html.Div', (['[button_load, button_reset]'], {'style': "{'columnCount': 2, 'width': '50%'}"}), "([button_load, button_reset], style={'columnCount': 2, 'width': '50%'})\n", (14193, 14264), True, 'import dash_html_components as html\n'), ((18476, 18505), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(1)'}), '(rows=1, cols=1)\n', (18489, 18505), False, 'from plotly.subplots import make_subplots\n'), ((20535, 20592), 'os.path.join', 'os.path.join', (['self.weights_folder', 'self.model_output_name'], {}), '(self.weights_folder, self.model_output_name)\n', (20547, 20592), False, 'import os\n'), ((24016, 24084), 'numpy.array', 'np.array', (["[hoverData['points'][0]['x'], hoverData['points'][0]['y']]"], {}), "([hoverData['points'][0]['x'], hoverData['points'][0]['y']])\n", (24024, 24084), True, 'import numpy as np\n'), ((25940, 25958), 'numpy.ones', 'np.ones', (['(64, 128)'], {}), '((64, 128))\n', (25947, 25958), True, 'import numpy as np\n'), ((32128, 32202), 'numpy.array', 'np.array', (["[selectedData['points'][0]['x'], selectedData['points'][0]['y']]"], {}), "([selectedData['points'][0]['x'], selectedData['points'][0]['y']])\n", (32136, 32202), True, 'import numpy as np\n'), ((33851, 33895), 'dcase_models.util.data.get_fold_val', 'get_fold_val', (['self.fold_test', 'self.fold_list'], {}), '(self.fold_test, self.fold_list)\n', (33863, 33895), False, 'from dcase_models.util.data import get_fold_val\n'), ((40732, 40774), 'os.path.join', 'os.path.join', (['weights_folder', 'weights_file'], {}), '(weights_folder, weights_file)\n', (40744, 40774), False, 'import os\n'), ((4802, 4990), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x[j]', 'y': 'y[j]', 'text': 'classes[j]', 'name': 'proto_list[j]', 'mode': '"""markers"""', 'selectedpoints': 'selectedpoints_j', 'marker': "{'size': size, 'symbol': 'cross', 'color': colors[j % 10]}"}), "(x=x[j], y=y[j], text=classes[j], name=proto_list[j], mode=\n 'markers', selectedpoints=selectedpoints_j, marker={'size': size,\n 'symbol': 'cross', 'color': colors[j % 10]})\n", (4812, 4990), True, 'import plotly.graph_objects as go\n'), ((5097, 5304), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_centers[j][:s]', 'y': 'y_centers[j][:s]', 'text': 'classes_centers[j][:s]', 'name': 'label_list[j]', 'selectedpoints': 'None', 'mode': '"""markers"""', 'marker': "{'size': 5, 'color': colors[j % 10], 'opacity': 0.6}"}), "(x=x_centers[j][:s], y=y_centers[j][:s], text=classes_centers[j][\n :s], name=label_list[j], selectedpoints=None, mode='markers', marker={\n 'size': 5, 'color': colors[j % 10], 'opacity': 0.6})\n", (5107, 5304), True, 'import plotly.graph_objects as go\n'), ((7610, 7641), 
'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[selected]', 'y': '[1]'}), '(x=[selected], y=[1])\n', (7620, 7641), True, 'import plotly.graph_objects as go\n'), ((12756, 12844), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""samples_per_class"""', 'min': '(1)', 'max': '(500)', 'step': '(1)', 'value': '(10)', 'vertical': '(False)'}), "(id='samples_per_class', min=1, max=500, step=1, value=10,\n vertical=False)\n", (12766, 12844), True, 'import dash_core_components as dcc\n'), ((21550, 21570), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data'}), '(data=data)\n', (21559, 21570), True, 'import plotly.graph_objects as go\n'), ((21869, 21905), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': "{'x': [0], 'y': [0]}"}), "(data={'x': [0], 'y': [0]})\n", (21878, 21905), True, 'import plotly.graph_objects as go\n'), ((22283, 22372), 'plotly.express.imshow', 'px.imshow', (['self.model_containers[self.fold_test].prototypes.W_dense.T'], {'origin': '"""lower"""'}), "(self.model_containers[self.fold_test].prototypes.W_dense.T,\n origin='lower')\n", (22292, 22372), True, 'import plotly.express as px\n'), ((23568, 23605), 'plotly.express.imshow', 'px.imshow', (['mel_spec.T'], {'origin': '"""lower"""'}), "(mel_spec.T, origin='lower')\n", (23577, 23605), True, 'import plotly.express as px\n'), ((24420, 24438), 'numpy.amin', 'np.amin', (['dist_data'], {}), '(dist_data)\n', (24427, 24438), True, 'import numpy as np\n'), ((24442, 24462), 'numpy.amin', 'np.amin', (['dist_protos'], {}), '(dist_protos)\n', (24449, 24462), True, 'import numpy as np\n'), ((24505, 24525), 'numpy.argmin', 'np.argmin', (['dist_data'], {}), '(dist_data)\n', (24514, 24525), True, 'import numpy as np\n'), ((25692, 25714), 'numpy.argmin', 'np.argmin', (['dist_protos'], {}), '(dist_protos)\n', (25701, 25714), True, 'import numpy as np\n'), ((26017, 26085), 'numpy.array', 'np.array', (["[clickData['points'][0]['x'], clickData['points'][0]['y']]"], {}), "([clickData['points'][0]['x'], clickData['points'][0]['y']])\n", (26025, 26085), True, 'import numpy as np\n'), ((32537, 32555), 'numpy.amin', 'np.amin', (['dist_data'], {}), '(dist_data)\n', (32544, 32555), True, 'import numpy as np\n'), ((32559, 32579), 'numpy.amin', 'np.amin', (['dist_protos'], {}), '(dist_protos)\n', (32566, 32579), True, 'import numpy as np\n'), ((32622, 32642), 'numpy.argmin', 'np.argmin', (['dist_data'], {}), '(dist_data)\n', (32631, 32642), True, 'import numpy as np\n'), ((32839, 32869), 'librosa.core.load', 'librosa.core.load', (['center_file'], {}), '(center_file)\n', (32856, 32869), False, 'import librosa\n'), ((33228, 33250), 'numpy.argmin', 'np.argmin', (['dist_protos'], {}), '(dist_protos)\n', (33237, 33250), True, 'import numpy as np\n'), ((34203, 34271), 'os.path.join', 'os.path.join', (['self.exp_folder_input', 'self.fold_test', '"""scaler.pickle"""'], {}), "(self.exp_folder_input, self.fold_test, 'scaler.pickle')\n", (34215, 34271), False, 'import os\n'), ((34293, 34317), 'dcase_models.util.files.load_pickle', 'load_pickle', (['scaler_path'], {}), '(scaler_path)\n', (34304, 34317), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((36739, 36791), 'os.path.join', 'os.path.join', (['self.exp_folder_output', 'self.fold_test'], {}), '(self.exp_folder_output, self.fold_test)\n', (36751, 36791), False, 'import os\n'), ((36911, 36956), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""training.log"""'], {}), "(exp_folder_fold, 'training.log')\n", (36923, 36956), False, 'import os\n'), 
((36984, 37052), 'os.path.join', 'os.path.join', (['self.exp_folder_input', 'self.fold_test', '"""scaler.pickle"""'], {}), "(self.exp_folder_input, self.fold_test, 'scaler.pickle')\n", (36996, 37052), False, 'import os\n'), ((37777, 37801), 'dcase_models.util.files.load_pickle', 'load_pickle', (['scaler_path'], {}), '(scaler_path)\n', (37788, 37801), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((38845, 38899), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""data_instances.pickle"""'], {}), "(exp_folder_fold, 'data_instances.pickle')\n", (38857, 38899), False, 'import os\n'), ((38930, 38980), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""prototypes.pickle"""'], {}), "(exp_folder_fold, 'prototypes.pickle')\n", (38942, 38980), False, 'import os\n'), ((40123, 40184), 'os.path.join', 'os.path.join', (['self.exp_folder_output', '"""data_instances.pickle"""'], {}), "(self.exp_folder_output, 'data_instances.pickle')\n", (40135, 40184), False, 'import os\n'), ((40215, 40272), 'os.path.join', 'os.path.join', (['self.exp_folder_output', '"""prototypes.pickle"""'], {}), "(self.exp_folder_output, 'prototypes.pickle')\n", (40227, 40272), False, 'import os\n'), ((40286, 40376), 'dcase_models.util.files.save_pickle', 'save_pickle', (['self.model_containers[self.fold_test].data_instances', 'data_instances_path'], {}), '(self.model_containers[self.fold_test].data_instances,\n data_instances_path)\n', (40297, 40376), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((40385, 40463), 'dcase_models.util.files.save_pickle', 'save_pickle', (['self.model_containers[self.fold_test].prototypes', 'prototypes_path'], {}), '(self.model_containers[self.fold_test].prototypes, prototypes_path)\n', (40396, 40463), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((22556, 22587), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[selected]', 'y': '[1]'}), '(x=[selected], y=[1])\n', (22566, 22587), True, 'import plotly.graph_objects as go\n'), ((26410, 26428), 'numpy.amin', 'np.amin', (['dist_data'], {}), '(dist_data)\n', (26417, 26428), True, 'import numpy as np\n'), ((26432, 26452), 'numpy.amin', 'np.amin', (['dist_protos'], {}), '(dist_protos)\n', (26439, 26452), True, 'import numpy as np\n'), ((26499, 26519), 'numpy.argmin', 'np.argmin', (['dist_data'], {}), '(dist_data)\n', (26508, 26519), True, 'import numpy as np\n'), ((26905, 26935), 'librosa.core.load', 'librosa.core.load', (['center_file'], {}), '(center_file)\n', (26922, 26935), False, 'import librosa\n'), ((27240, 27262), 'numpy.argmin', 'np.argmin', (['dist_protos'], {}), '(dist_protos)\n', (27249, 27262), True, 'import numpy as np\n'), ((38257, 38307), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""best_weights.hdf5"""'], {}), "(exp_folder_fold, 'best_weights.hdf5')\n", (38269, 38307), False, 'import os\n'), ((39304, 39367), 'os.path.join', 'os.path.join', (["params_dataset['feature_folder']", '"""mel_basis.npy"""'], {}), "(params_dataset['feature_folder'], 'mel_basis.npy')\n", (39316, 39367), False, 'import os\n'), ((7263, 7298), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar Chart"""'}), "(text='A Bar Chart')\n", (7278, 7298), True, 'import plotly.graph_objects as go\n'), ((7813, 7841), 'numpy.array', 'np.array', (['prototypes_classes'], {}), '(prototypes_classes)\n', (7821, 7841), True, 'import numpy as np\n'), ((8596, 8631), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar 
Chart"""'}), "(text='A Bar Chart')\n", (8611, 8631), True, 'import plotly.graph_objects as go\n'), ((18775, 18926), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x[j]', 'y': 'y[j]', 'text': 'classes[j]', 'name': 'self.label_list[j]', 'mode': '"""markers"""', 'marker': "{'size': size, 'symbol': 'cross', 'color': colors[j]}"}), "(x=x[j], y=y[j], text=classes[j], name=self.label_list[j], mode=\n 'markers', marker={'size': size, 'symbol': 'cross', 'color': colors[j]})\n", (18785, 18926), True, 'import plotly.graph_objects as go\n'), ((18962, 19147), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_centers[j][:s]', 'y': 'y_centers[j][:s]', 'text': 'classes_centers[j][:s]', 'name': 'self.label_list[j]', 'mode': '"""markers"""', 'marker': "{'size': 6, 'color': colors[j], 'opacity': 0.7}"}), "(x=x_centers[j][:s], y=y_centers[j][:s], text=classes_centers[j][\n :s], name=self.label_list[j], mode='markers', marker={'size': 6,\n 'color': colors[j], 'opacity': 0.7})\n", (18972, 19147), True, 'import plotly.graph_objects as go\n'), ((19559, 19747), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x[j]', 'y': 'y[j]', 'text': 'classes[j]', 'name': 'self.label_list[j]', 'mode': '"""markers"""', 'selectedpoints': 'selectedpoints_j', 'marker': "{'size': size, 'symbol': 'cross', 'color': colors[j]}"}), "(x=x[j], y=y[j], text=classes[j], name=self.label_list[j], mode=\n 'markers', selectedpoints=selectedpoints_j, marker={'size': size,\n 'symbol': 'cross', 'color': colors[j]})\n", (19569, 19747), True, 'import plotly.graph_objects as go\n'), ((19778, 19983), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_centers[j][:s]', 'y': 'y_centers[j][:s]', 'text': 'classes_centers[j][:s]', 'name': 'self.label_list[j]', 'selectedpoints': '[]', 'mode': '"""markers"""', 'marker': "{'size': 6, 'color': colors[j], 'opacity': 0.7}"}), "(x=x_centers[j][:s], y=y_centers[j][:s], text=classes_centers[j][\n :s], name=self.label_list[j], selectedpoints=[], mode='markers', marker\n ={'size': 6, 'color': colors[j], 'opacity': 0.7})\n", (19788, 19983), True, 'import plotly.graph_objects as go\n'), ((24792, 24834), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(100, 100)'}), '(0, 255, size=(100, 100))\n', (24809, 24834), True, 'import numpy as np\n'), ((14504, 14634), 'dash_html_components.Div', 'html.Div', (['[x_select]'], {'className': '"""two columns"""', 'style': "{'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}"}), "([x_select], className='two columns', style={'display': 'flex',\n 'align-items': 'center', 'justify-content': 'center'})\n", (14512, 14634), True, 'import dash_html_components as html\n'), ((14647, 14777), 'dash_html_components.Div', 'html.Div', (['[y_select]'], {'className': '"""two columns"""', 'style': "{'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}"}), "([y_select], className='two columns', style={'display': 'flex',\n 'align-items': 'center', 'justify-content': 'center'})\n", (14655, 14777), True, 'import dash_html_components as html\n'), ((14790, 14928), 'dash_html_components.Div', 'html.Div', (['[slider_samples]'], {'className': '"""three columns"""', 'style': "{'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}"}), "([slider_samples], className='three columns', style={'display':\n 'flex', 'align-items': 'center', 'justify-content': 'center'})\n", (14798, 14928), True, 'import dash_html_components as html\n'), ((15092, 15162), 'dash_html_components.Div', 'html.Div', 
(['[plot2D]'], {'className': '"""nine columns"""', 'style': "{'height': '80vh'}"}), "([plot2D], className='nine columns', style={'height': '80vh'})\n", (15100, 15162), True, 'import dash_html_components as html\n'), ((15899, 15948), 'dash_html_components.Div', 'html.Div', (['[plot_weights]'], {'className': '"""six columns"""'}), "([plot_weights], className='six columns')\n", (15907, 15948), True, 'import dash_html_components as html\n'), ((22391, 22426), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar Chart"""'}), "(text='A Bar Chart')\n", (22406, 22426), True, 'import plotly.graph_objects as go\n'), ((22784, 22812), 'numpy.array', 'np.array', (['prototypes_classes'], {}), '(prototypes_classes)\n', (22792, 22812), True, 'import numpy as np\n'), ((23628, 23663), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar Chart"""'}), "(text='A Bar Chart')\n", (23643, 23663), True, 'import plotly.graph_objects as go\n'), ((27019, 27041), 'dcase_models.util.gui.encode_audio', 'encode_audio', (['data', 'sr'], {}), '(data, sr)\n', (27031, 27041), False, 'from dcase_models.util.gui import encode_audio\n'), ((27661, 27713), 'dcase_models.util.gui.encode_audio', 'encode_audio', (["proto_audio['data']", "proto_audio['sr']"], {}), "(proto_audio['data'], proto_audio['sr'])\n", (27673, 27713), False, 'from dcase_models.util.gui import encode_audio\n'), ((15198, 15207), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15205, 15207), True, 'import dash_html_components as html\n'), ((15214, 15223), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15221, 15223), True, 'import dash_html_components as html\n'), ((15238, 15247), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15245, 15247), True, 'import dash_html_components as html\n'), ((15260, 15269), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15267, 15269), True, 'import dash_html_components as html\n'), ((15282, 15291), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15289, 15291), True, 'import dash_html_components as html\n'), ((15304, 15313), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15311, 15313), True, 'import dash_html_components as html\n'), ((15356, 15365), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15363, 15365), True, 'import dash_html_components as html\n'), ((15379, 15388), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15386, 15388), True, 'import dash_html_components as html\n'), ((15399, 15408), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15406, 15408), True, 'import dash_html_components as html\n'), ((15438, 15447), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15445, 15447), True, 'import dash_html_components as html\n'), ((15522, 15531), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15529, 15531), True, 'import dash_html_components as html\n'), ((15532, 15541), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15539, 15541), True, 'import dash_html_components as html\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# documentation
# https://matplotlib.org/3.1.3/api/pyplot_summary.html
# scatter plot
x = np.random.randint(100, size=(100))
y = np.random.randint(100, size=(100))
plt.scatter(x, y, c='tab:blue', label='stuff')
plt.legend(loc=2)
# plt.show()
# line plot
x = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
y = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.plot(x, y, c='tab:green', label='aaa')
plt.plot(x, y, "-o", c='tab:green', label='aaa') # plot with dots
# plt.show()
# bar chart
x = np.arange(3)
plt.bar(x, height=[1,2,3])
plt.xticks(x, ['a','b','c'])
plt.ylabel('y')
plt.xlabel('x')
# plt.show()
# subplots (pie chart and histogram)
arr_pie = np.array([40,30,70])
arr_pie_labels = ["a","b","c"]
arr_hst = np.random.normal(size=1000)
fig1, axs = plt.subplots(2)
axs[0].pie(arr_pie, labels=arr_pie_labels)
axs[0].title.set_text("pie chart")
axs[1].hist(arr_hst, bins=30)
axs[1].title.set_text("histogram")
# plt.show()
|
[
"numpy.random.normal",
"matplotlib.pyplot.xticks",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.random.randint",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] |
[((145, 177), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(100)'}), '(100, size=100)\n', (162, 177), True, 'import numpy as np\n'), ((184, 216), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(100)'}), '(100, size=100)\n', (201, 216), True, 'import numpy as np\n'), ((219, 265), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""tab:blue"""', 'label': '"""stuff"""'}), "(x, y, c='tab:blue', label='stuff')\n", (230, 265), True, 'import matplotlib.pyplot as plt\n'), ((266, 283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (276, 283), True, 'import matplotlib.pyplot as plt\n'), ((316, 367), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]'], {}), '([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n', (324, 367), True, 'import numpy as np\n'), ((372, 423), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]'], {}), '([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n', (380, 423), True, 'import numpy as np\n'), ((424, 466), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'c': '"""tab:green"""', 'label': '"""aaa"""'}), "(x, y, c='tab:green', label='aaa')\n", (432, 466), True, 'import matplotlib.pyplot as plt\n'), ((467, 515), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-o"""'], {'c': '"""tab:green"""', 'label': '"""aaa"""'}), "(x, y, '-o', c='tab:green', label='aaa')\n", (475, 515), True, 'import matplotlib.pyplot as plt\n'), ((564, 576), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (573, 576), True, 'import numpy as np\n'), ((577, 605), 'matplotlib.pyplot.bar', 'plt.bar', (['x'], {'height': '[1, 2, 3]'}), '(x, height=[1, 2, 3])\n', (584, 605), True, 'import matplotlib.pyplot as plt\n'), ((604, 634), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "['a', 'b', 'c']"], {}), "(x, ['a', 'b', 'c'])\n", (614, 634), True, 'import matplotlib.pyplot as plt\n'), ((633, 648), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (643, 648), True, 'import matplotlib.pyplot as plt\n'), ((649, 664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (659, 664), True, 'import matplotlib.pyplot as plt\n'), ((727, 749), 'numpy.array', 'np.array', (['[40, 30, 70]'], {}), '([40, 30, 70])\n', (735, 749), True, 'import numpy as np\n'), ((790, 817), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (806, 817), True, 'import numpy as np\n'), ((831, 846), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (843, 846), True, 'import matplotlib.pyplot as plt\n')]
|
from . import core
import io
import re
import requests
import pytz
import time
import datetime as dt
import dateutil.parser as du
import numpy as np
import pandas as pd
from typing import Tuple, Dict, List, Union, ClassVar, Any, Optional, Type
import types
class AccessModeInQuery(core.API):
# Enumeration class to list available API access modes.
NONE = 'n/a';
DOWNLOAD = 'download';
CHART = 'chart';
DEFAULT = 'download';
class EventsInQuery(core.API):
"""
    Enumeration class to list the 'events' that can be requested.
"""
NONE = '';
HISTORY = 'history';
DIVIDENDS = 'div';
SPLITS = 'split';
class Query():
"""
Class that encodes the request parameters into a query.
It provides methods to set such parameters
    as well as to validate them against the arguments expected by the Yahoo Finance API.
"""
__events__:ClassVar[List[str]] = ["history", "split", "div"];
__chart_range__:ClassVar[List[str]] = ["1d", "5d", "1mo", "3mo", "6mo", "1y", "2y", "5y", "10y", "ytd", "max"];
__chart_interval__:ClassVar[List[str]] = ["1m", "2m", "5m", "15m", "30m", "60m", "90m", "1h", "1d", "5d", "1wk", "1mo", "3mo"];
__download_frequency__:ClassVar[List[str]] = ["1d", "1wk", "1mo"];
def __init__(self, using_api:Type[AccessModeInQuery]):
self.query:Dict[str,Optional[str]] = {};
self.__api__:AccessModeInQuery = using_api;
def __str__(self):
return "&".join([f"{param}={value}" for param, value in self.query.items() if value is not None]) if len(self.query)>0 else "";
def __len__(self):
return len(self.query);
def __bool__(self):
return True if len(self.query)>0 else False;
def SetEvents(self, events:Type[EventsInQuery]) -> None:
if not isinstance(events, EventsInQuery):
self.query['events'] = None;
raise TypeError(f"invalid type for the argument 'events'; <class 'EventsInQuery'> expected, got {type(events)}");
else:
if self.__api__ is AccessModeInQuery.CHART:
self.query['events'] = events if events not in [EventsInQuery.HISTORY, EventsInQuery.NONE] else None;
elif self.__api__ is AccessModeInQuery.DOWNLOAD:
self.query['events'] = events if events is not EventsInQuery.NONE else str(EventsInQuery.HISTORY);
else:
self.query['events'] = None;
raise ValueError(f"value of argument 'events' is not compatible with the given API '{str(self.__api__)}'");
def SetInterval(self, interval:str) -> None:
if not isinstance(interval, str):
self.query['interval'] = None;
raise TypeError(f"invalid type for the argument 'interval'; {type(str)} expected, got {type(interval)}");
else:
if (self.__api__ is AccessModeInQuery.CHART and interval in self.__chart_interval__) \
or (self.__api__ is AccessModeInQuery.DOWNLOAD and interval in self.__download_frequency__):
self.query['interval'] = interval;
else:
self.query['interval'] = None;
raise ValueError(f"value of argument 'interval' is not compatible with the given API '{str(self.__api__)}'");
def SetPeriod(self, period:Union[str,dt.datetime,List[Union[int,dt.datetime]]]) -> None:
        if isinstance(period,list) and len(period) == 2 and all(isinstance(p,int) or isinstance(p,dt.datetime) or isinstance(p,str) for p in period):
self.query['period1'], self.query['period2'] = self.__parse_periods__(*(period));
elif isinstance(period,str):
if self.__api__ is AccessModeInQuery.CHART and period in self.__chart_range__:
self.query['range'] = period;
else:
raise ValueError(f"value of argument 'period' is not compatible with the given API '{str(self.__api__)}'");
elif isinstance(period,dt.datetime):
self.query['period1'], self.query['period2'] = self.__parse_periods__(period,period);
else:
self.query['period1'], self.query['period2'], self.query['range'] = None, None, None;
raise TypeError(f"invalid type for the argument 'period'; {type(str)} or {type(dt.datetime)} or a list of either {type(int)} or {type(dt.datetime)} expected, got {type(period)}");
@classmethod
def __parse_periods__(cls, value1:Union[dt.datetime,int,str], value2:Union[dt.datetime,int,str]) -> Tuple[int,int]:
        # Note that the earliest date that can be taken into consideration is platform-dependent.
# For compatibility reasons, we do not accept timestamps prior to epoch time 0.
if isinstance(value1,str):
try:
period1 = int(du.isoparse(value1).timestamp());
except (OSError,OverflowError):
period1 = 0;
else:
period1 = max(0,(int(time.mktime(value1.timetuple())))) if isinstance(value1, dt.datetime) else max(0,value1);
if value1==value2:
            period2 = period1;
elif isinstance(value2,str):
try:
period2 = int(du.isoparse(value2).timestamp());
except (OSError,OverflowError):
period2 = dt.datetime.now().timestamp();
else:
period2 = max(period1,int(time.mktime(value2.timetuple()))) if isinstance(value2, dt.datetime) else max(period1,value2);
return period1, period2
class Response:
"""
Class to parse and process responses sent back by the Yahoo Finance API.
    Use the 'Parse()' method to correctly retrieve data structures in accordance with the chosen 'AccessModeInQuery' API.
"""
def __init__(self, input:Type[requests.models.Response]):
self.__format__:str = "";
self.__error__:Optional[Dict[str, str]] = None;
self.__meta__:Optional[Dict[str, Union[str, int, float]]] = None;
self.__timestamps__:Optional[List[dt.datetime]] = None;
self.__quotes__:Optional[pd.DataFrame] = None;
self.__events__:Optional[pd.DataFrame] = None;
self.__data__:Optional[Union[pd.DataFrame,dict]] = None;
def is_json() -> bool:
nonlocal input;
try:
input = input.json(parse_float=float, parse_int=int);
except ValueError :
return False
else:
return True
if is_json():
            if 'chart' in input.keys():
self.__format__ = 'chart';
if 'error' in input['chart'].keys():
self.__error__ = self.__response_parser__(input['chart']['error']);
if self.__error__ is None:
data = input['chart']['result'][0];
self.__error__ = {'code':"ok", 'description':"success!"};
self.__meta__ = self.__response_parser__(data['meta']);
self.__timestamps__ = pd.DatetimeIndex(list( map(dt.datetime.utcfromtimestamp, sorted(data['timestamp']))), name=f"Date ({pytz.utc})");
self.__quotes__ = pd.DataFrame({
'Open' : np.array(data['indicators']['quote'][0]['open']),
'High' : np.array(data['indicators']['quote'][0]['high']),
'Low' : np.array(data['indicators']['quote'][0]['low']),
'Close' : np.array(data['indicators']['quote'][0]['close']),
'Adj Close': np.array(data['indicators']['adjclose'][0]['adjclose'])
if 'adjclose' in data['indicators'].keys()
else np.full(len(data['indicators']['quote'][0]['close']),np.NaN),
'Volume' : np.array(data['indicators']['quote'][0]['volume'])},
index=self.__timestamps__);
if 'events' in data.keys():
index = list();
entries = list();
columns = list();
if 'splits' in data['events'].keys():
for split in data['events']['splits'].values():
index.append(split['date']);
entries.append([split['numerator'], split['denominator'], split['denominator']/split['numerator']]);
columns=['From', 'To', 'Split Ratio'];
elif 'dividends' in data['events'].keys():
for dividend in data['events']['dividends'].values():
index.append(dividend['date']);
entries.append(dividend['amount']);
columns=['Dividends'];
index = pd.DatetimeIndex(list(map(lambda ts: dt.datetime.utcfromtimestamp(ts).date(),sorted(index))), name=f"Date ({pytz.utc})");
self.__events__ = pd.DataFrame(entries,index=index,columns=columns);
elif 'finance' in input.keys():
self.__format__ = 'finance';
if 'error' in input['finance'].keys():
self.__error__ = self.__response_parser__(input['finance']['error']);
if self.__error__ is None:
self.__data__ = self.__response_parser__(input['finance']);
else:
self.__format__ = 'finance';
self.__error__ = {'code':"ok", 'description':"success!"};
self.__data__ = pd.read_csv(io.StringIO(input.text),index_col=0,parse_dates=True).sort_index();
def Parse(self) -> Dict[str,Any]:
if self.__format__ == 'chart':
return {'api':'chart', 'meta':self.__meta__, 'quotes':self.__quotes__, 'events':self.__events__, 'error':self.__error__};
elif self.__format__ == 'finance':
return {'api':'download', 'data':self.__data__, 'error':self.__error__};
else:
return {'api': 'unknown', 'error':{'code':"0", 'description':"invalid API"} };
@classmethod
def __response_parser__(cls, d:Any) -> Any:
if d is "null":
return None
elif isinstance(d,dict):
return {key:cls.__response_parser__(value) for key, value in d.items()};
elif isinstance(d,list):
try:
return list(map(float, d));
except :
return d;
elif isinstance(d,str):
try:
return float(d);
except:
return d;
else:
return d
class Session:
"""
    A lower-level class that explicitly requests data from Yahoo Finance via HTTP.
    It provides two 'public' methods:
    - With(...): to set the preferred access mode;
    - Get(...): to explicitly push a request to Yahoo.
    It implements a recursive call to the HTTP 'GET' method in case of failure.
    The maximum number of attempts is hard-coded to 10.
"""
__yahoo_finance_url__:str = "";
__yahoo_finance_api__:Type[AccessModeInQuery] = AccessModeInQuery.NONE;
def __init__(self):
self.__last_time_checked__ : dt.datetime;
self.__cookies__ : Type[requests.cookies.RequestsCookieJar];
self.__crumb__ : str;
@classmethod
def With(cls, this_api:Type[AccessModeInQuery]) -> 'Session':
if not isinstance(this_api,AccessModeInQuery):
raise TypeError(f"invalid type for the argument 'this_api'; <class 'AccessModeInQuery'> expected, got {type(this_api)}.");
else:
cls.__set_api__(this_api);
cls.__set_url__();
session = cls();
session.__start__();
return session;
@classmethod
def __set_url__(cls) -> None:
if cls.__yahoo_finance_api__ is not AccessModeInQuery.NONE:
cls.__yahoo_finance_url__ = f"https://query1.finance.yahoo.com/v7/finance/{cls.__yahoo_finance_api__}/";
else:
raise UnboundLocalError("session's api has not been set yet");
@classmethod
def __set_api__(cls, input_api:Type[AccessModeInQuery]=AccessModeInQuery.DEFAULT) -> None:
if cls.__yahoo_finance_api__ is not input_api:
cls.__yahoo_finance_api__ = input_api if input_api is not AccessModeInQuery.NONE else AccessModeInQuery.DEFAULT;
#else:
# print(f"*INFO: the session 'api' was already '{input_api}'.");
def __start__(self) -> None:
r = requests.get('https://finance.yahoo.com/quote/SPY/history');
self.__cookies__ = requests.cookies.cookiejar_from_dict({'B': r.cookies['B']});
pattern = re.compile(r'.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}');
for line in r.text.splitlines():
crumb_match = pattern.match(line)
if crumb_match is not None:
self.__crumb__ = crumb_match.groupdict()['crumb'];
break;
self.__last_time_checked__ = dt.datetime.now();
def __restart__(self) -> None:
self.__abandon__();
self.__start__();
def __refresh__(self, force:bool=False) -> None:
if force:
self.__restart__();
else:
if self.__last_time_checked__ is not None:
current_time = dt.datetime.now()
delta_secs = (current_time - self.__last_time_checked__).total_seconds()
if delta_secs > 300: # 300 = 5 minutes
self.__restart__();
def __abandon__(self) -> None:
self.__cookies__ = None;
self.__crumb__ = "";
self.__last_time_checked__ = None;
def Get(self, ticker:str, params:Type[Query], attempt:int=0, timeout:int=10, last_error:str="") -> Tuple[bool, dict]:
if not isinstance(ticker,str):
raise TypeError(f"invalid type for the argument 'ticker'! {type(str)} expected; got {type(ticker)}");
if not isinstance(params, Query):
raise TypeError(f"invalid type for the argument 'params'! <class 'Query'> expected; got {type(params)}");
if attempt<10:
query = f"?{str(params)}&crumb={self.__crumb__}" if params else f"?crumb={self.__crumb__}";
url = self.__yahoo_finance_url__ + ticker + query;
try:
response = requests.get(url, cookies=self.__cookies__)
response.raise_for_status();
except requests.HTTPError as e:
if response.status_code in [408, 409, 429]:
time.sleep(timeout);
self.__refresh__();
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+1,last_error=str(e))
elif response.status_code in [401, 404, 422]:
r = Response(response).Parse();
if r['error']['description'] == "Invalid cookie":
self.__refresh__(force=True);
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+5,last_error=r['error']['description'])
else:
return True, dict({'code': r['error']['code'], 'description': f"{r['error']['description']} (attempt: {attempt})"});
else :
m = re.match(r'^(?P<code>\d{3})\s?\w*\s?Error\s?:\s?(?P<description>.+)$', str(e));
return True, dict({'code': m['code'], 'description': f"{m['description']} (attempt: {attempt})"});
except requests.Timeout as e:
time.sleep(timeout);
self.__refresh__();
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+1,last_error=str(e))
except requests.RequestException as e:
if re.search(r"^\s*Invalid\s?URL", str(e)):
time.sleep(timeout);
self.__refresh__();
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+1,last_error=str(e));
else:
return True, dict({'code': "-1", 'description': f"{str(e)} (attempt: {attempt})"});
else:
r = Response(response).Parse();
                if r['error'] is not None and r['error']['code'] != "ok":
return True, dict({'code': r['error']['code'], 'description': f"{r['error']['description']} (attempt: {attempt})"});
else:
return False, r;
else:
return True, dict({'code': "-2", 'description': "{}\nThe maximum number of attempts has been exceeded!".format(last_error)});
|
[
"datetime.datetime.utcfromtimestamp",
"requests.cookies.cookiejar_from_dict",
"dateutil.parser.isoparse",
"re.compile",
"time.sleep",
"requests.get",
"datetime.datetime.now",
"numpy.array",
"pandas.DataFrame",
"io.StringIO"
] |
[((12645, 12704), 'requests.get', 'requests.get', (['"""https://finance.yahoo.com/quote/SPY/history"""'], {}), "('https://finance.yahoo.com/quote/SPY/history')\n", (12657, 12704), False, 'import requests\n'), ((12733, 12792), 'requests.cookies.cookiejar_from_dict', 'requests.cookies.cookiejar_from_dict', (["{'B': r.cookies['B']}"], {}), "({'B': r.cookies['B']})\n", (12769, 12792), False, 'import requests\n'), ((12812, 12873), 're.compile', 're.compile', (['""".*"CrumbStore":\\\\{"crumb":"(?P<crumb>[^"]+)"\\\\}"""'], {}), '(\'.*"CrumbStore":\\\\{"crumb":"(?P<crumb>[^"]+)"\\\\}\')\n', (12822, 12873), False, 'import re\n'), ((13128, 13145), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (13143, 13145), True, 'import datetime as dt\n'), ((13445, 13462), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (13460, 13462), True, 'import datetime as dt\n'), ((14458, 14501), 'requests.get', 'requests.get', (['url'], {'cookies': 'self.__cookies__'}), '(url, cookies=self.__cookies__)\n', (14470, 14501), False, 'import requests\n'), ((15674, 15693), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (15684, 15693), False, 'import time\n'), ((9116, 9167), 'pandas.DataFrame', 'pd.DataFrame', (['entries'], {'index': 'index', 'columns': 'columns'}), '(entries, index=index, columns=columns)\n', (9128, 9167), True, 'import pandas as pd\n'), ((9690, 9713), 'io.StringIO', 'io.StringIO', (['input.text'], {}), '(input.text)\n', (9701, 9713), False, 'import io\n'), ((14671, 14690), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (14681, 14690), False, 'import time\n'), ((15963, 15982), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (15973, 15982), False, 'import time\n'), ((4874, 4893), 'dateutil.parser.isoparse', 'du.isoparse', (['value1'], {}), '(value1)\n', (4885, 4893), True, 'import dateutil.parser as du\n'), ((7280, 7328), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['open']"], {}), "(data['indicators']['quote'][0]['open'])\n", (7288, 7328), True, 'import numpy as np\n'), ((7367, 7415), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['high']"], {}), "(data['indicators']['quote'][0]['high'])\n", (7375, 7415), True, 'import numpy as np\n'), ((7454, 7501), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['low']"], {}), "(data['indicators']['quote'][0]['low'])\n", (7462, 7501), True, 'import numpy as np\n'), ((7540, 7589), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['close']"], {}), "(data['indicators']['quote'][0]['close'])\n", (7548, 7589), True, 'import numpy as np\n'), ((7911, 7961), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['volume']"], {}), "(data['indicators']['quote'][0]['volume'])\n", (7919, 7961), True, 'import numpy as np\n'), ((5262, 5281), 'dateutil.parser.isoparse', 'du.isoparse', (['value2'], {}), '(value2)\n', (5273, 5281), True, 'import dateutil.parser as du\n'), ((5366, 5383), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5381, 5383), True, 'import datetime as dt\n'), ((7628, 7683), 'numpy.array', 'np.array', (["data['indicators']['adjclose'][0]['adjclose']"], {}), "(data['indicators']['adjclose'][0]['adjclose'])\n", (7636, 7683), True, 'import numpy as np\n'), ((8989, 9021), 'datetime.datetime.utcfromtimestamp', 'dt.datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (9017, 9021), True, 'import datetime as dt\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from ..measure import ConditionedLognormalSampler
class ScalarImage:
"""
Class containing a scalar image.
"""
def __init__(self, height=1000, width=1000):
""" Instantiate scalar image with shape (<height>, <width>). """
self.height = height
self.width = width
self.initialize()
@property
def shape(self):
""" Image shape. """
return self.im.shape[-2:]
@property
def pixels(self):
""" Returns image pixels. """
return self.im.ravel()
@property
def max(self):
""" Maximum pixel intensity. """
return self.im.max()
@property
def im_normalized(self):
""" Image normalized by the maximum value. """
return self.im/self.max
def percentile(self, q):
""" 98th percentile of pixel intensities. """
return np.percentile(self.im.ravel(), q=q)
def initialize(self):
""" Initialize blank image. """
self.im = np.zeros((self.height, self.width), dtype=np.float64)
def fill(self, mu=0.1, sigma=0.1):
"""
Fill image background with values sampled from a lognormal distribution.
Args:
mu (float) - mean of underlying normal distribution
sigma (float) - std dev of underlying normal distribution
"""
pixels = np.exp(np.random.normal(np.log(mu), sigma, size=self.shape))
self.im[:, :] = pixels
@staticmethod
def _render(im, vmin=0, vmax=None, cmap=plt.cm.Greys, size=5, ax=None):
"""
Render image.
Args:
im (np.ndarray[float]) - image
vmin, vmax (int) - colormap bounds
cmap (matplotlib.ColorMap or str) - if value is 'r', 'g', or 'b', use RGB colorscheme
size (int) - image panel size, in inches
ax (matplotlib.axes.AxesSubplot) - if None, create figure
"""
if ax is None:
fig, ax = plt.subplots(figsize=(size, size))
if vmax is None:
vmax = im.max()
# render image
if type(cmap) == str:
assert cmap in 'rgb', 'Color not recognized.'
im_rgb = np.zeros(im.shape+(3,), dtype=np.float64)
im_rgb[:,:,'rgb'.index(cmap)] = (im-vmin)/(vmax-vmin)
im_rgb[im_rgb>1.] = 1.
ax.imshow(im_rgb)
else:
ax.imshow(im, vmin=vmin, vmax=vmax, cmap=cmap)
# invert axis and remove ticks
ax.invert_yaxis()
ax.axis('off')
def render(self, **kwargs):
""" Render image. """
self._render(self.im.T, **kwargs)
def render_blank(self, **kwargs):
""" Render image. """
self._render(np.zeros(self.shape, dtype=int), **kwargs)
def center_xycoords(self, xy, shrinkage=0.9):
""" Project zero-centered coordinates to center of image. """
center_x, center_y = self.shape[0]/2, self.shape[1]/2
centered_xy = deepcopy(xy)
centered_xy[:, 0] = ((xy[:, 0]*center_x*shrinkage) + center_x)
centered_xy[:, 1] = ((xy[:, 1]*center_y*shrinkage) + center_y)
return centered_xy.astype(int)
class DependentScalarImage(ScalarImage):
"""
    Class defining a scalar image whose pixel intensities are sampled with some dependence on another scalar image.
"""
def __init__(self, pixels, mean, sigma):
""" Instantiate a dependent scalar image. """
super().__init__(*pixels.shape)
x = np.log(pixels.ravel())
self.sampler = ConditionedLognormalSampler(x, np.log(mean), sigma)
def fill(self, rho=0.0):
""" Generate randomly sampled pixel values. """
pixels = self.sampler.sample(rho=rho)
self.im[:, :] = pixels.reshape(self.shape)
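# --- Illustrative usage sketch (not part of the original module) ---
# Shows the workflow implied by the two classes above: build a lognormal
# background image, then sample a second image whose pixels are correlated with
# it. The parameter values (size, mu, sigma, rho) are arbitrary examples, and
# ConditionedLognormalSampler is provided by ..measure, which is not shown here.
def _example_usage():
    base = ScalarImage(height=256, width=256)
    base.fill(mu=0.1, sigma=0.1)
    dependent = DependentScalarImage(base.im, mean=0.1, sigma=0.1)
    dependent.fill(rho=0.5)
    base.render(size=4)
    dependent.render(size=4)
    plt.show()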
|
[
"numpy.zeros",
"numpy.log",
"matplotlib.pyplot.subplots",
"copy.deepcopy"
] |
[((1067, 1120), 'numpy.zeros', 'np.zeros', (['(self.height, self.width)'], {'dtype': 'np.float64'}), '((self.height, self.width), dtype=np.float64)\n', (1075, 1120), True, 'import numpy as np\n'), ((3047, 3059), 'copy.deepcopy', 'deepcopy', (['xy'], {}), '(xy)\n', (3055, 3059), False, 'from copy import deepcopy\n'), ((2046, 2080), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (2058, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2311), 'numpy.zeros', 'np.zeros', (['(im.shape + (3,))'], {'dtype': 'np.float64'}), '(im.shape + (3,), dtype=np.float64)\n', (2276, 2311), True, 'import numpy as np\n'), ((2798, 2829), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'int'}), '(self.shape, dtype=int)\n', (2806, 2829), True, 'import numpy as np\n'), ((3646, 3658), 'numpy.log', 'np.log', (['mean'], {}), '(mean)\n', (3652, 3658), True, 'import numpy as np\n'), ((1459, 1469), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (1465, 1469), True, 'import numpy as np\n')]
|
from ...Renderer.Buffer import VertexBuffer, IndexBuffer, BufferLayout
from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData
from OpenGL.GL import GL_ARRAY_BUFFER, GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, GL_DYNAMIC_DRAW
import ctypes
import numpy as np
from multipledispatch import dispatch
class OpenGLVertexBuffer(VertexBuffer):
__slots__ = "__RendererID", "__itemsize", \
"__Layout"
@dispatch(list)
def __init__(self, vertices: list) -> None:
vertices: np.ndarray = np.array(vertices, dtype=np.float32)
self.__itemsize = vertices.itemsize
self.__RendererID = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
@dispatch(int)
def __init__(self, size: int) -> None:
vertices = np.zeros((size,))
self.__itemsize = size
self.__RendererID = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, ctypes.c_void_p(None), GL_DYNAMIC_DRAW)
def __del__(self) -> None:
glDeleteBuffers(1, [self.__RendererID])
@property
def itemsize(self) -> int:
return self.__itemsize
@property
def RendererID(self) -> int:
return self.__RendererID
def Bind(self) -> None:
glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
def Unbind(self) -> None:
glBindBuffer(GL_ARRAY_BUFFER, 0)
def SetLayout(self, layout: BufferLayout) -> None:
self.__Layout = layout
def SetData(self, data: np.ndarray) -> None:
glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
glBufferSubData(GL_ARRAY_BUFFER, 0, data.nbytes, data.tobytes())
@property
def Layout(self) -> BufferLayout:
return self.__Layout
class OpenGLIndexBuffer(IndexBuffer):
__RendererID : int
__Count : int
def __init__(self, indices: list) -> None:
indices: np.ndarray = np.array(indices, dtype=np.uint32)
self.__Count = len(indices)
self.__RendererID = glGenBuffers(1)
self.Bind()
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
def __del__(self) -> None:
glDeleteBuffers(1, [self.__RendererID])
@property
def RendererID(self) -> int:
return self.__RendererID
@property
def Count(self) -> int:
return self.__Count
def Bind(self) -> None:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.__RendererID)
def Unbind(self) -> None:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
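# --- Illustrative usage sketch (not part of the original module) ---
# The wrappers above create real OpenGL objects, so a current OpenGL context is
# required before constructing them (e.g. a window created by the engine's
# windowing layer, which is not shown here). The triangle data below is an
# arbitrary example.
def _example_usage() -> None:
    # assumes a valid OpenGL context is already current on this thread
    vertices = [
        -0.5, -0.5, 0.0,
         0.5, -0.5, 0.0,
         0.0,  0.5, 0.0
    ]
    indices = [0, 1, 2]
    vbo = OpenGLVertexBuffer(vertices)   # list overload: static vertex data
    ibo = OpenGLIndexBuffer(indices)
    vbo.Bind()
    ibo.Bind()
    # ... vertex attribute setup and draw calls would go here ...
    vbo.Unbind()
    ibo.Unbind()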
|
[
"OpenGL.GL.glBufferData",
"OpenGL.GL.glGenBuffers",
"numpy.array",
"numpy.zeros",
"multipledispatch.dispatch",
"ctypes.c_void_p",
"OpenGL.GL.glBindBuffer",
"OpenGL.GL.glDeleteBuffers"
] |
[((451, 465), 'multipledispatch.dispatch', 'dispatch', (['list'], {}), '(list)\n', (459, 465), False, 'from multipledispatch import dispatch\n'), ((815, 828), 'multipledispatch.dispatch', 'dispatch', (['int'], {}), '(int)\n', (823, 828), False, 'from multipledispatch import dispatch\n'), ((545, 581), 'numpy.array', 'np.array', (['vertices'], {'dtype': 'np.float32'}), '(vertices, dtype=np.float32)\n', (553, 581), True, 'import numpy as np\n'), ((655, 670), 'OpenGL.GL.glGenBuffers', 'glGenBuffers', (['(1)'], {}), '(1)\n', (667, 670), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((679, 727), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (691, 727), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((736, 808), 'OpenGL.GL.glBufferData', 'glBufferData', (['GL_ARRAY_BUFFER', 'vertices.nbytes', 'vertices', 'GL_STATIC_DRAW'], {}), '(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)\n', (748, 808), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((891, 908), 'numpy.zeros', 'np.zeros', (['(size,)'], {}), '((size,))\n', (899, 908), True, 'import numpy as np\n'), ((969, 984), 'OpenGL.GL.glGenBuffers', 'glGenBuffers', (['(1)'], {}), '(1)\n', (981, 984), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((993, 1041), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (1005, 1041), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1177, 1216), 'OpenGL.GL.glDeleteBuffers', 'glDeleteBuffers', (['(1)', '[self.__RendererID]'], {}), '(1, [self.__RendererID])\n', (1192, 1216), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1412, 1460), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (1424, 1460), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1500, 1532), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', '(0)'], {}), '(GL_ARRAY_BUFFER, 0)\n', (1512, 1532), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1678, 1726), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (1690, 1726), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2045, 2079), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.uint32'}), '(indices, dtype=np.uint32)\n', (2053, 2079), True, 'import numpy as np\n'), ((2145, 2160), 'OpenGL.GL.glGenBuffers', 'glGenBuffers', (['(1)'], {}), '(1)\n', (2157, 2160), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2189, 2267), 'OpenGL.GL.glBufferData', 'glBufferData', (['GL_ELEMENT_ARRAY_BUFFER', 'indices.nbytes', 'indices', 'GL_STATIC_DRAW'], {}), '(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)\n', (2201, 2267), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, 
glBufferSubData\n'), ((2308, 2347), 'OpenGL.GL.glDeleteBuffers', 'glDeleteBuffers', (['(1)', '[self.__RendererID]'], {}), '(1, [self.__RendererID])\n', (2323, 2347), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2537, 2593), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ELEMENT_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ELEMENT_ARRAY_BUFFER, self.__RendererID)\n', (2549, 2593), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2633, 2673), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ELEMENT_ARRAY_BUFFER', '(0)'], {}), '(GL_ELEMENT_ARRAY_BUFFER, 0)\n', (2645, 2673), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1097, 1118), 'ctypes.c_void_p', 'ctypes.c_void_p', (['None'], {}), '(None)\n', (1112, 1118), False, 'import ctypes\n')]
|
import numpy as np
import cv2
from semantic_segmentation.data_structure.image_handler import ImageHandler
class Preprocessor:
def __init__(self, image_size):
self.image_size = image_size
self.min_height = 16
self.min_width = 16
self.max_height = 900
self.max_width = 900
self.obox = None
def resize(self, image):
img_h = ImageHandler(image)
if None in self.image_size:
return self.rescale(image, self.max_width, self.max_height, is_lbm=False)
return img_h.resize(height=self.image_size[0], width=self.image_size[1])
def normalize(self, image):
epsilon = 1e-6
mean_mat = np.mean(image)
var_mat = np.var(image)
if var_mat != 0:
mat_norm = (image - mean_mat) / var_mat
min_mat = np.min(mat_norm)
max_mat = np.max(mat_norm)
mat_norm = (mat_norm - min_mat) / (max_mat - min_mat + epsilon)
else:
mat_norm = np.zeros(image.shape)
return mat_norm
def rescale(self, data, max_width, max_height, is_lbm=False):
height, width = data.shape[0], data.shape[1]
if height >= max_height:
new_height = max_height
new_width = int(width * new_height / height)
if is_lbm:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_NEAREST)
else:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_CUBIC)
height, width = data.shape[0], data.shape[1]
if width >= max_width:
new_width = max_width
new_height = int(height * new_width / width)
if is_lbm:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_NEAREST)
else:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_CUBIC)
height, width = data.shape[0], data.shape[1]
return np.reshape(data, (height, width, -1))
def pad(self, image):
img_h = ImageHandler(image)
height, width, ch = image.shape
if height > width:
if height >= self.image_size[0]:
new_height = self.image_size[0]
new_width = int(width * new_height / height)
image = img_h.resize(height=new_height, width=new_width)
else:
if width >= self.image_size[1]:
new_width = self.image_size[1]
new_height = int(height * new_width / width)
image = img_h.resize(height=new_height, width=new_width)
ih, iw = image.shape[:2]
ph, pw = self.image_size[0], self.image_size[1]
x = np.mean(image) * np.ones((ph, pw, ch))
sy1 = int(ph/2)-int(ih/2)
sx1 = int(pw/2)-int(iw/2)
if ch == 1:
image = np.expand_dims(image, axis=2)
x[sy1:sy1+ih, sx1:sx1+iw, :] = image
self.obox = [sx1, sy1, sx1 + iw, sy1 + ih]
return x
def apply(self, image):
image = self.resize(image)
img_h = ImageHandler(image)
if self.image_size[2] == 1:
image = img_h.gray()
image = np.expand_dims(image, axis=2)
image = self.normalize(image)
return image
def lbm_resize(self, lbm, width, height):
if None in [width, height]:
return self.rescale(lbm, self.max_width, self.max_height, is_lbm=True)
return cv2.resize(lbm,
(int(width), int(height)),
interpolation=cv2.INTER_NEAREST)
def apply_to_label_map(self, label_map):
label_map = self.lbm_resize(label_map, width=self.image_size[1], height=self.image_size[0])
if len(label_map.shape) < 3:
label_map = np.expand_dims(label_map, axis=2)
return label_map
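# --- Illustrative usage sketch (not part of the original module) ---
# Shows the call order implied by the class above: images go through apply(),
# label maps through apply_to_label_map(). The image size and the random test
# image are arbitrary examples, and ImageHandler comes from the project's
# data_structure package, which is not shown here.
def _example_usage():
    preprocessor = Preprocessor(image_size=(256, 256, 1))
    image = np.random.randint(0, 255, size=(480, 640, 3)).astype(np.uint8)
    label_map = np.zeros((480, 640), dtype=np.uint8)
    net_input = preprocessor.apply(image)
    net_target = preprocessor.apply_to_label_map(label_map)
    print(net_input.shape, net_target.shape)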
|
[
"numpy.mean",
"numpy.reshape",
"numpy.ones",
"semantic_segmentation.data_structure.image_handler.ImageHandler",
"numpy.max",
"numpy.zeros",
"numpy.expand_dims",
"numpy.min",
"numpy.var"
] |
[((389, 408), 'semantic_segmentation.data_structure.image_handler.ImageHandler', 'ImageHandler', (['image'], {}), '(image)\n', (401, 408), False, 'from semantic_segmentation.data_structure.image_handler import ImageHandler\n'), ((687, 701), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (694, 701), True, 'import numpy as np\n'), ((720, 733), 'numpy.var', 'np.var', (['image'], {}), '(image)\n', (726, 733), True, 'import numpy as np\n'), ((2047, 2084), 'numpy.reshape', 'np.reshape', (['data', '(height, width, -1)'], {}), '(data, (height, width, -1))\n', (2057, 2084), True, 'import numpy as np\n'), ((2128, 2147), 'semantic_segmentation.data_structure.image_handler.ImageHandler', 'ImageHandler', (['image'], {}), '(image)\n', (2140, 2147), False, 'from semantic_segmentation.data_structure.image_handler import ImageHandler\n'), ((3152, 3171), 'semantic_segmentation.data_structure.image_handler.ImageHandler', 'ImageHandler', (['image'], {}), '(image)\n', (3164, 3171), False, 'from semantic_segmentation.data_structure.image_handler import ImageHandler\n'), ((833, 849), 'numpy.min', 'np.min', (['mat_norm'], {}), '(mat_norm)\n', (839, 849), True, 'import numpy as np\n'), ((872, 888), 'numpy.max', 'np.max', (['mat_norm'], {}), '(mat_norm)\n', (878, 888), True, 'import numpy as np\n'), ((1002, 1023), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (1010, 1023), True, 'import numpy as np\n'), ((2782, 2796), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (2789, 2796), True, 'import numpy as np\n'), ((2799, 2820), 'numpy.ones', 'np.ones', (['(ph, pw, ch)'], {}), '((ph, pw, ch))\n', (2806, 2820), True, 'import numpy as np\n'), ((2929, 2958), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (2943, 2958), True, 'import numpy as np\n'), ((3261, 3290), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (3275, 3290), True, 'import numpy as np\n'), ((3866, 3899), 'numpy.expand_dims', 'np.expand_dims', (['label_map'], {'axis': '(2)'}), '(label_map, axis=2)\n', (3880, 3899), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.linalg import norm
def compute_intensity(pos, pos_list, radius):
return (norm(np.array(pos_list) - np.array(pos), axis=1) < radius).sum()
def compute_colours(all_pos):
    colours = np.array([compute_intensity(pos, all_pos, 1e-4) for pos in all_pos], dtype=float)
colours /= max(colours)
return colours
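# --- Illustrative usage sketch (not part of the original snippet) ---
# compute_colours() expects an iterable of positions; each colour is the number
# of neighbours within the hard-coded 1e-4 radius, normalised by the maximum
# count. The random points below are arbitrary example data, drawn in a small
# box so that some of them fall within that radius of each other.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    positions = rng.random((50, 2)) * 1e-3
    print(compute_colours(positions))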
|
[
"numpy.array"
] |
[((84, 102), 'numpy.array', 'np.array', (['pos_list'], {}), '(pos_list)\n', (92, 102), True, 'import numpy as np\n'), ((105, 118), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (113, 118), True, 'import numpy as np\n')]
|
__author__ = "<NAME>"
__email__ = "<EMAIL>"
""" Baseline parallel BFS implementation.
    Implements Algorithm 1 ("Parallel BFS algorithm: high-level overview") from [1].
Reference:
[1] https://www.researchgate.net/publication/220782745_Scalable_Graph_Exploration_on_Multicore_Processors
"""
import numpy as np
from multiprocessing import Pool
import multiprocessing as mp
import time
from src.load_graph import get_graph, gen_balanced_tree
from functools import partial
P_ARR = []
def get_adjacent_nodes(G, x):
idx_lst = []
adj_list = G[x]
for idx, val in enumerate(adj_list):
if val == 1:
idx_lst.append(idx)
return idx_lst
def get_neighbour(u, G, target):
nq = []
# For each v adjacent to u
# print(u)
found_node = False
for v in get_adjacent_nodes(G, u):
if v == target:
found_node = True
if P_ARR[v] == np.inf:
P_ARR[v] = u
nq.append(v)
return nq, found_node
def bfs_parallel(G, target):
r = 0
CQ = []
# Init all values in P to inf
for i in range(G.shape[0]):
P_ARR.append(np.inf)
# Set root node
P_ARR[r] = 0
# Enqueue r
CQ.append(r)
while len(CQ) != 0:
print(f"CQ: {CQ}")
# Parallel Dequeue
num_cpu = mp.cpu_count()
with Pool(num_cpu) as pool:
results = pool.map(partial(get_neighbour, G=G, target=target), CQ)
nq_tmp = [x for (x,y) in results]
for (x,y) in results:
if y:
return True
# print(nq_tmp)
NQ = list(np.concatenate(nq_tmp).ravel())
# Swap CQ and NQ
CQ = NQ
return False
def main():
start_time = time.time()
G = gen_balanced_tree(3, 4, directed=True)
# G = get_graph()
find_node = bfs_parallel(G, target=10000)
print("--- %s seconds ---" % (time.time() - start_time))
if find_node:
print(f"Node Found")
else:
print(f"Node not Found")
if __name__=='__main__':
main()
|
[
"multiprocessing.cpu_count",
"src.load_graph.gen_balanced_tree",
"functools.partial",
"multiprocessing.Pool",
"numpy.concatenate",
"time.time"
] |
[((1822, 1833), 'time.time', 'time.time', ([], {}), '()\n', (1831, 1833), False, 'import time\n'), ((1844, 1882), 'src.load_graph.gen_balanced_tree', 'gen_balanced_tree', (['(3)', '(4)'], {'directed': '(True)'}), '(3, 4, directed=True)\n', (1861, 1882), False, 'from src.load_graph import get_graph, gen_balanced_tree\n'), ((1370, 1384), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1382, 1384), True, 'import multiprocessing as mp\n'), ((1399, 1412), 'multiprocessing.Pool', 'Pool', (['num_cpu'], {}), '(num_cpu)\n', (1403, 1412), False, 'from multiprocessing import Pool\n'), ((1454, 1496), 'functools.partial', 'partial', (['get_neighbour'], {'G': 'G', 'target': 'target'}), '(get_neighbour, G=G, target=target)\n', (1461, 1496), False, 'from functools import partial\n'), ((1994, 2005), 'time.time', 'time.time', ([], {}), '()\n', (2003, 2005), False, 'import time\n'), ((1686, 1708), 'numpy.concatenate', 'np.concatenate', (['nq_tmp'], {}), '(nq_tmp)\n', (1700, 1708), True, 'import numpy as np\n')]
|
import numpy as np
import numpy.testing as npt
import pytest
from openscm_units import unit_registry as ur
from test_model_base import TwoLayerVariantTester
from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel
from openscm_twolayermodel.base import _calculate_geoffroy_helper_parameters
from openscm_twolayermodel.constants import DENSITY_WATER, HEAT_CAPACITY_WATER
class TestImpulseResponseModel(TwoLayerVariantTester):
tmodel = ImpulseResponseModel
parameters = dict(
q1=0.33 * ur("delta_degC/(W/m^2)"),
q2=0.41 * ur("delta_degC/(W/m^2)"),
d1=239.0 * ur("yr"),
d2=4.1 * ur("yr"),
efficacy=1.0 * ur("dimensionless"),
delta_t=1 * ur("yr"),
)
def test_init(self):
init_kwargs = dict(
q1=0.3 * ur("delta_degC/(W/m^2)"),
q2=0.4 * ur("delta_degC/(W/m^2)"),
d1=25.0 * ur("yr"),
d2=300 * ur("yr"),
efficacy=1.1 * ur("dimensionless"),
delta_t=1 / 12 * ur("yr"),
)
res = self.tmodel(**init_kwargs)
for k, v in init_kwargs.items():
assert getattr(res, k) == v, "{} not set properly".format(k)
assert np.isnan(res.erf)
assert np.isnan(res._temp1_mag)
assert np.isnan(res._temp2_mag)
assert np.isnan(res._rndt_mag)
def test_init_backwards_timescales_error(self):
init_kwargs = dict(d1=250.0 * ur("yr"), d2=3 * ur("yr"),)
error_msg = "The short-timescale must be d1"
with pytest.raises(ValueError, match=error_msg):
self.tmodel(**init_kwargs)
def test_calculate_next_temp(self, check_same_unit):
tdelta_t = 30 * 24 * 60 * 60
ttemp = 0.1
tq = 0.4
td = 35.0
tf = 1.2
res = self.tmodel._calculate_next_temp(tdelta_t, ttemp, tq, td, tf)
expected = ttemp * np.exp(-tdelta_t / td) + tf * tq * (
1 - np.exp(-tdelta_t / td)
)
npt.assert_equal(res, expected)
check_same_unit(self.tmodel._temp1_unit, self.tmodel._temp2_unit)
check_same_unit(self.tmodel._q1_unit, self.tmodel._q2_unit)
check_same_unit(self.tmodel._delta_t_unit, self.tmodel._d1_unit)
check_same_unit(self.tmodel._delta_t_unit, self.tmodel._d2_unit)
check_same_unit(
self.tmodel._temp1_unit,
(1.0 * ur(self.tmodel._erf_unit) * 1.0 * ur(self.tmodel._q1_unit)).units,
)
def test_calculate_next_rndt(self, check_same_unit):
ttemp1 = 1.1
ttemp_2 = 0.6
tq1 = 0.5
tq2 = 0.3
td1 = 30
td2 = 600
terf = 1.2
tefficacy = 1.13
helper = self.tmodel(
q1=tq1 * ur("delta_degC/(W/m^2)"),
q2=tq2 * ur("delta_degC/(W/m^2)"),
d1=td1 * ur("yr"),
d2=td2 * ur("yr"),
efficacy=tefficacy * ur("dimensionless"),
)
helper_twolayer = TwoLayerModel(**helper.get_two_layer_parameters())
gh = _calculate_geoffroy_helper_parameters(
helper_twolayer.du,
helper_twolayer.dl,
helper_twolayer.lambda0,
helper_twolayer.efficacy,
helper_twolayer.eta,
)
# see notebook for discussion of why this is so
efficacy_term = (
helper_twolayer.eta
* (helper_twolayer.efficacy - 1)
* (
((1 - gh["phi1"]) * ttemp1 * ur("delta_degC"))
+ ((1 - gh["phi2"]) * ttemp_2 * ur("delta_degC"))
)
)
expected = (
terf * ur(helper._erf_unit)
- ((ttemp1 + ttemp_2) * ur(helper._temp1_unit)) * helper_twolayer.lambda0
- efficacy_term
)
assert str(expected.units) == "watt / meter ** 2"
res = helper._calculate_next_rndt(ttemp1, ttemp_2, terf, tefficacy)
npt.assert_allclose(res, expected.magnitude)
# check internal units make sense
check_same_unit(self.tmodel._q1_unit, self.tmodel._q2_unit)
check_same_unit(
helper_twolayer._lambda0_unit, (1.0 * ur(self.tmodel._q2_unit) ** -1)
)
check_same_unit(
self.tmodel._erf_unit,
(
(
1.0 * ur(self.tmodel._temp1_unit) / (1.0 * ur(self.tmodel._q1_unit))
).units
),
)
check_same_unit(
self.tmodel._erf_unit, efficacy_term.units,
)
def test_step(self):
# move to integration tests
terf = np.array([3, 4, 5, 6, 7]) * ur("W/m^2")
model = self.tmodel()
model.set_drivers(terf)
model.reset()
model.step()
assert model._timestep_idx == 0
npt.assert_equal(model._temp1_mag[model._timestep_idx], 0)
npt.assert_equal(model._temp2_mag[model._timestep_idx], 0)
npt.assert_equal(model._rndt_mag[model._timestep_idx], 0)
model.step()
model.step()
model.step()
assert model._timestep_idx == 3
npt.assert_equal(
model._temp1_mag[model._timestep_idx],
model._calculate_next_temp(
model._delta_t_mag,
model._temp1_mag[model._timestep_idx - 1],
model._q1_mag,
model._d1_mag,
model._erf_mag[model._timestep_idx - 1],
),
)
npt.assert_equal(
model._temp2_mag[model._timestep_idx],
model._calculate_next_temp(
model._delta_t_mag,
model._temp2_mag[model._timestep_idx - 1],
model._q2_mag,
model._d2_mag,
model._erf_mag[model._timestep_idx - 1],
),
)
npt.assert_equal(
model._rndt_mag[model._timestep_idx],
model._calculate_next_rndt(
model._temp1_mag[model._timestep_idx - 1],
model._temp2_mag[model._timestep_idx - 1],
model._erf_mag[model._timestep_idx - 1],
model._efficacy_mag,
),
)
def test_reset(self):
terf = np.array([0, 1, 2]) * ur("W/m^2")
model = self.tmodel()
model.set_drivers(terf)
def assert_is_nan_and_erf_shape(inp):
assert np.isnan(inp).all()
assert inp.shape == terf.shape
model.reset()
# after reset, we are not in any timestep
assert np.isnan(model._timestep_idx)
assert_is_nan_and_erf_shape(model._temp1_mag)
assert_is_nan_and_erf_shape(model._temp2_mag)
assert_is_nan_and_erf_shape(model._rndt_mag)
def test_reset_run_reset(self):
# move to integration tests
terf = np.array([0, 1, 2, 3, 4, 5]) * ur("W/m^2")
model = self.tmodel()
model.set_drivers(terf)
def assert_is_nan_and_erf_shape(inp):
assert np.isnan(inp).all()
assert inp.shape == terf.shape
model.reset()
assert_is_nan_and_erf_shape(model._temp1_mag)
assert_is_nan_and_erf_shape(model._temp2_mag)
assert_is_nan_and_erf_shape(model._rndt_mag)
def assert_ge_zero_and_erf_shape(inp):
assert not (inp < 0).any()
assert inp.shape == terf.shape
model.run()
assert_ge_zero_and_erf_shape(model._temp1_mag)
assert_ge_zero_and_erf_shape(model._temp2_mag)
assert_ge_zero_and_erf_shape(model._rndt_mag)
model.reset()
assert_is_nan_and_erf_shape(model._temp1_mag)
assert_is_nan_and_erf_shape(model._temp2_mag)
assert_is_nan_and_erf_shape(model._rndt_mag)
def test_get_two_layer_model_parameters(self, check_equal_pint):
tq1 = 0.3 * ur("delta_degC/(W/m^2)")
tq2 = 0.4 * ur("delta_degC/(W/m^2)")
td1 = 3 * ur("yr")
td2 = 300.0 * ur("yr")
tefficacy = 1.2 * ur("dimensionless")
start_paras = dict(d1=td1, d2=td2, q1=tq1, q2=tq2, efficacy=tefficacy,)
mod_instance = self.tmodel(**start_paras)
# for explanation of what is going on, see
# impulse-response-equivalence.ipynb
efficacy = tefficacy
lambda0 = 1 / (tq1 + tq2)
C = (td1 * td2) / (tq1 * td2 + tq2 * td1)
a1 = lambda0 * tq1
a2 = lambda0 * tq2
tau1 = td1
tau2 = td2
C_D = (lambda0 * (tau1 * a1 + tau2 * a2) - C) / efficacy
eta = C_D / (tau1 * a2 + tau2 * a1)
expected = {
"lambda0": lambda0,
"du": C / (DENSITY_WATER * HEAT_CAPACITY_WATER),
"dl": C_D / (DENSITY_WATER * HEAT_CAPACITY_WATER),
"eta": eta,
"efficacy": efficacy,
}
res = mod_instance.get_two_layer_parameters()
assert res == expected
# check circularity
circular_params = TwoLayerModel(**res).get_impulse_response_parameters()
for k, v in circular_params.items():
check_equal_pint(v, start_paras[k])
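# --- Illustrative sketch (not part of the original test module) ---
# Mirrors the pattern already exercised above (set_drivers / reset / run and the
# private *_mag arrays) to show a minimal end-to-end run; the constant 4 W/m^2
# forcing and the 250-step length are arbitrary example values.
def _example_step_forcing():
    model = ImpulseResponseModel()
    erf = np.ones(250) * 4.0 * ur("W/m^2")
    model.set_drivers(erf)
    model.reset()
    model.run()
    return model._temp1_mag + model._temp2_mag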
|
[
"numpy.testing.assert_equal",
"openscm_units.unit_registry",
"numpy.testing.assert_allclose",
"openscm_twolayermodel.base._calculate_geoffroy_helper_parameters",
"numpy.exp",
"numpy.array",
"openscm_twolayermodel.TwoLayerModel",
"numpy.isnan",
"pytest.raises"
] |
[((1204, 1221), 'numpy.isnan', 'np.isnan', (['res.erf'], {}), '(res.erf)\n', (1212, 1221), True, 'import numpy as np\n'), ((1237, 1261), 'numpy.isnan', 'np.isnan', (['res._temp1_mag'], {}), '(res._temp1_mag)\n', (1245, 1261), True, 'import numpy as np\n'), ((1277, 1301), 'numpy.isnan', 'np.isnan', (['res._temp2_mag'], {}), '(res._temp2_mag)\n', (1285, 1301), True, 'import numpy as np\n'), ((1317, 1340), 'numpy.isnan', 'np.isnan', (['res._rndt_mag'], {}), '(res._rndt_mag)\n', (1325, 1340), True, 'import numpy as np\n'), ((1977, 2008), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (1993, 2008), True, 'import numpy.testing as npt\n'), ((3014, 3168), 'openscm_twolayermodel.base._calculate_geoffroy_helper_parameters', '_calculate_geoffroy_helper_parameters', (['helper_twolayer.du', 'helper_twolayer.dl', 'helper_twolayer.lambda0', 'helper_twolayer.efficacy', 'helper_twolayer.eta'], {}), '(helper_twolayer.du, helper_twolayer.\n dl, helper_twolayer.lambda0, helper_twolayer.efficacy, helper_twolayer.eta)\n', (3051, 3168), False, 'from openscm_twolayermodel.base import _calculate_geoffroy_helper_parameters\n'), ((3893, 3937), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res', 'expected.magnitude'], {}), '(res, expected.magnitude)\n', (3912, 3937), True, 'import numpy.testing as npt\n'), ((4759, 4817), 'numpy.testing.assert_equal', 'npt.assert_equal', (['model._temp1_mag[model._timestep_idx]', '(0)'], {}), '(model._temp1_mag[model._timestep_idx], 0)\n', (4775, 4817), True, 'import numpy.testing as npt\n'), ((4826, 4884), 'numpy.testing.assert_equal', 'npt.assert_equal', (['model._temp2_mag[model._timestep_idx]', '(0)'], {}), '(model._temp2_mag[model._timestep_idx], 0)\n', (4842, 4884), True, 'import numpy.testing as npt\n'), ((4893, 4950), 'numpy.testing.assert_equal', 'npt.assert_equal', (['model._rndt_mag[model._timestep_idx]', '(0)'], {}), '(model._rndt_mag[model._timestep_idx], 0)\n', (4909, 4950), True, 'import numpy.testing as npt\n'), ((6479, 6508), 'numpy.isnan', 'np.isnan', (['model._timestep_idx'], {}), '(model._timestep_idx)\n', (6487, 6508), True, 'import numpy as np\n'), ((1527, 1569), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'error_msg'}), '(ValueError, match=error_msg)\n', (1540, 1569), False, 'import pytest\n'), ((4564, 4589), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 7]'], {}), '([3, 4, 5, 6, 7])\n', (4572, 4589), True, 'import numpy as np\n'), ((4592, 4603), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (4594, 4603), True, 'from openscm_units import unit_registry as ur\n'), ((6165, 6184), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (6173, 6184), True, 'import numpy as np\n'), ((6187, 6198), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (6189, 6198), True, 'from openscm_units import unit_registry as ur\n'), ((6758, 6786), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (6766, 6786), True, 'import numpy as np\n'), ((6789, 6800), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (6791, 6800), True, 'from openscm_units import unit_registry as ur\n'), ((7766, 7790), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (7768, 7790), True, 'from openscm_units import unit_registry as ur\n'), ((7811, 7835), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (7813, 7835), 
True, 'from openscm_units import unit_registry as ur\n'), ((7854, 7862), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (7856, 7862), True, 'from openscm_units import unit_registry as ur\n'), ((7885, 7893), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (7887, 7893), True, 'from openscm_units import unit_registry as ur\n'), ((7920, 7939), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (7922, 7939), True, 'from openscm_units import unit_registry as ur\n'), ((517, 541), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (519, 541), True, 'from openscm_units import unit_registry as ur\n'), ((561, 585), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (563, 585), True, 'from openscm_units import unit_registry as ur\n'), ((606, 614), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (608, 614), True, 'from openscm_units import unit_registry as ur\n'), ((633, 641), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (635, 641), True, 'from openscm_units import unit_registry as ur\n'), ((666, 685), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (668, 685), True, 'from openscm_units import unit_registry as ur\n'), ((707, 715), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (709, 715), True, 'from openscm_units import unit_registry as ur\n'), ((1882, 1904), 'numpy.exp', 'np.exp', (['(-tdelta_t / td)'], {}), '(-tdelta_t / td)\n', (1888, 1904), True, 'import numpy as np\n'), ((8873, 8893), 'openscm_twolayermodel.TwoLayerModel', 'TwoLayerModel', ([], {}), '(**res)\n', (8886, 8893), False, 'from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel\n'), ((798, 822), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (800, 822), True, 'from openscm_units import unit_registry as ur\n'), ((845, 869), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (847, 869), True, 'from openscm_units import unit_registry as ur\n'), ((893, 901), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (895, 901), True, 'from openscm_units import unit_registry as ur\n'), ((924, 932), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (926, 932), True, 'from openscm_units import unit_registry as ur\n'), ((961, 980), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (963, 980), True, 'from openscm_units import unit_registry as ur\n'), ((1011, 1019), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (1013, 1019), True, 'from openscm_units import unit_registry as ur\n'), ((1432, 1440), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (1434, 1440), True, 'from openscm_units import unit_registry as ur\n'), ((1449, 1457), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (1451, 1457), True, 'from openscm_units import unit_registry as ur\n'), ((1935, 1957), 'numpy.exp', 'np.exp', (['(-tdelta_t / td)'], {}), '(-tdelta_t / td)\n', (1941, 1957), True, 'import numpy as np\n'), ((2413, 2437), 'openscm_units.unit_registry', 'ur', (['self.tmodel._q1_unit'], {}), '(self.tmodel._q1_unit)\n', (2415, 2437), True, 'from openscm_units import unit_registry as ur\n'), ((2724, 2748), 'openscm_units.unit_registry', 'ur', 
(['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (2726, 2748), True, 'from openscm_units import unit_registry as ur\n'), ((2771, 2795), 'openscm_units.unit_registry', 'ur', (['"""delta_degC/(W/m^2)"""'], {}), "('delta_degC/(W/m^2)')\n", (2773, 2795), True, 'from openscm_units import unit_registry as ur\n'), ((2818, 2826), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (2820, 2826), True, 'from openscm_units import unit_registry as ur\n'), ((2849, 2857), 'openscm_units.unit_registry', 'ur', (['"""yr"""'], {}), "('yr')\n", (2851, 2857), True, 'from openscm_units import unit_registry as ur\n'), ((2892, 2911), 'openscm_units.unit_registry', 'ur', (['"""dimensionless"""'], {}), "('dimensionless')\n", (2894, 2911), True, 'from openscm_units import unit_registry as ur\n'), ((3455, 3471), 'openscm_units.unit_registry', 'ur', (['"""delta_degC"""'], {}), "('delta_degC')\n", (3457, 3471), True, 'from openscm_units import unit_registry as ur\n'), ((3521, 3537), 'openscm_units.unit_registry', 'ur', (['"""delta_degC"""'], {}), "('delta_degC')\n", (3523, 3537), True, 'from openscm_units import unit_registry as ur\n'), ((3604, 3624), 'openscm_units.unit_registry', 'ur', (['helper._erf_unit'], {}), '(helper._erf_unit)\n', (3606, 3624), True, 'from openscm_units import unit_registry as ur\n'), ((4124, 4148), 'openscm_units.unit_registry', 'ur', (['self.tmodel._q2_unit'], {}), '(self.tmodel._q2_unit)\n', (4126, 4148), True, 'from openscm_units import unit_registry as ur\n'), ((6328, 6341), 'numpy.isnan', 'np.isnan', (['inp'], {}), '(inp)\n', (6336, 6341), True, 'import numpy as np\n'), ((6930, 6943), 'numpy.isnan', 'np.isnan', (['inp'], {}), '(inp)\n', (6938, 6943), True, 'import numpy as np\n'), ((3661, 3683), 'openscm_units.unit_registry', 'ur', (['helper._temp1_unit'], {}), '(helper._temp1_unit)\n', (3663, 3683), True, 'from openscm_units import unit_registry as ur\n'), ((4284, 4311), 'openscm_units.unit_registry', 'ur', (['self.tmodel._temp1_unit'], {}), '(self.tmodel._temp1_unit)\n', (4286, 4311), True, 'from openscm_units import unit_registry as ur\n'), ((4321, 4345), 'openscm_units.unit_registry', 'ur', (['self.tmodel._q1_unit'], {}), '(self.tmodel._q1_unit)\n', (4323, 4345), True, 'from openscm_units import unit_registry as ur\n'), ((2379, 2404), 'openscm_units.unit_registry', 'ur', (['self.tmodel._erf_unit'], {}), '(self.tmodel._erf_unit)\n', (2381, 2404), True, 'from openscm_units import unit_registry as ur\n')]
|
import pandas as pd
from util import StockAnalysis, AllStocks
import talib
import os
import numpy as np
class FilterEma:
    def __init__(self, barCount, shortCount=None, longCount=None):
self.sa = StockAnalysis()
self.jsonData = self.sa.GetJson
self.trendLength = int(os.getenv('FILTER_TREND_LENGTH', '30'))
self.trendAt = int(os.getenv('FILTER_TREND_AT', '5'))
self.nearPercent = 0.05
self.setBarCount(barCount)
self.shortCount = int(os.getenv('FILTER_EMA_SHORT_COUNT', '14'))
self.longCount = int(os.getenv('FILTER_EMA_LONG_COUNT', '50'))
def setSymbol(self, symbol):
self.symbol = symbol
def setBarCount(self, barCount):
self.barCount = barCount
switcher = {
20: 'ema20',
50: 'ema50',
200: 'ema200'
}
self.filterName = switcher.get(barCount, 'ema20')
self.trendLength = 30
def FilterOn(self, closes, outputShort, outputLong):
        # Count how many consecutive bars, starting from the first row of the data, keep the price/EMA alignment.
idx = 0
repeatCount = 0
lastState = 0
longs = iter(outputLong)
prices = iter(closes)
for short in outputShort:
idx += 1
long = next(longs)
price = next(prices)
if np.isnan(short) or np.isnan(long) or np.isnan(price):
break
if price > short and short > long:
thisState = 1
elif price < short and short < long:
thisState = -1
elif idx <= 5:
thisState = None
lastState = 0
repeatCount = 0
else:
break
if lastState == 0 or lastState == thisState:
repeatCount += 1
else:
break
return repeatCount
def isNearEma(self, close, open, ema):
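        # True when either the close or the open price is within nearPercent of the EMA value.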
isNear = True if abs(close - ema) / close <= self.nearPercent else False
if isNear:
return True
return True if abs(open - ema) / open <= self.nearPercent else False
def Run(self, symbol):
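        # Compute the barCount-period EMA of the daily closes and flag the symbol when the first bar trades near it.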
isLoaded, tp = AllStocks.GetDailyStockData(symbol)
if isLoaded:
try:
self.setSymbol(symbol)
close = tp.Close.to_numpy()
open = tp.Open.to_numpy()
output = talib.EMA(close[::-1], timeperiod=self.barCount)
self.sa.UpdateFilter(
self.jsonData, self.symbol, self.filterName, self.isNearEma(close[0], open[0], output[-1]))
except Exception as e:
print('filterEma.Run() {}'.format(e))
self.sa.UpdateFilter(
self.jsonData, self.symbol, self.filterName, False)
return False
def Trending(self, symbol):
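        # Store under 'td' how many consecutive bars keep price, short EMA and long EMA in the same ordering.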
isLoaded, tp = AllStocks.GetDailyStockData(symbol)
if isLoaded:
try:
close = tp.Close.to_numpy()
outputShort = talib.EMA(
close[::-1], timeperiod=self.shortCount)
outputLong = talib.EMA(close[::-1], timeperiod=self.longCount)
trendingDays = self.FilterOn(
close, outputShort[::-1], outputLong[::-1])
self.sa.UpdateFilter(self.jsonData, symbol, 'td', trendingDays)
except Exception as e:
print('filterEma.Run() {}'.format(e))
self.sa.UpdateFilter(self.jsonData, symbol, 'td', 0)
return False
def WriteFilter(self):
self.sa.WriteJson(self.jsonData)
@staticmethod
def All():
filter = FilterEma(20)
AllStocks.Run(filter.Run, False)
filter.setBarCount(50)
AllStocks.Run(filter.Run, False)
filter.setBarCount(200)
AllStocks.Run(filter.Run, False)
AllStocks.Run(filter.Trending, False)
filter.WriteFilter()
if __name__ == '__main__':
FilterEma.All()
print('---------- done ----------')
# filter = FilterEma(symbol='AAPL', barCount=20)
# up, down = filter.Run(filter.symbol)
# print(up, down)
|
[
"talib.EMA",
"os.getenv",
"util.StockAnalysis",
"util.AllStocks.GetDailyStockData",
"numpy.isnan",
"util.AllStocks.Run"
] |
[((207, 222), 'util.StockAnalysis', 'StockAnalysis', ([], {}), '()\n', (220, 222), False, 'from util import StockAnalysis, AllStocks\n'), ((2150, 2185), 'util.AllStocks.GetDailyStockData', 'AllStocks.GetDailyStockData', (['symbol'], {}), '(symbol)\n', (2177, 2185), False, 'from util import StockAnalysis, AllStocks\n'), ((2849, 2884), 'util.AllStocks.GetDailyStockData', 'AllStocks.GetDailyStockData', (['symbol'], {}), '(symbol)\n', (2876, 2884), False, 'from util import StockAnalysis, AllStocks\n'), ((3659, 3691), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Run', '(False)'], {}), '(filter.Run, False)\n', (3672, 3691), False, 'from util import StockAnalysis, AllStocks\n'), ((3731, 3763), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Run', '(False)'], {}), '(filter.Run, False)\n', (3744, 3763), False, 'from util import StockAnalysis, AllStocks\n'), ((3804, 3836), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Run', '(False)'], {}), '(filter.Run, False)\n', (3817, 3836), False, 'from util import StockAnalysis, AllStocks\n'), ((3845, 3882), 'util.AllStocks.Run', 'AllStocks.Run', (['filter.Trending', '(False)'], {}), '(filter.Trending, False)\n', (3858, 3882), False, 'from util import StockAnalysis, AllStocks\n'), ((294, 332), 'os.getenv', 'os.getenv', (['"""FILTER_TREND_LENGTH"""', '"""30"""'], {}), "('FILTER_TREND_LENGTH', '30')\n", (303, 332), False, 'import os\n'), ((361, 394), 'os.getenv', 'os.getenv', (['"""FILTER_TREND_AT"""', '"""5"""'], {}), "('FILTER_TREND_AT', '5')\n", (370, 394), False, 'import os\n'), ((493, 534), 'os.getenv', 'os.getenv', (['"""FILTER_EMA_SHORT_COUNT"""', '"""14"""'], {}), "('FILTER_EMA_SHORT_COUNT', '14')\n", (502, 534), False, 'import os\n'), ((565, 605), 'os.getenv', 'os.getenv', (['"""FILTER_EMA_LONG_COUNT"""', '"""50"""'], {}), "('FILTER_EMA_LONG_COUNT', '50')\n", (574, 605), False, 'import os\n'), ((1302, 1317), 'numpy.isnan', 'np.isnan', (['short'], {}), '(short)\n', (1310, 1317), True, 'import numpy as np\n'), ((1321, 1335), 'numpy.isnan', 'np.isnan', (['long'], {}), '(long)\n', (1329, 1335), True, 'import numpy as np\n'), ((1339, 1354), 'numpy.isnan', 'np.isnan', (['price'], {}), '(price)\n', (1347, 1354), True, 'import numpy as np\n'), ((2374, 2422), 'talib.EMA', 'talib.EMA', (['close[::-1]'], {'timeperiod': 'self.barCount'}), '(close[::-1], timeperiod=self.barCount)\n', (2383, 2422), False, 'import talib\n'), ((2997, 3047), 'talib.EMA', 'talib.EMA', (['close[::-1]'], {'timeperiod': 'self.shortCount'}), '(close[::-1], timeperiod=self.shortCount)\n', (3006, 3047), False, 'import talib\n'), ((3098, 3147), 'talib.EMA', 'talib.EMA', (['close[::-1]'], {'timeperiod': 'self.longCount'}), '(close[::-1], timeperiod=self.longCount)\n', (3107, 3147), False, 'import talib\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from IPython import get_ipython
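import graphlab as gl  # assumed import: the gl.* calls below match GraphLab Create's API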
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
def gl_confmatrix_2_confmatrix(sf,number_label=3):
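    # sf: GraphLab confusion-matrix SFrame with target_label, predicted_label and count columns.
    # Returns a row-normalized percentage matrix and plots it as a heatmap.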
Nlabels=max(len(sf['target_label'].unique()),len(sf['predicted_label'].unique()))
    matrix = np.zeros([number_label, number_label], dtype=float)  # np.float was removed in recent NumPy versions
for i in sf:
matrix[i['target_label'],i['predicted_label']]=i['count']
    # Normalize each row to percentages of the true-label totals.
    row_sums = matrix.sum(axis=1)
    matrix = matrix / row_sums[:, np.newaxis]
    matrix *= 100
plt.figure(figsize=(number_label, number_label))
dims = (8,8)
fig, ax = plt.subplots(figsize=dims)
sns.heatmap(matrix, annot=True, fmt='.2f', xticklabels=['0' ,'1','2'], yticklabels=['0' ,'1','2']);
plt.title('Confusion Matrix');
plt.xlabel('Predicted label')
plt.ylabel('True label')
return matrix
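# train_data, test_data, features_to_train and the initial model are assumed to be defined in earlier notebook cells.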
conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label2'],model.predict(train_data))
conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label2'],model.predict(test_data))
gl_confmatrix_2_confmatrix(conf_matrix_train)
gl_confmatrix_2_confmatrix(conf_matrix_test)
model.coefficients.sort('value').show()
model=gl.logistic_classifier.create(train_data,'label1',features_to_train,class_weights='auto')
conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label1'],model.predict(train_data))
conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label1'],model.predict(test_data))
gl_confmatrix_2_confmatrix(conf_matrix_train,number_label=2)
gl_confmatrix_2_confmatrix(conf_matrix_test,number_label=2)
model=gl.random_forest_classifier.create(train_data,'label2',features_to_train,class_weights='auto',num_trees=50)
conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label2'],model.predict(train_data))
conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label2'],model.predict(test_data))
gl_confmatrix_2_confmatrix(conf_matrix_train)
gl_confmatrix_2_confmatrix(conf_matrix_test)
model=gl.boosted_trees_classifier.create(train_data,'label2',features_to_train,class_weights='auto')
conf_matrix_train=gl.evaluation.confusion_matrix(train_data['label2'],model.predict(train_data))
conf_matrix_test=gl.evaluation.confusion_matrix(test_data['label2'],model.predict(test_data))
gl_confmatrix_2_confmatrix(conf_matrix_train)
gl_confmatrix_2_confmatrix(conf_matrix_test)
|
[
"IPython.get_ipython",
"seaborn.set",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"seaborn.heatmap",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots"
] |
[((182, 191), 'seaborn.set', 'sns.set', ([], {}), '()\n', (189, 191), True, 'import seaborn as sns\n'), ((342, 396), 'numpy.zeros', 'np.zeros', (['[number_label, number_label]'], {'dtype': 'np.float'}), '([number_label, number_label], dtype=np.float)\n', (350, 396), True, 'import numpy as np\n'), ((595, 643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(number_label, number_label)'}), '(figsize=(number_label, number_label))\n', (605, 643), True, 'import matplotlib.pyplot as plt\n'), ((675, 701), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'dims'}), '(figsize=dims)\n', (687, 701), True, 'import matplotlib.pyplot as plt\n'), ((706, 810), 'seaborn.heatmap', 'sns.heatmap', (['matrix'], {'annot': '(True)', 'fmt': '""".2f"""', 'xticklabels': "['0', '1', '2']", 'yticklabels': "['0', '1', '2']"}), "(matrix, annot=True, fmt='.2f', xticklabels=['0', '1', '2'],\n yticklabels=['0', '1', '2'])\n", (717, 810), True, 'import seaborn as sns\n'), ((811, 840), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (820, 840), True, 'import matplotlib.pyplot as plt\n'), ((846, 875), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (856, 875), True, 'import matplotlib.pyplot as plt\n'), ((880, 904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (890, 904), True, 'import matplotlib.pyplot as plt\n'), ((127, 140), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (138, 140), False, 'from IPython import get_ipython\n')]
|
import argparse
parser = argparse.ArgumentParser(description='This script takes a dihedral trajectory and detects change points using SIMPLE (simultaneous Penalized Likelihood Estimation, see Fan et al. P. Natl. Acad. Sci, 2015, 112, 7454-7459). Two parameters alpha and lambda are controlling the extent of simultaneous changes and total number of changes detected, respectively. alpha -> 1 means more simultaneous changes (0<alpha<1), and smaller lambda gives more changes.')
parser.add_argument('shifteddihed', default='shifted_dihedral.dat', help='input shifted dihedral file')
parser.add_argument('--alpha', type=float, default=0.7, help='extent of simultaneous changes, 0.7 by default suggested by the author if no prior ')
parser.add_argument('--lam', type=float, default=10, help='sensitivity of detecting changes, 10 by default')
args = parser.parse_args()
import numpy as np
from SIMPLEchangepoint import ComputeChanges
import collections
inputfile=args.shifteddihed
lam=args.lam
alpha=args.alpha
outputfile=inputfile[:-4]+".lam"+str(lam)+"alpha"+str(alpha)+".transitionProba.dat"
outputfile2=inputfile[:-4]+".lam"+str(lam)+"alpha"+str(alpha)+".transitionSummary.dat"
alldata=np.loadtxt(inputfile).T
time=alldata[0]
data=alldata[1:]
CPDresults = ComputeChanges(data,lam,alpha,lam_min=0,parallel=False)
def changeORnot(con_set,size):
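    # Build a space-separated 0/1 row of length size marking which series changed at this time point.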
x=[0]*size
for i in con_set:
x[i] = 1
return ' '.join(map(str,x))
od = collections.OrderedDict(sorted(CPDresults.items()))
with open(outputfile,'w') as fo:
for t in range(len(time)):
if t not in od.keys():
fo.write(str(time[t])+' '+' '.join(map(str,[0]*len(data)))+'\n')
else:
fo.write(str(time[t])+' '+changeORnot(od[t],len(data))+'\n')
def strplus1(x):
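    # Convert a 0-based series index to a 1-based label for the summary file.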
return str(x+1)
with open(outputfile2,'w') as fo2:
    for k, v in od.items():
fo2.write('{:10.1f} {:5d} {:s}\n'.format(time[k],len(v),','.join(map(strplus1,v))))
|
[
"numpy.loadtxt",
"SIMPLEchangepoint.ComputeChanges",
"argparse.ArgumentParser"
] |
[((25, 487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This script takes a dihedral trajectory and detects change points using SIMPLE (simultaneous Penalized Likelihood Estimation, see Fan et al. P. Natl. Acad. Sci, 2015, 112, 7454-7459). Two parameters alpha and lambda are controlling the extent of simultaneous changes and total number of changes detected, respectively. alpha -> 1 means more simultaneous changes (0<alpha<1), and smaller lambda gives more changes."""'}), "(description=\n 'This script takes a dihedral trajectory and detects change points using SIMPLE (simultaneous Penalized Likelihood Estimation, see Fan et al. P. Natl. Acad. Sci, 2015, 112, 7454-7459). Two parameters alpha and lambda are controlling the extent of simultaneous changes and total number of changes detected, respectively. alpha -> 1 means more simultaneous changes (0<alpha<1), and smaller lambda gives more changes.'\n )\n", (48, 487), False, 'import argparse\n'), ((1261, 1320), 'SIMPLEchangepoint.ComputeChanges', 'ComputeChanges', (['data', 'lam', 'alpha'], {'lam_min': '(0)', 'parallel': '(False)'}), '(data, lam, alpha, lam_min=0, parallel=False)\n', (1275, 1320), False, 'from SIMPLEchangepoint import ComputeChanges\n'), ((1190, 1211), 'numpy.loadtxt', 'np.loadtxt', (['inputfile'], {}), '(inputfile)\n', (1200, 1211), True, 'import numpy as np\n')]
|
import os
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
def train_from_file(dir_path, leap_limit=15):
file_list = os.listdir(dir_path)
pig_format = [
"id",
"onset",
"offset",
"pitch",
"onsetvel",
"offsetvel",
"hand",
"fingernum",
]
right_init = Counter()
right_transition_count = Counter()
right_emission = defaultdict(Counter)
left_init = Counter()
left_transition_count = Counter()
left_emission = defaultdict(Counter)
for idx, file in enumerate(file_list):
path = dir_path + "/" + file
data_size = len(file_list)
print(f"Processing: {path} ({idx + 1}/{data_size})")
data = pd.read_csv(path, sep="\t", header=0, names=pig_format)
if data.fingernum.dtype == object:
data.fingernum = data.fingernum.apply(
lambda x: x.split("_")[0]
).astype("int")
left_hand = data[data.fingernum < 0]
right_hand = data[data.fingernum > 0]
init, transition, emission = count_fingering(
right_hand, limit=leap_limit
)
right_init += init
right_transition_count += transition
for k, counter in emission.items():
right_emission[k].update(counter)
init, transition, emission = count_fingering(
left_hand, limit=leap_limit
)
left_init += init
left_transition_count += transition
for k, counter in emission.items():
left_emission[k].update(counter)
return (right_init, right_transition_count, right_emission, left_init, left_transition_count, left_emission)
def pitch_to_key(pitch: str):
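    # Map a pitch string such as 'C#4' to (x, y) key coordinates: x counts white keys relative to C4,
    # y is 1 for a black key (single sharp or flat) and 0 otherwise.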
posx = {"C": 0, "D": 1, "E": 2, "F": 3, "G": 4, "A": 5, "B": 6}[pitch[0]]
posy = 0
if pitch[1].isdigit():
posx += (int(pitch[1]) - 4) * 7
elif pitch[1] == "#":
if pitch[2] == "#":
posx += (int(pitch[3]) - 4) * 7 + 1
else:
posy = 1
posx += (int(pitch[2]) - 4) * 7
elif pitch[1] == "b" or pitch[1] == "-":
if pitch[2] == "b" or pitch[2] == "-":
posx += (int(pitch[3]) - 4) * 7
else:
posy = 1
posx += (int(pitch[2]) - 4) * 7 - 1
return (posx, posy)
def note_to_diff(fingering_data, limit=15):
pos_x, pos_y = zip(*fingering_data.pitch.map(pitch_to_key))
series_x = pd.Series(pos_x)
series_y = pd.Series(pos_y)
diffs = list(
zip(
series_x.diff()
.fillna(0, downcast="infer")
.apply(lambda x: limit if x > limit else x)
.apply(lambda x: -limit if x < -limit else x),
series_y.diff().fillna(0, downcast="infer"),
)
)
return diffs
def count_fingering(fingering_data, limit=15):
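    # Gather HMM statistics for one piece: hidden states are (previous finger, current finger) pairs,
    # observations are key-position differences clipped to +/- limit.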
hidden_state = list(
zip(
fingering_data.fingernum.shift(fill_value=0),
fingering_data.fingernum,
)
)
pos_x, pos_y = zip(*fingering_data.pitch.map(pitch_to_key))
model = pd.DataFrame(
{"hidden_state": hidden_state, "pos_x": pos_x, "pos_y": pos_y}
)
model["pos_diff"] = list(
zip(
model.pos_x.diff()
.fillna(0, downcast="infer")
.apply(lambda x: limit if x > limit else x)
.apply(lambda x: -limit if x < -limit else x),
model.pos_y.diff().fillna(0, downcast="infer"),
)
)
# First observation only
init = Counter([model.hidden_state[0][1]])
# Without first observation
transition = Counter(model.hidden_state[1:])
# Emission
emission = {
state: Counter(model[model.hidden_state == state].pos_diff)
for state in set(model.hidden_state[1:])
}
return (init, transition, Counter(emission))
def normalize(v):
return v / v.sum(axis=0)
def init_count_to_prob(init_count):
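    # Finger numbers run 1..5 (negative for the left hand); map them onto indices 0..4 and normalize.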
init_prob = np.zeros(5)
for key, value in init_count.items():
if key < 0:
init_prob[-key - 1] = value
else:
init_prob[key - 1] = value
return normalize(init_prob)
def transition_count_to_prob(transition_count):
transition_prob = np.zeros((5, 5))
for key, value in transition_count.items():
if key[0] < 0 and key[1] < 0:
transition_prob[-key[0] - 1, -key[1] - 1] = value
else:
transition_prob[key[0] - 1, key[1] - 1] = value
return np.apply_along_axis(normalize, axis=1, arr=transition_prob)
def series_to_matrix(emission_prob):
out_prob = np.zeros((5, 5))
for key, value in emission_prob.items():
if key[0] < 0 and key[1] < 0:
out_prob[-key[0] - 1, -key[1] - 1] = value
else:
out_prob[key[0] - 1, key[1] - 1] = value
return out_prob
def emission_count_to_prob(emission_count):
prob_df = (
pd.DataFrame.from_dict(emission_count).fillna(0, downcast="infer") + 1
).apply(normalize, axis=0)
prob_dict = {
out: series_to_matrix(prob_df.loc[out]) for out in prob_df.index
}
return prob_dict
def decoding(init_prob, transition, out_prob, observations, hand):
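    # Viterbi decoding in log space: delta holds the best log-probabilities, psi the backpointers.
    # Notes less than 30 ms apart are penalized for finger transitions that run against the pitch direction.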
n_state = len(init_prob)
obs_len = len(observations)
delta = np.zeros((n_state, obs_len + 1))
psi = np.zeros((n_state, obs_len), dtype=int)
delta[:, 0] = np.log(init_prob)
for i, (pitch, time) in enumerate(
zip(observations.pitch_diff, observations.time_diff)
):
delta_mat = np.tile(delta[:, i], (n_state, 1)).transpose()
prod = delta_mat + np.log(transition) + np.log(out_prob[pitch])
if time < 0.03:
if hand == "R":
if pitch[0] > 0:
prod[np.tril_indices(n_state)] -= 5
else:
prod[np.triu_indices(n_state)] -= 5
else:
if pitch[0] > 0:
prod[np.triu_indices(n_state)] -= 5
else:
prod[np.tril_indices(n_state)] -= 5
delta[:, i + 1] = np.amax(prod, axis=0)
psi[:, i] = prod.argmax(axis=0) + 1
opt_path = [np.argmax(delta[:, obs_len]) + 1]
for i in range(obs_len - 1, -1, -1):
opt_path.append(psi[opt_path[-1] - 1, i])
return opt_path[::-1]
|
[
"pandas.Series",
"numpy.tile",
"os.listdir",
"numpy.triu_indices",
"pandas.read_csv",
"numpy.log",
"numpy.argmax",
"pandas.DataFrame.from_dict",
"collections.Counter",
"numpy.zeros",
"numpy.apply_along_axis",
"collections.defaultdict",
"pandas.DataFrame",
"numpy.tril_indices",
"numpy.amax"
] |
[((158, 178), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (168, 178), False, 'import os\n'), ((367, 376), 'collections.Counter', 'Counter', ([], {}), '()\n', (374, 376), False, 'from collections import Counter, defaultdict\n'), ((406, 415), 'collections.Counter', 'Counter', ([], {}), '()\n', (413, 415), False, 'from collections import Counter, defaultdict\n'), ((437, 457), 'collections.defaultdict', 'defaultdict', (['Counter'], {}), '(Counter)\n', (448, 457), False, 'from collections import Counter, defaultdict\n'), ((475, 484), 'collections.Counter', 'Counter', ([], {}), '()\n', (482, 484), False, 'from collections import Counter, defaultdict\n'), ((513, 522), 'collections.Counter', 'Counter', ([], {}), '()\n', (520, 522), False, 'from collections import Counter, defaultdict\n'), ((543, 563), 'collections.defaultdict', 'defaultdict', (['Counter'], {}), '(Counter)\n', (554, 563), False, 'from collections import Counter, defaultdict\n'), ((2458, 2474), 'pandas.Series', 'pd.Series', (['pos_x'], {}), '(pos_x)\n', (2467, 2474), True, 'import pandas as pd\n'), ((2490, 2506), 'pandas.Series', 'pd.Series', (['pos_y'], {}), '(pos_y)\n', (2499, 2506), True, 'import pandas as pd\n'), ((3089, 3165), 'pandas.DataFrame', 'pd.DataFrame', (["{'hidden_state': hidden_state, 'pos_x': pos_x, 'pos_y': pos_y}"], {}), "({'hidden_state': hidden_state, 'pos_x': pos_x, 'pos_y': pos_y})\n", (3101, 3165), True, 'import pandas as pd\n'), ((3528, 3563), 'collections.Counter', 'Counter', (['[model.hidden_state[0][1]]'], {}), '([model.hidden_state[0][1]])\n', (3535, 3563), False, 'from collections import Counter, defaultdict\n'), ((3614, 3645), 'collections.Counter', 'Counter', (['model.hidden_state[1:]'], {}), '(model.hidden_state[1:])\n', (3621, 3645), False, 'from collections import Counter, defaultdict\n'), ((3955, 3966), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3963, 3966), True, 'import numpy as np\n'), ((4226, 4242), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (4234, 4242), True, 'import numpy as np\n'), ((4477, 4536), 'numpy.apply_along_axis', 'np.apply_along_axis', (['normalize'], {'axis': '(1)', 'arr': 'transition_prob'}), '(normalize, axis=1, arr=transition_prob)\n', (4496, 4536), True, 'import numpy as np\n'), ((4591, 4607), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (4599, 4607), True, 'import numpy as np\n'), ((5269, 5301), 'numpy.zeros', 'np.zeros', (['(n_state, obs_len + 1)'], {}), '((n_state, obs_len + 1))\n', (5277, 5301), True, 'import numpy as np\n'), ((5312, 5351), 'numpy.zeros', 'np.zeros', (['(n_state, obs_len)'], {'dtype': 'int'}), '((n_state, obs_len), dtype=int)\n', (5320, 5351), True, 'import numpy as np\n'), ((5370, 5387), 'numpy.log', 'np.log', (['init_prob'], {}), '(init_prob)\n', (5376, 5387), True, 'import numpy as np\n'), ((758, 813), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'header': '(0)', 'names': 'pig_format'}), "(path, sep='\\t', header=0, names=pig_format)\n", (769, 813), True, 'import pandas as pd\n'), ((3694, 3746), 'collections.Counter', 'Counter', (['model[model.hidden_state == state].pos_diff'], {}), '(model[model.hidden_state == state].pos_diff)\n', (3701, 3746), False, 'from collections import Counter, defaultdict\n'), ((3833, 3850), 'collections.Counter', 'Counter', (['emission'], {}), '(emission)\n', (3840, 3850), False, 'from collections import Counter, defaultdict\n'), ((6066, 6087), 'numpy.amax', 'np.amax', (['prod'], {'axis': '(0)'}), '(prod, axis=0)\n', (6073, 6087), True, 'import 
numpy as np\n'), ((5611, 5634), 'numpy.log', 'np.log', (['out_prob[pitch]'], {}), '(out_prob[pitch])\n', (5617, 5634), True, 'import numpy as np\n'), ((6149, 6177), 'numpy.argmax', 'np.argmax', (['delta[:, obs_len]'], {}), '(delta[:, obs_len])\n', (6158, 6177), True, 'import numpy as np\n'), ((5516, 5550), 'numpy.tile', 'np.tile', (['delta[:, i]', '(n_state, 1)'], {}), '(delta[:, i], (n_state, 1))\n', (5523, 5550), True, 'import numpy as np\n'), ((5590, 5608), 'numpy.log', 'np.log', (['transition'], {}), '(transition)\n', (5596, 5608), True, 'import numpy as np\n'), ((4904, 4942), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['emission_count'], {}), '(emission_count)\n', (4926, 4942), True, 'import pandas as pd\n'), ((5745, 5769), 'numpy.tril_indices', 'np.tril_indices', (['n_state'], {}), '(n_state)\n', (5760, 5769), True, 'import numpy as np\n'), ((5823, 5847), 'numpy.triu_indices', 'np.triu_indices', (['n_state'], {}), '(n_state)\n', (5838, 5847), True, 'import numpy as np\n'), ((5930, 5954), 'numpy.triu_indices', 'np.triu_indices', (['n_state'], {}), '(n_state)\n', (5945, 5954), True, 'import numpy as np\n'), ((6008, 6032), 'numpy.tril_indices', 'np.tril_indices', (['n_state'], {}), '(n_state)\n', (6023, 6032), True, 'import numpy as np\n')]
|
import pymongo
import numpy as np
from tqdm import tqdm
from datetime import datetime, timedelta
def mongo_query(**kwargs):
"""Create a MongoDB query based on a set of conditions."""
query = {}
if 'start_date' in kwargs:
if not ('CreationDate' in query):
query['CreationDate'] = {}
query['CreationDate']['$gte'] = kwargs['start_date']
if 'end_date' in kwargs:
if not ('CreationDate' in query):
query['CreationDate'] = {}
query['CreationDate']['$lt'] = kwargs['end_date']
if 'exclude_closed' in kwargs:
query['Closed'] = kwargs['exclude_closed']
return query
def year_range_query(start_year, end_year, exclude_closed=True):
"""Returns a MongoDB query returning all posts for a given year."""
query = mongo_query(start_date=datetime(start_year, 1, 1),
end_date=datetime(end_year + 1, 1, 1),
exclude_closed=exclude_closed)
return query
def single_day_query(day, month, year, exclude_closed=True):
"""Returns a MongoDB query returning all posts for a given day."""
start_date = datetime(year, month, day)
query = mongo_query(start_date=start_date,
end_date=start_date + timedelta(days=10),
exclude_closed=exclude_closed)
return query
class MongoDataset:
"""Interface between MongoDB and the rest of the Python code."""
def __init__(self, forum='overflow'):
try:
client = pymongo.MongoClient()
except Exception as e:
message = """Could not connect to MongoDB client. Make sure to start it by executing:
sudo systemctl start mongod """
print(message)
raise e
self.collection = client.titlewave[f'{forum}.posts']
def get_mongo_ids(self, query):
"""Fetches the ids of documents matching a query."""
result = self.collection.find(query, {'_id': True})
ids = [row['_id'] for row in result]
return ids
def batch_update(self, ids, command, batch_size=256, progress_bar=True):
"""
Execute an update_many command in batches.
Parameters:
ids - The document ids in the Mongo collection of the documents to be updated.
command - The update command to be executed on each document.
batch_size - The number of documents to update in a single call of update_many.
progress_bar - Whether to display a progress bar.
"""
        # Guard against len(ids) < batch_size, which would otherwise ask np.array_split for zero sections.
        num_batches = max(1, len(ids) // batch_size)
        # Split the ids into batches of the specified size, and typecast them back to Python integers with tolist.
        splits = [batch.tolist() for batch in np.array_split(ids, num_batches)]
if progress_bar:
splits = tqdm(splits)
for batch_ids in splits:
self.collection.update_many({'_id': {'$in': batch_ids}}, command)
def get_partition(self, partition, projection):
"""
Fetches all documents in a specified partition of the dataset.
Parameters:
partition - The name of the partition (e.g., "classifier_train")
projection - Indicates which fields of the documents to return.
"""
cursor = self.collection.find({'partition': partition}, projection)
return list(cursor)
def reset_partitions(self):
"""Remove the partition field from all documents in the collection."""
self.collection.update_many({'partition': {'$exists': True}}, {'$unset': {'partition': 1}})
|
[
"datetime.datetime",
"tqdm.tqdm",
"numpy.array_split",
"pymongo.MongoClient",
"datetime.timedelta"
] |
[((1136, 1162), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (1144, 1162), False, 'from datetime import datetime, timedelta\n'), ((822, 848), 'datetime.datetime', 'datetime', (['start_year', '(1)', '(1)'], {}), '(start_year, 1, 1)\n', (830, 848), False, 'from datetime import datetime, timedelta\n'), ((883, 911), 'datetime.datetime', 'datetime', (['(end_year + 1)', '(1)', '(1)'], {}), '(end_year + 1, 1, 1)\n', (891, 911), False, 'from datetime import datetime, timedelta\n'), ((1516, 1537), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {}), '()\n', (1535, 1537), False, 'import pymongo\n'), ((2805, 2817), 'tqdm.tqdm', 'tqdm', (['splits'], {}), '(splits)\n', (2809, 2817), False, 'from tqdm import tqdm\n'), ((1256, 1274), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (1265, 1274), False, 'from datetime import datetime, timedelta\n'), ((2717, 2749), 'numpy.array_split', 'np.array_split', (['ids', 'num_batches'], {}), '(ids, num_batches)\n', (2731, 2749), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
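# Staircase plot of a 3-bit ADC transfer curve: eight binary output codes against eight analog input levels.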
plt.style.use('seaborn-darkgrid')
x = range(8)
y = np.linspace(1.1, 5.0, 8)
ylabel = [bin(num)[2:] for num in x]
xlabel = ["{0:.2f}".format(num) for num in y]
plt.step(x, y)
plt.yticks(y, ylabel)
plt.xticks(x, xlabel, rotation=45)
plt.ylabel("Binary Output")
plt.xlabel("Analog Input")
plt.savefig("adc.png", transparent=True)
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.step"
] |
[((51, 84), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (64, 84), True, 'import matplotlib.pyplot as plt\n'), ((103, 127), 'numpy.linspace', 'np.linspace', (['(1.1)', '(5.0)', '(8)'], {}), '(1.1, 5.0, 8)\n', (114, 127), True, 'import numpy as np\n'), ((223, 237), 'matplotlib.pyplot.step', 'plt.step', (['x', 'y'], {}), '(x, y)\n', (231, 237), True, 'import matplotlib.pyplot as plt\n'), ((238, 259), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y', 'ylabel'], {}), '(y, ylabel)\n', (248, 259), True, 'import matplotlib.pyplot as plt\n'), ((260, 294), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'xlabel'], {'rotation': '(45)'}), '(x, xlabel, rotation=45)\n', (270, 294), True, 'import matplotlib.pyplot as plt\n'), ((295, 322), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Binary Output"""'], {}), "('Binary Output')\n", (305, 322), True, 'import matplotlib.pyplot as plt\n'), ((323, 349), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Analog Input"""'], {}), "('Analog Input')\n", (333, 349), True, 'import matplotlib.pyplot as plt\n'), ((350, 390), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""adc.png"""'], {'transparent': '(True)'}), "('adc.png', transparent=True)\n", (361, 390), True, 'import matplotlib.pyplot as plt\n')]
|
from pandas import read_sql_query
from sqlite3 import connect
from pickle import load, dump
from time import time
from gensim.utils import simple_preprocess
from gensim.models import Phrases
from gensim.models.phrases import Phraser
from gensim.parsing.preprocessing import STOPWORDS
from gensim.corpora import Dictionary
from gensim.models import TfidfModel, AuthorTopicModel
from nltk import SnowballStemmer, WordNetLemmatizer
import numpy as np
np.random.seed(59)
DB_NAME = 'all-the-news.db'
SOURCES_FILE = 'sources.bin'
YEARS_FILE = 'years.bin'
NEWS_FILE = 'news.bin'
PROCESSED_NEWS_FILE = 'processed-news.bin'
DICTIONARY_FILE = 'dictionary.bin'
TFIDF_FILE = 'tf-idf.bin'
MODEL_FILE = 'model.bin'
def printExecutionTime(start_time):
print('Completed in {0:.4f} seconds\n'.format(time() - start_time))
def loadData():
print('Loading data...')
start_time = time()
try:
# Loading if possible
sources_file = open(SOURCES_FILE, 'rb')
sources = load(sources_file)
years_file = open(YEARS_FILE, 'rb')
years = load(years_file)
news_file = open(NEWS_FILE, 'rb')
news = load(news_file)
except:
conn = connect(DB_NAME)
df = read_sql_query('SELECT publication, year, content FROM longform WHERE content != "" AND content IS NOT NULL AND publication != "" AND publication IS NOT NULL', conn)
conn.commit()
conn.close()
sources = dict()
years = dict()
news = list()
for index, row in df.iterrows():
# Populating sources
if row.publication not in sources.keys():
sources[row.publication] = list()
sources[row.publication].append(index)
# Populating years
if row.year not in years.keys():
years[row.year] = list()
            years[row.year].append(index)
# Populating news
news.append(row.content)
del df
# Saving sources to file
sources_file = open(SOURCES_FILE, 'wb')
dump(sources, sources_file)
# Saving years to file
years_file = open(YEARS_FILE, 'wb')
dump(years, years_file)
# Saving news to file
news_file = open(NEWS_FILE, 'wb')
dump(news, news_file)
finally:
sources_file.close()
years_file.close()
news_file.close()
printExecutionTime(start_time)
return sources, years, news
def preProcess(docs):
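    # Tokenize each document, drop stop words and short tokens, lemmatize, and cache the result.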
print('Pre-processing...')
start_time = time()
try:
# Loading if possible
f = open(PROCESSED_NEWS_FILE, 'rb')
processed_docs = load(f)
except:
stop_words = STOPWORDS
stemmer = SnowballStemmer('english')
lemmatizer = WordNetLemmatizer()
processed_docs = []
for doc in docs:
processed_doc = []
for token in simple_preprocess(doc, deacc=True):
if token not in stop_words and len(token) > 2:
token = lemmatizer.lemmatize(token, pos='v')
#token = stemmer.stem(token)
processed_doc.append(token)
processed_docs.append(processed_doc)
# Saving results to file
f = open(PROCESSED_NEWS_FILE, 'wb')
dump(processed_docs, f)
finally:
f.close()
printExecutionTime(start_time)
return processed_docs
def extractDictionary(documents):
print('Extracting dictionary...')
start_time = time()
try:
# Loading if possible
dictionary = Dictionary.load(DICTIONARY_FILE)
except:
dictionary = Dictionary(documents)
dictionary.filter_extremes(no_below=200, no_above=0.8, keep_n=4000)
# Saving to file
dictionary.save(DICTIONARY_FILE)
printExecutionTime(start_time)
return dictionary
def extractFeatures(documents, dictionary):
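    # Build bag-of-words vectors, weight them with TF-IDF, and cache the transformed corpus.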
print('Extracting features...')
start_time = time()
try:
# Loading if possible
f = open(TFIDF_FILE, 'rb')
tfidf_corpus = load(f)
except:
bow_corpus = [ dictionary.doc2bow(doc) for doc in documents ]
tfidf = TfidfModel(bow_corpus)
tfidf_corpus = tfidf[bow_corpus]
# Saving to file
f = open(TFIDF_FILE, 'wb')
dump(tfidf_corpus, f)
finally:
f.close()
printExecutionTime(start_time)
return tfidf_corpus
def generateAuthorTopicModel(corpus, dictionary, authors):
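    # Train (or load a cached) AuthorTopicModel in which each publication acts as an 'author' over its article indices.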
print('Generating author-topic model...')
start_time = time()
try:
# Loading if possible
model = AuthorTopicModel.load(MODEL_FILE)
except:
model = AuthorTopicModel(
corpus,
num_topics=20,
id2word=dictionary,
author2doc=authors
)
# Saving to file
model.save(MODEL_FILE)
printExecutionTime(start_time)
return model
if __name__ == '__main__':
sources, years, news = loadData()
processed_news = preProcess(news)
del news
dictionary = extractDictionary(processed_news)
tfidf = extractFeatures(processed_news, dictionary)
del processed_news
model = generateAuthorTopicModel(tfidf.corpus, dictionary, sources)
del tfidf
print('Topics')
for idx, topic in model.print_topics(-1):
print('Topic {}: {}'.format(idx, topic))
print('\nAuthors')
for author in model.id2author.values():
print('{}: {}'.format(author, model.get_author_topics(author)))
|
[
"pandas.read_sql_query",
"gensim.models.AuthorTopicModel",
"gensim.models.AuthorTopicModel.load",
"gensim.corpora.Dictionary.load",
"pickle.dump",
"sqlite3.connect",
"gensim.corpora.Dictionary",
"nltk.SnowballStemmer",
"pickle.load",
"nltk.WordNetLemmatizer",
"gensim.utils.simple_preprocess",
"numpy.random.seed",
"time.time",
"gensim.models.TfidfModel"
] |
[((451, 469), 'numpy.random.seed', 'np.random.seed', (['(59)'], {}), '(59)\n', (465, 469), True, 'import numpy as np\n'), ((873, 879), 'time.time', 'time', ([], {}), '()\n', (877, 879), False, 'from time import time\n'), ((2337, 2343), 'time.time', 'time', ([], {}), '()\n', (2341, 2343), False, 'from time import time\n'), ((3185, 3191), 'time.time', 'time', ([], {}), '()\n', (3189, 3191), False, 'from time import time\n'), ((3604, 3610), 'time.time', 'time', ([], {}), '()\n', (3608, 3610), False, 'from time import time\n'), ((4132, 4138), 'time.time', 'time', ([], {}), '()\n', (4136, 4138), False, 'from time import time\n'), ((972, 990), 'pickle.load', 'load', (['sources_file'], {}), '(sources_file)\n', (976, 990), False, 'from pickle import load, dump\n'), ((1044, 1060), 'pickle.load', 'load', (['years_file'], {}), '(years_file)\n', (1048, 1060), False, 'from pickle import load, dump\n'), ((1111, 1126), 'pickle.load', 'load', (['news_file'], {}), '(news_file)\n', (1115, 1126), False, 'from pickle import load, dump\n'), ((2439, 2446), 'pickle.load', 'load', (['f'], {}), '(f)\n', (2443, 2446), False, 'from pickle import load, dump\n'), ((3243, 3275), 'gensim.corpora.Dictionary.load', 'Dictionary.load', (['DICTIONARY_FILE'], {}), '(DICTIONARY_FILE)\n', (3258, 3275), False, 'from gensim.corpora import Dictionary\n'), ((3697, 3704), 'pickle.load', 'load', (['f'], {}), '(f)\n', (3701, 3704), False, 'from pickle import load, dump\n'), ((4185, 4218), 'gensim.models.AuthorTopicModel.load', 'AuthorTopicModel.load', (['MODEL_FILE'], {}), '(MODEL_FILE)\n', (4206, 4218), False, 'from gensim.models import TfidfModel, AuthorTopicModel\n'), ((1148, 1164), 'sqlite3.connect', 'connect', (['DB_NAME'], {}), '(DB_NAME)\n', (1155, 1164), False, 'from sqlite3 import connect\n'), ((1174, 1349), 'pandas.read_sql_query', 'read_sql_query', (['"""SELECT publication, year, content FROM longform WHERE content != "" AND content IS NOT NULL AND publication != "" AND publication IS NOT NULL"""', 'conn'], {}), '(\n \'SELECT publication, year, content FROM longform WHERE content != "" AND content IS NOT NULL AND publication != "" AND publication IS NOT NULL\'\n , conn)\n', (1188, 1349), False, 'from pandas import read_sql_query\n'), ((1908, 1935), 'pickle.dump', 'dump', (['sources', 'sources_file'], {}), '(sources, sources_file)\n', (1912, 1935), False, 'from pickle import load, dump\n'), ((2008, 2031), 'pickle.dump', 'dump', (['years', 'years_file'], {}), '(years, years_file)\n', (2012, 2031), False, 'from pickle import load, dump\n'), ((2101, 2122), 'pickle.dump', 'dump', (['news', 'news_file'], {}), '(news, news_file)\n', (2105, 2122), False, 'from pickle import load, dump\n'), ((2498, 2524), 'nltk.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (2513, 2524), False, 'from nltk import SnowballStemmer, WordNetLemmatizer\n'), ((2542, 2561), 'nltk.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2559, 2561), False, 'from nltk import SnowballStemmer, WordNetLemmatizer\n'), ((2992, 3015), 'pickle.dump', 'dump', (['processed_docs', 'f'], {}), '(processed_docs, f)\n', (2996, 3015), False, 'from pickle import load, dump\n'), ((3303, 3324), 'gensim.corpora.Dictionary', 'Dictionary', (['documents'], {}), '(documents)\n', (3313, 3324), False, 'from gensim.corpora import Dictionary\n'), ((3793, 3815), 'gensim.models.TfidfModel', 'TfidfModel', (['bow_corpus'], {}), '(bow_corpus)\n', (3803, 3815), False, 'from gensim.models import TfidfModel, AuthorTopicModel\n'), ((3910, 3931), 'pickle.dump', 
'dump', (['tfidf_corpus', 'f'], {}), '(tfidf_corpus, f)\n', (3914, 3931), False, 'from pickle import load, dump\n'), ((4241, 4320), 'gensim.models.AuthorTopicModel', 'AuthorTopicModel', (['corpus'], {'num_topics': '(20)', 'id2word': 'dictionary', 'author2doc': 'authors'}), '(corpus, num_topics=20, id2word=dictionary, author2doc=authors)\n', (4257, 4320), False, 'from gensim.models import TfidfModel, AuthorTopicModel\n'), ((792, 798), 'time.time', 'time', ([], {}), '()\n', (796, 798), False, 'from time import time\n'), ((2652, 2686), 'gensim.utils.simple_preprocess', 'simple_preprocess', (['doc'], {'deacc': '(True)'}), '(doc, deacc=True)\n', (2669, 2686), False, 'from gensim.utils import simple_preprocess\n')]
|
import argparse
import sys
import optax
import torch
import numpy as np
import time
import jax
import jax.numpy as jnp
import matplotlib as mp
import haiku as hk
import dill as pickle
try:
mp.use("Qt5Agg")
mp.rc('text', usetex=True)
mp.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
except ImportError:
pass
import deep_lagrangian_networks.jax_HNN_model as hnn
import deep_lagrangian_networks.jax_DeLaN_model as delan
import deep_lagrangian_networks.jax_Black_Box_model as black_box
from deep_lagrangian_networks.utils import load_dataset, init_env, activations
from deep_lagrangian_networks.jax_integrator import symplectic_euler, explicit_euler, runge_kutta_4
def running_mean(x, n):
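    # Moving average over a window of n samples; the series is front-padded with its first value so the output keeps the input length.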
cumsum = np.cumsum(np.concatenate([x[0] * np.ones((n,)), x]))
return (cumsum[n:] - cumsum[:-n]) / n
if __name__ == "__main__":
n_plot = 5
dataset = "uniform"
model_id = ["structured", "black_box", "structured", "black_box", "black_box"]
module_key = ["DeLaN", "DeLaN", "HNN", "HNN", "Network"]
colors = {
"DeLaN structured": cm.get_cmap(cm.Set1)(0),
"DeLaN black_box": cm.get_cmap(cm.Set1)(1),
"HNN structured": cm.get_cmap(cm.Set1)(2),
"HNN black_box": cm.get_cmap(cm.Set1)(3),
"Network black_box": cm.get_cmap(cm.Set1)(4),
}
results = {}
for i in range(n_plot):
with open(f"data/results/{module_key[i]}_{model_id[i]}_{dataset}.pickle", "rb") as file:
results[module_key[i] + " " + model_id[i]] = pickle.load(file)
if dataset == "char":
train_data, test_data, divider, dt = load_dataset(
filename="data/character_data.pickle",
test_label=["e", "q", "v"])
elif dataset == "uniform":
train_data, test_data, divider, dt = load_dataset(
filename="data/uniform_data.pickle",
test_label=["Test 0", "Test 1", "Test 2"])
else:
raise ValueError
vpt_th = 1.e-2
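    # Error threshold used below to measure how long each rollout stays accurate (the valid prediction time).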
for i in range(n_plot):
key = f"{module_key[i]} {model_id[i]}"
n_seeds = results[key]['forward_model']['q_error'].shape[0]
xd_error = np.mean(results[key]['forward_model']['xd_error']), 2. * np.std(results[key]['forward_model']['xd_error'])
n_test = 2
vpt = np.zeros((0, n_test))
for i in range(n_seeds):
vpt_i = []
for j in range(n_test):
traj = np.concatenate([
results[key]['forward_model']['q_error'][i, divider[j]:divider[j+1]],
results[key]['forward_model']['q_error'][i, -1:] * 0.0 + 1.])
vpt_i = vpt_i + [np.argwhere(traj >= vpt_th)[0, 0]]
vpt = np.concatenate([vpt, np.array([vpt_i])])
vpt = np.mean(vpt), np.std(vpt)
unit = r"\text{s}"
string = f"${xd_error[0]:.1e}{'}'} \pm {xd_error[1]:.1e}{'}'}$ & ${vpt[0]*dt:.2f}{unit} \pm {vpt[1]*dt:.2f}{unit}$ \\\\".replace("e-", r"\mathrm{e}{-").replace("e+", r"\mathrm{e}{+")
print(f"{key:20} - " + string)
test_labels, test_qp, test_qv, test_qa, test_p, test_pd, test_tau, test_m, test_c, test_g = test_data
tau_g, tau_c, tau_m, tau = jnp.array(test_g), jnp.array(test_c), jnp.array(test_m), jnp.array(test_tau)
q, qd, qdd = jnp.array(test_qp), jnp.array(test_qv), jnp.array(test_qa)
p, pd = jnp.array(test_p), jnp.array(test_pd)
dHdt = jax.vmap(jnp.dot, [0, 0])(qd, tau)
H = jnp.concatenate([dt * jnp.cumsum(dHdt[divider[i]: divider[i+1]]) for i in range(3)])
def smoothing(x):
return np.concatenate([running_mean(x[divider[i]:divider[i + 1]], 10) for i in range(3)])
print("\n################################################")
print("Plotting Performance:")
# Alpha of the graphs:
plot_alpha = 0.8
y_offset = -0.15
n_test = 2
# Plot the performance:
q_low = np.clip(1.5 * np.min(np.array(q), axis=0), -np.inf, -0.01)
q_max = np.clip(1.5 * np.max(np.array(q), axis=0), 0.01, np.inf)
if dataset == "char":
q_max = np.array([0.25, 3.])
q_low = np.array([-1.25, 1.])
qd_low = np.clip(1.5 * np.min(qd, axis=0), -np.inf, -0.01)
qd_max = np.clip(1.5 * np.max(qd, axis=0), 0.01, np.inf)
p_low = np.clip(1.2 * np.min(p, axis=0), -np.inf, -0.01)
p_max = np.clip(1.2 * np.max(p, axis=0), 0.01, np.inf)
H_lim = [-0.01, +0.01] if dataset == "uniform" else [-2.75, +2.75]
err_min, err_max = 1.e-5, 1.e3
plt.rc('text', usetex=True)
color_i = ["r", "b", "g", "k"]
ticks = np.array(divider)
ticks = (ticks[:-1] + ticks[1:]) / 2
fig = plt.figure(figsize=(24.0 / 1.54, 8.0 / 1.54), dpi=100)
fig.subplots_adjust(left=0.06, bottom=0.12, right=0.98, top=0.95, wspace=0.24, hspace=0.2)
fig.canvas.set_window_title('')
legend = [
mp.patches.Patch(color=colors["DeLaN structured"], label="DeLaN - Structured Lagrangian"),
mp.patches.Patch(color=colors["DeLaN black_box"], label="DeLaN - Black-Box Lagrangian"),
mp.patches.Patch(color=colors["HNN structured"], label="HNN - Structured Hamiltonian"),
mp.patches.Patch(color=colors["HNN black_box"], label="HNN - Black-Box Hamiltonian"),
mp.patches.Patch(color=colors["Network black_box"], label="Feed-Forward Network"),
mp.patches.Patch(color="k", label="Ground Truth")]
ax0 = fig.add_subplot(3, 4, 1)
ax0.set_title(r"Generalized Position $\mathbf{q}$")
ax0.text(s=r"\textbf{Joint 0}", x=-0.25, y=.5, fontsize=12, fontweight="bold", rotation=90,
horizontalalignment="center", verticalalignment="center", transform=ax0.transAxes)
ax0.set_ylabel(r"$\mathbf{q}_0$ [Rad]")
ax0.get_yaxis().set_label_coords(-0.2, 0.5)
ax0.set_ylim(q_low[0], q_max[0])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax1 = fig.add_subplot(3, 4, 5)
ax1.text(s=r"\textbf{Joint 1}", x=-.25, y=0.5, fontsize=12, fontweight="bold", rotation=90,
horizontalalignment="center", verticalalignment="center", transform=ax1.transAxes)
ax1.set_ylabel(r"$\mathbf{q}_1$ [Rad]")
ax1.get_yaxis().set_label_coords(-0.2, 0.5)
ax1.set_ylim(q_low[1], q_max[1])
ax1.set_xticks(ticks)
ax1.set_xticklabels(test_labels)
[ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax1.set_xlim(divider[0], divider[n_test])
ax1.yaxis.set_label_coords(y_offset, 0.5)
ax2 = fig.add_subplot(3, 4, 9)
ax2.text(s=r"\textbf{Error}", x=-.25, y=0.5, fontsize=12, fontweight="bold", rotation=90,
horizontalalignment="center", verticalalignment="center", transform=ax2.transAxes)
ax2.text(s=r"\textbf{(a)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.get_yaxis().set_label_coords(-0.2, 0.5)
ax2.set_xticks(ticks)
ax2.set_xticklabels(test_labels)
[ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax2.set_xlim(divider[0], divider[n_test])
ax2.set_ylim(err_min, err_max)
ax2.set_yscale('log')
ax2.set_ylabel(r"Position Error")
ax2.yaxis.set_label_coords(y_offset, 0.5)
ax2.axhline(vpt_th, color="k", linestyle="--")
# Plot Ground Truth Torque:
ax0.plot(q[:, 0], color="k")
ax1.plot(q[:, 1], color="k")
# Plot DeLaN Torque:
for key in results.keys():
color = colors[key]
q_pred = results[key]["forward_model"]["q_pred"]
q_error = results[key]["forward_model"]["q_error"]
q_pred_min, q_pred_mean, q_pred_max = np.min(q_pred, axis=0), np.median(q_pred, axis=0), np.max(q_pred, axis=0)
q_error_min, q_error_mean, q_error_max = np.min(q_error, axis=0), np.median(q_error, axis=0), np.max(q_error, axis=0)
q_error_min = smoothing(q_error_min)
q_error_mean = smoothing(q_error_mean)
q_error_max = smoothing(q_error_max)
x = np.arange(q_pred_max.shape[0])
ax0.plot(q_pred_mean[:, 0], color=color, alpha=plot_alpha)
ax0.fill_between(x, q_pred_min[:, 0], q_pred_max[:, 0], color=color, alpha=plot_alpha/8.)
ax1.plot(q_pred_mean[:, 1], color=color, alpha=plot_alpha)
ax1.fill_between(x, q_pred_min[:, 1], q_pred_max[:, 1], color=color, alpha=plot_alpha/8.)
ax2.plot(q_error_mean, color=color, alpha=plot_alpha)
ax2.fill_between(x, q_error_min, q_error_max, color=color, alpha=plot_alpha/8.)
# Plot Mass Torque
ax0 = fig.add_subplot(3, 4, 2)
ax0.set_title(r"Generalized Velocity $\dot{\mathbf{q}}$")
ax0.set_ylabel(r"$\dot{\mathbf{q}}_0$ [Rad/s]")
ax0.set_ylim(qd_low[0], qd_max[0])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax1 = fig.add_subplot(3, 4, 6)
ax1.set_ylabel(r"$\dot{\mathbf{q}}_{1}$ [Rad/s]")
ax1.set_ylim(qd_low[1], qd_max[1])
ax1.set_xticks(ticks)
ax1.set_xticklabels(test_labels)
[ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax1.set_xlim(divider[0], divider[n_test])
ax1.yaxis.set_label_coords(y_offset, 0.5)
ax2 = fig.add_subplot(3, 4, 10)
ax2.text(s=r"\textbf{(b)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.get_yaxis().set_label_coords(-0.2, 0.5)
ax2.set_xticks(ticks)
ax2.set_xticklabels(test_labels)
[ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax2.set_xlim(divider[0], divider[n_test])
ax2.set_ylim(err_min, err_max)
ax2.set_yscale('log')
ax2.set_ylabel(r"Velocity Error")
ax2.yaxis.set_label_coords(y_offset, 0.5)
# Plot Ground Truth Inertial Torque:
ax0.plot(qd[:, 0], color="k")
ax1.plot(qd[:, 1], color="k")
# Plot DeLaN Inertial Torque:
for key in results.keys():
color = colors[key]
qd_pred = results[key]["forward_model"]["qd_pred"]
qd_error = results[key]["forward_model"]["qd_error"]
qd_pred_min, qd_pred_mean, qd_pred_max = np.min(qd_pred, axis=0), np.median(qd_pred, axis=0), np.max(qd_pred, axis=0)
qd_error_min, qd_error_mean, qd_error_max = np.min(qd_error, axis=0), np.median(qd_error, axis=0), np.max(qd_error, axis=0)
x = np.arange(qd_pred_max.shape[0])
qd_error_min = smoothing(qd_error_min)
qd_error_mean = smoothing(qd_error_mean)
qd_error_max = smoothing(qd_error_max)
ax0.plot(qd_pred_mean[:, 0], color=color, alpha=plot_alpha)
ax0.fill_between(x, qd_pred_min[:, 0], qd_pred_max[:, 0], color=color, alpha=plot_alpha/8.)
ax1.plot(qd_pred_mean[:, 1], color=color, alpha=plot_alpha)
ax1.fill_between(x, qd_pred_min[:, 1], qd_pred_max[:, 1], color=color, alpha=plot_alpha/8.)
ax2.plot(qd_error_mean, color=color, alpha=plot_alpha)
ax2.fill_between(x, qd_error_min, qd_error_max, color=color, alpha=plot_alpha/8.)
# Plot Coriolis Torque
ax0 = fig.add_subplot(3, 4, 3)
ax0.set_title(r"Generalized Momentum $\mathbf{p}$")
ax0.set_ylabel(r"$\mathbf{p}_0$")
ax0.set_ylim(p_low[0], p_max[0])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax1 = fig.add_subplot(3, 4, 7)
ax1.set_ylabel(r"$\mathbf{p}_1$")
ax1.set_ylim(p_low[1], p_max[1])
ax1.set_xticks(ticks)
ax1.set_xticklabels(test_labels)
[ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax1.set_xlim(divider[0], divider[n_test])
ax1.yaxis.set_label_coords(y_offset, 0.5)
ax2 = fig.add_subplot(3, 4, 11)
ax2.text(s=r"\textbf{(c)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.get_yaxis().set_label_coords(-0.2, 0.5)
ax2.set_xticks(ticks)
ax2.set_xticklabels(test_labels)
[ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax2.set_xlim(divider[0], divider[n_test])
ax2.set_ylim(err_min, err_max)
ax2.set_yscale('log')
ax2.set_ylabel(r"Impulse Error")
ax2.yaxis.set_label_coords(y_offset, 0.5)
# Plot Ground Truth Coriolis & Centrifugal Torque:
ax0.plot(p[:, 0], color="k")
ax1.plot(p[:, 1], color="k")
for key in results.keys():
color = colors[key]
p_pred = results[key]["forward_model"]["p_pred"]
p_error = results[key]["forward_model"]["p_error"]
p_pred_min, p_pred_mean, p_pred_max = np.min(p_pred, axis=0), np.median(p_pred, axis=0), np.max(p_pred, axis=0)
p_error_min, p_error_mean, p_error_max = np.min(p_error, axis=0), np.median(p_error, axis=0), np.max(p_error, axis=0)
x = np.arange(p_pred_max.shape[0])
p_error_min = smoothing(p_error_min)
p_error_mean = smoothing(p_error_mean)
p_error_max = smoothing(p_error_max)
ax0.plot(p_pred_mean[:, 0], color=color, alpha=plot_alpha)
ax0.fill_between(x, p_pred_min[:, 0], p_pred_max[:, 0], color=color, alpha=plot_alpha/8.)
ax1.plot(p_pred_mean[:, 1], color=color, alpha=plot_alpha)
ax1.fill_between(x, p_pred_min[:, 1], p_pred_max[:, 1], color=color, alpha=plot_alpha/8.)
ax2.plot(p_error_mean, color=color, alpha=plot_alpha)
ax2.fill_between(x, p_error_min, p_error_max, color=color, alpha=plot_alpha/8.)
# Plot Gravity
ax0 = fig.add_subplot(3, 4, 4)
ax0.set_title(r"Normalized Energy $\mathcal{H}$")
ax0.set_ylabel("$\mathcal{H}$")
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax0.set_ylim(H_lim[0], H_lim[1])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.plot(H[:], color="k")
for key in results.keys():
if key == "Network black_box":
continue
color = colors[key]
H_pred = results[key]["forward_model"]["H_pred"]
H_pred_min, H_pred_mean, H_pred_max = np.min(H_pred, axis=0), np.median(H_pred, axis=0), np.max(H_pred, axis=0)
x = np.arange(H_pred_max.shape[0])
ax0.plot(H_pred_mean[:], color=color, alpha=plot_alpha)
ax0.fill_between(x, H_pred_min[:], H_pred_max[:], color=color, alpha=plot_alpha/8.)
ax2 = fig.add_subplot(3, 4, 12)
ax2.text(s=r"\textbf{(d)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.set_frame_on(False)
ax2.set_xticks([])
ax2.set_yticks([])
ax2.legend(handles=legend, bbox_to_anchor=(-0.0375, 2.1), loc='upper left', ncol=1, framealpha=0., labelspacing=1.0)
# fig.savefig(f"figures/forward_model_{module_key}_{model_id}_Performance.pdf", format="pdf")
# fig.savefig(f"figures/forward_model_{module_key}_{model_id}_Performance.png", format="png")
print("\n################################################\n\n\n")
plt.show()
|
[
"deep_lagrangian_networks.utils.load_dataset",
"numpy.array",
"matplotlib.rc",
"numpy.arange",
"dill.load",
"numpy.mean",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"matplotlib.cm.get_cmap",
"numpy.ones",
"matplotlib.use",
"jax.numpy.cumsum",
"matplotlib.patches.Patch",
"numpy.std",
"jax.vmap",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show",
"numpy.median",
"jax.numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.argwhere"
] |
[((194, 210), 'matplotlib.use', 'mp.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (200, 210), True, 'import matplotlib as mp\n'), ((215, 241), 'matplotlib.rc', 'mp.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (220, 241), True, 'import matplotlib as mp\n'), ((4554, 4581), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (4560, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4630, 4647), 'numpy.array', 'np.array', (['divider'], {}), '(divider)\n', (4638, 4647), True, 'import numpy as np\n'), ((4700, 4754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24.0 / 1.54, 8.0 / 1.54)', 'dpi': '(100)'}), '(figsize=(24.0 / 1.54, 8.0 / 1.54), dpi=100)\n', (4710, 4754), True, 'import matplotlib.pyplot as plt\n'), ((15912, 15922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15920, 15922), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1797), 'deep_lagrangian_networks.utils.load_dataset', 'load_dataset', ([], {'filename': '"""data/character_data.pickle"""', 'test_label': "['e', 'q', 'v']"}), "(filename='data/character_data.pickle', test_label=['e', 'q', 'v'])\n", (1730, 1797), False, 'from deep_lagrangian_networks.utils import load_dataset, init_env, activations\n'), ((2377, 2398), 'numpy.zeros', 'np.zeros', (['(0, n_test)'], {}), '((0, n_test))\n', (2385, 2398), True, 'import numpy as np\n'), ((3277, 3294), 'jax.numpy.array', 'jnp.array', (['test_g'], {}), '(test_g)\n', (3286, 3294), True, 'import jax.numpy as jnp\n'), ((3296, 3313), 'jax.numpy.array', 'jnp.array', (['test_c'], {}), '(test_c)\n', (3305, 3313), True, 'import jax.numpy as jnp\n'), ((3315, 3332), 'jax.numpy.array', 'jnp.array', (['test_m'], {}), '(test_m)\n', (3324, 3332), True, 'import jax.numpy as jnp\n'), ((3334, 3353), 'jax.numpy.array', 'jnp.array', (['test_tau'], {}), '(test_tau)\n', (3343, 3353), True, 'import jax.numpy as jnp\n'), ((3371, 3389), 'jax.numpy.array', 'jnp.array', (['test_qp'], {}), '(test_qp)\n', (3380, 3389), True, 'import jax.numpy as jnp\n'), ((3391, 3409), 'jax.numpy.array', 'jnp.array', (['test_qv'], {}), '(test_qv)\n', (3400, 3409), True, 'import jax.numpy as jnp\n'), ((3411, 3429), 'jax.numpy.array', 'jnp.array', (['test_qa'], {}), '(test_qa)\n', (3420, 3429), True, 'import jax.numpy as jnp\n'), ((3442, 3459), 'jax.numpy.array', 'jnp.array', (['test_p'], {}), '(test_p)\n', (3451, 3459), True, 'import jax.numpy as jnp\n'), ((3461, 3479), 'jax.numpy.array', 'jnp.array', (['test_pd'], {}), '(test_pd)\n', (3470, 3479), True, 'import jax.numpy as jnp\n'), ((3491, 3516), 'jax.vmap', 'jax.vmap', (['jnp.dot', '[0, 0]'], {}), '(jnp.dot, [0, 0])\n', (3499, 3516), False, 'import jax\n'), ((4137, 4158), 'numpy.array', 'np.array', (['[0.25, 3.0]'], {}), '([0.25, 3.0])\n', (4145, 4158), True, 'import numpy as np\n'), ((4174, 4196), 'numpy.array', 'np.array', (['[-1.25, 1.0]'], {}), '([-1.25, 1.0])\n', (4182, 4196), True, 'import numpy as np\n'), ((4910, 5004), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['DeLaN structured']", 'label': '"""DeLaN - Structured Lagrangian"""'}), "(color=colors['DeLaN structured'], label=\n 'DeLaN - Structured Lagrangian')\n", (4926, 5004), True, 'import matplotlib as mp\n'), ((5009, 5101), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['DeLaN black_box']", 'label': '"""DeLaN - Black-Box Lagrangian"""'}), "(color=colors['DeLaN black_box'], label=\n 'DeLaN - Black-Box Lagrangian')\n", (5025, 5101), True, 'import matplotlib as mp\n'), 
((5106, 5197), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['HNN structured']", 'label': '"""HNN - Structured Hamiltonian"""'}), "(color=colors['HNN structured'], label=\n 'HNN - Structured Hamiltonian')\n", (5122, 5197), True, 'import matplotlib as mp\n'), ((5202, 5291), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['HNN black_box']", 'label': '"""HNN - Black-Box Hamiltonian"""'}), "(color=colors['HNN black_box'], label=\n 'HNN - Black-Box Hamiltonian')\n", (5218, 5291), True, 'import matplotlib as mp\n'), ((5296, 5382), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['Network black_box']", 'label': '"""Feed-Forward Network"""'}), "(color=colors['Network black_box'], label=\n 'Feed-Forward Network')\n", (5312, 5382), True, 'import matplotlib as mp\n'), ((5387, 5436), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': '"""k"""', 'label': '"""Ground Truth"""'}), "(color='k', label='Ground Truth')\n", (5403, 5436), True, 'import matplotlib as mp\n'), ((8307, 8337), 'numpy.arange', 'np.arange', (['q_pred_max.shape[0]'], {}), '(q_pred_max.shape[0])\n', (8316, 8337), True, 'import numpy as np\n'), ((10915, 10946), 'numpy.arange', 'np.arange', (['qd_pred_max.shape[0]'], {}), '(qd_pred_max.shape[0])\n', (10924, 10946), True, 'import numpy as np\n'), ((13602, 13632), 'numpy.arange', 'np.arange', (['p_pred_max.shape[0]'], {}), '(p_pred_max.shape[0])\n', (13611, 13632), True, 'import numpy as np\n'), ((15045, 15075), 'numpy.arange', 'np.arange', (['H_pred_max.shape[0]'], {}), '(H_pred_max.shape[0])\n', (15054, 15075), True, 'import numpy as np\n'), ((1190, 1210), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1201, 1210), True, 'import matplotlib.cm as cm\n'), ((1242, 1262), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1253, 1262), True, 'import matplotlib.cm as cm\n'), ((1293, 1313), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1304, 1313), True, 'import matplotlib.cm as cm\n'), ((1343, 1363), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1354, 1363), True, 'import matplotlib.cm as cm\n'), ((1397, 1417), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1408, 1417), True, 'import matplotlib.cm as cm\n'), ((1628, 1645), 'dill.load', 'pickle.load', (['file'], {}), '(file)\n', (1639, 1645), True, 'import dill as pickle\n'), ((1900, 1996), 'deep_lagrangian_networks.utils.load_dataset', 'load_dataset', ([], {'filename': '"""data/uniform_data.pickle"""', 'test_label': "['Test 0', 'Test 1', 'Test 2']"}), "(filename='data/uniform_data.pickle', test_label=['Test 0',\n 'Test 1', 'Test 2'])\n", (1912, 1996), False, 'from deep_lagrangian_networks.utils import load_dataset, init_env, activations\n'), ((2236, 2286), 'numpy.mean', 'np.mean', (["results[key]['forward_model']['xd_error']"], {}), "(results[key]['forward_model']['xd_error'])\n", (2243, 2286), True, 'import numpy as np\n'), ((2845, 2857), 'numpy.mean', 'np.mean', (['vpt'], {}), '(vpt)\n', (2852, 2857), True, 'import numpy as np\n'), ((2859, 2870), 'numpy.std', 'np.std', (['vpt'], {}), '(vpt)\n', (2865, 2870), True, 'import numpy as np\n'), ((4224, 4242), 'numpy.min', 'np.min', (['qd'], {'axis': '(0)'}), '(qd, axis=0)\n', (4230, 4242), True, 'import numpy as np\n'), ((4287, 4305), 'numpy.max', 'np.max', (['qd'], {'axis': '(0)'}), '(qd, axis=0)\n', (4293, 4305), True, 'import numpy as np\n'), ((4348, 4365), 'numpy.min', 
'np.min', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (4354, 4365), True, 'import numpy as np\n'), ((4409, 4426), 'numpy.max', 'np.max', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (4415, 4426), True, 'import numpy as np\n'), ((7956, 7978), 'numpy.min', 'np.min', (['q_pred'], {'axis': '(0)'}), '(q_pred, axis=0)\n', (7962, 7978), True, 'import numpy as np\n'), ((7980, 8005), 'numpy.median', 'np.median', (['q_pred'], {'axis': '(0)'}), '(q_pred, axis=0)\n', (7989, 8005), True, 'import numpy as np\n'), ((8007, 8029), 'numpy.max', 'np.max', (['q_pred'], {'axis': '(0)'}), '(q_pred, axis=0)\n', (8013, 8029), True, 'import numpy as np\n'), ((8079, 8102), 'numpy.min', 'np.min', (['q_error'], {'axis': '(0)'}), '(q_error, axis=0)\n', (8085, 8102), True, 'import numpy as np\n'), ((8104, 8130), 'numpy.median', 'np.median', (['q_error'], {'axis': '(0)'}), '(q_error, axis=0)\n', (8113, 8130), True, 'import numpy as np\n'), ((8132, 8155), 'numpy.max', 'np.max', (['q_error'], {'axis': '(0)'}), '(q_error, axis=0)\n', (8138, 8155), True, 'import numpy as np\n'), ((10694, 10717), 'numpy.min', 'np.min', (['qd_pred'], {'axis': '(0)'}), '(qd_pred, axis=0)\n', (10700, 10717), True, 'import numpy as np\n'), ((10719, 10745), 'numpy.median', 'np.median', (['qd_pred'], {'axis': '(0)'}), '(qd_pred, axis=0)\n', (10728, 10745), True, 'import numpy as np\n'), ((10747, 10770), 'numpy.max', 'np.max', (['qd_pred'], {'axis': '(0)'}), '(qd_pred, axis=0)\n', (10753, 10770), True, 'import numpy as np\n'), ((10823, 10847), 'numpy.min', 'np.min', (['qd_error'], {'axis': '(0)'}), '(qd_error, axis=0)\n', (10829, 10847), True, 'import numpy as np\n'), ((10849, 10876), 'numpy.median', 'np.median', (['qd_error'], {'axis': '(0)'}), '(qd_error, axis=0)\n', (10858, 10876), True, 'import numpy as np\n'), ((10878, 10902), 'numpy.max', 'np.max', (['qd_error'], {'axis': '(0)'}), '(qd_error, axis=0)\n', (10884, 10902), True, 'import numpy as np\n'), ((13390, 13412), 'numpy.min', 'np.min', (['p_pred'], {'axis': '(0)'}), '(p_pred, axis=0)\n', (13396, 13412), True, 'import numpy as np\n'), ((13414, 13439), 'numpy.median', 'np.median', (['p_pred'], {'axis': '(0)'}), '(p_pred, axis=0)\n', (13423, 13439), True, 'import numpy as np\n'), ((13441, 13463), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(0)'}), '(p_pred, axis=0)\n', (13447, 13463), True, 'import numpy as np\n'), ((13513, 13536), 'numpy.min', 'np.min', (['p_error'], {'axis': '(0)'}), '(p_error, axis=0)\n', (13519, 13536), True, 'import numpy as np\n'), ((13538, 13564), 'numpy.median', 'np.median', (['p_error'], {'axis': '(0)'}), '(p_error, axis=0)\n', (13547, 13564), True, 'import numpy as np\n'), ((13566, 13589), 'numpy.max', 'np.max', (['p_error'], {'axis': '(0)'}), '(p_error, axis=0)\n', (13572, 13589), True, 'import numpy as np\n'), ((14959, 14981), 'numpy.min', 'np.min', (['H_pred'], {'axis': '(0)'}), '(H_pred, axis=0)\n', (14965, 14981), True, 'import numpy as np\n'), ((14983, 15008), 'numpy.median', 'np.median', (['H_pred'], {'axis': '(0)'}), '(H_pred, axis=0)\n', (14992, 15008), True, 'import numpy as np\n'), ((15010, 15032), 'numpy.max', 'np.max', (['H_pred'], {'axis': '(0)'}), '(H_pred, axis=0)\n', (15016, 15032), True, 'import numpy as np\n'), ((2293, 2342), 'numpy.std', 'np.std', (["results[key]['forward_model']['xd_error']"], {}), "(results[key]['forward_model']['xd_error'])\n", (2299, 2342), True, 'import numpy as np\n'), ((2514, 2673), 'numpy.concatenate', 'np.concatenate', (["[results[key]['forward_model']['q_error'][i, divider[j]:divider[j + 1]], \n 
results[key]['forward_model']['q_error'][i, -1:] * 0.0 + 1.0]"], {}), "([results[key]['forward_model']['q_error'][i, divider[j]:\n divider[j + 1]], results[key]['forward_model']['q_error'][i, -1:] * 0.0 +\n 1.0])\n", (2528, 2673), True, 'import numpy as np\n'), ((3556, 3599), 'jax.numpy.cumsum', 'jnp.cumsum', (['dHdt[divider[i]:divider[i + 1]]'], {}), '(dHdt[divider[i]:divider[i + 1]])\n', (3566, 3599), True, 'import jax.numpy as jnp\n'), ((3987, 3998), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (3995, 3998), True, 'import numpy as np\n'), ((4058, 4069), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (4066, 4069), True, 'import numpy as np\n'), ((874, 887), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (881, 887), True, 'import numpy as np\n'), ((2810, 2827), 'numpy.array', 'np.array', (['[vpt_i]'], {}), '([vpt_i])\n', (2818, 2827), True, 'import numpy as np\n'), ((2736, 2763), 'numpy.argwhere', 'np.argwhere', (['(traj >= vpt_th)'], {}), '(traj >= vpt_th)\n', (2747, 2763), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np
import scipy.sparse
from sklearn import svm
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer
from sklearn.model_selection import cross_val_score
def zero_pivot_columns(matrix, pivots):
matrix_lil = scipy.sparse.lil_matrix(matrix, copy=True)
for pivot in pivots:
matrix_lil[:,pivot] = 0.0
return matrix_lil.tocsr()
def zero_nonpivot_columns(array, pivots):
matrix = np.matrix(array, copy=False)
matrix_return = np.matrix(np.zeros(matrix.shape))
for pivot in pivots:
matrix_return[:,pivot] += matrix[:,pivot]
return scipy.sparse.csr_matrix(matrix_return)
def remove_columns(matrix, indices):
return scipy.sparse.csr_matrix(np.delete(matrix, indices, 1))
def evaluate_and_print_scores(X_train, y_train, X_test, y_test, score_label, C, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
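    # Fit a LinearSVC on the training data, predict on the test data, and print accuracy
    # plus precision/recall/F1 for the target class (score_label).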
    preds = get_preds(X_train, y_train, X_test, C, sample_weight=sample_weight, penalty=penalty, loss=loss, dual=dual)
r = recall_score(y_test, preds, pos_label=score_label)
p = precision_score(y_test, preds, pos_label=score_label)
f1 = f1_score(y_test, preds, pos_label=score_label)
acc = accuracy_score(y_test, preds)
print("Gold has %d instances of target class" % (len(np.where(y_test == score_label)[0])))
print("System predicted %d instances of target class" % (len(np.where(preds == score_label)[0])))
print("Accuracy is %f, p/r/f1 score is %f %f %f\n" % (acc, p, r, f1))
def get_preds(X_train, y_train, X_test, C=1.0, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
svc = svm.LinearSVC(C=C, penalty=penalty, loss=loss, dual=dual)
svc.fit(X_train, y_train, sample_weight=sample_weight)
preds = svc.predict(X_test)
return preds
def get_decisions(X_train, y_train, X_test, C=1.0, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
svc = svm.LinearSVC(C=C, penalty=penalty, loss=loss, dual=dual)
svc.fit(X_train, y_train, sample_weight=sample_weight)
preds = svc.decision_function(X_test)
return preds
def get_f1(X_train, y_train, X_test, y_test, score_label, C=1.0, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
    preds = get_preds(X_train, y_train, X_test, C=C, sample_weight=sample_weight, penalty=penalty, loss=loss, dual=dual)
f1 = f1_score(y_test, preds, pos_label=score_label)
return f1
def read_pivots(pivot_file):
pivots = {}
f = open(pivot_file, 'r')
for line in f:
        pivot = int(line.strip())
pivots[pivot] = 1
    ## Before we zero out the non-pivot columns, copy them to the pivot matrix
f.close()
return pivots
def align_test_X_train(X_train, X_test):
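    # Make the test matrix's column count match the training matrix: pad missing
    # feature columns with zeros, or truncate extra ones.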
num_instances, num_feats = X_train.shape
num_test_instances, num_test_feats = X_test.shape
if num_test_feats < num_feats:
## Expand X_test
#print("Not sure I need to do anything here.")
X_test_array = X_test.toarray()
X_test = scipy.sparse.csr_matrix(np.append(X_test_array, np.zeros((num_test_instances, num_feats-num_test_feats)), axis=1))
elif num_test_feats > num_feats:
## Truncate X_test
X_test = X_test[:,:num_feats]
return X_test
def find_best_c(X_train, y_train, C_list = [0.01, 0.1, 1.0, 10.0], penalty='l2', dual=True, scorer=f1_score, **scorer_args):
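    # Cross-validate a LinearSVC for each candidate C and return the best C together
    # with its mean cross-validation score.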
scorer = make_scorer(scorer, **scorer_args)
best_score = 0
best_c = 0
for C in C_list:
score = np.average(cross_val_score(svm.LinearSVC(C=C, penalty=penalty, dual=dual), X_train, y_train, scoring=scorer, n_jobs=1))
if score > best_score:
best_score = score
best_c = C
return best_c, best_score
def read_feature_groups(groups_file, offset=0):
## The feature groups file unfortunately has to be adjusted here. The
## files written by cleartk are 1-indexed, but the reader that reads them
## in "helpfully" adjusts all the indices. So when we read them in we
## decrement them all.
map = {}
with open(groups_file, 'r') as f:
for line in f:
domain, indices = line.split(' : ')
map[domain] = [int(f)+offset for f in indices.split(',')]
return map
def read_feature_lookup(lookup_file, offset=0):
    ## The feature lookup file unfortunately has to be adjusted here. The
## files written by cleartk are 1-indexed, but the reader that reads them
## in "helpfully" adjusts all the indices. So when we read them in we
## decrement them all.
map = {}
with open(lookup_file, 'r', encoding='utf-8') as f:
for line in f:
name, ind = line.rstrip().split(' : ')
map[int(ind)+offset] = name
## The first feature in our data is the bias feature, always set to 1:
list = ['Bias']
for i in sorted(map.keys()):
list.append(map[i])
return list
|
[
"sklearn.metrics.f1_score",
"numpy.where",
"numpy.delete",
"sklearn.svm.LinearSVC",
"sklearn.metrics.make_scorer",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.zeros",
"numpy.matrix",
"sklearn.metrics.accuracy_score"
] |
[((481, 509), 'numpy.matrix', 'np.matrix', (['array'], {'copy': '(False)'}), '(array, copy=False)\n', (490, 509), True, 'import numpy as np\n'), ((1070, 1120), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (1082, 1120), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1129, 1182), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (1144, 1182), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1192, 1238), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (1200, 1238), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1249, 1278), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (1263, 1278), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1676, 1733), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'loss': 'loss', 'dual': 'dual'}), '(C=C, penalty=penalty, loss=loss, dual=dual)\n', (1689, 1733), False, 'from sklearn import svm\n'), ((1972, 2029), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'loss': 'loss', 'dual': 'dual'}), '(C=C, penalty=penalty, loss=loss, dual=dual)\n', (1985, 2029), False, 'from sklearn import svm\n'), ((2411, 2457), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (2419, 2457), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((3426, 3460), 'sklearn.metrics.make_scorer', 'make_scorer', (['scorer'], {}), '(scorer, **scorer_args)\n', (3437, 3460), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((540, 562), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (548, 562), True, 'import numpy as np\n'), ((764, 793), 'numpy.delete', 'np.delete', (['matrix', 'indices', '(1)'], {}), '(matrix, indices, 1)\n', (773, 793), True, 'import numpy as np\n'), ((3099, 3157), 'numpy.zeros', 'np.zeros', (['(num_test_instances, num_feats - num_test_feats)'], {}), '((num_test_instances, num_feats - num_test_feats))\n', (3107, 3157), True, 'import numpy as np\n'), ((3559, 3605), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'dual': 'dual'}), '(C=C, penalty=penalty, dual=dual)\n', (3572, 3605), False, 'from sklearn import svm\n'), ((1336, 1367), 'numpy.where', 'np.where', (['(y_test == score_label)'], {}), '(y_test == score_label)\n', (1344, 1367), True, 'import numpy as np\n'), ((1439, 1469), 'numpy.where', 'np.where', (['(preds == score_label)'], {}), '(preds == score_label)\n', (1447, 1469), True, 'import numpy as np\n')]
|
import numpy as np
from algorithm.base import Algorithm
class Greedy(Algorithm):
def __init__(self, knapsack):
assert isinstance(knapsack, dict)
self.capacity = knapsack['capacity'][0]
self.weights = knapsack['weights']
self.profits = knapsack['profits']
self.n = len(knapsack['weights'])
@property
def name(self):
return 'Greedy'
def solve(self):
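        # Greedy heuristic: sort items by profit/weight ratio and take each item
        # that still fits in the remaining capacity.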
value = [(x[0], x[2] / x[1]) for x in zip(np.arange(self.n),
self.weights,
self.profits)]
value = sorted(value, key=lambda x: x[1], reverse=True)
cur_weight = 0
optim_set = np.zeros(self.n, dtype=np.int64)
for v in value:
if cur_weight + self.weights[v[0]] <= self.capacity:
optim_set[v[0]] = 1
cur_weight += self.weights[v[0]]
else:
continue
return optim_set.tolist()
|
[
"numpy.zeros",
"numpy.arange"
] |
[((736, 768), 'numpy.zeros', 'np.zeros', (['self.n'], {'dtype': 'np.int64'}), '(self.n, dtype=np.int64)\n', (744, 768), True, 'import numpy as np\n'), ((479, 496), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (488, 496), True, 'import numpy as np\n')]
|
# script to test the parallelized gradient / divergence from pymirc
import numpy as np
import pymirc.image_operations as pi
# seed the random generator
np.random.seed(1)
# create a random 3D/4D image
shape = (6,200,190,180)
# create random array and pad with 0s
x = np.pad(np.random.rand(*shape), 1)
# allocate array for the gradient
grad_x = np.zeros((x.ndim,) + x.shape, dtype = x.dtype)
# calculate the gradient
pi.grad(x, grad_x)
# setup random array in gradient space
y = np.random.rand(*grad_x.shape)
# calculate the divergence
div_y = pi.div(y)
# check if operators are adjoint
print(-(x*div_y).sum() / (grad_x*y).sum())
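# If grad and -div are adjoint, <grad x, y> equals -<x, div y>, so the printed ratio should be close to 1.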
|
[
"numpy.random.rand",
"pymirc.image_operations.grad",
"pymirc.image_operations.div",
"numpy.zeros",
"numpy.random.seed"
] |
[((154, 171), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (168, 171), True, 'import numpy as np\n'), ((348, 392), 'numpy.zeros', 'np.zeros', (['((x.ndim,) + x.shape)'], {'dtype': 'x.dtype'}), '((x.ndim,) + x.shape, dtype=x.dtype)\n', (356, 392), True, 'import numpy as np\n'), ((421, 439), 'pymirc.image_operations.grad', 'pi.grad', (['x', 'grad_x'], {}), '(x, grad_x)\n', (428, 439), True, 'import pymirc.image_operations as pi\n'), ((485, 514), 'numpy.random.rand', 'np.random.rand', (['*grad_x.shape'], {}), '(*grad_x.shape)\n', (499, 514), True, 'import numpy as np\n'), ((547, 556), 'pymirc.image_operations.div', 'pi.div', (['y'], {}), '(y)\n', (553, 556), True, 'import pymirc.image_operations as pi\n'), ((277, 299), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (291, 299), True, 'import numpy as np\n')]
|
import numpy as np
class Grid:
def __init__(self, width, heigth, discount = 0.9):
self.width = width
self.heigth = heigth
self.x_pos = 0
self.y_pos = 0
self.values = np.zeros((heigth, width))
self.discount = discount
self.vertex_sources = []
self.vertex_dests = []
self.vertex_values = []
def init_rewards(self, rewards):
assert rewards.shape[0] == self.heigth and rewards.shape[1]==self.width, "reward initialized is not valid"
self.rewards = rewards
def add_vertex(self, source, dest):
assert len(source) == 2 and len(dest) == 2, "source or dest is not valid"
self.vertex_sources.append(source)
self.vertex_dests.append(dest)
def update(self):
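        # One synchronous sweep: recompute every cell from its reward plus the discounted
        # value of its successor states (uniform random moves; teleport cells jump to their
        # vertex destination; off-grid moves incur a -1 penalty and stay in place).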
next_values = np.zeros((self.heigth, self.width))
for x in range(self.width):
for y in range(self.heigth):
if [y, x] in self.vertex_sources:
for vertex_source, vertex_dest in zip(self.vertex_sources, self.vertex_dests):
if [y, x] == vertex_source:
next_values[y, x] += self.rewards[y,x] + self.discount*self.values[vertex_dest[0], vertex_dest[1]]
break
else:
for cur_movement, cur_prob in zip([[-1, 0], [0, 1], [1, 0], [0, -1]], [0.25, 0.25, 0.25, 0.25]):
next_place = [y+cur_movement[0], x+cur_movement[1]]
if 0<=next_place[0]<self.heigth and 0<=next_place[1]<self.width:
next_values[y, x] += cur_prob*(self.rewards[y,x] + self.discount*self.values[next_place[0], next_place[1]])
else:
next_values[y, x] += cur_prob*(-1+self.discount*self.values[y, x])
print('-'*20)
print (next_values)
self.values = next_values
def policy(self):
movement_x = -1
movement_y = -1
if np.random.rand()> 0.5:
movement_x = 1
if np.random.rand()>0.5:
movement_y = 1
if [self.x_pos, self.y_pos] in self.vertex_sources:
for vertex_source, vertex_dest in zip(self.vertex_sources, self.vertex_dests):
if vertex_source == [self.x_pos, self.y_pos]:
self.x_pos = vertex_dest[0]
self.y_pos = vertex_dest[1]
else:
if 0<=self.x_pos+movement_x<self.heigth:
self.x_pos+=movement_x
grid_world = Grid(5, 5, discount = 0.9)
reward = np.zeros((5, 5))
grid_world.add_vertex([0, 1], [4, 1])
reward[0, 1] = 10
grid_world.add_vertex([0, 3], [2, 3])
reward[0, 3] = 5
grid_world.init_rewards(reward)
print(grid_world.values)
for i in range(50):
print('iter: {}'.format(i))
grid_world.update()
|
[
"numpy.zeros",
"numpy.random.rand"
] |
[((2602, 2618), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (2610, 2618), True, 'import numpy as np\n'), ((211, 236), 'numpy.zeros', 'np.zeros', (['(heigth, width)'], {}), '((heigth, width))\n', (219, 236), True, 'import numpy as np\n'), ((808, 843), 'numpy.zeros', 'np.zeros', (['(self.heigth, self.width)'], {}), '((self.heigth, self.width))\n', (816, 843), True, 'import numpy as np\n'), ((2014, 2030), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2028, 2030), True, 'import numpy as np\n'), ((2075, 2091), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2089, 2091), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
#----------------------------------------------------------------------------
# Encoder network.
# Extracts features from the content and style images.
# Uses a VGG19 network as the feature extractor.
ENCODER_LAYERS = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1'
)
FEATURE_LAYERS = (
'relu1_1', 'relu2_1', 'relu3_1', 'relu4_1'
)
class Encoder(object):
def __init__(self, weights_path):
# load weights (kernel and bias) from npz file
weights = np.load(weights_path)
idx = 0
self.weight_vars = []
# create the TensorFlow variables
with tf.variable_scope('encoder'):
for layer in ENCODER_LAYERS:
kind = layer[:4]
if kind == 'conv':
kernel = weights['arr_%d' % idx].transpose([2, 3, 1, 0])
bias = weights['arr_%d' % (idx + 1)]
kernel = kernel.astype(np.float32)
bias = bias.astype(np.float32)
idx += 2
with tf.variable_scope(layer):
W = tf.Variable(kernel, trainable=False, name='kernel')
b = tf.Variable(bias, trainable=False, name='bias')
self.weight_vars.append((W, b))
def encode(self, image, img_type = 'style'):
# create the computational graph
idx = 0
layers = {}
current = image
for layer in ENCODER_LAYERS:
kind = layer[:4]
if kind == 'conv':
kernel, bias = self.weight_vars[idx]
idx += 1
current = conv2d(current, kernel, bias)
elif kind == 'relu':
current = tf.nn.relu(current)
elif kind == 'pool':
current = pool2d(current)
if layer in FEATURE_LAYERS:
layers[layer] = current
assert(len(layers) == len(FEATURE_LAYERS))
enc = layers[FEATURE_LAYERS[-1]]
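        # Style code: average the deepest feature map over its spatial dimensions;
        # content code: 8x8 average pooling, transposed to NCHW.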
if img_type == 'style':
latent_code = tf.reduce_mean(enc, axis=[1,2])
elif img_type == 'content':
latent_code = tf.nn.avg_pool(enc, ksize=[1,8,8,1], strides=[1,8,8,1], padding='SAME')
latent_code = tf.transpose(latent_code, [0, 3, 1, 2])
else:
            raise ValueError("img_type must be 'style' or 'content'")
init = (tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init)
latent_code = latent_code.eval()
return latent_code, layers
def preprocess(self, image, mode='BGR'):
if mode == 'BGR':
return image - np.array([103.939, 116.779, 123.68])
else:
return image - np.array([123.68, 116.779, 103.939])
def deprocess(self, image, mode='BGR'):
if mode == 'BGR':
return image + np.array([103.939, 116.779, 123.68])
else:
return image + np.array([123.68, 116.779, 103.939])
def conv2d(x, kernel, bias):
# padding image with reflection mode
x_padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
# conv and add bias
out = tf.nn.conv2d(x_padded, kernel, strides=[1, 1, 1, 1], padding='VALID')
out = tf.nn.bias_add(out, bias)
return out
def pool2d(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
|
[
"tensorflow.nn.conv2d",
"tensorflow.local_variables_initializer",
"tensorflow.nn.max_pool",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.transpose",
"tensorflow.nn.relu",
"tensorflow.nn.avg_pool",
"tensorflow.Session",
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.reduce_mean",
"numpy.load",
"tensorflow.nn.bias_add"
] |
[((3289, 3348), 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [1, 1], [1, 1], [0, 0]]'], {'mode': '"""REFLECT"""'}), "(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')\n", (3295, 3348), True, 'import tensorflow as tf\n'), ((3384, 3453), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_padded', 'kernel'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(x_padded, kernel, strides=[1, 1, 1, 1], padding='VALID')\n", (3396, 3453), True, 'import tensorflow as tf\n'), ((3464, 3489), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['out', 'bias'], {}), '(out, bias)\n', (3478, 3489), True, 'import tensorflow as tf\n'), ((3534, 3609), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3548, 3609), True, 'import tensorflow as tf\n'), ((707, 728), 'numpy.load', 'np.load', (['weights_path'], {}), '(weights_path)\n', (714, 728), True, 'import numpy as np\n'), ((832, 860), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (849, 860), True, 'import tensorflow as tf\n'), ((2282, 2314), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['enc'], {'axis': '[1, 2]'}), '(enc, axis=[1, 2])\n', (2296, 2314), True, 'import tensorflow as tf\n'), ((2563, 2596), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2594, 2596), True, 'import tensorflow as tf\n'), ((2598, 2630), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2628, 2630), True, 'import tensorflow as tf\n'), ((2645, 2657), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2655, 2657), True, 'import tensorflow as tf\n'), ((2376, 2453), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['enc'], {'ksize': '[1, 8, 8, 1]', 'strides': '[1, 8, 8, 1]', 'padding': '"""SAME"""'}), "(enc, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')\n", (2390, 2453), True, 'import tensorflow as tf\n'), ((2474, 2513), 'tensorflow.transpose', 'tf.transpose', (['latent_code', '[0, 3, 1, 2]'], {}), '(latent_code, [0, 3, 1, 2])\n', (2486, 2513), True, 'import tensorflow as tf\n'), ((2874, 2910), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (2882, 2910), True, 'import numpy as np\n'), ((2952, 2988), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (2960, 2988), True, 'import numpy as np\n'), ((3087, 3123), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (3095, 3123), True, 'import numpy as np\n'), ((3165, 3201), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (3173, 3201), True, 'import numpy as np\n'), ((1952, 1971), 'tensorflow.nn.relu', 'tf.nn.relu', (['current'], {}), '(current)\n', (1962, 1971), True, 'import tensorflow as tf\n'), ((1271, 1295), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer'], {}), '(layer)\n', (1288, 1295), True, 'import tensorflow as tf\n'), ((1325, 1376), 'tensorflow.Variable', 'tf.Variable', (['kernel'], {'trainable': '(False)', 'name': '"""kernel"""'}), "(kernel, trainable=False, name='kernel')\n", (1336, 1376), True, 'import tensorflow as tf\n'), ((1405, 1452), 'tensorflow.Variable', 'tf.Variable', (['bias'], {'trainable': '(False)', 'name': '"""bias"""'}), "(bias, trainable=False, name='bias')\n", (1416, 1452), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'newGui.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets ,QtPrintSupport
from pyqtgraph import PlotWidget ,PlotItem
import os
import pathlib
import pyqtgraph as pg
import pandas as pd
import numpy as np
import sys
import random
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class Ui_MainWindow(QtWidgets.QMainWindow):
signals = []
timer = []
data = []
n = []
nn = []
data_line = []
r = [1200,1200,1200]
z = [1,1,1]
spectrogram = []
checkBox = []
counter = -1
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1010, 878)
mW = QtGui.QIcon("Mw.png")
MainWindow.setWindowIcon(mW)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
for i in range(0,3):
self.signals.append( PlotWidget(self.centralwidget))
self.spectrogram.append( QtWidgets.QLabel(self.centralwidget))
self.checkBox.append(QtWidgets.QCheckBox(self.centralwidget))
if i == 0:
self.signals[i].setGeometry(QtCore.QRect(20, 90, 461, 192))
self.signals[i].setObjectName("signal_1")
self.spectrogram[i].setGeometry(QtCore.QRect(490, 90, 471, 192))
self.spectrogram[i].setObjectName("spectro_1")
self.checkBox[i].setGeometry(QtCore.QRect(20, 50, 68, 20))
self.checkBox[i].setObjectName("check_1")
elif i == 1:
self.signals[i].setGeometry(QtCore.QRect(20, 340, 461, 192))
self.signals[i].setObjectName("signal_2")
self.spectrogram[i].setGeometry(QtCore.QRect(490, 340, 471, 192))
self.spectrogram[i].setObjectName("spectro_2")
self.checkBox[i].setGeometry(QtCore.QRect(20, 300, 68, 20))
self.checkBox[i].setObjectName("check_2")
else:
self.signals[i].setGeometry(QtCore.QRect(20, 600, 461, 192))
self.signals[i].setObjectName("signal_3")
self.spectrogram[i].setGeometry(QtCore.QRect(490, 600, 471, 192))
self.spectrogram[i].setObjectName("spectro_3")
self.checkBox[i].setGeometry(QtCore.QRect(20, 560, 68, 20))
self.checkBox[i].setObjectName("check_3")
self.signals[i].setStyleSheet("background-color:rgb(0, 0, 0);")
self.signals[i].setRubberBandSelectionMode(QtCore.Qt.IntersectsItemBoundingRect)
self.signals[i].plotItem.showGrid(x=True, y=True )
self.signals[i].plotItem.setMenuEnabled(False)
self.checkBox[i].setStyleSheet("font: 10pt \"MS Shell Dlg 2\";")
self.spectrogram[i].setScaledContents(True)
self.open = QtWidgets.QPushButton(self.centralwidget)
self.open.setGeometry(QtCore.QRect(0, 1, 35, 35))
self.open.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("img/open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.open.setIcon(icon3)
self.open.setObjectName("open")
self.save = QtWidgets.QPushButton(self.centralwidget)
self.save.setGeometry(QtCore.QRect(30, 1, 35, 35))
self.save.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("img/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.save.setIcon(icon2)
self.save.setObjectName("save")
self.Zoom_in = QtWidgets.QPushButton(self.centralwidget)
self.Zoom_in.setGeometry(QtCore.QRect(60, 1, 35, 35))
self.Zoom_in.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("img/zoom-in.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Zoom_in.setIcon(icon)
self.Zoom_in.setObjectName("Zoom_in")
self.zoom_out = QtWidgets.QPushButton(self.centralwidget)
self.zoom_out.setGeometry(QtCore.QRect(90, 1, 35, 35))
self.zoom_out.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("img/zoom-out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zoom_out.setIcon(icon1)
self.zoom_out.setObjectName("zoom_out")
self.left = QtWidgets.QPushButton(self.centralwidget)
self.left.setGeometry(QtCore.QRect(120, 1, 35, 35))
self.left.setText("")
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("img/previous.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.left.setIcon(icon7)
self.left.setObjectName("left")
self.play = QtWidgets.QPushButton(self.centralwidget)
self.play.setGeometry(QtCore.QRect(150, 1, 35, 35))
self.play.setText("")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("img/play.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.play.setIcon(icon5)
self.play.setObjectName("play")
self.right = QtWidgets.QPushButton(self.centralwidget)
self.right.setGeometry(QtCore.QRect(180, 1, 35, 35))
self.right.setText("")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("img/next.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.right.setIcon(icon6)
self.right.setObjectName("right")
self.pause = QtWidgets.QPushButton(self.centralwidget)
self.pause.setGeometry(QtCore.QRect(210, 1, 35, 35))
self.pause.setText("")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("img/pause.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pause.setIcon(icon4)
self.pause.setObjectName("pause")
self.spec = QtWidgets.QPushButton(self.centralwidget)
self.spec.setGeometry(QtCore.QRect(240, 1, 35, 35))
self.spec.setText("")
icon20 = QtGui.QIcon()
icon20.addPixmap(QtGui.QPixmap("img/spec3.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.spec.setIcon(icon20)
self.spec.setObjectName("spec")
self.Zoom_in.raise_()
self.signals[0].raise_()
self.checkBox[1].raise_()
self.spectrogram[1].raise_()
self.spectrogram[2].raise_()
self.checkBox[2].raise_()
self.spectrogram[0].raise_()
self.signals[1].raise_()
self.signals[2].raise_()
self.checkBox[0].raise_()
self.zoom_out.raise_()
self.save.raise_()
self.open.raise_()
self.pause.raise_()
self.play.raise_()
self.right.raise_()
self.left.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1010, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuEdit = QtWidgets.QMenu(self.menubar)
self.menuEdit.setObjectName("menuEdit")
self.menuSignal_tools = QtWidgets.QMenu(self.menubar)
self.menuSignal_tools.setObjectName("menuSignal_tools")
self.menuPlay_navigate = QtWidgets.QMenu(self.menubar)
self.menuPlay_navigate.setObjectName("menuPlay_navigate")
MainWindow.setMenuBar(self.menubar)
self.actionOpen = QtWidgets.QAction(MainWindow)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap("search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen.setIcon(icon9)
self.actionOpen.setObjectName("actionOpen")
self.actionzoom_in = QtWidgets.QAction(MainWindow)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap("zoom-in_1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionzoom_in.setIcon(icon10)
self.actionzoom_in.setObjectName("actionzoom_in")
self.actionzoom_out = QtWidgets.QAction(MainWindow)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap("zoom-out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionzoom_out.setIcon(icon11)
self.actionzoom_out.setObjectName("actionzoom_out")
self.actionSpectrogram = QtWidgets.QAction(MainWindow)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap("sound.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSpectrogram.setIcon(icon12)
self.actionSpectrogram.setObjectName("actionSpectrogram")
self.actionPlay = QtWidgets.QAction(MainWindow)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap("play-button.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionPlay.setIcon(icon13)
self.actionPlay.setObjectName("actionPlay")
self.actionPause = QtWidgets.QAction(MainWindow)
icon14 = QtGui.QIcon()
icon14.addPixmap(QtGui.QPixmap("pause-button.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionPause.setIcon(icon14)
self.actionPause.setObjectName("actionPause")
self.actionBackward = QtWidgets.QAction(MainWindow)
icon16 = QtGui.QIcon()
icon16.addPixmap(QtGui.QPixmap("backward.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionBackward.setIcon(icon16)
self.actionBackward.setObjectName("actionBackward")
self.actionForward = QtWidgets.QAction(MainWindow)
icon17 = QtGui.QIcon()
icon17.addPixmap(QtGui.QPixmap("forward.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionForward.setIcon(icon17)
self.actionForward.setObjectName("actionForward")
self.actionSave_as_pdf = QtWidgets.QAction(MainWindow)
icon18 = QtGui.QIcon()
icon18.addPixmap(QtGui.QPixmap("pdf-file.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave_as_pdf.setIcon(icon18)
self.actionSave_as_pdf.setObjectName("actionSave_as_pdf")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave_as_pdf)
self.menuEdit.addAction(self.actionzoom_in)
self.menuEdit.addAction(self.actionzoom_out)
self.menuSignal_tools.addAction(self.actionSpectrogram)
self.menuPlay_navigate.addAction(self.actionPlay)
self.menuPlay_navigate.addAction(self.actionPause)
self.menuPlay_navigate.addSeparator()
self.menuPlay_navigate.addAction(self.actionBackward)
self.menuPlay_navigate.addAction(self.actionForward)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuPlay_navigate.menuAction())
self.menubar.addAction(self.menuSignal_tools.menuAction())
self.signals[0].hide()
self.checkBox[0].hide()
self.spectrogram[0].hide()
self.signals[1].hide()
self.checkBox[1].hide()
self.spectrogram[1].hide()
self.signals[2].hide()
self.checkBox[2].hide()
self.spectrogram[2].hide()
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.actionOpen.triggered.connect(lambda:self.opensignal())
self.actionzoom_in.triggered.connect(lambda:self.zoomin())
self.actionzoom_out.triggered.connect(lambda:self.zoomout())
self.actionSave_as_pdf.triggered.connect(lambda:self.savepdf())
self.actionBackward.triggered.connect(lambda:self.scrlleft())
self.actionForward.triggered.connect(lambda:self.scrlright())
self.actionSpectrogram.triggered.connect(lambda:self.spectro())
self.actionPlay.triggered.connect(lambda:self.playy())
self.actionPause.triggered.connect(lambda:self.pausee())
self.Zoom_in.clicked.connect(lambda:self.zoomin())
self.zoom_out.clicked.connect(lambda:self.zoomout())
self.left.clicked.connect(lambda:self.scrlleft())
self.right.clicked.connect(lambda:self.scrlright())
self.pause.clicked.connect(lambda:self.pausee())
self.play.clicked.connect(lambda:self.playy())
self.open.clicked.connect(lambda:self.opensignal())
self.save.clicked.connect(lambda:self.savepdf())
self.spec.clicked.connect(lambda:self.spectro())
def readsignal(self):
        self.fname=QtWidgets.QFileDialog.getOpenFileName(self,' txt or CSV or xls',os.getenv('home'),"xls(*.xls) ;; text(*.txt) ;; csv(*.csv)")
path=self.fname[0]
self.data.append(np.genfromtxt(path))
def opensignal(self):
self.readsignal()
self.counter+=1
self.n.append(0)
self.nn.append(0)
self.data_line.append(self.signals[self.counter % 3].plot(self.data[self.counter], name="mode2"))
self.pen = pg.mkPen(color=(255, 0, 0))
# Set timer
self.timer.append(pg.QtCore.QTimer())
        # Bind the timer's timeout signal to the matching update_data function
x = self.counter
if x%3 == 0:
self.timer[x].timeout.connect(lambda: self.update_data1(x))
self.timer[x].start(50)
if x%3 == 1:
self.timer[x].timeout.connect(lambda: self.update_data2(x))
self.timer[x].start(50)
if x%3 == 2:
self.timer[x].timeout.connect(lambda: self.update_data3(x))
self.timer[x].start(50)
        # The timer interval is 50 ms, i.e. the plotted data is refreshed every 50 ms
#self.timer1.start(50)
self.signals[x%3].show()
self.checkBox[x%3].show()
self.checkBox[x%3].setChecked(True)
    # Shift the plotted data window to the left as new samples arrive
def update_data1(self,index):
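        # Stream the signal: add 10 samples per 50 ms timer tick; after the first 1000
        # samples, scroll the visible x-range along with the data.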
if self.n[index] < len(self.data[index]) :
if self.n[index] < 1000 :
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
else :
self.nn[index] += 10
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
self.z[index] = 1
else :
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
def update_data2(self,index):
if self.n[index] < len(self.data[index]) :
if self.n[index] < 1000 :
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
else :
self.nn[index] += 10
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
self.z[index] = 1
else :
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
def update_data3(self,index):
if self.n[index] < len(self.data[index]) :
if self.n[index] < 1000 :
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
else :
self.nn[index] += 10
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
self.z[index] = 1
else :
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
def spectro(self):
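        # For each checked channel, draw a spectrogram with plt.specgram (Fs=250),
        # save it as a PNG, and display it in the matching QLabel.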
index = (len(self.data) - 1) - ((len(self.data)-1)%3)
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.spectrogram[i].show()
if i==0:
plt.specgram(self.data[index], Fs= 250 )
elif i == 1:
if (len(self.data ) - 1 - index >= 1):
plt.specgram(self.data[index + 1], Fs= 250 )
else:
plt.specgram(self.data[index - 2], Fs= 250 )
else:
if (len(self.data) - 1 - index == 2):
plt.specgram(self.data[index + 2], Fs= 250 )
else:
plt.specgram(self.data[index - 1], Fs= 250 )
plt.savefig('spectro'+str(i)+'.png', dpi=300, bbox_inches='tight')
self.spectrogram[i].setPixmap(QtGui.QPixmap('spectro'+str(i)+'.png'))
plt.close(None)
def pausee(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
if self.timer[i].isActive():
self.timer[i].stop()
def playy(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
if self.timer[i].isActive()==False:
self.timer[i].start()
def zoomin(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().scaleBy(x=0.5,y=1)
self.r[i]=self.r[i]*0.5
self.z[i] = self.z[i] * 0.5
def zoomout(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().scaleBy(x=2,y=1)
self.r[i]=self.r[i]*2
self.z[i] = self.z[i] * 2
def scrlleft(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().translateBy(x=-100,y=0)
def scrlright(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().translateBy(x=100,y=0)
#
def savepdf(self):
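        # Arrange each checked signal and its spectrogram in a 3x2 subplot grid,
        # preview it, and save the figure to a PDF chosen via a save-file dialog.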
fig=plt.figure(figsize=(1000, 1000))
index = (len(self.data) - 1) - ((len(self.data)-1)%3)
spectrogramData = []
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
if i == 0:
plt.subplot(3,2,1)
spectrogramData = list(self.data[index][0:])
plt.plot(spectrogramData,linewidth=0.5,scalex=True)
plt.subplot(3,2,2)
elif i == 1:
if (len(self.data ) - 1 - index >= 1):
plt.subplot(3,2,3)
spectrogramData = list(self.data[index+1][0:])
plt.plot(spectrogramData,linewidth=0.5,scalex=True)
plt.subplot(3,2,4)
else:
plt.subplot(3,2,3)
spectrogramData = list(self.data[index-2][0:])
plt.plot(spectrogramData,linewidth=0.5,scalex=True)
plt.subplot(3,2,4)
else:
if (len(self.data) - 1 - index == 2):
plt.subplot(3,2,5)
spectrogramData = list(self.data[index+2][0:])
plt.plot(spectrogramData,linewidth=0.5,scalex=True)
plt.subplot(3,2,6)
else:
plt.subplot(3,2,5)
spectrogramData = list(self.data[index-1][0:])
plt.plot(spectrogramData,linewidth=0.5,scalex=True)
plt.subplot(3,2,6)
plt.specgram(spectrogramData, Fs= 250)
plt.subplots_adjust(bottom=0.1,right=0.9,top=1.0)
plt.show()
fn,_=QtWidgets.QFileDialog.getSaveFileName(self,"Export PDF",None,"PDF files(.pdf);;AllFiles()")
if fn:
if QtCore.QFileInfo(fn).suffix()=="":
fn+=".pdf"
fig.savefig(fn)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.checkBox[1].setText(_translate("MainWindow", "signal-2"))
self.checkBox[1].setShortcut(_translate("MainWindow", "2"))
self.checkBox[2].setText(_translate("MainWindow", "signal-3"))
self.checkBox[2].setShortcut(_translate("MainWindow", "3"))
self.checkBox[0].setText(_translate("MainWindow", "signal-1"))
self.checkBox[0].setShortcut(_translate("MainWindow", "1"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
self.menuSignal_tools.setTitle(_translate("MainWindow", "Signal tools"))
self.menuPlay_navigate.setTitle(_translate("MainWindow", "Play and navigate "))
self.actionOpen.setText(_translate("MainWindow", "Open"))
self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+o"))
self.actionzoom_in.setText(_translate("MainWindow", "Zoom-in"))
self.actionzoom_in.setShortcut(_translate("MainWindow", "Up"))
self.actionzoom_out.setText(_translate("MainWindow", "Zoom-out"))
self.actionzoom_out.setShortcut(_translate("MainWindow", "Down"))
self.actionSpectrogram.setText(_translate("MainWindow", "Spectrogram"))
self.actionSpectrogram.setShortcut(_translate("MainWindow", "S"))
self.actionPlay.setText(_translate("MainWindow", "Play"))
self.actionPlay.setShortcut(_translate("MainWindow", "Space"))
self.actionPause.setText(_translate("MainWindow", "Pause"))
self.actionPause.setShortcut(_translate("MainWindow", "Shift+Space"))
self.actionBackward.setText(_translate("MainWindow", "Backward"))
self.actionBackward.setShortcut(_translate("MainWindow", "Left"))
self.actionForward.setText(_translate("MainWindow", "Forward"))
self.actionForward.setShortcut(_translate("MainWindow", "Right"))
self.actionSave_as_pdf.setText(_translate("MainWindow", "Save as pdf"))
self.actionSave_as_pdf.setShortcut(_translate("MainWindow", "Ctrl+S"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"PyQt5.QtGui.QIcon",
"matplotlib.pyplot.specgram",
"PyQt5.QtWidgets.QApplication",
"pyqtgraph.mkPen",
"pyqtgraph.QtCore.QTimer",
"numpy.genfromtxt",
"PyQt5.QtCore.QFileInfo",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QMainWindow",
"PyQt5.QtWidgets.QMenu",
"matplotlib.use",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QCheckBox",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"PyQt5.QtWidgets.QMenuBar",
"os.getenv",
"PyQt5.QtWidgets.QAction",
"PyQt5.QtCore.QRect",
"matplotlib.pyplot.figure",
"PyQt5.QtGui.QPixmap",
"pyqtgraph.PlotWidget",
"matplotlib.pyplot.subplot",
"PyQt5.QtWidgets.QFileDialog.getSaveFileName"
] |
[((553, 577), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (567, 577), False, 'import matplotlib\n'), ((23354, 23386), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (23376, 23386), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((23404, 23427), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (23425, 23427), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1055, 1076), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""Mw.png"""'], {}), "('Mw.png')\n", (1066, 1076), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1143, 1172), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (1160, 1172), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3264, 3305), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3285, 3305), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3410, 3423), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (3421, 3423), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3610, 3651), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3631, 3651), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3757, 3770), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (3768, 3770), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3977, 4018), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3998, 4018), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4129, 4142), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (4140, 4142), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4343, 4384), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4364, 4384), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4498, 4511), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (4509, 4511), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4714, 4755), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4735, 4755), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4862, 4875), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (4873, 4875), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5066, 5107), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5087, 5107), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5214, 5227), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (5225, 5227), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5424, 5465), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5445, 5465), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5574, 5587), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (5585, 5587), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5779, 5820), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), 
'(self.centralwidget)\n', (5800, 5820), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5929, 5942), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (5940, 5942), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6142, 6183), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6163, 6183), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6291, 6304), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (6302, 6304), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7105, 7135), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (7123, 7135), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7269, 7298), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7284, 7298), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7371, 7400), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7386, 7400), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7481, 7510), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7496, 7510), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7608, 7637), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7623, 7637), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7775, 7804), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (7792, 7804), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7821, 7834), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (7832, 7834), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8054, 8083), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8071, 8083), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8101, 8114), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8112, 8114), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8340, 8369), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8357, 8369), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8387, 8400), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8398, 8400), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8631, 8660), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8648, 8660), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8678, 8691), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8689, 8691), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8921, 8950), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8938, 8950), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8968, 8981), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8979, 8981), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9197, 9226), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (9214, 9226), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9244, 9257), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), 
'()\n', (9255, 9257), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9489, 9518), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (9506, 9518), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9536, 9549), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (9547, 9549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9776, 9805), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (9793, 9805), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9823, 9836), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (9834, 9836), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((10063, 10092), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (10080, 10092), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((10110, 10123), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (10121, 10123), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((11528, 11577), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (11565, 11577), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((13223, 13250), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(255, 0, 0)'}), '(color=(255, 0, 0))\n', (13231, 13250), True, 'import pyqtgraph as pg\n'), ((19065, 19097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1000, 1000)'}), '(figsize=(1000, 1000))\n', (19075, 19097), True, 'import matplotlib.pyplot as plt\n'), ((20768, 20819), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.1)', 'right': '(0.9)', 'top': '(1.0)'}), '(bottom=0.1, right=0.9, top=1.0)\n', (20787, 20819), True, 'import matplotlib.pyplot as plt\n'), ((20826, 20836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20834, 20836), True, 'import matplotlib.pyplot as plt\n'), ((20850, 20948), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""Export PDF"""', 'None', '"""PDF files(.pdf);;AllFiles()"""'], {}), "(self, 'Export PDF', None,\n 'PDF files(.pdf);;AllFiles()')\n", (20887, 20948), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3336, 3362), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(1)', '(35)', '(35)'], {}), '(0, 1, 35, 35)\n', (3348, 3362), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3448, 3477), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/open.png"""'], {}), "('img/open.png')\n", (3461, 3477), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3682, 3709), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(1)', '(35)', '(35)'], {}), '(30, 1, 35, 35)\n', (3694, 3709), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3795, 3824), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/save.png"""'], {}), "('img/save.png')\n", (3808, 3824), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4052, 4079), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(1)', '(35)', '(35)'], {}), '(60, 1, 35, 35)\n', (4064, 4079), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4166, 4198), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/zoom-in.png"""'], {}), "('img/zoom-in.png')\n", (4179, 4198), False, 'from PyQt5 import QtCore, 
QtGui, QtWidgets, QtPrintSupport\n'), ((4419, 4446), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(1)', '(35)', '(35)'], {}), '(90, 1, 35, 35)\n', (4431, 4446), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4536, 4569), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/zoom-out.png"""'], {}), "('img/zoom-out.png')\n", (4549, 4569), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4786, 4814), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(120)', '(1)', '(35)', '(35)'], {}), '(120, 1, 35, 35)\n', (4798, 4814), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4900, 4933), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/previous.png"""'], {}), "('img/previous.png')\n", (4913, 4933), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5138, 5166), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(1)', '(35)', '(35)'], {}), '(150, 1, 35, 35)\n', (5150, 5166), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5252, 5281), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/play.png"""'], {}), "('img/play.png')\n", (5265, 5281), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5497, 5525), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(180)', '(1)', '(35)', '(35)'], {}), '(180, 1, 35, 35)\n', (5509, 5525), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5612, 5641), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/next.png"""'], {}), "('img/next.png')\n", (5625, 5641), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5852, 5880), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(210)', '(1)', '(35)', '(35)'], {}), '(210, 1, 35, 35)\n', (5864, 5880), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5967, 5997), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/pause.png"""'], {}), "('img/pause.png')\n", (5980, 5997), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6214, 6242), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(1)', '(35)', '(35)'], {}), '(240, 1, 35, 35)\n', (6226, 6242), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6330, 6361), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/spec3.jpeg"""'], {}), "('img/spec3.jpeg')\n", (6343, 6361), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7169, 7197), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1010)', '(21)'], {}), '(0, 0, 1010, 21)\n', (7181, 7197), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7859, 7886), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""search.png"""'], {}), "('search.png')\n", (7872, 7886), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8140, 8170), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""zoom-in_1.png"""'], {}), "('zoom-in_1.png')\n", (8153, 8170), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8426, 8455), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""zoom-out.png"""'], {}), "('zoom-out.png')\n", (8439, 8455), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8717, 8743), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""sound.png"""'], {}), "('sound.png')\n", (8730, 8743), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9007, 9039), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""play-button.png"""'], {}), "('play-button.png')\n", (9020, 9039), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets, QtPrintSupport\n'), ((9283, 9316), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""pause-button.png"""'], {}), "('pause-button.png')\n", (9296, 9316), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9575, 9604), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""backward.png"""'], {}), "('backward.png')\n", (9588, 9604), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9862, 9890), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""forward.png"""'], {}), "('forward.png')\n", (9875, 9890), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((10149, 10178), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""pdf-file.png"""'], {}), "('pdf-file.png')\n", (10162, 10178), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((12832, 12849), 'os.getenv', 'os.getenv', (['"""home"""'], {}), "('home')\n", (12841, 12849), False, 'import os\n'), ((12945, 12964), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {}), '(path)\n', (12958, 12964), True, 'import numpy as np\n'), ((13298, 13316), 'pyqtgraph.QtCore.QTimer', 'pg.QtCore.QTimer', ([], {}), '()\n', (13314, 13316), True, 'import pyqtgraph as pg\n'), ((1303, 1333), 'pyqtgraph.PlotWidget', 'PlotWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1313, 1333), False, 'from pyqtgraph import PlotWidget, PlotItem\n'), ((1381, 1417), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1397, 1417), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1453, 1492), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1472, 1492), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((17690, 17705), 'matplotlib.pyplot.close', 'plt.close', (['None'], {}), '(None)\n', (17699, 17705), True, 'import matplotlib.pyplot as plt\n'), ((20713, 20750), 'matplotlib.pyplot.specgram', 'plt.specgram', (['spectrogramData'], {'Fs': '(250)'}), '(spectrogramData, Fs=250)\n', (20725, 20750), True, 'import matplotlib.pyplot as plt\n'), ((1561, 1591), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(90)', '(461)', '(192)'], {}), '(20, 90, 461, 192)\n', (1573, 1591), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1700, 1731), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(90)', '(471)', '(192)'], {}), '(490, 90, 471, 192)\n', (1712, 1731), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1842, 1870), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(50)', '(68)', '(20)'], {}), '(20, 50, 68, 20)\n', (1854, 1870), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((16962, 17000), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index]'], {'Fs': '(250)'}), '(self.data[index], Fs=250)\n', (16974, 17000), True, 'import matplotlib.pyplot as plt\n'), ((19324, 19344), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (19335, 19344), True, 'import matplotlib.pyplot as plt\n'), ((19429, 19482), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (19437, 19482), True, 'import matplotlib.pyplot as plt\n'), ((19501, 19521), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (19512, 19521), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2031), 'PyQt5.QtCore.QRect', 'QtCore.QRect', 
(['(20)', '(340)', '(461)', '(192)'], {}), '(20, 340, 461, 192)\n', (2012, 2031), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2140, 2172), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(340)', '(471)', '(192)'], {}), '(490, 340, 471, 192)\n', (2152, 2172), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2283, 2312), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(300)', '(68)', '(20)'], {}), '(20, 300, 68, 20)\n', (2295, 2312), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2435, 2466), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(600)', '(461)', '(192)'], {}), '(20, 600, 461, 192)\n', (2447, 2466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2575, 2607), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(600)', '(471)', '(192)'], {}), '(490, 600, 471, 192)\n', (2587, 2607), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2718, 2747), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(560)', '(68)', '(20)'], {}), '(20, 560, 68, 20)\n', (2730, 2747), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((20972, 20992), 'PyQt5.QtCore.QFileInfo', 'QtCore.QFileInfo', (['fn'], {}), '(fn)\n', (20988, 20992), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((17119, 17161), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index + 1]'], {'Fs': '(250)'}), '(self.data[index + 1], Fs=250)\n', (17131, 17161), True, 'import matplotlib.pyplot as plt\n'), ((17214, 17256), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index - 2]'], {'Fs': '(250)'}), '(self.data[index - 2], Fs=250)\n', (17226, 17256), True, 'import matplotlib.pyplot as plt\n'), ((17364, 17406), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index + 2]'], {'Fs': '(250)'}), '(self.data[index + 2], Fs=250)\n', (17376, 17406), True, 'import matplotlib.pyplot as plt\n'), ((17460, 17502), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index - 1]'], {'Fs': '(250)'}), '(self.data[index - 1], Fs=250)\n', (17472, 17502), True, 'import matplotlib.pyplot as plt\n'), ((19657, 19677), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (19668, 19677), True, 'import matplotlib.pyplot as plt\n'), ((19771, 19824), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (19779, 19824), True, 'import matplotlib.pyplot as plt\n'), ((19847, 19867), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (19858, 19867), True, 'import matplotlib.pyplot as plt\n'), ((19916, 19936), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (19927, 19936), True, 'import matplotlib.pyplot as plt\n'), ((20030, 20083), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (20038, 20083), True, 'import matplotlib.pyplot as plt\n'), ((20106, 20126), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (20117, 20126), True, 'import matplotlib.pyplot as plt\n'), ((20229, 20249), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (20240, 20249), True, 'import matplotlib.pyplot as plt\n'), ((20343, 20396), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], 
{'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (20351, 20396), True, 'import matplotlib.pyplot as plt\n'), ((20419, 20439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (20430, 20439), True, 'import matplotlib.pyplot as plt\n'), ((20488, 20508), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (20499, 20508), True, 'import matplotlib.pyplot as plt\n'), ((20602, 20655), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (20610, 20655), True, 'import matplotlib.pyplot as plt\n'), ((20678, 20698), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (20689, 20698), True, 'import matplotlib.pyplot as plt\n')]
|
import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import scipy.stats
import complex_pca
def plot_pca_variance_curve(x: np.ndarray, title: str = 'PCA -- Variance Explained Curve') -> None:
pca = complex_pca.ComplexPCA(n_components=x.shape[1])
pca.fit(x)
plt.figure()
plt.plot(range(1, x.shape[1] + 1), np.cumsum(pca.explained_variance_ratio_) / np.sum(pca.explained_variance_ratio_))
plt.xlabel('Number of Principal Components')
plt.ylabel('Proportion of Variance Captured')
plt.title(title)
plt.grid(True)
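# Illustrative usage (a sketch only; any (n_samples, n_features) complex matrix works,
# and main() below calls this on the measured channel gains when plot_pca is enabled):
#     plot_pca_variance_curve(he_ltf_trimmed_gain, 'HE-LTF Trimmed Gain')
#     plt.show()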
# noinspection DuplicatedCode
def main() -> None:
# data_path = r'D:\EE 364D\dataset\synthetic_data\channel_specific\train_indoor\subsampled\10_percent\train_indoor_channel_e_flat_3.h5'
data_path = r'D:\EE 364D\dataset\synthetic_data\channel_specific\test_indoor_20dB\test_indoor_20dB_channel_e_flat.h5'
constant_features_path = '../data_preprocessing/constant_features.mat'
data = h5py.File(data_path, 'r')
constant_features = scipy.io.loadmat(constant_features_path, squeeze_me=True)
constant_features = constant_features['constant']
# Number of data points to use.
n = 1
# Data and pilot extraction.
data_indices = constant_features['iMDataTone_HePpdu'][()].astype(np.int32) - 1
pilot_indices = constant_features['iMPilotTone_HePpdu'][()].astype(np.int32) - 1
data_size = 256
rx_pilot = np.array(data['rx_pilot'][0:n, :])
tx_pilot = np.array(data['tx_pilot'][0:n, :])
pilot_gain = rx_pilot / tx_pilot
rx_data = np.array(data['rx_data'][0:n, :])
tx_data = np.array(data['tx_data'][0:n, :])
data_gain = rx_data / tx_data
# L-LTF extraction.
l_ltf_size = 64
rx_l_ltf_1 = np.array(data['rx_l_ltf_1'][0:n, :])
rx_l_ltf_2 = np.array(data['rx_l_ltf_2'][0:n, :])
tx_l_ltf = constant_features['txLltfFftOut'][()]
rx_l_ltf_1_trimmed = rx_l_ltf_1[:, tx_l_ltf != 0]
rx_l_ltf_2_trimmed = rx_l_ltf_2[:, tx_l_ltf != 0]
tx_l_ltf_trimmed = tx_l_ltf[tx_l_ltf != 0]
l_ltf_1_trimmed_gain = rx_l_ltf_1_trimmed / tx_l_ltf_trimmed
l_ltf_2_trimmed_gain = rx_l_ltf_2_trimmed / tx_l_ltf_trimmed
# HE-LTF extraction.
he_ltf_data_indices = constant_features['iMDataTone_Heltf'][()].astype(np.int32) - 1
he_ltf_pilot_indices = constant_features['iMPilotTone_Heltf'][()].astype(np.int32) - 1
he_ltf_size = 256
rx_he_ltf_data = np.array(data['rx_he_ltf_data'][0:n, :])
rx_he_ltf_pilot = np.array(data['rx_he_ltf_pilot'][0:n, :])
rx_he_ltf = np.zeros((rx_he_ltf_data.shape[0], he_ltf_size), dtype=complex)
rx_he_ltf[:, he_ltf_data_indices] = rx_he_ltf_data
rx_he_ltf[:, he_ltf_pilot_indices] = rx_he_ltf_pilot
tx_he_ltf = constant_features['txHeltfFftOut'][()]
rx_he_ltf_trimmed = rx_he_ltf[:, tx_he_ltf != 0]
tx_he_ltf_trimmed = tx_he_ltf[tx_he_ltf != 0]
he_ltf_trimmed_gain = rx_he_ltf_trimmed / tx_he_ltf_trimmed
# Frequency domain.
f = np.linspace(0, 1, data_size)
f_data = f[data_indices]
f_pilot = f[pilot_indices]
f_rx_he_ltf = np.linspace(0, 1, he_ltf_size)
f_rx_he_ltf_trimmed = f_rx_he_ltf[tx_he_ltf != 0]
f_l_ltf = np.linspace(0, 1, l_ltf_size)
f_l_ltf_trimmed = f_l_ltf[tx_l_ltf != 0]
# Channel instance to use.
i = 0
# Make plots.
plot_constellation = False
plot_magnitude = True
plot_phase = True
plot_pca = False
plot_mean_magnitude = False
plot_correction_phase = False
if plot_constellation:
plt.figure()
plt.scatter(np.real(tx_he_ltf_trimmed), np.imag(tx_he_ltf_trimmed))
plt.scatter(np.real(tx_l_ltf_trimmed), np.imag(tx_l_ltf_trimmed))
plt.scatter(np.real(tx_pilot[i, :]), np.imag(tx_pilot[i, :]))
plt.xlabel('In-phase Component')
plt.ylabel('Quadrature Component')
plt.title('Transmitted Symbol Constellation')
plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])
plt.grid()
plt.figure()
plt.scatter(np.real(he_ltf_trimmed_gain[i, :]), np.imag(he_ltf_trimmed_gain[i, :]))
plt.scatter(np.real(l_ltf_1_trimmed_gain[i, :]), np.imag(l_ltf_1_trimmed_gain[i, :]))
plt.scatter(np.real(l_ltf_2_trimmed_gain[i, :]), np.imag(l_ltf_2_trimmed_gain[i, :]))
plt.scatter(np.real(pilot_gain[i, :]), np.imag(pilot_gain[i, :]))
plt.xlabel('In-phase Component')
plt.ylabel('Quadrature Component')
plt.title('Channel Gain Estimate Constellation')
plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])
plt.grid()
if plot_magnitude:
plt.figure()
plt.scatter(f_rx_he_ltf_trimmed, 20 * np.log10(np.abs(he_ltf_trimmed_gain[i, :])))
plt.scatter(f_l_ltf_trimmed, 20 * np.log10(np.abs(l_ltf_1_trimmed_gain[i, :])))
plt.scatter(f_l_ltf_trimmed, 20 * np.log10(np.abs(l_ltf_2_trimmed_gain[i, :])))
plt.scatter(f_pilot, 20 * np.log10(np.abs(pilot_gain[i, :])))
plt.scatter(f_data, 20 * np.log10(np.abs(data_gain[i, :])), marker='x')
plt.xlabel(r'$f$ (normalized)')
plt.ylabel(r'$|H|^2$ (dB)')
plt.title('Channel Gain Estimate')
plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])
plt.grid()
if plot_phase:
plt.figure()
unwrap = False
if unwrap:
plt.scatter(f_rx_he_ltf_trimmed, np.unwrap(np.angle(he_ltf_trimmed_gain[i, :])) / np.pi)
plt.scatter(f_l_ltf_trimmed, np.unwrap(np.angle(l_ltf_1_trimmed_gain[i, :])) / np.pi)
plt.scatter(f_l_ltf_trimmed, np.unwrap(np.angle(l_ltf_2_trimmed_gain[i, :])) / np.pi)
plt.scatter(f_pilot, np.unwrap(np.angle(pilot_gain[i, :])) / np.pi)
plt.scatter(f_data, np.unwrap(np.angle(data_gain[i, :])) / np.pi, marker='x')
else:
plt.scatter(f_rx_he_ltf_trimmed, np.angle(he_ltf_trimmed_gain[i, :]) / np.pi)
plt.scatter(f_l_ltf_trimmed, np.angle(l_ltf_1_trimmed_gain[i, :]) / np.pi)
plt.scatter(f_l_ltf_trimmed, np.angle(l_ltf_2_trimmed_gain[i, :]) / np.pi)
plt.scatter(f_pilot, np.angle(pilot_gain[i, :]) / np.pi)
plt.scatter(f_data, np.angle(data_gain[i, :]) / np.pi, marker='x')
plt.xlabel(r'$f$ (normalized)')
plt.ylabel(r'$\angle H$ ($\times \pi^{-1}$)')
plt.title('Channel Phase')
plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])
plt.grid()
if plot_pca:
plot_pca_variance_curve(he_ltf_trimmed_gain, 'HE-LTF Trimmed Gain')
plot_pca_variance_curve(rx_he_ltf, 'HE-LTF Raw')
plot_pca_variance_curve(l_ltf_1_trimmed_gain, 'L-LTF-1 Trimmed Gain')
plot_pca_variance_curve(rx_l_ltf_1, 'L-LTF-1 Raw')
plot_pca_variance_curve(l_ltf_2_trimmed_gain, 'L-LTF-2 Trimmed Gain')
plot_pca_variance_curve(rx_l_ltf_2, 'L-LTF-2 Raw')
plot_pca_variance_curve(rx_pilot, 'Pilot Raw')
plot_pca_variance_curve(pilot_gain, 'Pilot Gain')
plot_pca_variance_curve(np.hstack([
he_ltf_trimmed_gain,
l_ltf_1_trimmed_gain,
l_ltf_2_trimmed_gain,
pilot_gain
]), 'HE-LTF, L-LTF-1, L-LTF-2, and Pilot Trimmed Gain')
if plot_mean_magnitude:
plt.figure()
x = f_rx_he_ltf_trimmed
y = np.mean(np.abs(he_ltf_trimmed_gain), axis=0)
s = np.std(np.abs(he_ltf_trimmed_gain), axis=0)
plt.plot(x, 20 * np.log10(y))
plt.fill_between(x, 20 * np.log10(y - s), 20 * np.log10(y + s), alpha=0.5)
plt.xlabel(r'$f$ (normalized)')
plt.ylabel(r'$|H|^2$ (dB)')
plt.title('Mean Channel Gain')
plt.legend([r'$\mu$', r'$\pm\sigma$'])
plt.grid()
if plot_correction_phase:
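# The HE-LTF phase is only known on the non-zero subcarriers, so the indices come in
# runs separated by gaps. Each run is unwrapped on its own, a line is fit to each run,
# and every run is then shifted by an integer multiple of 2*pi (using the first run's
# fit as the reference) so the pieces join into one continuous phase estimate.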
index = np.arange(0, he_ltf_size)[tx_he_ltf != 0]
phase = np.angle(he_ltf_trimmed_gain[0, :])
consecutive_phase = np.split(phase, np.where(np.diff(index) != 1)[0] + 1)
consecutive_index = np.split(index, np.where(np.diff(index) != 1)[0] + 1)
consecutive_phase = [np.unwrap(x) for x in consecutive_phase]
consecutive_fits = [scipy.stats.linregress(x, y) for x, y in zip(consecutive_index, consecutive_phase)]
combined_phase = []
for x, y in zip(consecutive_index, consecutive_phase):
y_hat = x * consecutive_fits[0].slope + consecutive_fits[0].intercept
# We can add this offset WLoG because phase is 2π periodic.
offset = 2 * np.pi * np.round((y_hat - y) / (2 * np.pi))
combined_phase.append(y + offset)
combined_phase = np.hstack(combined_phase)
plt.figure()
for x, y in zip(consecutive_index, consecutive_phase):
plt.scatter(x, y / np.pi)
for fit in consecutive_fits:
x = np.linspace(0, he_ltf_size, 1000)
y = fit.slope * x + fit.intercept
plt.plot(x, y / np.pi)
plt.xlabel('Subcarrier Index')
plt.ylabel(r'$\angle H$ ($\times \pi^{-1}$)')
plt.title('HE-LTF Channel Phase Estimates')
plt.legend([f'Interval {i + 1}' for i in range(len(consecutive_index))])
plt.grid()
plt.figure()
plt.scatter(index, combined_phase / np.pi)
plt.xlabel('Subcarrier Index')
plt.ylabel(r'$\angle H$ ($\times \pi^{-1}$)')
plt.title('HE-LTF Channel Phase Combined Estimate')
plt.grid()
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.grid",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"complex_pca.ComplexPCA",
"numpy.unwrap",
"numpy.array",
"numpy.imag",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.real",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.round",
"numpy.abs",
"h5py.File",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.angle",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.cumsum"
] |
[((230, 277), 'complex_pca.ComplexPCA', 'complex_pca.ComplexPCA', ([], {'n_components': 'x.shape[1]'}), '(n_components=x.shape[1])\n', (252, 277), False, 'import complex_pca\n'), ((298, 310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (308, 310), True, 'import matplotlib.pyplot as plt\n'), ((436, 480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Principal Components"""'], {}), "('Number of Principal Components')\n", (446, 480), True, 'import matplotlib.pyplot as plt\n'), ((485, 530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of Variance Captured"""'], {}), "('Proportion of Variance Captured')\n", (495, 530), True, 'import matplotlib.pyplot as plt\n'), ((535, 551), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (544, 551), True, 'import matplotlib.pyplot as plt\n'), ((556, 570), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (564, 570), True, 'import matplotlib.pyplot as plt\n'), ((972, 997), 'h5py.File', 'h5py.File', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (981, 997), False, 'import h5py\n'), ((1419, 1453), 'numpy.array', 'np.array', (["data['rx_pilot'][0:n, :]"], {}), "(data['rx_pilot'][0:n, :])\n", (1427, 1453), True, 'import numpy as np\n'), ((1469, 1503), 'numpy.array', 'np.array', (["data['tx_pilot'][0:n, :]"], {}), "(data['tx_pilot'][0:n, :])\n", (1477, 1503), True, 'import numpy as np\n'), ((1556, 1589), 'numpy.array', 'np.array', (["data['rx_data'][0:n, :]"], {}), "(data['rx_data'][0:n, :])\n", (1564, 1589), True, 'import numpy as np\n'), ((1604, 1637), 'numpy.array', 'np.array', (["data['tx_data'][0:n, :]"], {}), "(data['tx_data'][0:n, :])\n", (1612, 1637), True, 'import numpy as np\n'), ((1735, 1771), 'numpy.array', 'np.array', (["data['rx_l_ltf_1'][0:n, :]"], {}), "(data['rx_l_ltf_1'][0:n, :])\n", (1743, 1771), True, 'import numpy as np\n'), ((1789, 1825), 'numpy.array', 'np.array', (["data['rx_l_ltf_2'][0:n, :]"], {}), "(data['rx_l_ltf_2'][0:n, :])\n", (1797, 1825), True, 'import numpy as np\n'), ((2417, 2457), 'numpy.array', 'np.array', (["data['rx_he_ltf_data'][0:n, :]"], {}), "(data['rx_he_ltf_data'][0:n, :])\n", (2425, 2457), True, 'import numpy as np\n'), ((2480, 2521), 'numpy.array', 'np.array', (["data['rx_he_ltf_pilot'][0:n, :]"], {}), "(data['rx_he_ltf_pilot'][0:n, :])\n", (2488, 2521), True, 'import numpy as np\n'), ((2538, 2601), 'numpy.zeros', 'np.zeros', (['(rx_he_ltf_data.shape[0], he_ltf_size)'], {'dtype': 'complex'}), '((rx_he_ltf_data.shape[0], he_ltf_size), dtype=complex)\n', (2546, 2601), True, 'import numpy as np\n'), ((2972, 3000), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'data_size'], {}), '(0, 1, data_size)\n', (2983, 3000), True, 'import numpy as np\n'), ((3080, 3110), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'he_ltf_size'], {}), '(0, 1, he_ltf_size)\n', (3091, 3110), True, 'import numpy as np\n'), ((3180, 3209), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'l_ltf_size'], {}), '(0, 1, l_ltf_size)\n', (3191, 3209), True, 'import numpy as np\n'), ((9387, 9397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9395, 9397), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3530), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3528, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""In-phase Component"""'], {}), "('In-phase Component')\n", (3769, 3791), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3834), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Quadrature Component"""'], {}), "('Quadrature Component')\n", (3810, 3834), True, 'import matplotlib.pyplot as plt\n'), ((3843, 3888), 'matplotlib.pyplot.title', 'plt.title', (['"""Transmitted Symbol Constellation"""'], {}), "('Transmitted Symbol Constellation')\n", (3852, 3888), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3950), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])\n", (3907, 3950), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3969), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3967, 3969), True, 'import matplotlib.pyplot as plt\n'), ((3979, 3991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3989, 3991), True, 'import matplotlib.pyplot as plt\n'), ((4354, 4386), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""In-phase Component"""'], {}), "('In-phase Component')\n", (4364, 4386), True, 'import matplotlib.pyplot as plt\n'), ((4395, 4429), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Quadrature Component"""'], {}), "('Quadrature Component')\n", (4405, 4429), True, 'import matplotlib.pyplot as plt\n'), ((4438, 4486), 'matplotlib.pyplot.title', 'plt.title', (['"""Channel Gain Estimate Constellation"""'], {}), "('Channel Gain Estimate Constellation')\n", (4447, 4486), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4548), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])\n", (4505, 4548), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4567), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4565, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4600, 4612), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4610, 4612), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5068), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ (normalized)"""'], {}), "('$f$ (normalized)')\n", (5048, 5068), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5104), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|H|^2$ (dB)"""'], {}), "('$|H|^2$ (dB)')\n", (5088, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5114, 5148), 'matplotlib.pyplot.title', 'plt.title', (['"""Channel Gain Estimate"""'], {}), "('Channel Gain Estimate')\n", (5123, 5148), True, 'import matplotlib.pyplot as plt\n'), ((5157, 5218), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])\n", (5167, 5218), True, 'import matplotlib.pyplot as plt\n'), ((5227, 5237), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5235, 5237), True, 'import matplotlib.pyplot as plt\n'), ((5266, 5278), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5276, 5278), True, 'import matplotlib.pyplot as plt\n'), ((6222, 6252), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ (normalized)"""'], {}), "('$f$ (normalized)')\n", (6232, 6252), True, 'import matplotlib.pyplot as plt\n'), ((6262, 6309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)"""'], {}), "('$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)')\n", (6272, 6309), True, 'import matplotlib.pyplot as plt\n'), ((6316, 6342), 'matplotlib.pyplot.title', 'plt.title', (['"""Channel Phase"""'], {}), "('Channel Phase')\n", (6325, 6342), True, 'import matplotlib.pyplot as plt\n'), ((6351, 6412), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 
'Pilot', 'Data']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])\n", (6361, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6421, 6431), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6429, 6431), True, 'import matplotlib.pyplot as plt\n'), ((7239, 7251), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7249, 7251), True, 'import matplotlib.pyplot as plt\n'), ((7526, 7556), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ (normalized)"""'], {}), "('$f$ (normalized)')\n", (7536, 7556), True, 'import matplotlib.pyplot as plt\n'), ((7566, 7592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|H|^2$ (dB)"""'], {}), "('$|H|^2$ (dB)')\n", (7576, 7592), True, 'import matplotlib.pyplot as plt\n'), ((7602, 7632), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Channel Gain"""'], {}), "('Mean Channel Gain')\n", (7611, 7632), True, 'import matplotlib.pyplot as plt\n'), ((7641, 7680), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\mu$', '$\\\\pm\\\\sigma$']"], {}), "(['$\\\\mu$', '$\\\\pm\\\\sigma$'])\n", (7651, 7680), True, 'import matplotlib.pyplot as plt\n'), ((7688, 7698), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7696, 7698), True, 'import matplotlib.pyplot as plt\n'), ((7804, 7839), 'numpy.angle', 'np.angle', (['he_ltf_trimmed_gain[0, :]'], {}), '(he_ltf_trimmed_gain[0, :])\n', (7812, 7839), True, 'import numpy as np\n'), ((8573, 8598), 'numpy.hstack', 'np.hstack', (['combined_phase'], {}), '(combined_phase)\n', (8582, 8598), True, 'import numpy as np\n'), ((8608, 8620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8618, 8620), True, 'import matplotlib.pyplot as plt\n'), ((8900, 8930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Subcarrier Index"""'], {}), "('Subcarrier Index')\n", (8910, 8930), True, 'import matplotlib.pyplot as plt\n'), ((8939, 8986), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)"""'], {}), "('$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)')\n", (8949, 8986), True, 'import matplotlib.pyplot as plt\n'), ((8993, 9036), 'matplotlib.pyplot.title', 'plt.title', (['"""HE-LTF Channel Phase Estimates"""'], {}), "('HE-LTF Channel Phase Estimates')\n", (9002, 9036), True, 'import matplotlib.pyplot as plt\n'), ((9126, 9136), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9134, 9136), True, 'import matplotlib.pyplot as plt\n'), ((9146, 9158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9156, 9158), True, 'import matplotlib.pyplot as plt\n'), ((9167, 9209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['index', '(combined_phase / np.pi)'], {}), '(index, combined_phase / np.pi)\n', (9178, 9209), True, 'import matplotlib.pyplot as plt\n'), ((9218, 9248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Subcarrier Index"""'], {}), "('Subcarrier Index')\n", (9228, 9248), True, 'import matplotlib.pyplot as plt\n'), ((9257, 9304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)"""'], {}), "('$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)')\n", (9267, 9304), True, 'import matplotlib.pyplot as plt\n'), ((9311, 9362), 'matplotlib.pyplot.title', 'plt.title', (['"""HE-LTF Channel Phase Combined Estimate"""'], {}), "('HE-LTF Channel Phase Combined Estimate')\n", (9320, 9362), True, 'import matplotlib.pyplot as plt\n'), ((9371, 9381), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9379, 9381), True, 'import matplotlib.pyplot as plt\n'), ((350, 390), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], 
{}), '(pca.explained_variance_ratio_)\n', (359, 390), True, 'import numpy as np\n'), ((393, 430), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (399, 430), True, 'import numpy as np\n'), ((3551, 3577), 'numpy.real', 'np.real', (['tx_he_ltf_trimmed'], {}), '(tx_he_ltf_trimmed)\n', (3558, 3577), True, 'import numpy as np\n'), ((3579, 3605), 'numpy.imag', 'np.imag', (['tx_he_ltf_trimmed'], {}), '(tx_he_ltf_trimmed)\n', (3586, 3605), True, 'import numpy as np\n'), ((3627, 3652), 'numpy.real', 'np.real', (['tx_l_ltf_trimmed'], {}), '(tx_l_ltf_trimmed)\n', (3634, 3652), True, 'import numpy as np\n'), ((3654, 3679), 'numpy.imag', 'np.imag', (['tx_l_ltf_trimmed'], {}), '(tx_l_ltf_trimmed)\n', (3661, 3679), True, 'import numpy as np\n'), ((3701, 3724), 'numpy.real', 'np.real', (['tx_pilot[i, :]'], {}), '(tx_pilot[i, :])\n', (3708, 3724), True, 'import numpy as np\n'), ((3726, 3749), 'numpy.imag', 'np.imag', (['tx_pilot[i, :]'], {}), '(tx_pilot[i, :])\n', (3733, 3749), True, 'import numpy as np\n'), ((4012, 4046), 'numpy.real', 'np.real', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (4019, 4046), True, 'import numpy as np\n'), ((4048, 4082), 'numpy.imag', 'np.imag', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (4055, 4082), True, 'import numpy as np\n'), ((4104, 4139), 'numpy.real', 'np.real', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (4111, 4139), True, 'import numpy as np\n'), ((4141, 4176), 'numpy.imag', 'np.imag', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (4148, 4176), True, 'import numpy as np\n'), ((4198, 4233), 'numpy.real', 'np.real', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (4205, 4233), True, 'import numpy as np\n'), ((4235, 4270), 'numpy.imag', 'np.imag', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (4242, 4270), True, 'import numpy as np\n'), ((4292, 4317), 'numpy.real', 'np.real', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (4299, 4317), True, 'import numpy as np\n'), ((4319, 4344), 'numpy.imag', 'np.imag', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (4326, 4344), True, 'import numpy as np\n'), ((7002, 7094), 'numpy.hstack', 'np.hstack', (['[he_ltf_trimmed_gain, l_ltf_1_trimmed_gain, l_ltf_2_trimmed_gain, pilot_gain]'], {}), '([he_ltf_trimmed_gain, l_ltf_1_trimmed_gain, l_ltf_2_trimmed_gain,\n pilot_gain])\n', (7011, 7094), True, 'import numpy as np\n'), ((7304, 7331), 'numpy.abs', 'np.abs', (['he_ltf_trimmed_gain'], {}), '(he_ltf_trimmed_gain)\n', (7310, 7331), True, 'import numpy as np\n'), ((7360, 7387), 'numpy.abs', 'np.abs', (['he_ltf_trimmed_gain'], {}), '(he_ltf_trimmed_gain)\n', (7366, 7387), True, 'import numpy as np\n'), ((7746, 7771), 'numpy.arange', 'np.arange', (['(0)', 'he_ltf_size'], {}), '(0, he_ltf_size)\n', (7755, 7771), True, 'import numpy as np\n'), ((8033, 8045), 'numpy.unwrap', 'np.unwrap', (['x'], {}), '(x)\n', (8042, 8045), True, 'import numpy as np\n'), ((8696, 8721), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', '(y / np.pi)'], {}), '(x, y / np.pi)\n', (8707, 8721), True, 'import matplotlib.pyplot as plt\n'), ((8776, 8809), 'numpy.linspace', 'np.linspace', (['(0)', 'he_ltf_size', '(1000)'], {}), '(0, he_ltf_size, 1000)\n', (8787, 8809), True, 'import numpy as np\n'), ((8868, 8890), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y / np.pi)'], {}), '(x, y / np.pi)\n', (8876, 8890), True, 'import matplotlib.pyplot as 
plt\n'), ((7422, 7433), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (7430, 7433), True, 'import numpy as np\n'), ((7468, 7483), 'numpy.log10', 'np.log10', (['(y - s)'], {}), '(y - s)\n', (7476, 7483), True, 'import numpy as np\n'), ((7490, 7505), 'numpy.log10', 'np.log10', (['(y + s)'], {}), '(y + s)\n', (7498, 7505), True, 'import numpy as np\n'), ((8465, 8500), 'numpy.round', 'np.round', (['((y_hat - y) / (2 * np.pi))'], {}), '((y_hat - y) / (2 * np.pi))\n', (8473, 8500), True, 'import numpy as np\n'), ((4668, 4701), 'numpy.abs', 'np.abs', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (4674, 4701), True, 'import numpy as np\n'), ((4755, 4789), 'numpy.abs', 'np.abs', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (4761, 4789), True, 'import numpy as np\n'), ((4843, 4877), 'numpy.abs', 'np.abs', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (4849, 4877), True, 'import numpy as np\n'), ((4923, 4947), 'numpy.abs', 'np.abs', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (4929, 4947), True, 'import numpy as np\n'), ((4992, 5015), 'numpy.abs', 'np.abs', (['data_gain[i, :]'], {}), '(data_gain[i, :])\n', (4998, 5015), True, 'import numpy as np\n'), ((5847, 5882), 'numpy.angle', 'np.angle', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (5855, 5882), True, 'import numpy as np\n'), ((5933, 5969), 'numpy.angle', 'np.angle', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (5941, 5969), True, 'import numpy as np\n'), ((6020, 6056), 'numpy.angle', 'np.angle', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (6028, 6056), True, 'import numpy as np\n'), ((6099, 6125), 'numpy.angle', 'np.angle', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (6107, 6125), True, 'import numpy as np\n'), ((6167, 6192), 'numpy.angle', 'np.angle', (['data_gain[i, :]'], {}), '(data_gain[i, :])\n', (6175, 6192), True, 'import numpy as np\n'), ((5376, 5411), 'numpy.angle', 'np.angle', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (5384, 5411), True, 'import numpy as np\n'), ((5473, 5509), 'numpy.angle', 'np.angle', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (5481, 5509), True, 'import numpy as np\n'), ((5571, 5607), 'numpy.angle', 'np.angle', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (5579, 5607), True, 'import numpy as np\n'), ((5661, 5687), 'numpy.angle', 'np.angle', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (5669, 5687), True, 'import numpy as np\n'), ((5740, 5765), 'numpy.angle', 'np.angle', (['data_gain[i, :]'], {}), '(data_gain[i, :])\n', (5748, 5765), True, 'import numpy as np\n'), ((7893, 7907), 'numpy.diff', 'np.diff', (['index'], {}), '(index)\n', (7900, 7907), True, 'import numpy as np\n'), ((7975, 7989), 'numpy.diff', 'np.diff', (['index'], {}), '(index)\n', (7982, 7989), True, 'import numpy as np\n')]
|
import os
from random import sample
import numpy as np
from numpy import cos
from scipy.linalg import lstsq
from compmech.constants import CMHOME
from compmech.logger import *
def load_c0(name, funcnum, m0, n0):
path = os.path.join(CMHOME, 'conecyl', 'imperfections', 'c0',
'c0_{0}_f{1}_m{2:03d}_n{3:03d}.txt'.format(
name, funcnum, m0, n0))
if os.path.isfile(path):
return np.loadtxt(path)
else:
raise ValueError('Coefficient file not found!')
def calc_c0(path, m0=40, n0=40, funcnum=2, sample_size=None,
maxmem=8, save=True, offset_w0=None):
r"""Find the coefficients `c_0` that best fit the `w_0` function.
The measured data will be fit using one of the following functions,
selected using the ``funcnum`` parameter:
``funcnum=1``
.. math::
w_0 = \sum_{i=1}^{m_0}{ \sum_{j=0}^{n_0}{
c_{ij}^a sin{b_x} sin{b_\theta}
+c_{ij}^b sin{b_x} cos{b_\theta}}}
``funcnum=2`` (default)
.. math::
w_0 = \sum_{i=0}^{m_0}{ \sum_{j=0}^{n_0}{
c_{ij}^a cos{b_x} sin{b_\theta}
+c_{ij}^b cos{b_x} cos{b_\theta}}}
``funcnum=3``
.. math::
w_0 = \sum_{i=0}^{m_0}{ \sum_{j=0}^{n_0}{
c_{ij}^a sin{b_x} sin{b_\theta}
+c_{ij}^b sin{b_x} cos{b_\theta}
+c_{ij}^c cos{b_x} sin{b_\theta}
+c_{ij}^d cos{b_x} cos{b_\theta}}}
where:
.. math::
b_x = i \pi \frac x L_{points}
b_\theta = j \theta
where `L_{points}` represents the difference between the maximum and
the minimum height values in the imperfection file divided by the cosine
of the semi-vertex angle:
.. math::
L_{points} = \frac{H_{max} - H_{min}}{cos(\alpha)}
= \frac{H_{points}}{cos(\alpha)}
In this form `{}^x/_{L_{points}}` will vary from `0.` (at the top)
to `1.` (at the bottom).
.. note:: Note that if the measured sample does not
cover all the height, **it will be stretched**.
The approximation can be written in matrix form as:
.. math::
w_0 = [g] \{c\}
where `[g]` carries the base functions and `{c}` the respective
amplitudes. The solution consists of finding the best `\{c\}` that minimizes
the least-square error between the measured imperfection pattern and the
`w_0` function.
Parameters
----------
path : str or numpy.ndarray
The path of the file containing the data. Can be a full path using
``r"C:\Temp\inputfile.txt"``, for example.
The input file must have 3 columns: `\theta`, `height`, `imp`;
expressed in Cartesian coordinates.
This input can also be a ``numpy.ndarray`` object, with
`\theta`, `height`, `imp` in each corresponding column.
m0 : int
Number of terms along the meridian (`x`).
n0 : int
Number of terms along the circumference (`\theta`).
funcnum : int, optional
As explained above, selects the base functions used for
the approximation.
sample_size : int or None, optional
Specifies how many points of the imperfection file should be used. If
``None`` all points will be used in the computations.
maxmem : int, optional
Maximum RAM memory in GB allowed to compute the base functions.
The ``scipy.linalg.lstsq`` call will go beyond this limit.
save : bool, optional
If ``True`` saves the calculated coefficients in the
``compmech/conecyl/imperfections/c0`` folder.
Returns
-------
out : tuple
A tuple ``(c0, residues)``: the 1-D array of best-fit coefficients and
the residues of the least-squares fit.
"""
import mgi
if isinstance(path, np.ndarray):
input_pts = path
path = 'unnamed.txt'
else:
input_pts = np.loadtxt(path)
if input_pts.shape[1] != 3:
raise ValueError('Input does not have the format: "theta, x, imp"')
log('Finding w0 coefficients for {0},\n\tusing funcnum {1}'.format(
str(os.path.basename(path)), funcnum))
if sample_size:
num = input_pts.shape[0]
if sample_size < num:
input_pts = input_pts[sample(range(num), int(sample_size))]
if funcnum==1:
size = 2
elif funcnum==2:
size = 2
elif funcnum==3:
size = 4
else:
raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
maxnum = maxmem*1024*1024*1024*8/(64*size*m0*n0)
num = input_pts.shape[0]
if num >= maxnum:
input_pts = input_pts[sample(range(num), int(maxnum))]
warn(('Reducing sample size from {0} to {1} ' +
'due to the "maxmem" specified').format(num, maxnum), level=1)
thetas = input_pts[:, 0].copy()
xs = input_pts[:, 1]
w0pts = input_pts[:, 2]
if offset_w0:
w0pts += offset_w0
# normalizing x
xs = (xs - xs.min())/(xs.max() - xs.min())
# inverting x to cope with the coordsys of the semi-analytical model
xs = 1 - xs
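# Build the design matrix [g] from the base functions; the fitted coefficient vector
# {c} then has size*m0*n0 entries (size=2 for funcnum=1 or 2, size=4 for funcnum=3),
# matching the shape check performed in fw0() below.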
a = mgi.fa(m0, n0, xs, thetas, funcnum=funcnum)
log('Base functions calculated', level=1)
try:
c0, residues, rank, s = lstsq(a, w0pts)
except MemoryError:
error('Reduce the "maxmem" parameter!')
raise  # re-raise, otherwise c0/residues would be undefined below
log('Finished scipy.linalg.lstsq', level=1)
if save:
name = '.'.join(os.path.basename(path).split('.')[0:-1])
outpath = os.path.join(CMHOME, 'conecyl', 'imperfections', 'c0',
'c0_{0}_f{1}_m{2:03d}_n{3:03d}.txt'.format(
name, funcnum, m0, n0))
np.savetxt(outpath, c0)
return c0, residues
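# Hypothetical usage sketch (the input path is the docstring's own example; xs_norm and
# ts are arrays supplied by the caller):
#     c0, residues = calc_c0(r"C:\Temp\inputfile.txt", m0=40, n0=40, funcnum=2)
#     w0 = fw0(40, 40, c0, xs_norm, ts, funcnum=2)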
def fw0(m0, n0, c0, xs_norm, ts, funcnum=2):
r"""Calculates the imperfection field `w_0` for a given input.
Parameters
----------
m0 : int
The number of terms along the meridian.
n0 : int
The number of terms along the circumference.
c0 : numpy.ndarray
The coefficients of the imperfection pattern.
xs_norm : numpy.ndarray
The meridian coordinate (`x`) normalized to be between ``0.`` and
``1.``.
ts : numpy.ndarray
The angles in radians representing the circumferential coordinate
(`\theta`).
funcnum : int, optional
The function used for the approximation (see the ``calc_c0`` function)
Notes
-----
The inputs ``xs_norm`` and ``ts`` must be of the same size.
If ``funcnum==1 or funcnum==2`` then ``size=2``, if ``funcnum==3`` then
``size=4`` and the inputs must satisfy ``c0.shape[0] == size*m0*n0``.
"""
if xs_norm.shape != ts.shape:
raise ValueError('xs_norm and ts must have the same shape')
if funcnum==1:
size = 2
elif funcnum==2:
size = 2
elif funcnum==3:
size = 4
else:
raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
if c0.shape[0] != size*m0*n0:
raise ValueError('Invalid c0 for the given m0 and n0!')
import mgi
w0s = mgi.fw0(m0, n0, c0, xs_norm.ravel(), ts.ravel(), funcnum)
return w0s.reshape(xs_norm.shape)
|
[
"scipy.linalg.lstsq",
"os.path.isfile",
"os.path.basename",
"numpy.savetxt",
"numpy.loadtxt",
"mgi.fa"
] |
[((380, 400), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (394, 400), False, 'import os\n'), ((5034, 5077), 'mgi.fa', 'mgi.fa', (['m0', 'n0', 'xs', 'thetas'], {'funcnum': 'funcnum'}), '(m0, n0, xs, thetas, funcnum=funcnum)\n', (5040, 5077), False, 'import mgi\n'), ((417, 433), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (427, 433), True, 'import numpy as np\n'), ((3839, 3855), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (3849, 3855), True, 'import numpy as np\n'), ((5165, 5180), 'scipy.linalg.lstsq', 'lstsq', (['a', 'w0pts'], {}), '(a, w0pts)\n', (5170, 5180), False, 'from scipy.linalg import lstsq\n'), ((5561, 5584), 'numpy.savetxt', 'np.savetxt', (['outpath', 'c0'], {}), '(outpath, c0)\n', (5571, 5584), True, 'import numpy as np\n'), ((4050, 4072), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4066, 4072), False, 'import os\n'), ((5339, 5361), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5355, 5361), False, 'import os\n')]
|
import numpy as np
from cost_functions import trajectory_cost_fn
import time
class Controller():
def __init__(self):
pass
# Get the appropriate action(s) for this state(s)
def get_action(self, state):
pass
class RandomController(Controller):
def __init__(self, env):
""" YOUR CODE HERE """
self.env = env
def get_action(self, state):
""" YOUR CODE HERE """
""" Your code should randomly sample an action uniformly from the action space """
return self.env.action_space.sample()
class MPCcontroller(Controller):
""" Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
def __init__(self,
env,
dyn_model,
horizon=5,
cost_fn=None,
num_simulated_paths=10,
):
self.env = env
self.dyn_model = dyn_model
self.horizon = horizon
self.cost_fn = cost_fn
self.num_simulated_paths = num_simulated_paths
def get_action(self, state):
""" YOUR CODE HERE
Note: be careful to batch your simulations through the model for speed """
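# Random-shooting MPC: draw num_simulated_paths random action sequences of length
# `horizon`, roll all of them forward in one batch through the learned dynamics model,
# score each rollout with cost_fn, and return only the first action of the cheapest one.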
observations = np.empty(
(self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0]))
next_observations = np.empty(
(self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0]))
actions = [
[self.env.action_space.sample()
for _ in range(self.horizon)]
for _ in range(self.num_simulated_paths)
]
actions = np.array(actions)
last_state = np.array([state for _ in range(self.num_simulated_paths)])
for idx in range(self.horizon):
action_batch = actions[:, idx]
next_state = self.dyn_model.predict(last_state, action_batch)
observations[:, idx, :] = last_state
next_observations[:, idx, :] = next_state
last_state = next_state
costs = np.array([trajectory_cost_fn(
self.cost_fn, observations[i], actions[i],
next_observations[i])
for i in range(self.num_simulated_paths)
])
min_cost_path_id = np.argmin(costs)
return actions[min_cost_path_id][0]
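# Hypothetical usage sketch (env, dyn_model and cost_fn are placeholders for objects
# constructed elsewhere; the numeric settings are illustrative only):
#     mpc = MPCcontroller(env, dyn_model, horizon=15, cost_fn=cost_fn,
#                         num_simulated_paths=1000)
#     action = mpc.get_action(state)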
|
[
"numpy.argmin",
"numpy.array",
"numpy.empty",
"cost_functions.trajectory_cost_fn"
] |
[((1235, 1327), 'numpy.empty', 'np.empty', (['(self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0])'], {}), '((self.num_simulated_paths, self.horizon, self.env.\n observation_space.shape[0]))\n', (1243, 1327), True, 'import numpy as np\n'), ((1364, 1456), 'numpy.empty', 'np.empty', (['(self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0])'], {}), '((self.num_simulated_paths, self.horizon, self.env.\n observation_space.shape[0]))\n', (1372, 1456), True, 'import numpy as np\n'), ((1654, 1671), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1662, 1671), True, 'import numpy as np\n'), ((2277, 2293), 'numpy.argmin', 'np.argmin', (['costs'], {}), '(costs)\n', (2286, 2293), True, 'import numpy as np\n'), ((2076, 2163), 'cost_functions.trajectory_cost_fn', 'trajectory_cost_fn', (['self.cost_fn', 'observations[i]', 'actions[i]', 'next_observations[i]'], {}), '(self.cost_fn, observations[i], actions[i],\n next_observations[i])\n', (2094, 2163), False, 'from cost_functions import trajectory_cost_fn\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 09:15:33 2020
@author: dhulls
"""
# Imports
import numpy as np
np.random.seed(100)
from tensorflow import random
random.set_seed(100)
import os
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
os.chdir('/Users/som/Dropbox/Complex_systems_RNN/DL_tutorial')
from Kij import Kij
from scipy.stats import beta
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
print(tf.__version__)
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
# from matplotlib import rc
# Import dataset
# dataset_path = '/Users/som/Dropbox/Complex_systems_RNN/Data/DL_test_data_Eq_Hazus_125.csv'
dataset_path = '/Users/som/Dropbox/Complex_systems_RNN/Data/New data for improved disf match/Eq_10IM.csv'
# column_names = ['Time','P1','P2','IM','Rec']
# dataset = pd.read_csv(dataset_path, names=column_names,
# na_values = "?", comment='\t',
# sep=" ", skipinitialspace=True)
dataset = pd.read_csv(dataset_path)
dataset.pop("IM")
dataset.pop("Time")
# dataset["IM"] = np.log(dataset["IM"])
# dataset.pop("P1")
# dataset.pop("P2")
a1 = 1.0
b1 = 1.0
loc1 = 0
sca1 = 1
def transform(Rec):
# Rec = beta.ppf(Rec,a1,b1,loc1,sca1)
# Fin = np.zeros((len(Rec),1))
# for ii in np.arange(0,len(Rec),1):
# if ((1-Rec[ii])<0.04):
# Fin[ii] = np.power((1-Rec[ii]),1/4)
# else:
# Fin[ii] = 1-Rec[ii]
# return Fin
return np.power((1-Rec),1/4)
# return (1/(1+np.exp(-(1-Rec))))
def invtransform(x):
# Fin = np.zeros(len(x))
# for ii in np.arange(0,len(x),1):
# if ((1-np.power(x[ii],4))<0.04):
# Fin[ii] = (1-np.power(x[ii],4))
# else:
# Fin[ii] = 1-x[ii]
# return Fin # (1-np.power(x,4))
return (1-np.power(x,4))
# return (1+np.log(1/(x)-1))
#beta.cdf(x,a1,b1,loc1,sca1)
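# Note (for values in [0, 1]): invtransform() undoes transform(), since
# 1 - ((1 - Rec) ** (1/4)) ** 4 == Rec, so predictions can be mapped back to the
# original recovery scale.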
dataset['Rec'] = transform(dataset['Rec'])
dataset['P1'] = transform(dataset['P1'])
dataset['P2'] = transform(dataset['P2'])
dataset.tail()
# Split the data into train and test
train_dataset = dataset.sample(frac=1.0,random_state=100)
test_dataset = dataset.drop(train_dataset.index)
# Inspect the data
# sns.pairplot(train_dataset[["Rec", "P1", "P2", "Time"]], diag_kind="kde")
# sns.pairplot(train_dataset[["Rec", "IM"]], diag_kind="kde")
train_stats = train_dataset.describe()
train_stats.pop("Rec")
train_stats = train_stats.transpose()
train_stats
# Split features from labels
train_labels = train_dataset.pop('Rec')
test_labels = test_dataset.pop('Rec')
# Normalize the data
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
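# Note: the same training-set mean/std are reused for any new inputs (see normed_data1
# further down), so the network always receives features on the scale it was fit on.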
# Build the model (kernel_regularizer='l2' could optionally be passed to the Dense layers)
def build_model():
model = keras.Sequential([
layers.Dense(10, activation='softmax', input_shape=[len(train_dataset.keys())],bias_initializer='zeros'),
layers.Dense(10, activation='softmax',bias_initializer='zeros'),
layers.Dense(1,bias_initializer='zeros')
])
# optimizer = tf.keras.optimizers.RMSprop(0.001)
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
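# Sketch of the compiled network: two 10-unit hidden layers on top of the normalized
# P1/P2 features and a single linear output for the transformed recovery value,
# trained with RMSprop on mean-squared error.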
model = build_model()
# Inspect the model
model.summary()
# example_batch = normed_train_data[:10]
# example_result = model.predict(example_batch)
# example_result
# Train the model
EPOCHS = 3000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.0, verbose=0,
callbacks=[tfdocs.modeling.EpochDots()],shuffle = False)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
## Verify model for multiple recovery curves
dataset_path1 = '/Users/som/Dropbox/Complex_systems_RNN/Data/New data for sequence/DL_verify_EQ_0_7.csv'
data1 = pd.read_csv(dataset_path1)
# data1["IM"] = np.log(data1["IM"])
Time1 = data1.pop("Time")
# data1.pop("P1")
# data1.pop("P2")
data1['Rec'] = transform(data1['Rec'])
data1['P1'] = transform(data1['P1'])
data1['P2'] = transform(data1['P2'])
data1_labels = data1.pop('Rec')
normed_data1 = norm(data1)
data1_pred = model.predict(normed_data1).flatten()
|
[
"tensorflow.random.set_seed",
"pandas.read_csv",
"numpy.power",
"tensorflow_docs.modeling.EpochDots",
"os.chdir",
"tensorflow.keras.layers.Dense",
"numpy.random.seed",
"pandas.DataFrame",
"tensorflow.keras.optimizers.RMSprop"
] |
[((138, 157), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (152, 157), True, 'import numpy as np\n'), ((188, 208), 'tensorflow.random.set_seed', 'random.set_seed', (['(100)'], {}), '(100)\n', (203, 208), False, 'from tensorflow import random\n'), ((309, 371), 'os.chdir', 'os.chdir', (['"""/Users/som/Dropbox/Complex_systems_RNN/DL_tutorial"""'], {}), "('/Users/som/Dropbox/Complex_systems_RNN/DL_tutorial')\n", (317, 371), False, 'import os\n'), ((1143, 1168), 'pandas.read_csv', 'pd.read_csv', (['dataset_path'], {}), '(dataset_path)\n', (1154, 1168), True, 'import pandas as pd\n'), ((3909, 3938), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (3921, 3938), True, 'import pandas as pd\n'), ((4142, 4168), 'pandas.read_csv', 'pd.read_csv', (['dataset_path1'], {}), '(dataset_path1)\n', (4153, 4168), True, 'import pandas as pd\n'), ((1623, 1647), 'numpy.power', 'np.power', (['(1 - Rec)', '(1 / 4)'], {}), '(1 - Rec, 1 / 4)\n', (1631, 1647), True, 'import numpy as np\n'), ((3335, 3369), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', (['(0.001)'], {}), '(0.001)\n', (3362, 3369), True, 'import tensorflow as tf\n'), ((1961, 1975), 'numpy.power', 'np.power', (['x', '(4)'], {}), '(x, 4)\n', (1969, 1975), True, 'import numpy as np\n'), ((3114, 3178), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'bias_initializer': '"""zeros"""'}), "(10, activation='softmax', bias_initializer='zeros')\n", (3126, 3178), False, 'from tensorflow.keras import layers\n'), ((3191, 3232), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'bias_initializer': '"""zeros"""'}), "(1, bias_initializer='zeros')\n", (3203, 3232), False, 'from tensorflow.keras import layers\n'), ((3855, 3882), 'tensorflow_docs.modeling.EpochDots', 'tfdocs.modeling.EpochDots', ([], {}), '()\n', (3880, 3882), True, 'import tensorflow_docs as tfdocs\n')]
|
from abc import ABC, abstractmethod
from typing import Callable, cast, Set, List, Dict, Optional
import numpy as np
from autofit import ModelInstance, Analysis, DirectoryPaths
from autofit.graphical.expectation_propagation import AbstractFactorOptimiser
from autofit.graphical.expectation_propagation import EPMeanField
from autofit.graphical.expectation_propagation import EPOptimiser
from autofit.graphical.factor_graphs.factor import Factor
from autofit.graphical.factor_graphs.graph import FactorGraph
from autofit.graphical.messages import NormalMessage
from autofit.mapper.prior.prior import Prior
from autofit.mapper.prior_model.collection import CollectionPriorModel
from autofit.mapper.prior_model.prior_model import PriorModel, AbstractPriorModel
class AbstractModelFactor(Analysis, ABC):
@property
@abstractmethod
def model_factors(self) -> List["ModelFactor"]:
"""
A list of factors that comprise a PriorModel and corresponding fitness function
"""
def freeze(self):
for model_factor in self.model_factors:
model_factor.freeze()
@property
def priors(self) -> Set[Prior]:
"""
A set of all priors encompassed by the contained likelihood models
"""
return {
prior
for model
in self.model_factors
for prior
in model.prior_model.priors
}
@property
def prior_factors(self) -> List[Factor]:
"""
A list of factors that act as priors on latent variables. One factor exists
for each unique prior.
"""
return [
Factor(
cast(
Callable,
prior
),
x=prior
)
for prior
in self.priors
]
@property
def message_dict(self) -> Dict[Prior, NormalMessage]:
"""
Dictionary mapping priors to messages.
TODO: should support more than just GaussianPriors/NormalMessages
"""
return {
prior: NormalMessage.from_prior(
prior
)
for prior
in self.priors
}
@property
def graph(self) -> FactorGraph:
"""
The complete graph made by combining all factors and priors
"""
return cast(
FactorGraph,
np.prod(
[
model
for model
in self.model_factors
] + self.prior_factors
)
)
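# np.prod folds the list with the factors' own multiplication operator, which composes
# the model factors and prior factors into a single FactorGraph over their shared
# variables (hence the cast above).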
def mean_field_approximation(self) -> EPMeanField:
"""
Returns a EPMeanField of the factor graph
"""
return EPMeanField.from_approx_dists(
self.graph,
self.message_dict
)
def _make_ep_optimiser(
self,
optimiser: AbstractFactorOptimiser
) -> EPOptimiser:
return EPOptimiser(
self.graph,
default_optimiser=optimiser,
factor_optimisers={
factor: factor.optimiser
for factor in self.model_factors
if factor.optimiser is not None
}
)
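# optimise() below: freeze the prior models, build an EPOptimiser over the factor graph
# (honouring any per-factor optimiser), run expectation propagation starting from the
# mean-field approximation, and convert the optimised mean field back into priors on a
# CollectionPriorModel.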
def optimise(
self,
optimiser:
AbstractFactorOptimiser
) -> CollectionPriorModel:
"""
Use an EP Optimiser to optimise the graph associated with this collection
of factors and create a Collection to represent the results.
Parameters
----------
optimiser
An optimiser that acts on graphs
Returns
-------
A collection of prior models
"""
self.freeze()
opt = self._make_ep_optimiser(
optimiser
)
updated_model = opt.run(
self.mean_field_approximation()
)
collection = CollectionPriorModel([
factor.prior_model
for factor
in self.model_factors
])
arguments = {
prior: updated_model.mean_field[
prior
].as_prior()
for prior
in collection.priors
}
return collection.gaussian_prior_model_for_arguments(
arguments
)
def visualize(
self,
paths: DirectoryPaths,
instance: ModelInstance,
during_analysis: bool
):
"""
Visualise the instances provided using each factor.
Instances in the ModelInstance must have the same order as the factors.
Parameters
----------
paths
Object describing where data should be saved to
instance
A collection of instances, each corresponding to a factor
during_analysis
Is this visualisation during analysis?
"""
for model_factor, instance in zip(
self.model_factors,
instance
):
model_factor.visualize(
paths,
instance,
during_analysis
)
def log_likelihood_function(
self,
instance: ModelInstance
) -> float:
"""
Compute the combined likelihood of each factor from a collection of instances
with the same ordering as the factors.
Parameters
----------
instance
A collection of instances, one corresponding to each factor
Returns
-------
The combined likelihood of all factors
"""
likelihood = abs(
self.model_factors[0].analysis.log_likelihood_function(
instance[0]
)
)
for model_factor, instance_ in zip(
self.model_factors[1:],
instance[1:]
):
likelihood *= abs(
model_factor.analysis.log_likelihood_function(
instance_
)
)
return -likelihood
@property
def global_prior_model(self) -> CollectionPriorModel:
"""
A collection of prior models, with one model for each factor.
"""
return CollectionPriorModel([
model_factor.prior_model
for model_factor
in self.model_factors
])
class ModelFactor(Factor, AbstractModelFactor):
def __init__(
self,
prior_model: AbstractPriorModel,
analysis: Analysis,
optimiser: Optional[AbstractFactorOptimiser] = None
):
"""
A factor in the graph that actually computes the likelihood of a model
given values for each variable that model contains
Parameters
----------
prior_model
A model with some dimensionality
analysis
A class that implements a function which evaluates how well an
instance of the model fits some data
optimiser
A custom optimiser that will be used to fit this factor specifically
instead of the default optimiser
"""
self.prior_model = prior_model
self.analysis = analysis
self.optimiser = optimiser
prior_variable_dict = {
prior.name: prior
for prior
in prior_model.priors
}
def _factor(
**kwargs: np.ndarray
) -> float:
"""
Returns an instance of the prior model and evaluates it, forming
a factor.
Parameters
----------
kwargs
Arguments with names that are unique for each prior.
Returns
-------
Calculated likelihood
"""
arguments = dict()
for name, array in kwargs.items():
prior_id = int(name.split("_")[1])
prior = prior_model.prior_with_id(
prior_id
)
arguments[prior] = array
instance = prior_model.instance_for_arguments(
arguments
)
return analysis.log_likelihood_function(
instance
)
super().__init__(
_factor,
**prior_variable_dict
)
def freeze(self):
self.prior_model.freeze()
@property
def model_factors(self) -> List["ModelFactor"]:
return [self]
def optimise(self, optimiser) -> PriorModel:
"""
Optimise this factor on its own returning a PriorModel
representing the final state of the messages.
Parameters
----------
optimiser
Returns
-------
A PriorModel representing the optimised factor
"""
return super().optimise(
optimiser
)[0]
class FactorGraphModel(AbstractModelFactor):
def __init__(self, *model_factors: ModelFactor):
"""
A collection of factors that describe models, which can be
used to create a graph and messages.
If the models have shared priors then the graph has shared variables
Parameters
----------
model_factors
"""
self._model_factors = model_factors
@property
def model_factors(self):
return self._model_factors
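def _example_factor_graph(prior_models, analyses, optimiser):
    """
    Illustrative sketch only (not part of the original module): pair each
    user-supplied prior model with its analysis, combine them into a
    FactorGraphModel and optimise the shared graph. All arguments are
    hypothetical stand-ins for objects the caller builds elsewhere.
    """
    factors = [
        ModelFactor(prior_model, analysis)
        for prior_model, analysis in zip(prior_models, analyses)
    ]
    return FactorGraphModel(*factors).optimise(optimiser)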
|
[
"numpy.prod",
"autofit.mapper.prior_model.collection.CollectionPriorModel",
"autofit.graphical.expectation_propagation.EPMeanField.from_approx_dists",
"autofit.graphical.expectation_propagation.EPOptimiser",
"autofit.graphical.messages.NormalMessage.from_prior",
"typing.cast"
] |
[((2759, 2819), 'autofit.graphical.expectation_propagation.EPMeanField.from_approx_dists', 'EPMeanField.from_approx_dists', (['self.graph', 'self.message_dict'], {}), '(self.graph, self.message_dict)\n', (2788, 2819), False, 'from autofit.graphical.expectation_propagation import EPMeanField\n'), ((2985, 3158), 'autofit.graphical.expectation_propagation.EPOptimiser', 'EPOptimiser', (['self.graph'], {'default_optimiser': 'optimiser', 'factor_optimisers': '{factor: factor.optimiser for factor in self.model_factors if factor.\n optimiser is not None}'}), '(self.graph, default_optimiser=optimiser, factor_optimisers={\n factor: factor.optimiser for factor in self.model_factors if factor.\n optimiser is not None})\n', (2996, 3158), False, 'from autofit.graphical.expectation_propagation import EPOptimiser\n'), ((3934, 4009), 'autofit.mapper.prior_model.collection.CollectionPriorModel', 'CollectionPriorModel', (['[factor.prior_model for factor in self.model_factors]'], {}), '([factor.prior_model for factor in self.model_factors])\n', (3954, 4009), False, 'from autofit.mapper.prior_model.collection import CollectionPriorModel\n'), ((6271, 6363), 'autofit.mapper.prior_model.collection.CollectionPriorModel', 'CollectionPriorModel', (['[model_factor.prior_model for model_factor in self.model_factors]'], {}), '([model_factor.prior_model for model_factor in self.\n model_factors])\n', (6291, 6363), False, 'from autofit.mapper.prior_model.collection import CollectionPriorModel\n'), ((2104, 2135), 'autofit.graphical.messages.NormalMessage.from_prior', 'NormalMessage.from_prior', (['prior'], {}), '(prior)\n', (2128, 2135), False, 'from autofit.graphical.messages import NormalMessage\n'), ((2426, 2495), 'numpy.prod', 'np.prod', (['([model for model in self.model_factors] + self.prior_factors)'], {}), '([model for model in self.model_factors] + self.prior_factors)\n', (2433, 2495), True, 'import numpy as np\n'), ((1671, 1692), 'typing.cast', 'cast', (['Callable', 'prior'], {}), '(Callable, prior)\n', (1675, 1692), False, 'from typing import Callable, cast, Set, List, Dict, Optional\n')]
|
from shapely.geometry import shape
import fiona
import networkx as nx
import matplotlib.pyplot as plt
import math
import random
import traffic
import pickle
from datetime import datetime
from request import Request
import numpy as np
try:
from itertools import izip as zip
except ImportError:
pass
def main():
"""
Main function used for demo of data loading and pathfinding.
"""
G, trips = load_data(reset=False, graph=False, trip=False, abbr=False)
# t = random_trip(G) # Selects a random trip for pathfinding demo
# Predetermined trip for demo
t = Request((40.74345679662331, -73.72770035929027), (40.77214782804362, -73.76426798716528), 0, 0, datetime(2015, 1, 1))
draw_graph(G, bounds=(t.start, t.stop))
process_trips(G, trips=[t], heuristic=diste)
plt.axis('equal')
plt.show()
# === Load Data ===
def load_data(reset=False, graph=False, trip=False, abbr=False):
"""
Returns a graph representing the NYC map and an array of 2015 trips. Saves all the data in pickle files.
*** To refresh everything, reset=True ***
Parameters: (reset, graph, trip, abbr)
reset - bool
graph - bool
    trip - bool
abbr - bool
"""
G = None
trips = None
if reset:
graph = trip = abbr = True
if graph:
traffic_dict = traffic.process_traffic("NYC/Traffic_Data/traffic_volume.csv")
pickle_graph(abbr, traffic_dict)
with open('graph.pkl', 'rb') as graph_file:
G = pickle.load(graph_file)
if trip:
pickle_trips(G)
with open('trips.pkl', 'rb') as trips_file:
trips = pickle.load(trips_file)
return G, trips
def pickle_graph(abbr, traffic_dict):
"""
Generate and save the graph in a pickle file.
Parameters: (abbr, traffic_dict)
abbr - bool
traffic_dict - dict of traffic volume per street
"""
# Replace with street abbr
try:
if abbr:
raise ResetPickle
with open('abbr.pkl', 'rb') as abbr_file:
abbr = pickle.load(abbr_file)
except:
print("Loading abbreviations...")
abbr = {}
with open("abbr.txt") as rFile:
for line in rFile:
line = line.rstrip("\n")
abbr[line.split(" ")[0].upper()] = line.split(" ")[1].upper()
with open('abbr.pkl', 'wb') as out:
pickle.dump(abbr, out)
print("Done.")
# Variables to keep track of the number of recognized streets
recognized = 0
unrecognized = 0
# Build speeds dictionary for every road
print("Building speeds dictionary...")
speeds = {}
for feature in fiona.open("NYC/VZV_Speed Limits/geo_export_6459c10e-7bfb-4e64-ae29-f0747dc3824c.shp"):
street = feature["properties"]["street"]
for v in street_variations(street, abbr):
speeds[v] = feature["properties"]["postvz_sl"]
print("Done.")
# Create a Graph with intersections as nodes and roads as edges
print("Creating graph...")
time = random.randint(0, 23)
G = nx.Graph()
for feature in fiona.open("NYC/Map/geo_export_24fdfadb-893d-40a0-a751-a76cdefc9bc6.shp"):
for seg_start, seg_end in zip(list(shape(feature["geometry"]).coords),
list(shape(feature["geometry"]).coords)[1:]):
street = feature["properties"]["st_label"]
if street in speeds:
recognized += 1
else:
unrecognized += 1
divider = speeds.get(street, 0)
if divider == 0:
divider = 25
            seg_start = (seg_start[1], seg_start[0])
            seg_end = (seg_end[1], seg_end[0])
if street in traffic_dict:
volume_total = traffic_dict[street]
volume_count = volume_total[time]
w = reweight(seg_start, seg_end, divider, int(volume_count))
else:
w = weight(seg_start, seg_end, divider)
G.add_edge(seg_start, seg_end, weight=w, distance=feature["properties"]["shape_leng"],
speed=divider / 3600 * 1609) # Gives the edge properties like a weight, the in real life distance, and the speed limit
print(
f"Streets recognized: {recognized}. Unrecognized: {unrecognized}. Percent recognized: {recognized / (unrecognized + recognized) * 100}%.")
with open('graph.pkl', 'wb') as out:
pickle.dump(G, out)
print("Done.")
def pickle_trips(G):
"""
Saves the trips in a pickle file.
Parameters: (G)
G - networkx.graph()
"""
print("Loading trips...")
t = 0 # Number of trips loaded so far
trips = []
with open("NYC/2015_taxi_data.csv") as rFile:
first_line = rFile.readline().rstrip("\n").split(",")
for line in rFile:
line = line.rstrip("\n").split(",")
temp = {}
for i in range(len(first_line)):
temp[first_line[i]] = line[i]
            starting = (float(temp["pickup_latitude"]), float(temp["pickup_longitude"]))
            ending = (float(temp["dropoff_latitude"]), float(temp["dropoff_longitude"]))
n1, n2 = find_closest_node(G, starting), find_closest_node(G, ending)
trips.append(Request(n1, n2, 0, int(temp["passenger_count"]),
datetime.strptime(temp["tpep_pickup_datetime"], "%Y-%m-%d %H:%M:%S")))
t += 1
if t == 100: # Sets a limit on the number of trips to save time.
print("Loaded " + str(t) + " trips.")
break
with open('trips.pkl', 'wb') as out:
pickle.dump(trips, out)
print("Done.")
def find_closest_node(G, starting):
"""
Finds the closest node to starting.
Parameters: (G, starting)
G - networkx.graph()
starting - (lat, lon)
"""
n1 = (None, float("inf"))
for node in G.nodes():
closeness = abs(starting[0] - node[0]) + abs(starting[1] - node[1])
if closeness < n1[1]:
n1 = (node, closeness)
return n1[0]
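# Note: this is a linear scan over every node using L1 distance in degrees,
# so each lookup costs O(number of nodes) rather than using a spatial index.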
def street_variations(s, abbr):
"""
Returns multiple variations of the street name based on common street term abbreviations.
Parameters: (s, abbr)
s - string
abbr - dict of common street abbreviations
"""
variations = [s]
for a in abbr:
for v in variations.copy():
if a in v:
v = v.replace(a, abbr[a])
variations.append(v)
return variations
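def _street_variations_sketch():
    """
    Illustrative sketch only (not part of the original script): with a made-up
    abbreviation table, the original name and its expanded form are both kept.
    """
    abbr = {"ST": "STREET"}
    return street_variations("W 42 ST", abbr)  # -> ["W 42 ST", "W 42 STREET"]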
class ResetPickle(Exception):
pass
# === Plotting ===
def draw_graph(g, bounds=((-180, -90), (180, 90))):
"""
Plots the edges on matplotlib.
Parameters: (g, bounds)
g - networkx.graph()
bounds - (node, node)
node - (lat, lon)
"""
n1 = bounds[0]
n2 = bounds[1]
for edge in g.edges():
if min(n1[0], n2[0]) < edge[0][0] < max(n1[0], n2[0]) and min(n1[1], n2[1]) < edge[0][1] < max(n1[1], n2[1]):
plt.plot((edge[0][1], edge[1][1]), (edge[0][0], edge[1][0]), 'c.-')
def draw_path(path, color="b"):
"""
Plots a path on matplotlib.
Parameters: (path, color)
path - [nodes]
color - str
node - (lat, lon)
"""
px = []
py = []
for p in range(len(path) - 1):
plt.plot((path[p][1], path[p + 1][1]), (path[p][0], path[p + 1][0]), "m--")
px.append(path[p][1])
py.append(path[p][0])
plt.plot(px, py, color + '.')
# === Trips ===
def process_trips(G, trips, heuristic):
"""
Processes trips and plots them on the graph.
Parameters: (G, trips, heuristic)
G - networkx.graph()
trips - [trips]
heuristic - Callable
trip - (node, node)
node - (lat, lon)
"""
for trip in trips:
n1 = trip.start
n2 = trip.stop
print(f"\nGoing from {n1} to {n2}")
print("Calculating traffic...")
try:
path = nx.astar_path(G, n1, n2, heuristic)
print(f"Cost of trip: {nx.astar_path_length(G, n1, n2, heuristic)}")
print(f"Nodes in trip: {len(path)}")
print_trip_info(n1, n2, path, G)
draw_path(path)
except:
print("Couldn't find a path")
def random_trip(G):
"""
Returns a randomly generated trip as a Request.
Parameters: (G)
    G - networkx.graph()
"""
tn = len(G.nodes())
n1 = random.randint(0, tn)
n2 = random.randint(0, tn)
tn = 0
for node in G.nodes():
if n1 == tn:
n1 = node
if n2 == tn:
n2 = node
tn += 1
return Request(n1, n2, 0, 0, datetime(2015, 1, 1))
def print_trip_info(n1, n2, path, G, pr=False):
"""
    Prints and returns the trip info for the given path.
Parameters: (n1, n2, path, G)
n1 - (lat, lon)
n2 - (lat, lon)
path - list of nodes in order
G - networkx.graph()
pr - bool - whether to print the info
node - (lat, lon)
"""
# Note: Edges with the exact same length are only counted once as this was found to be the most accurate so far
speeds = {}
distances = []
time = 0
for p in range(len(path) - 1):
speed = round(G[path[p]][path[p + 1]]["speed"], 2)
if G[path[p]][path[p + 1]]["distance"] not in distances:
distances.append(G[path[p]][path[p + 1]]["distance"])
speeds[speed] = speeds.get(speed, 0) + 1
time += G[path[p]][path[p + 1]]["distance"] * 0.3048 / speed
if pr:
print(f"Speeds (m/s): {speeds}")
print(f"Distance (meters?): {round(sum(distances) * 0.3048, 2)}")
print(f"Euclidean distance (meters): {distance_to_meters(n1, n2)}")
print(f"Time (minutes): {round(time / 60, 2)}")
return speeds, round(sum(distances) * 0.3048, 2), round(time / 60, 2)
# === Heuristics ===
def weight(s, e, speed):
"""
Returns the weight to be assigned to the edges of the graph.
    Parameters: (s, e, speed)
s - (lat, lon)
e - (lat, lon)
speed - int
"""
return ((s[0] - e[0]) ** 2 + (s[1] - e[1]) ** 2) ** 0.5 / speed
def reweight(s, e, speed, volume):
"""
Returns the weight to be assigned to the edges of the graph.
** Traffic Version (Includes historical traffic data for more accurate weighting) **
Parameters: (s, e, speed, volume)
s - (lat, lon)
e - (lat, lon)
speed - int
volume - int
"""
density = volume / (distance_to_meters(s, e))
congestion = density / speed
return ((s[0] - e[0]) ** 2 + (s[1] - e[1]) ** 2) ** 0.5 / congestion
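def _edge_weight_sketch():
    """
    Illustrative sketch only (not part of the original script): compare the plain
    speed-based weight with the traffic-aware reweight for one segment. The
    coordinates, speed limit and traffic volume below are made-up values.
    """
    seg_start, seg_end = (40.7433, -73.7277), (40.7440, -73.7265)
    plain = weight(seg_start, seg_end, 25)
    congested = reweight(seg_start, seg_end, 25, 300)
    return plain, congested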
def diste(p1, p2):
"""
Returns euclidean distance divided by the default NYC speed. Admissible.
Parameters: (p1, p2)
p1 - (lat, lon)
p2 - (lat, lon)
"""
return (pow(abs(p1[0] - p2[0]), 2) + pow(abs(p1[1] - p2[1]), 2)) ** 0.5 / 65
def distm(p1, p2):
"""
Returns manhattan distance divided by the default NYC speed. NOT admissible.
Parameters: (p1, p2)
p1 - (lat, lon)
p2 - (lat, lon)
"""
    return (abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])) / 65  # parenthesised so the whole distance is divided by the speed
# === Helpers ===
def distance_to_meters(n1, n2):
"""
Calculates the great circle distance between two points.
Parameters: (n1, n2)
n1 - (lat, lon)
n2 - (lat, lon)
"""
radius = 6371000 # Radius of earth
x1, y1 = float(n1[0]), float(n1[1])
x2, y2 = float(n2[0]), float(n2[1])
    o1 = x1 * math.pi / 180
    o2 = x2 * math.pi / 180
    d1 = (x2 - x1) * math.pi / 180
    d2 = (y2 - y1) * math.pi / 180
    # Haversine formula; the original computed o1 but never used it, so the cos(o1) term was missing.
    a = np.sin(d1 / 2) ** 2 + np.cos(o1) * np.cos(o2) * np.sin(d2 / 2) ** 2
    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
    return round(radius * c, 2)
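# Example: distance_to_meters((40, -73), (41, -73)) is roughly 111195 m,
# i.e. one degree of latitude along a meridian.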
# === Main ===
if __name__ == "__main__":
main()
|
[
"numpy.sqrt",
"shapely.geometry.shape",
"networkx.astar_path",
"numpy.divide",
"datetime.datetime",
"numpy.multiply",
"matplotlib.pyplot.plot",
"numpy.subtract",
"fiona.open",
"matplotlib.pyplot.axis",
"random.randint",
"traffic.process_traffic",
"pickle.load",
"numpy.cos",
"matplotlib.pyplot.show",
"pickle.dump",
"datetime.datetime.strptime",
"networkx.Graph",
"networkx.astar_path_length"
] |
[((804, 821), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (812, 821), True, 'import matplotlib.pyplot as plt\n'), ((826, 836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (834, 836), True, 'import matplotlib.pyplot as plt\n'), ((2669, 2760), 'fiona.open', 'fiona.open', (['"""NYC/VZV_Speed Limits/geo_export_6459c10e-7bfb-4e64-ae29-f0747dc3824c.shp"""'], {}), "(\n 'NYC/VZV_Speed Limits/geo_export_6459c10e-7bfb-4e64-ae29-f0747dc3824c.shp')\n", (2679, 2760), False, 'import fiona\n'), ((3045, 3066), 'random.randint', 'random.randint', (['(0)', '(23)'], {}), '(0, 23)\n', (3059, 3066), False, 'import random\n'), ((3075, 3085), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3083, 3085), True, 'import networkx as nx\n'), ((3105, 3178), 'fiona.open', 'fiona.open', (['"""NYC/Map/geo_export_24fdfadb-893d-40a0-a751-a76cdefc9bc6.shp"""'], {}), "('NYC/Map/geo_export_24fdfadb-893d-40a0-a751-a76cdefc9bc6.shp')\n", (3115, 3178), False, 'import fiona\n'), ((7499, 7528), 'matplotlib.pyplot.plot', 'plt.plot', (['px', 'py', "(color + '.')"], {}), "(px, py, color + '.')\n", (7507, 7528), True, 'import matplotlib.pyplot as plt\n'), ((8484, 8505), 'random.randint', 'random.randint', (['(0)', 'tn'], {}), '(0, tn)\n', (8498, 8505), False, 'import random\n'), ((8515, 8536), 'random.randint', 'random.randint', (['(0)', 'tn'], {}), '(0, tn)\n', (8529, 8536), False, 'import random\n'), ((685, 705), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)'], {}), '(2015, 1, 1)\n', (693, 705), False, 'from datetime import datetime\n'), ((1339, 1401), 'traffic.process_traffic', 'traffic.process_traffic', (['"""NYC/Traffic_Data/traffic_volume.csv"""'], {}), "('NYC/Traffic_Data/traffic_volume.csv')\n", (1362, 1401), False, 'import traffic\n'), ((1503, 1526), 'pickle.load', 'pickle.load', (['graph_file'], {}), '(graph_file)\n', (1514, 1526), False, 'import pickle\n'), ((1629, 1652), 'pickle.load', 'pickle.load', (['trips_file'], {}), '(trips_file)\n', (1640, 1652), False, 'import pickle\n'), ((4462, 4481), 'pickle.dump', 'pickle.dump', (['G', 'out'], {}), '(G, out)\n', (4473, 4481), False, 'import pickle\n'), ((5679, 5702), 'pickle.dump', 'pickle.dump', (['trips', 'out'], {}), '(trips, out)\n', (5690, 5702), False, 'import pickle\n'), ((7359, 7434), 'matplotlib.pyplot.plot', 'plt.plot', (['(path[p][1], path[p + 1][1])', '(path[p][0], path[p + 1][0])', '"""m--"""'], {}), "((path[p][1], path[p + 1][1]), (path[p][0], path[p + 1][0]), 'm--')\n", (7367, 7434), True, 'import matplotlib.pyplot as plt\n'), ((8710, 8730), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)'], {}), '(2015, 1, 1)\n', (8718, 8730), False, 'from datetime import datetime\n'), ((11572, 11596), 'numpy.multiply', 'np.multiply', (['x1', 'math.pi'], {}), '(x1, math.pi)\n', (11583, 11596), True, 'import numpy as np\n'), ((11622, 11646), 'numpy.multiply', 'np.multiply', (['x2', 'math.pi'], {}), '(x2, math.pi)\n', (11633, 11646), True, 'import numpy as np\n'), ((12044, 12066), 'numpy.multiply', 'np.multiply', (['radius', 'c'], {}), '(radius, c)\n', (12055, 12066), True, 'import numpy as np\n'), ((2051, 2073), 'pickle.load', 'pickle.load', (['abbr_file'], {}), '(abbr_file)\n', (2062, 2073), False, 'import pickle\n'), ((7041, 7108), 'matplotlib.pyplot.plot', 'plt.plot', (['(edge[0][1], edge[1][1])', '(edge[0][0], edge[1][0])', '"""c.-"""'], {}), "((edge[0][1], edge[1][1]), (edge[0][0], edge[1][0]), 'c.-')\n", (7049, 7108), True, 'import matplotlib.pyplot as plt\n'), ((8014, 8049), 'networkx.astar_path', 
'nx.astar_path', (['G', 'n1', 'n2', 'heuristic'], {}), '(G, n1, n2, heuristic)\n', (8027, 8049), True, 'import networkx as nx\n'), ((11682, 11701), 'numpy.subtract', 'np.subtract', (['x2', 'x1'], {}), '(x2, x1)\n', (11693, 11701), True, 'import numpy as np\n'), ((11746, 11765), 'numpy.subtract', 'np.subtract', (['y2', 'y1'], {}), '(y2, y1)\n', (11757, 11765), True, 'import numpy as np\n'), ((2392, 2414), 'pickle.dump', 'pickle.dump', (['abbr', 'out'], {}), '(abbr, out)\n', (2403, 2414), False, 'import pickle\n'), ((11814, 11830), 'numpy.divide', 'np.divide', (['d1', '(2)'], {}), '(d1, 2)\n', (11823, 11830), True, 'import numpy as np\n'), ((11838, 11854), 'numpy.divide', 'np.divide', (['d1', '(2)'], {}), '(d1, 2)\n', (11847, 11854), True, 'import numpy as np\n'), ((11880, 11890), 'numpy.cos', 'np.cos', (['o2'], {}), '(o2)\n', (11886, 11890), True, 'import numpy as np\n'), ((11925, 11941), 'numpy.divide', 'np.divide', (['d2', '(2)'], {}), '(d2, 2)\n', (11934, 11941), True, 'import numpy as np\n'), ((11987, 11997), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (11994, 11997), True, 'import numpy as np\n'), ((3223, 3249), 'shapely.geometry.shape', 'shape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (3228, 3249), False, 'from shapely.geometry import shape\n'), ((5386, 5454), 'datetime.datetime.strptime', 'datetime.strptime', (["temp['tpep_pickup_datetime']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(temp['tpep_pickup_datetime'], '%Y-%m-%d %H:%M:%S')\n", (5403, 5454), False, 'from datetime import datetime\n'), ((11900, 11916), 'numpy.divide', 'np.divide', (['d2', '(2)'], {}), '(d2, 2)\n', (11909, 11916), True, 'import numpy as np\n'), ((12006, 12023), 'numpy.subtract', 'np.subtract', (['(1)', 'a'], {}), '(1, a)\n', (12017, 12023), True, 'import numpy as np\n'), ((3302, 3328), 'shapely.geometry.shape', 'shape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (3307, 3328), False, 'from shapely.geometry import shape\n'), ((8086, 8128), 'networkx.astar_path_length', 'nx.astar_path_length', (['G', 'n1', 'n2', 'heuristic'], {}), '(G, n1, n2, heuristic)\n', (8106, 8128), True, 'import networkx as nx\n')]
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import math
import numpy as np
import unittest
from op_test import OpTest
def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale,
pooled_height, pooled_width):
"""
Psroi_pool implemented by Numpy.
x: 4-D as (N, C, H, W),
rois: 2-D as [[x1, y1, x2, y2], ...],
rois_num_per_img: 1-D as [nums_of_batch_0, nums_of_batch_1, ...]
"""
output_shape = (len(rois), output_channels, pooled_height, pooled_width)
out_data = np.zeros(output_shape)
batch_id = 0
rois_num_id = 0
rois_num_left = rois_num_per_img[rois_num_id]
for i in range(len(rois)):
roi = rois[i]
roi_batch_id = batch_id
rois_num_left -= 1
if rois_num_left == 0:
rois_num_id += 1
if rois_num_id < len(rois_num_per_img):
rois_num_left = rois_num_per_img[rois_num_id]
batch_id += 1
roi_start_w = round(roi[0]) * spatial_scale
roi_start_h = round(roi[1]) * spatial_scale
roi_end_w = (round(roi[2]) + 1.) * spatial_scale
roi_end_h = (round(roi[3]) + 1.) * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / float(pooled_height)
bin_size_w = roi_width / float(pooled_width)
x_i = x[roi_batch_id]
for c in range(output_channels):
for ph in range(pooled_height):
for pw in range(pooled_width):
hstart = int(
math.floor(float(ph) * bin_size_h + roi_start_h))
wstart = int(
math.floor(float(pw) * bin_size_w + roi_start_w))
hend = int(
math.ceil(float(ph + 1) * bin_size_h + roi_start_h))
wend = int(
math.ceil(float(pw + 1) * bin_size_w + roi_start_w))
hstart = min(max(hstart, 0), x.shape[2])
hend = min(max(hend, 0), x.shape[2])
wstart = min(max(wstart, 0), x.shape[3])
wend = min(max(wend, 0), x.shape[3])
c_in = (c * pooled_height + ph) * pooled_width + pw
is_empty = (hend <= hstart) or (wend <= wstart)
out_sum = 0.
for ih in range(hstart, hend):
for iw in range(wstart, wend):
out_sum += x_i[c_in, ih, iw]
bin_area = (hend - hstart) * (wend - wstart)
out_data[i, c, ph, pw] = 0. if is_empty else (
out_sum / float(bin_area))
return out_data
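def _psroi_pool_shape_sketch():
    """
    Illustrative sketch only (not part of the original test file): exercise the
    numpy reference implementation above with made-up inputs and check the
    output shape (num_rois, output_channels, pooled_height, pooled_width).
    """
    x = np.random.random((2, 12, 6, 4))  # channels = 3 output channels * 2 * 2 bins
    rois = np.array([[0, 0, 3, 3], [1, 1, 3, 3], [0, 0, 2, 2]], dtype='float64')
    out = calc_psroi_pool(x, rois, [1, 2], 3, 1.0, 2, 2)
    assert out.shape == (3, 3, 2, 2)
    return out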
class TestPSROIPoolOp(OpTest):
def set_data(self):
paddle.enable_static()
self.init_test_case()
self.make_rois()
self.outs = calc_psroi_pool(self.x, self.boxes, self.boxes_num,
self.output_channels, self.spatial_scale,
self.pooled_height,
self.pooled_width).astype('float64')
self.inputs = {
'X': self.x,
'ROIs': (self.rois_with_batch_id[:, 1:5], self.rois_lod)
}
self.attrs = {
'output_channels': self.output_channels,
'spatial_scale': self.spatial_scale,
'pooled_height': self.pooled_height,
'pooled_width': self.pooled_width
}
self.outputs = {'Out': self.outs}
def init_test_case(self):
self.batch_size = 3
self.channels = 3 * 2 * 2
self.height = 6
self.width = 4
self.x_dim = [self.batch_size, self.channels, self.height, self.width]
self.spatial_scale = 1.0 / 4.0
self.output_channels = 3
self.pooled_height = 2
self.pooled_width = 2
self.x = np.random.random(self.x_dim).astype('float64')
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.random_integers(
0, self.width // self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers(
0, self.height // self.spatial_scale - self.pooled_height)
x2 = np.random.random_integers(x1 + self.pooled_width,
self.width // self.spatial_scale)
y2 = np.random.random_integers(
y1 + self.pooled_height, self.height // self.spatial_scale)
roi = [bno, x1, y1, x2, y2]
rois.append(roi)
self.rois_num = len(rois)
self.rois_with_batch_id = np.array(rois).astype('float64')
self.boxes = self.rois_with_batch_id[:, 1:]
self.boxes_num = np.array(
[bno + 1 for bno in range(self.batch_size)]).astype('int32')
def setUp(self):
self.op_type = 'psroi_pool'
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):
def setUp(self):
self.x = np.random.random([2, 490, 28, 28]).astype(np.float32)
self.boxes = np.array(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]).astype(np.float32)
self.boxes_num = np.array([1, 2]).astype(np.int32)
def test_output_size(self):
def test_output_size_is_int():
output_size = 7
out = paddle.vision.ops.psroi_pool(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num), output_size).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
self.assertTrue(np.allclose(out, expect_out))
def test_output_size_is_tuple():
output_size = (7, 7)
out = paddle.vision.ops.psroi_pool(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num), output_size).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
self.assertTrue(np.allclose(out, expect_out))
def test_dytype_is_float64():
output_size = (7, 7)
out = paddle.vision.ops.psroi_pool(
paddle.to_tensor(self.x, 'float64'),
paddle.to_tensor(self.boxes, 'float64'),
paddle.to_tensor(self.boxes_num, 'int32'), output_size).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
self.assertTrue(np.allclose(out, expect_out))
places = ['cpu']
if paddle.fluid.core.is_compiled_with_cuda():
places.append('gpu')
for place in places:
paddle.set_device(place)
test_output_size_is_int()
test_output_size_is_tuple()
test_dytype_is_float64()
class TestPSROIPoolDynamicClassAPI(unittest.TestCase):
def setUp(self):
self.x = np.random.random([2, 128, 32, 32]).astype(np.float32)
self.boxes = np.array([[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10],
[5, 3, 25, 21]]).astype(np.float32)
self.boxes_num = np.array([2, 2]).astype(np.int32)
def test_output_size(self):
def test_output_size_is_int():
psroi_module = paddle.vision.ops.PSRoIPool(8, 1.1)
out = psroi_module(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num)).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
1.1, 8, 8)
self.assertTrue(np.allclose(out, expect_out))
def test_output_size_is_tuple():
psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1)
out = psroi_pool_module(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num)).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
1.1, 8, 8)
self.assertTrue(np.allclose(out, expect_out))
def test_dytype_is_float64():
psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1)
out = psroi_pool_module(
paddle.to_tensor(self.x, 'float64'),
paddle.to_tensor(self.boxes, 'float64'),
paddle.to_tensor(self.boxes_num, 'int32')).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
1.1, 8, 8)
self.assertTrue(np.allclose(out, expect_out))
paddle.disable_static()
places = ['cpu']
if paddle.fluid.core.is_compiled_with_cuda():
places.append('gpu')
for place in places:
paddle.set_device(place)
test_output_size_is_int()
test_output_size_is_tuple()
test_dytype_is_float64()
class TestPSROIPoolBoxesNumError(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.x = paddle.uniform([2, 490, 28, 28], dtype='float32')
self.boxes = paddle.to_tensor(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')
def test_errors(self):
def test_boxes_num_nums_error():
boxes_num = paddle.to_tensor([1, 5], 'int32')
out = paddle.vision.ops.psroi_pool(
self.x, self.boxes, boxes_num, output_size=7)
self.assertRaises(ValueError, test_boxes_num_nums_error)
def test_boxes_num_length_error():
boxes_num = paddle.to_tensor([1, 1, 1], 'int32')
out = paddle.vision.ops.psroi_pool(
self.x, self.boxes, boxes_num, output_size=7)
self.assertRaises(ValueError, test_boxes_num_length_error)
class TestPSROIPoolChannelError(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.x = paddle.uniform([2, 490, 28, 28], dtype='float32')
self.boxes = paddle.to_tensor(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')
self.output_size = 4
def test_errors(self):
def test_channel_error():
boxes_num = paddle.to_tensor([2, 1], 'int32')
out = paddle.vision.ops.psroi_pool(self.x, self.boxes, boxes_num,
self.output_size)
self.assertRaises(ValueError, test_channel_error)
class TestPSROIPoolStaticAPI(unittest.TestCase):
def setUp(self):
paddle.enable_static()
self.x_placeholder = paddle.static.data(
name='x', shape=[2, 490, 28, 28])
self.x = np.random.random([2, 490, 28, 28]).astype(np.float32)
self.boxes_placeholder = paddle.static.data(
name='boxes', shape=[3, 4], lod_level=1)
self.boxes = np.array(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]).astype(np.float32)
self.boxes_num = np.array([1, 2]).astype(np.int32)
def test_function_in_static(self):
output_size = 7
out = paddle.vision.ops.psroi_pool(self.x_placeholder,
self.boxes_placeholder,
self.boxes_num, output_size)
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
places = [paddle.CPUPlace()]
if paddle.fluid.core.is_compiled_with_cuda():
places.append(paddle.CUDAPlace(0))
for place in places:
exe = paddle.static.Executor(place)
boxes_lod_data = paddle.fluid.create_lod_tensor(self.boxes,
[[1, 2]], place)
out_res = exe.run(paddle.static.default_main_program(),
feed={'x': self.x,
'boxes': boxes_lod_data},
fetch_list=[out.name])
self.assertTrue(np.allclose(out_res, expect_out))
if __name__ == '__main__':
unittest.main()
|
[
"numpy.array",
"paddle.disable_static",
"unittest.main",
"paddle.CPUPlace",
"numpy.random.random",
"paddle.vision.ops.psroi_pool",
"paddle.vision.ops.PSRoIPool",
"paddle.fluid.create_lod_tensor",
"paddle.enable_static",
"paddle.to_tensor",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.set_device",
"numpy.allclose",
"paddle.uniform",
"paddle.static.data",
"paddle.static.Executor",
"paddle.static.default_main_program",
"numpy.random.random_integers",
"paddle.CUDAPlace",
"numpy.zeros"
] |
[((1156, 1178), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (1164, 1178), True, 'import numpy as np\n'), ((13311, 13326), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13324, 13326), False, 'import unittest\n'), ((3454, 3476), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (3474, 3476), False, 'import paddle\n'), ((7744, 7785), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (7783, 7785), False, 'import paddle\n'), ((9859, 9882), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (9880, 9882), False, 'import paddle\n'), ((9919, 9960), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (9958, 9960), False, 'import paddle\n'), ((10260, 10283), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (10281, 10283), False, 'import paddle\n'), ((10301, 10350), 'paddle.uniform', 'paddle.uniform', (['[2, 490, 28, 28]'], {'dtype': '"""float32"""'}), "([2, 490, 28, 28], dtype='float32')\n", (10315, 10350), False, 'import paddle\n'), ((10372, 10448), 'paddle.to_tensor', 'paddle.to_tensor', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]', '"""float32"""'], {}), "([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')\n", (10388, 10448), False, 'import paddle\n'), ((11131, 11154), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (11152, 11154), False, 'import paddle\n'), ((11172, 11221), 'paddle.uniform', 'paddle.uniform', (['[2, 490, 28, 28]'], {'dtype': '"""float32"""'}), "([2, 490, 28, 28], dtype='float32')\n", (11186, 11221), False, 'import paddle\n'), ((11243, 11319), 'paddle.to_tensor', 'paddle.to_tensor', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]', '"""float32"""'], {}), "([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')\n", (11259, 11319), False, 'import paddle\n'), ((11764, 11786), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (11784, 11786), False, 'import paddle\n'), ((11816, 11868), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""x"""', 'shape': '[2, 490, 28, 28]'}), "(name='x', shape=[2, 490, 28, 28])\n", (11834, 11868), False, 'import paddle\n'), ((11986, 12045), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""boxes"""', 'shape': '[3, 4]', 'lod_level': '(1)'}), "(name='boxes', shape=[3, 4], lod_level=1)\n", (12004, 12045), False, 'import paddle\n'), ((12307, 12412), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x_placeholder', 'self.boxes_placeholder', 'self.boxes_num', 'output_size'], {}), '(self.x_placeholder, self.boxes_placeholder,\n self.boxes_num, output_size)\n', (12335, 12412), False, 'import paddle\n'), ((12668, 12709), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (12707, 12709), False, 'import paddle\n'), ((7861, 7885), 'paddle.set_device', 'paddle.set_device', (['place'], {}), '(place)\n', (7878, 7885), False, 'import paddle\n'), ((8452, 8487), 'paddle.vision.ops.PSRoIPool', 'paddle.vision.ops.PSRoIPool', (['(8)', '(1.1)'], {}), '(8, 1.1)\n', (8479, 8487), False, 'import paddle\n'), ((8930, 8965), 'paddle.vision.ops.PSRoIPool', 'paddle.vision.ops.PSRoIPool', (['(8)', '(1.1)'], {}), '(8, 1.1)\n', (8957, 8965), False, 'import paddle\n'), ((9410, 9445), 'paddle.vision.ops.PSRoIPool', 'paddle.vision.ops.PSRoIPool', (['(8)', '(1.1)'], {}), '(8, 1.1)\n', (9437, 9445), False, 'import paddle\n'), ((10036, 10060), 
'paddle.set_device', 'paddle.set_device', (['place'], {}), '(place)\n', (10053, 10060), False, 'import paddle\n'), ((10555, 10588), 'paddle.to_tensor', 'paddle.to_tensor', (['[1, 5]', '"""int32"""'], {}), "([1, 5], 'int32')\n", (10571, 10588), False, 'import paddle\n'), ((10607, 10681), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x', 'self.boxes', 'boxes_num'], {'output_size': '(7)'}), '(self.x, self.boxes, boxes_num, output_size=7)\n', (10635, 10681), False, 'import paddle\n'), ((10833, 10869), 'paddle.to_tensor', 'paddle.to_tensor', (['[1, 1, 1]', '"""int32"""'], {}), "([1, 1, 1], 'int32')\n", (10849, 10869), False, 'import paddle\n'), ((10888, 10962), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x', 'self.boxes', 'boxes_num'], {'output_size': '(7)'}), '(self.x, self.boxes, boxes_num, output_size=7)\n', (10916, 10962), False, 'import paddle\n'), ((11448, 11481), 'paddle.to_tensor', 'paddle.to_tensor', (['[2, 1]', '"""int32"""'], {}), "([2, 1], 'int32')\n", (11464, 11481), False, 'import paddle\n'), ((11500, 11577), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x', 'self.boxes', 'boxes_num', 'self.output_size'], {}), '(self.x, self.boxes, boxes_num, self.output_size)\n', (11528, 11577), False, 'import paddle\n'), ((12638, 12655), 'paddle.CPUPlace', 'paddle.CPUPlace', ([], {}), '()\n', (12653, 12655), False, 'import paddle\n'), ((12805, 12834), 'paddle.static.Executor', 'paddle.static.Executor', (['place'], {}), '(place)\n', (12827, 12834), False, 'import paddle\n'), ((12864, 12923), 'paddle.fluid.create_lod_tensor', 'paddle.fluid.create_lod_tensor', (['self.boxes', '[[1, 2]]', 'place'], {}), '(self.boxes, [[1, 2]], place)\n', (12894, 12923), False, 'import paddle\n'), ((4583, 4611), 'numpy.random.random', 'np.random.random', (['self.x_dim'], {}), '(self.x_dim)\n', (4599, 4611), True, 'import numpy as np\n'), ((4849, 4936), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(self.width // self.spatial_scale - self.pooled_width)'], {}), '(0, self.width // self.spatial_scale - self.\n pooled_width)\n', (4874, 4936), True, 'import numpy as np\n'), ((4974, 5063), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(self.height // self.spatial_scale - self.pooled_height)'], {}), '(0, self.height // self.spatial_scale - self.\n pooled_height)\n', (4999, 5063), True, 'import numpy as np\n'), ((5102, 5190), 'numpy.random.random_integers', 'np.random.random_integers', (['(x1 + self.pooled_width)', '(self.width // self.spatial_scale)'], {}), '(x1 + self.pooled_width, self.width // self.\n spatial_scale)\n', (5127, 5190), True, 'import numpy as np\n'), ((5254, 5344), 'numpy.random.random_integers', 'np.random.random_integers', (['(y1 + self.pooled_height)', '(self.height // self.spatial_scale)'], {}), '(y1 + self.pooled_height, self.height // self.\n spatial_scale)\n', (5279, 5344), True, 'import numpy as np\n'), ((5506, 5520), 'numpy.array', 'np.array', (['rois'], {}), '(rois)\n', (5514, 5520), True, 'import numpy as np\n'), ((6011, 6045), 'numpy.random.random', 'np.random.random', (['[2, 490, 28, 28]'], {}), '([2, 490, 28, 28])\n', (6027, 6045), True, 'import numpy as np\n'), ((6086, 6143), 'numpy.array', 'np.array', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]'], {}), '([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]])\n', (6094, 6143), True, 'import numpy as np\n'), ((6201, 6217), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (6209, 6217), True, 'import numpy 
as np\n'), ((6703, 6731), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (6714, 6731), True, 'import numpy as np\n'), ((7176, 7204), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (7187, 7204), True, 'import numpy as np\n'), ((7677, 7705), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (7688, 7705), True, 'import numpy as np\n'), ((8096, 8130), 'numpy.random.random', 'np.random.random', (['[2, 128, 32, 32]'], {}), '([2, 128, 32, 32])\n', (8112, 8130), True, 'import numpy as np\n'), ((8171, 8243), 'numpy.array', 'np.array', (['[[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10], [5, 3, 25, 21]]'], {}), '([[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10], [5, 3, 25, 21]])\n', (8179, 8243), True, 'import numpy as np\n'), ((8319, 8335), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (8327, 8335), True, 'import numpy as np\n'), ((8826, 8854), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (8837, 8854), True, 'import numpy as np\n'), ((9309, 9337), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (9320, 9337), True, 'import numpy as np\n'), ((9820, 9848), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (9831, 9848), True, 'import numpy as np\n'), ((11899, 11933), 'numpy.random.random', 'np.random.random', (['[2, 490, 28, 28]'], {}), '([2, 490, 28, 28])\n', (11915, 11933), True, 'import numpy as np\n'), ((12080, 12137), 'numpy.array', 'np.array', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]'], {}), '([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]])\n', (12088, 12137), True, 'import numpy as np\n'), ((12195, 12211), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (12203, 12211), True, 'import numpy as np\n'), ((12737, 12756), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (12753, 12756), False, 'import paddle\n'), ((13014, 13050), 'paddle.static.default_main_program', 'paddle.static.default_main_program', ([], {}), '()\n', (13048, 13050), False, 'import paddle\n'), ((13244, 13276), 'numpy.allclose', 'np.allclose', (['out_res', 'expect_out'], {}), '(out_res, expect_out)\n', (13255, 13276), True, 'import numpy as np\n'), ((6399, 6423), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (6415, 6423), False, 'import paddle\n'), ((6441, 6469), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (6457, 6469), False, 'import paddle\n'), ((6487, 6519), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (6503, 6519), False, 'import paddle\n'), ((6872, 6896), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (6888, 6896), False, 'import paddle\n'), ((6914, 6942), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (6930, 6942), False, 'import paddle\n'), ((6960, 6992), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (6976, 6992), False, 'import paddle\n'), ((7342, 7377), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x', '"""float64"""'], {}), "(self.x, 'float64')\n", (7358, 7377), False, 'import paddle\n'), ((7395, 7434), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes', '"""float64"""'], {}), "(self.boxes, 'float64')\n", (7411, 7434), False, 'import paddle\n'), ((7452, 7493), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num', '"""int32"""'], {}), 
"(self.boxes_num, 'int32')\n", (7468, 7493), False, 'import paddle\n'), ((8536, 8560), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (8552, 8560), False, 'import paddle\n'), ((8578, 8606), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (8594, 8606), False, 'import paddle\n'), ((8624, 8656), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (8640, 8656), False, 'import paddle\n'), ((9019, 9043), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (9035, 9043), False, 'import paddle\n'), ((9061, 9089), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (9077, 9089), False, 'import paddle\n'), ((9107, 9139), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (9123, 9139), False, 'import paddle\n'), ((9499, 9534), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x', '"""float64"""'], {}), "(self.x, 'float64')\n", (9515, 9534), False, 'import paddle\n'), ((9552, 9591), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes', '"""float64"""'], {}), "(self.boxes, 'float64')\n", (9568, 9591), False, 'import paddle\n'), ((9609, 9650), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num', '"""int32"""'], {}), "(self.boxes_num, 'int32')\n", (9625, 9650), False, 'import paddle\n')]
|
#!/usr/bin/env python3
import random
import time
import sys
import pygame
from pygame.locals import *
import pygame.surfarray as surfarray # for performance
import numpy as np
import colors # color definition
SCREEN_SIZE = (1600, 900) # change it to your screen size
#color definitions
# (r, g, b)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 128)
BG_COLOR = BLACK
CELL_COLOR = GREEN
def main():
"""\
Press 'a' to decrease max possible fps.
Press 'd' to increase max possible fps.
Press 's' for no max fps limit.
Press 'z' to decrease length of cell side.
Press 'x' to increase length of cell side.
    Press 'p' to pause the game.
    Press 'r' to generate a new random map.
    Press 'f' to spawn a random live cell, 'k'/'l' to fill a random line, 'm' to clear the map.
    Press 'q' to quit.
    """
side = 50 # length of cell side
width = int(SCREEN_SIZE[0] / side) # number of cells per row
    height = int(SCREEN_SIZE[1] / side) # number of cells per column
pygame.init()
pygame.mouse.set_visible(False)
SURF = pygame.display.set_mode(SCREEN_SIZE,FULLSCREEN,32);
fontObj = pygame.font.Font('freesansbold.ttf',32);
FPSCLOCK = pygame.time.Clock()
fps = 5 # max fps
maps, slices = generate_random_map(width, height, side);
pre_frame_time = time.time() # time of previous frame
paused = False
while True:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_q:
pygame.quit()
sys.exit()
if event.key == K_a and fps > 1:
fps -= 1
if event.key == K_d and fps < 60:
fps += 1
if event.key == K_p:
paused = not paused
if event.key == K_f:
maps[random.randint(1, width), random.randint(1, height)] = True
if event.key == K_k:
maps[random.randint(1, width), :] = True
if event.key == K_l:
maps[:, random.randint(1, height)] = True
if event.key == K_m:
maps[:, :] = False
if event.key == K_z and side > 5:
side -= 5
if side == 15:
side = 10
width = int(SCREEN_SIZE[0] / side);
height = int(SCREEN_SIZE[1] / side);
maps, slices = generate_random_map(width, height, side)
if event.key == K_x and side < 100:
side += 5
if side == 15:
side = 20
width = int(SCREEN_SIZE[0] / side);
height = int(SCREEN_SIZE[1] / side);
maps, slices = generate_random_map(width, height, side)
if event.key == K_s:
fps = 0
if event.key == K_r:
maps, slices = generate_random_map(width, height, side)
if event.type == QUIT:
pygame.quit()
sys.exit()
SURF.fill(BG_COLOR)
show_map(SURF, maps, side, slices)
if not paused:
maps = update(maps)
current_frame_time = time.time()
textSURF = fontObj.render('real fps: ' + str(1//(current_frame_time-pre_frame_time)), True, colors.random_color());
pre_frame_time = current_frame_time
textRect = textSURF.get_rect();
textRect.topright = (SCREEN_SIZE[0],200);
SURF.blit(textSURF,textRect);
textSURF = fontObj.render('length of side: ' + str(side), True, colors.random_color());
textRect = textSURF.get_rect();
textRect.topright = (SCREEN_SIZE[0],100);
SURF.blit(textSURF,textRect);
pygame.display.update();
FPSCLOCK.tick(fps)
def generate_random_map(width, height, side):
"""\
    Generate a random map padded with a one-cell dead border around the given width and height.
    Also define slices for fast drawing when the cell side length is small.
    Return the generated map and the slices (None unless side < 10).
"""
slices = None
if side < 10:
K = side
Y, X = SCREEN_SIZE
slices = []
for y in range(0, K):
for x in range(0, K):
s = slice(y, Y, K), slice(x, X, K)
slices.append(s)
maps = np.zeros((width+2, height+2), dtype=np.bool)
for col in range(width):
n_cell = random.randint(0, height)
col_map = n_cell * [np.bool(1)]
col_map.extend([np.bool(0)] * (height-n_cell))
assert len(col_map) == height
random.shuffle(col_map)
maps[col+1,1:-1] = col_map
return (maps, slices)
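# When side < 10, the slices let show_map() write the whole boolean map into a
# strided view of a screen-sized array (one assignment per offset) and blit the
# result in a single surfarray call, instead of blitting one Surface per cell.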
def show_map(SURF, _map, side, slices=None):
"""\
    Draw the map to the surface SURF. If side is too small, pass in the slices returned
by generate_random_map.
"""
_map = _map[1:-1, 1:-1]
if slices is not None:
bit_map = np.zeros(SCREEN_SIZE, dtype=np.bool)
for s in slices:
bit_map[s] = _map
bit_map = bit_map * SURF.map_rgb(colors.random_color())
surfarray.blit_array(SURF, bit_map)
else:
cell_surf = pygame.Surface((side,side))
for w in range(_map.shape[0]):
for h in range(_map.shape[1]):
if _map[w][h]:
cell_surf.fill(colors.random_color())
SURF.blit(cell_surf, (w * side, h * side))
def update(oldmap):
"""\
    Update the status of every cell according to the number of surrounding live cells.
"""
nbrs_count = sum(np.roll(np.roll(oldmap, i, 0), j, 1)
for i in (-1, 0, 1) for j in (-1, 0, 1)
if (i != 0 or j != 0))
_newmap = (nbrs_count == 3) | (oldmap & (nbrs_count == 2))
newmap = np.zeros(oldmap.shape, dtype=np.bool)
newmap[1:-1, 1:-1] = _newmap[1:-1, 1:-1]
return newmap
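def _update_blinker_sketch():
    """
    Illustrative sketch only (not part of the original script): a line of three
    live cells flips orientation after one update(). The 5x5 grid includes the
    one-cell dead border that update() maintains.
    """
    grid = np.zeros((5, 5), dtype=bool)
    grid[2, 1:4] = True                             # three live cells in a line
    flipped = update(grid)
    assert flipped[1:4, 2].all()                    # the line now runs the other way
    assert not flipped[2, 1] and not flipped[2, 3]  # the old end cells died
    return flipped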
if __name__ == '__main__':
main()
|
[
"colors.random_color",
"sys.exit",
"pygame.init",
"random.shuffle",
"pygame.event.get",
"pygame.Surface",
"pygame.display.set_mode",
"pygame.display.update",
"pygame.quit",
"numpy.bool",
"numpy.roll",
"pygame.mouse.set_visible",
"numpy.zeros",
"pygame.surfarray.blit_array",
"pygame.time.Clock",
"pygame.font.Font",
"time.time",
"random.randint"
] |
[((1014, 1027), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1025, 1027), False, 'import pygame\n'), ((1032, 1063), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (1056, 1063), False, 'import pygame\n'), ((1075, 1127), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREEN_SIZE', 'FULLSCREEN', '(32)'], {}), '(SCREEN_SIZE, FULLSCREEN, 32)\n', (1098, 1127), False, 'import pygame\n'), ((1141, 1181), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (1157, 1181), False, 'import pygame\n'), ((1198, 1217), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1215, 1217), False, 'import pygame\n'), ((1359, 1370), 'time.time', 'time.time', ([], {}), '()\n', (1368, 1370), False, 'import time\n'), ((4427, 4475), 'numpy.zeros', 'np.zeros', (['(width + 2, height + 2)'], {'dtype': 'np.bool'}), '((width + 2, height + 2), dtype=np.bool)\n', (4435, 4475), True, 'import numpy as np\n'), ((5824, 5861), 'numpy.zeros', 'np.zeros', (['oldmap.shape'], {'dtype': 'np.bool'}), '(oldmap.shape, dtype=np.bool)\n', (5832, 5861), True, 'import numpy as np\n'), ((1467, 1485), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1483, 1485), False, 'import pygame\n'), ((3363, 3374), 'time.time', 'time.time', ([], {}), '()\n', (3372, 3374), False, 'import time\n'), ((3906, 3929), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3927, 3929), False, 'import pygame\n'), ((4518, 4543), 'random.randint', 'random.randint', (['(0)', 'height'], {}), '(0, height)\n', (4532, 4543), False, 'import random\n'), ((4686, 4709), 'random.shuffle', 'random.shuffle', (['col_map'], {}), '(col_map)\n', (4700, 4709), False, 'import random\n'), ((5003, 5039), 'numpy.zeros', 'np.zeros', (['SCREEN_SIZE'], {'dtype': 'np.bool'}), '(SCREEN_SIZE, dtype=np.bool)\n', (5011, 5039), True, 'import numpy as np\n'), ((5168, 5203), 'pygame.surfarray.blit_array', 'surfarray.blit_array', (['SURF', 'bit_map'], {}), '(SURF, bit_map)\n', (5188, 5203), True, 'import pygame.surfarray as surfarray\n'), ((5234, 5262), 'pygame.Surface', 'pygame.Surface', (['(side, side)'], {}), '((side, side))\n', (5248, 5262), False, 'import pygame\n'), ((3475, 3496), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (3494, 3496), False, 'import colors\n'), ((3745, 3766), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (3764, 3766), False, 'import colors\n'), ((3164, 3177), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3175, 3177), False, 'import pygame\n'), ((3194, 3204), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3202, 3204), False, 'import sys\n'), ((4572, 4582), 'numpy.bool', 'np.bool', (['(1)'], {}), '(1)\n', (4579, 4582), True, 'import numpy as np\n'), ((5137, 5158), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (5156, 5158), False, 'import colors\n'), ((5624, 5645), 'numpy.roll', 'np.roll', (['oldmap', 'i', '(0)'], {}), '(oldmap, i, 0)\n', (5631, 5645), True, 'import numpy as np\n'), ((1582, 1595), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1593, 1595), False, 'import pygame\n'), ((1616, 1626), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1624, 1626), False, 'import sys\n'), ((4608, 4618), 'numpy.bool', 'np.bool', (['(0)'], {}), '(0)\n', (4615, 4618), True, 'import numpy as np\n'), ((5410, 5431), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (5429, 5431), False, 'import colors\n'), ((1923, 1947), 'random.randint', 'random.randint', (['(1)', 'width'], {}), 
'(1, width)\n', (1937, 1947), False, 'import random\n'), ((1949, 1974), 'random.randint', 'random.randint', (['(1)', 'height'], {}), '(1, height)\n', (1963, 1974), False, 'import random\n'), ((2045, 2069), 'random.randint', 'random.randint', (['(1)', 'width'], {}), '(1, width)\n', (2059, 2069), False, 'import random\n'), ((2146, 2171), 'random.randint', 'random.randint', (['(1)', 'height'], {}), '(1, height)\n', (2160, 2171), False, 'import random\n')]
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import dragon as dg
from dragon.vm.torch.tensor import *
from dragon.vm.torch.c_api import device as _Device
def UnifyDevices(tensors, key='Inputs'):
types, indices = [t.device.type for t in tensors], [0]
if len(set(types)) != 1:
raise ValueError('{} from different device type: [{}].'
.format(key, ', '.join(types)))
if types[0] == 'cuda':
indices = [t.device.index for t in tensors]
if len(set(indices)) != 1:
raise ValueError('{} from different cuda device: [{}].'
.format(key, ', '.join([str(d) for d in indices])))
return _Device(types[0], indices[0])
def MakeDevice(inputs=(), outputs=()):
# Case #1: [], [] -> CPU
# Case #2: [...], [] -> Refer Inputs
# Case #3: [], [...] -> Refer Outputs
# Case #4: [...], [...] -> Refer Outputs
if len(outputs) > 0: return UnifyDevices(outputs, 'Outputs')
if len(inputs) > 0: return UnifyDevices(inputs, 'Inputs')
return _Device()
def WrapScalar(scalar, dtype, device):
# We use (DType + Value) to hash different scalars
# Setting a Tensor with same DType and shape will not deconstruct it
if 'float' in dtype: scalar = float(scalar)
if 'int' in dtype: scalar = int(scalar)
name = '/share/scalar/{}/{}'.format(dtype, str(scalar))
if not dg.workspace.HasTensor(name):
dg.workspace.FeedTensor(name, np.array(scalar, dtype=dtype))
t = Tensor(name=name, dtype=dtype, device=device, own_storage=False)
t.requires_grad = False
return t
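def _wrap_scalar_sketch(tensors):
    """
    Illustrative sketch only (not part of the original module): pick a device
    from existing vm.torch tensors and wrap a Python scalar so it can take part
    in tensor ops. Wrapping the same (dtype, value) pair again reuses the same
    shared workspace tensor, since the name depends only on dtype and value.
    `tensors` is a non-empty list of vm.torch tensors supplied by the caller.
    """
    device = MakeDevice(inputs=tensors)
    return WrapScalar(0.5, 'float32', device)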
|
[
"dragon.workspace.HasTensor",
"numpy.array",
"dragon.vm.torch.c_api.device"
] |
[((1121, 1150), 'dragon.vm.torch.c_api.device', '_Device', (['types[0]', 'indices[0]'], {}), '(types[0], indices[0])\n', (1128, 1150), True, 'from dragon.vm.torch.c_api import device as _Device\n'), ((1487, 1496), 'dragon.vm.torch.c_api.device', '_Device', ([], {}), '()\n', (1494, 1496), True, 'from dragon.vm.torch.c_api import device as _Device\n'), ((1829, 1857), 'dragon.workspace.HasTensor', 'dg.workspace.HasTensor', (['name'], {}), '(name)\n', (1851, 1857), True, 'import dragon as dg\n'), ((1897, 1926), 'numpy.array', 'np.array', (['scalar'], {'dtype': 'dtype'}), '(scalar, dtype=dtype)\n', (1905, 1926), True, 'import numpy as np\n')]
|
"""
This module is the main API used to create track collections
"""
# Standard library imports
import copy
import random
import inspect
import logging
import itertools
from typing import Any
from typing import List
from typing import Union
from typing import Tuple
from typing import Callable
from dataclasses import dataclass, field, asdict
# Third party imports
import numpy as np
import pandas as pd
import networkx as nx
# Local imports
import spotify_flows.database as database
from .login import login
from .data_structures import (
EpisodeItem,
SpotifyDataStructure,
TrackItem,
AudioFeaturesItem,
)
from .tracks import get_track_id, read_track_from_id
from .tracks import get_audio_features
from .albums import get_album_id
from .albums import get_album_songs
from .podcasts import get_show_id
from .podcasts import get_show_episodes
from .user import get_all_saved_tracks
from .user import get_recommendations_for_genre
from .artists import get_artist_id
from .artists import get_artist_albums
from .artists import get_related_artists
from .artists import get_artist_popular_songs
from .playlists import get_playlist_id
from .playlists import make_new_playlist
from .playlists import get_playlist_tracks
# Main body
logger = logging.getLogger()
class DatabaseNotLoaded(Exception):
pass
@dataclass
class TrackCollection:
"""Class representing a collection of tracks. Can be chained together through a
variety of defined methods."""
read_items_from_db = lambda id_, db: db.build_collection_from_collection_id(id_=id_)
sp = login(
scope="playlist-modify-private playlist-modify-public user-read-playback-position user-library-read"
)
id_: str = ""
info: SpotifyDataStructure = None
_items: List[Any] = field(default_factory=list)
_audio_features_enriched: bool = False
def copy(self):
return copy.copy(self)
@property
def _api_track_gen(self):
yield from self._items
@property
def _db_track_gen(self):
db = CollectionDatabase()
return db.load_playlist(playlist_id=self.id_)
@property
def exist_in_db(self):
db = CollectionDatabase()
return db.playlist_exists(self.id_) if db.is_loaded() else False
@property
def items(self):
if self._items:
yield from self._items
else:
if self.id_:
yield from self.item_gen()
else:
yield from iter(())
def item_gen(self):
db = CollectionDatabase()
if self.exist_in_db:
yield from self._db_track_gen
else:
logger.info(f"Retrieving items via API")
for track_dict in self._api_track_gen:
track = TrackItem.from_dict(track_dict)
if db.is_loaded():
db.add_track(track_item=track)
yield track
@classmethod
def from_id(cls, id_: str):
return cls(id_=id_)
@classmethod
def from_item(cls, id_: str, item: SpotifyDataStructure):
return cls(id_=id_, info=item)
@classmethod
def from_db(cls, id_: str, db_path: str):
db = database.SpotifyDatabase(db_path, op_table="table")
items = cls.read_items_from_db(id_=id_, db=db)
return TrackCollection(id_=id_, _items=items)
@classmethod
def from_name(cls, name: str):
name = name.replace("_", " ")
id_ = cls.func_get_id(name=name)
return cls(id_=id_)
def __str__(self) -> str:
return "\n".join([str(item) for item in self.items])
def __add__(self, other: "TrackCollection") -> "TrackCollection":
"""Defines the addition of two collections. Items get concatenated.
Returns:
TrackCollection: Collection object with combined items
"""
def new_items():
yield from self.items
yield from other.items
enriched = (self._audio_features_enriched) and (other._audio_features_enriched)
return TrackCollection(
id_="", _items=new_items(), _audio_features_enriched=enriched
)
def __radd__(self, other: "TrackCollection") -> "TrackCollection":
"""Used when building track collections from list of other track collections
Returns:
TrackCollection: Sum of two collections
"""
if other == 0:
return self
else:
return self + other
def __sub__(self, other: "TrackCollection") -> "TrackCollection":
"""Defines the substraction of two collections. Items from other get removed from items from self.
Returns:
TrackCollection: Collection object with modified items.
"""
other_items = list(other.items)
def new_items():
for item in self.items:
if item not in other_items:
yield item
enriched = self._audio_features_enriched
return TrackCollection(
id_="", _items=new_items(), _audio_features_enriched=enriched
)
def __truediv__(self, other: "TrackCollection") -> "TrackCollection":
"""Defines the division of two collections.
Returns:
TrackCollection: Items are intersection of self and other
"""
other_items = list(other.items)
def new_items():
for item in self.items:
if item in other_items:
yield item
enriched = self._audio_features_enriched
return TrackCollection(
id_="", _items=new_items(), _audio_features_enriched=enriched
)
def __mod__(self, other: "TrackCollection") -> "TrackCollection":
"""Defines the modulo of two collections
Returns:
TrackCollection: Items are alternates of self and other.
"""
def new_items():
for i, j in zip(self.items, other.items):
yield i
yield j
enriched = (self._audio_features_enriched) and (other._audio_features_enriched)
return TrackCollection(_items=new_items(), _audio_features_enriched=enriched)
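    # Illustrative semantics of the operators above (added sketch; `a` and `b`
    # stand for any two TrackCollection instances):
    #   a + b  -> items of `a` followed by items of `b`
    #   a - b  -> items of `a` that do not appear in `b`
    #   a / b  -> items present in both `a` and `b`, in the order of `a`
    #   a % b  -> items of `a` and `b` interleaved pairwise (stops at the shorter)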
def to_dataframes(self) -> Tuple[pd.DataFrame]:
"""Transforms items into dataframes, used for storage in database.
Returns:
Tuple[pd.DataFrame]: Representation of items as dataframes
"""
# Enrich with audio features
tracks = copy.copy(list(self.add_audio_features().items))
# Extract data
album_artist = [
{"album_id": track.album.id, "artist_id": artist.id}
for track in tracks
for artist in track.album.artists
]
all_tracks = [asdict(track) for track in tracks]
all_audio_features = [
{"track_id": track["id"], **track["audio_features"]} for track in all_tracks
]
all_albums = [asdict(track.album) for track in tracks]
all_artists = [artist for album in all_albums for artist in album["artists"]]
# Build dataframes
df_all_artists = pd.DataFrame(all_artists)
df_all_albums = pd.DataFrame(all_albums).drop(columns="artists")
df_audio_features = pd.DataFrame(all_audio_features)
df_all_tracks = pd.DataFrame(all_tracks)
df_all_tracks.loc[:, "album_id"] = df_all_tracks["album"].apply(
lambda x: x["id"]
)
df_all_tracks.drop(columns=["album", "audio_features"], inplace=True)
df_album_artist = pd.DataFrame(album_artist)
return (
df_all_tracks,
df_all_artists,
df_all_albums,
df_audio_features,
df_album_artist,
)
def shuffle(self) -> "TrackCollection":
"""Shuffle items
Returns:
TrackCollection: Object with items shuffled.
"""
new_items_list = copy.copy(list(self.items))
random.shuffle(new_items_list)
new_items = (item for item in new_items_list)
return TrackCollection(
_items=new_items, _audio_features_enriched=self._audio_features_enriched
)
def random(self, N: int) -> "TrackCollection":
"""Sample items randomly
Args:
N (int): Number of items to pick
Returns:
TrackCollection: Object with new items
"""
def new_items(N):
all_items = list(self.items)
k = min(N, len(all_items))
yield from random.sample(all_items, k=k)
return TrackCollection(
_items=new_items(N), _audio_features_enriched=self._audio_features_enriched
)
def remove_remixes(self) -> "TrackCollection":
"""Remove remixes from items
Returns:
TrackCollection: Object with new items
"""
banned_words = ["remix", "mixed"]
def new_items():
for item in self.items:
if all(
[
(banned_word not in item.name.lower())
for banned_word in banned_words
]
):
yield item
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
def sort(self, by: str, ascending: bool = True) -> "TrackCollection":
"""Sort items
Args:
by (str): Criteria used for sorting
ascending (bool, optional): Ascending order. Defaults to True.
Returns:
TrackCollection: Object with sorted items
"""
str_attr = f"item.{by}"
def new_items():
# Enrichment with audio features if needed
if by.startswith("audio_features") and not self._audio_features_enriched:
all_items = self._enrich_with_audio_features(items=self.items)
self._audio_features_enriched = True
else:
all_items = self.items
sorted_items = sorted(
list(all_items),
key=eval(f"lambda item: {str_attr}"),
reverse=(not ascending),
)
yield from sorted_items
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
def filter(self, criteria_func: Callable[..., Any]) -> "TrackCollection":
"""Filter items by certain criteria function
Args:
criteria_func (Callable[..., Any]): Criteria used for filtering
Returns:
TrackCollection: Object with filtered items
"""
# Enrichment with audio features if needed
def new_items():
if (
"audio_features" in inspect.getsource(criteria_func)
and not self._audio_features_enriched
):
self._audio_features_enriched = True
all_items = self._enrich_with_audio_features(items=self.items)
else:
all_items = self.items
for item in all_items:
if criteria_func(item):
yield item
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
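    # Illustrative usage (sketch; `danceability` and `energy` are hypothetical
    # audio-feature attribute names):
    #   coll.sort(by="audio_features.danceability", ascending=False)
    #   coll.filter(lambda item: item.audio_features.energy > 0.5)
    # Both calls trigger audio-feature enrichment automatically because the
    # criteria reference `audio_features`.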
def insert_at_time_intervals(self, other, time: int):
def new_items(time):
            # `other.items` may be a one-shot generator; tee() pre-creates up to
            # 20 independent copies so it can be re-inserted multiple times.
            dups = itertools.tee(other.items, 20)
i_dup = 0
cum_time = 0
for item in self.items:
prev_cum_time = cum_time
cum_time += item.duration_ms / 1000 / 60
yield item
if cum_time % time < prev_cum_time % time:
yield from dups[i_dup]
i_dup += 1
cum_time = 0
return TrackCollection(_items=new_items(time))
def insert_at_time(self, other, time: int):
def new_items(time):
cum_time = 0
for item in self.items:
prev_cum_time = cum_time
cum_time += item.duration_ms / 1000 / 60
yield item
if cum_time % time < prev_cum_time % time:
yield from other.items
return TrackCollection(_items=new_items(time))
def insert_at_position(self, other, position: int):
def new_items(position):
before, after = itertools.tee(self.items, 2)
yield from itertools.islice(before, position)
yield from other.items
yield from after
return TrackCollection(_items=new_items(position))
def add_audio_features(self) -> "TrackCollection":
def new_items():
for item in self.items:
item.audio_features = AudioFeaturesItem.from_dict(
get_audio_features(track_ids=[item.id])[item.id]
)
yield item
return TrackCollection(_items=new_items(), _audio_features_enriched=True)
def _enrich_with_audio_features(self, items: List[TrackItem]) -> List[TrackItem]:
"""Get items enriched with audio features
Args:
items (List[TrackItem]): Items to enrich
Returns:
List[TrackItem]: Enriched items
"""
for item in items:
item.audio_features = AudioFeaturesItem.from_dict(
get_audio_features(track_ids=[item.id])[item.id]
)
yield item
def set_id(self, id_: str) -> "TrackCollection":
"""Add ID to collection, e.g. to use for storage in a database
Returns:
TrackCollection: Same collection, but with ID
"""
return TrackCollection(
id_=id_,
_items=self.items,
_audio_features_enriched=self._audio_features_enriched,
)
    def remove_duplicates(self: "TrackCollection") -> "TrackCollection":
        """Remove duplicate tracks from items based on track name
        Returns:
            TrackCollection: Collection with no duplicate tracks
        """
        # By name
        items = list(self.items)
idx = 0
while idx < len(items):
names = [item.name for item in items]
if items[idx].name in names[:idx]:
items.pop(idx)
else:
idx += 1
new_coll = copy.deepcopy(self)
new_coll._items = items
return new_coll
def first(self, n: int) -> "TrackCollection":
"""First n items
Returns:
TrackCollection: Collection with trimmed items
"""
new_items = itertools.islice(self.items, n)
return TrackCollection(
_items=new_items, _audio_features_enriched=self._audio_features_enriched
)
def to_playlist(self, playlist_name: str = None) -> None:
if playlist_name is None:
playlist_name = self.id_
make_new_playlist(sp=self.sp, playlist_name=playlist_name, items=self.items)
def to_database(self, db: database.SpotifyDatabase = None) -> None:
logger.info(f"Storing collection to database. id = {self.id_}")
if db is None:
db = CollectionDatabase()
if not db.is_loaded():
raise DatabaseNotLoaded
db.store_tracks_in_database(collection=self)
    def optimize(self, target_func, N: int = None) -> "TrackCollection":
items = list(self.items)
if N is None:
N = len(items)
diffs = np.abs(np.array([target_func(item) for item in items]))
idx = np.argsort(diffs)
n = min(N, len(items))
return TrackCollection(_items=list(np.array(items)[idx[:n]]))
def complex_sort(
self, by: str = "artist", graph: nx.Graph = nx.Graph()
) -> "TrackCollection":
items = list(self.items)
def new_items():
unique_artists = list(set([item.album.artists[0].id for item in items]))
artists = [
(
artist_id,
[item for item in items if item.album.artists[0].id == artist_id],
)
for artist_id in unique_artists
]
remaining_artists = artists
latest_artist = remaining_artists.pop(0)
new_items_ = [track for track in latest_artist[1]]
while remaining_artists:
# Find the closest artist
all_path_lengths = []
for artist in remaining_artists:
try:
path_length = nx.shortest_path_length(
graph,
source=latest_artist[0],
target=artist[0],
weight="weight",
)
except nx.NetworkXNoPath as e:
path_length = 9999999
all_path_lengths.append(path_length)
# Get the minimum
all_path_lengths = np.array(all_path_lengths)
min_idx = np.where(all_path_lengths == all_path_lengths.min())[0][0]
# Set the latest artist
latest_artist = remaining_artists.pop(min_idx)
# Add the tracks
new_items_ += [track for track in latest_artist[1]]
return (item for item in new_items_)
return TrackCollection(
_items=new_items(), _audio_features_enriched=self._audio_features_enriched
)
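    # Note on `complex_sort` (sketch): `graph` is expected to hold artist ids as
    # nodes and weighted edges, e.g. built beforehand with
    #   g = nx.Graph(); g.add_edge(artist_a_id, artist_b_id, weight=0.3)
    # Tracks are then emitted artist by artist, always hopping to the nearest
    # remaining artist by shortest weighted path; unreachable artists count as
    # maximally distant.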
@dataclass
class Playlist(TrackCollection):
@classmethod
def func_get_id(cls, name):
return get_playlist_id(sp=cls.sp, playlist_name=name)
@property
def _db_track_gen(self):
return super()._db_track_gen
@property
def _api_track_gen(self):
return get_playlist_tracks(sp=self.sp, playlist_id=self.id_)
class Album(TrackCollection):
"""Class representing an Album's track contents"""
@classmethod
def func_get_id(cls, name):
return get_album_id(sp=cls.sp, album_name=name)
@property
def _db_track_gen(self):
db = CollectionDatabase()
return db.load_album(album_id=self.id_)
@property
def _api_track_gen(self):
return get_album_songs(sp=self.sp, album_id=self.id_)
class Artist(TrackCollection):
"""Class representing an Artist's track contents"""
@classmethod
def func_get_id(cls, name):
return get_artist_id(sp=cls.sp, artist_name=name)
@property
def _db_track_gen(self):
db = CollectionDatabase()
return db.load_artist(artist_id=self.id_)
@property
def _api_track_gen(self):
return self.all_songs()
def popular(self) -> "Artist":
"""Popular songs for the artist
Returns:
Artist: Artist with items set to the popular songs only
"""
def items():
for track_dict in get_artist_popular_songs(sp=self.sp, artist_id=self.id_):
yield TrackItem.from_dict(track_dict)
return Artist(id_=self.id_, _items=items())
def all_songs(self) -> "Artist":
"""All songs by the artist
Returns:
Artist: Artist with items set to all of their songs
"""
# Build album collections
album_data = get_artist_albums(artist_id=self.id_)
album_collection_items = [Album.from_id(album["id"]) for album in album_data]
album_collection = CollectionCollection(collections=album_collection_items)
# Retrieve items from album collection
if album_collection:
yield from album_collection.items
def related_artists(self, n: int, include: bool = True) -> "ArtistCollection":
"""Artists related to the artist
Args:
n (int): The number of related artists
include (bool): Whether the original artist should be included
Returns:
ArtistCollection: Collection of related artists
"""
related_artist_items = get_related_artists(sp=self.sp, artist_id=self.id_)
if include:
related_artist_items.append(self)
n += 1
related_artists = [
Artist(id_=artist_item["id"]) for artist_item in related_artist_items[:n]
]
return ArtistCollection(collections=related_artists)
class SavedTracks(TrackCollection):
"""Class representing an saved track contents"""
def __init__(self):
self._items = []
self.id_ = "Saved tracks"
@property
def _db_track_gen(self):
return super()._db_track_gen
@property
def _api_track_gen(self):
return get_all_saved_tracks(sp=self.sp)
@dataclass
class CollectionCollection(TrackCollection):
collections: List[TrackCollection] = field(default_factory=list)
def item_gen(self):
if self.collections:
yield from sum(self.collections).items
def alternate(self):
def new_items():
return itertools.chain(*zip(*[c.items for c in self.collections]))
return TrackCollection(id_="", _items=new_items())
@dataclass
class ArtistCollection(CollectionCollection):
"""Class representing a collection of artists"""
collections: List[Artist] = field(default_factory=list)
def popular(self) -> TrackCollection:
"""Popular songs of a given artist collection
Returns:
TrackCollection: New collection with all popular songs
"""
return sum([artist.popular() for artist in self.collections])
class Genre(TrackCollection):
"""Class representing an genre's track contents"""
def __init__(self, genre_name: str = "") -> None:
self.genre_name = genre_name
self._items = []
@property
def items(self) -> List[TrackItem]:
        if self._items:
            yield from self._items
else:
if self.id_:
yield from get_recommendations_for_genre(
sp=self.sp, genre_names=[self.genre_name]
)
else:
yield from iter(())
class Show(TrackCollection):
"""Class representing an show's episode contents"""
@classmethod
def func_get_id(cls, name):
return get_show_id(sp=cls.sp, query=name)
@property
def _db_track_gen(self):
return self._api_track_gen # TBD
@property
def _api_track_gen(self):
for ep_dict in get_show_episodes(sp=self.sp, show_id=self.id_):
yield EpisodeItem.from_dict(ep_dict)
def item_gen(self):
yield from self._api_track_gen
class Track(TrackCollection):
"""Class representing a single-track collection"""
def __init__(self, id_: str):
self.id_ = id_
self._items = iter([TrackItem.from_dict(read_track_from_id(track_id=id_))])
@classmethod
def func_get_id(cls, name):
return get_track_id(sp=cls.sp, track_name=name)
class CollectionDatabase(
database.SpotifyDatabase, metaclass=database.DatabaseSingleton
):
def __init__(self, file_path=None, op_table=None):
super().__init__(file_path=file_path, op_table=op_table)
def is_loaded(self):
return self.file_path is not None
def init_db(db_path):
CollectionDatabase(file_path=db_path, op_table="operations")
|
[
"logging.getLogger",
"itertools.islice",
"random.sample",
"copy.deepcopy",
"random.shuffle",
"dataclasses.asdict",
"itertools.tee",
"networkx.Graph",
"networkx.shortest_path_length",
"numpy.argsort",
"numpy.array",
"inspect.getsource",
"spotify_flows.database.SpotifyDatabase",
"pandas.DataFrame",
"copy.copy",
"dataclasses.field"
] |
[((1263, 1282), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1280, 1282), False, 'import logging\n'), ((1788, 1815), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1793, 1815), False, 'from dataclasses import dataclass, field, asdict\n'), ((20879, 20906), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (20884, 20906), False, 'from dataclasses import dataclass, field, asdict\n'), ((21347, 21374), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (21352, 21374), False, 'from dataclasses import dataclass, field, asdict\n'), ((1895, 1910), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1904, 1910), False, 'import copy\n'), ((3193, 3244), 'spotify_flows.database.SpotifyDatabase', 'database.SpotifyDatabase', (['db_path'], {'op_table': '"""table"""'}), "(db_path, op_table='table')\n", (3217, 3244), True, 'import spotify_flows.database as database\n'), ((7116, 7141), 'pandas.DataFrame', 'pd.DataFrame', (['all_artists'], {}), '(all_artists)\n', (7128, 7141), True, 'import pandas as pd\n'), ((7243, 7275), 'pandas.DataFrame', 'pd.DataFrame', (['all_audio_features'], {}), '(all_audio_features)\n', (7255, 7275), True, 'import pandas as pd\n'), ((7301, 7325), 'pandas.DataFrame', 'pd.DataFrame', (['all_tracks'], {}), '(all_tracks)\n', (7313, 7325), True, 'import pandas as pd\n'), ((7544, 7570), 'pandas.DataFrame', 'pd.DataFrame', (['album_artist'], {}), '(album_artist)\n', (7556, 7570), True, 'import pandas as pd\n'), ((7959, 7989), 'random.shuffle', 'random.shuffle', (['new_items_list'], {}), '(new_items_list)\n', (7973, 7989), False, 'import random\n'), ((14164, 14185), 'copy.copy', 'copy.copy', (['self.items'], {}), '(self.items)\n', (14173, 14185), False, 'import copy\n'), ((14427, 14446), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (14440, 14446), False, 'import copy\n'), ((14689, 14720), 'itertools.islice', 'itertools.islice', (['self.items', 'n'], {}), '(self.items, n)\n', (14705, 14720), False, 'import itertools\n'), ((15633, 15650), 'numpy.argsort', 'np.argsort', (['diffs'], {}), '(diffs)\n', (15643, 15650), True, 'import numpy as np\n'), ((15827, 15837), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (15835, 15837), True, 'import networkx as nx\n'), ((6747, 6760), 'dataclasses.asdict', 'asdict', (['track'], {}), '(track)\n', (6753, 6760), False, 'from dataclasses import dataclass, field, asdict\n'), ((6936, 6955), 'dataclasses.asdict', 'asdict', (['track.album'], {}), '(track.album)\n', (6942, 6955), False, 'from dataclasses import dataclass, field, asdict\n'), ((11460, 11490), 'itertools.tee', 'itertools.tee', (['other.items', '(20)'], {}), '(other.items, 20)\n', (11473, 11490), False, 'import itertools\n'), ((12466, 12494), 'itertools.tee', 'itertools.tee', (['self.items', '(2)'], {}), '(self.items, 2)\n', (12479, 12494), False, 'import itertools\n'), ((7166, 7190), 'pandas.DataFrame', 'pd.DataFrame', (['all_albums'], {}), '(all_albums)\n', (7178, 7190), True, 'import pandas as pd\n'), ((8529, 8558), 'random.sample', 'random.sample', (['all_items'], {'k': 'k'}), '(all_items, k=k)\n', (8542, 8558), False, 'import random\n'), ((12518, 12552), 'itertools.islice', 'itertools.islice', (['before', 'position'], {}), '(before, position)\n', (12534, 12552), False, 'import itertools\n'), ((17096, 17122), 'numpy.array', 'np.array', (['all_path_lengths'], {}), '(all_path_lengths)\n', (17104, 17122), True, 'import numpy as np\n'), 
((10824, 10856), 'inspect.getsource', 'inspect.getsource', (['criteria_func'], {}), '(criteria_func)\n', (10841, 10856), False, 'import inspect\n'), ((15725, 15740), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (15733, 15740), True, 'import numpy as np\n'), ((16641, 16735), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['graph'], {'source': 'latest_artist[0]', 'target': 'artist[0]', 'weight': '"""weight"""'}), "(graph, source=latest_artist[0], target=artist[0],\n weight='weight')\n", (16664, 16735), True, 'import networkx as nx\n')]
|
'''
makeRankingCard.py: builds the scorecard.
Author: HeRaNO
'''
import sys
import imblearn
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression as LR
# Read data start
model = pd.read_csv("model_data.csv", index_col = 0)
vali = pd.read_csv("vali_data.csv", index_col = 0)
# Read data end
# Start binning
def calcWOE(num_bins):
columns = ["min", "max", "count_0", "count_1"]
df = pd.DataFrame(num_bins, columns = columns)
df["total"] = df.count_0 + df.count_1
df["percentage"] = df.total / df.total.sum()
df["bad_rate"] = df.count_1 / df.total
df["goodpercent"] = df.count_0 / df.count_0.sum()
df["badpercent"] = df.count_1 / df.count_1.sum()
df["woe"] = np.log(df["goodpercent"] / df["badpercent"])
return df
def calcIV(df):
rate = df["goodpercent"] - df["badpercent"]
iv = np.sum(rate * df.woe)
return iv
def bestBin(DF, X, Y, n, q):
pass
'''
Write this part yourself; I gave up halfway through.
Approximate numbers of bins:
RevolvingUtilizationOfUnsecuredLines: 8
age: 11
DebtRatio: 11
MonthlyIncome: 9
NumberOfOpenCreditLinesAndLoans: 6
The remaining features cannot be binned automatically and must be binned by hand.
'''
# Binning should produce a bins_of_col[] array holding the cut points for each column
# Binning end
# Modeling start
model_woe = pd.DataFrame(index = model_data.index)
for col in bins_of_col:
model_woe[col] = pd.cut(model_data[col],bins_of_col[col]).map(woeall[col])
model_woe["SeriousDlqin2yrs"] = model_data["SeriousDlqin2yrs"]
vali_woe = pd.DataFrame(index = vali_data.index)
for col in bins_of_col:
vali_woe[col] = pd.cut(vali_data[col],bins_of_col[col]).map(woeall[col])
vali_woe["SeriousDlqin2yrs"] = vali_data["SeriousDlqin2yrs"]
vali_X = vali_woe.iloc[:,:-1]
vali_y = vali_woe.iloc[:,-1]
X = model_woe.iloc[:,:-1]
y = model_woe.iloc[:,-1]
lr = LR().fit(X, y)
lr.score(vali_X, vali_y)
# Modeling end
# Make card start
B = 20 / np.log(2)
A = 600 + B * np.log(1 / 60)
# Base score from the logistic regression intercept: Score = A - B * ln(odds)
base_score = A - B * lr.intercept_[0]
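# These constants follow the usual scorecard scaling Score = A - B * ln(odds):
# a reference score of 600 at odds of 1:60 and PDO = 20, i.e. the score drops by
# 20 points every time the odds double (B = PDO / ln 2, A = 600 + B * ln(1/60)).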
with open("score.csv", "w") as fdata:
fdata.write("base_score,{}\n".format(base_score))
for i, col in enumerate(X.columns):
	score = woeall[col] * (-B * lr.coef_[0][i])
	score.name = "Score"
	score.index.name = col
	score.to_csv("score.csv", header = True, mode = "a")
# Make card end
|
[
"pandas.read_csv",
"numpy.log",
"pandas.cut",
"sklearn.linear_model.LogisticRegression",
"numpy.sum",
"pandas.DataFrame"
] |
[((204, 246), 'pandas.read_csv', 'pd.read_csv', (['"""model_data.csv"""'], {'index_col': '(0)'}), "('model_data.csv', index_col=0)\n", (215, 246), True, 'import pandas as pd\n'), ((256, 297), 'pandas.read_csv', 'pd.read_csv', (['"""vali_data.csv"""'], {'index_col': '(0)'}), "('vali_data.csv', index_col=0)\n", (267, 297), True, 'import pandas as pd\n'), ((1137, 1173), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'model_data.index'}), '(index=model_data.index)\n', (1149, 1173), True, 'import pandas as pd\n'), ((1352, 1387), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'vali_data.index'}), '(index=vali_data.index)\n', (1364, 1387), True, 'import pandas as pd\n'), ((411, 450), 'pandas.DataFrame', 'pd.DataFrame', (['num_bins'], {'columns': 'columns'}), '(num_bins, columns=columns)\n', (423, 450), True, 'import pandas as pd\n'), ((693, 737), 'numpy.log', 'np.log', (["(df['goodpercent'] / df['badpercent'])"], {}), "(df['goodpercent'] / df['badpercent'])\n", (699, 737), True, 'import numpy as np\n'), ((817, 838), 'numpy.sum', 'np.sum', (['(rate * df.woe)'], {}), '(rate * df.woe)\n', (823, 838), True, 'import numpy as np\n'), ((1753, 1762), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1759, 1762), True, 'import numpy as np\n'), ((1668, 1672), 'sklearn.linear_model.LogisticRegression', 'LR', ([], {}), '()\n', (1670, 1672), True, 'from sklearn.linear_model import LogisticRegression as LR\n'), ((1777, 1791), 'numpy.log', 'np.log', (['(1 / 60)'], {}), '(1 / 60)\n', (1783, 1791), True, 'import numpy as np\n'), ((1218, 1259), 'pandas.cut', 'pd.cut', (['model_data[col]', 'bins_of_col[col]'], {}), '(model_data[col], bins_of_col[col])\n', (1224, 1259), True, 'import pandas as pd\n'), ((1431, 1471), 'pandas.cut', 'pd.cut', (['vali_data[col]', 'bins_of_col[col]'], {}), '(vali_data[col], bins_of_col[col])\n', (1437, 1471), True, 'import pandas as pd\n')]
|
import datetime
import os
import uuid
from os.path import join as opjoin
from pathlib import Path
import numpy as np
import requests
import yaml
from celery.result import AsyncResult
from django.db.models import Q
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, status, views, viewsets
from rest_framework.response import Response
from backend import celery_app, settings
from backend_app import mixins as BAMixins, models, serializers, swagger
from backend_app import utils
from deeplearning.tasks import classification, segmentation
from deeplearning.utils import nn_settings
class AllowedPropViewSet(BAMixins.ParamListModelMixin,
mixins.CreateModelMixin,
viewsets.GenericViewSet):
queryset = models.AllowedProperty.objects.all()
serializer_class = serializers.AllowedPropertySerializer
params = ['model_id', 'property_id']
def get_queryset(self):
model_id = self.request.query_params.get('model_id')
property_id = self.request.query_params.get('property_id')
self.queryset = models.AllowedProperty.objects.filter(model_id=model_id, property_id=property_id)
return self.queryset
@swagger_auto_schema(
manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY, "Integer representing a model",
required=True, type=openapi.TYPE_INTEGER),
openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a property",
required=True, type=openapi.TYPE_INTEGER)]
)
def list(self, request, *args, **kwargs):
"""Return the allowed and default values of a property
This method returns the values that a property can assume depending on the model employed. \
It provides a default value and a comma separated list of values to choose from.
When this api returns an empty list, the property allowed values and default should be retrieved \
using the `/properties/{id}` API.
"""
return super().list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
"""Create a new AllowedProperty
        This method creates a new AllowedProperty.
"""
return super().create(request, *args, **kwargs)
class DatasetViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
viewsets.GenericViewSet):
queryset = models.Dataset.objects.filter(is_single_image=False)
serializer_class = serializers.DatasetSerializer
def get_queryset(self):
task_id = self.request.query_params.get('task_id')
if task_id:
self.queryset = models.Dataset.objects.filter(task_id=task_id, is_single_image=False)
# self.queryset = models.Dataset.objects.filter(task_id=task_id)
return self.queryset
@swagger_auto_schema(
manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False)]
)
def list(self, request, *args, **kwargs):
"""Get the list datasets to use for training or finetuning
This method returns all the datasets in the backend.
"""
return super().list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""Retrieve a single dataset
This method returns the `{id}` dataset.
"""
return super().retrieve(request, *args, **kwargs)
@swagger_auto_schema(responses=swagger.DatasetViewSet_create_response)
def create(self, request, *args, **kwargs):
"""Upload a new dataset downloading it from a URL
This API uploads a dataset YAML file and stores it in the backend.
The `path` field must contain the URL of a dataset, e.g. \
[`dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml`](https://www.dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml?dl=1).
"""
serializer = self.get_serializer(data=request.data)
if not serializer.is_valid():
return Response({'error': 'Validation error. Request data is malformed.'},
status=status.HTTP_400_BAD_REQUEST)
# Download the yml file in url
url = serializer.validated_data['path']
dataset_name = serializer.validated_data['name']
dataset_out_path = f'{settings.DATASETS_DIR}/{dataset_name}.yml'
if Path(f'{settings.DATASETS_DIR}/{dataset_name}.yml').exists():
return Response({'error': f'The dataset `{dataset_name}` already exists'},
status=status.HTTP_400_BAD_REQUEST)
try:
r = requests.get(url, allow_redirects=True)
if r.status_code == 200:
yaml_content = yaml.load(r.content, Loader=yaml.FullLoader)
with open(f'{settings.DATASETS_DIR}/{dataset_name}.yml', 'w') as f:
yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
# Update the path
serializer.save(path=dataset_out_path)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
except requests.exceptions.RequestException:
# URL malformed
return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
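# Illustrative request sketch (the route is hypothetical; field names follow the
# serializer usage above):
#   POST <BASE_URL>/datasets with JSON body
#   {"name": "isic_segmentation",
#    "path": "https://www.dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml?dl=1"}
#   -> 201 with the stored dataset, or 400 if the URL is malformed or the name already exists.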
class InferenceViewSet(views.APIView):
@swagger_auto_schema(request_body=serializers.InferenceSerializer,
responses=swagger.inferences_post_responses)
def post(self, request):
"""Start an inference process using a pre-trained model on a dataset
This is the main entry point to start the inference. \
It is mandatory to specify a pre-trained model and a dataset.
"""
serializer = serializers.InferenceSerializer(data=request.data)
if serializer.is_valid():
return utils.do_inference(serializer)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class InferenceSingleViewSet(views.APIView):
@swagger_auto_schema(request_body=serializers.InferenceSingleSerializer,
responses=swagger.inferences_post_responses)
def post(self, request):
"""Starts the inference providing an image URL
This API allows the inference of a single image.
        It is mandatory to specify the same fields as the `/inference` API, except for `dataset_id`, which is replaced by \
        the URL of the image to process.
"""
serializer = serializers.InferenceSingleSerializer(data=request.data)
if serializer.is_valid():
image_url = serializer.validated_data['image_url']
project_id = serializer.validated_data['project_id']
task_id = models.Project.objects.get(id=project_id).task_id
# Create a dataset with the single image to process
dummy_dataset = f'name: "{image_url}"\n' \
f'description: "{image_url} auto-generated dataset"\n' \
f'images: ["{image_url}"]\n' \
f'split:\n' \
f' test: [0]'
# Save dataset and get id
d = models.Dataset(name=f'single-image-dataset', task_id=task_id, path='', is_single_image=True)
d.save()
try:
yaml_content = yaml.load(dummy_dataset, Loader=yaml.FullLoader)
except yaml.YAMLError as e:
d.delete()
print(e)
return Response({'error': 'Error in YAML parsing'}, status=status.HTTP_400_BAD_REQUEST)
with open(f'{settings.DATASETS_DIR}/single_image_dataset_{d.id}.yml', 'w') as f:
yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
# Update the path
d.path = f'{settings.DATASETS_DIR}/single_image_dataset_{d.id}.yml'
d.save()
serializer.validated_data['dataset_id'] = d
return utils.do_inference(serializer)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ModelViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = models.Model.objects.all()
serializer_class = serializers.ModelSerializer
def get_queryset(self):
task_id = self.request.query_params.get('task_id')
if task_id:
self.queryset = models.Model.objects.filter(task_id=task_id)
return self.queryset
@swagger_auto_schema(
manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY,
"Integer for filtering the models based on task.",
type=openapi.TYPE_INTEGER, required=False)]
)
def list(self, request):
"""Returns the available Neural Network models
This API allows the client to know which Neural Network models are available in the system in order to allow \
their selection.
The optional `task_id` parameter is used to filter them based on the task the models are used for.
"""
return super().list(request)
class ModelWeightsViewSet(BAMixins.ParamListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset = models.ModelWeights.objects.all()
serializer_class = serializers.ModelWeightsSerializer
params = ['model_id']
def get_queryset(self):
if self.action == 'list':
model_id = self.request.query_params.get('model_id')
self.queryset = models.ModelWeights.objects.filter(model_id=model_id)
return self.queryset
else:
return super(ModelWeightsViewSet, self).get_queryset()
@swagger_auto_schema(
manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY,
"Return the modelweights obtained on `model_id` model.",
type=openapi.TYPE_INTEGER, required=False)]
)
def list(self, request):
"""Returns the available Neural Network models
When 'use pre-trained' is selected, it is possible to query the backend passing a `model_id` to obtain a list
of dataset on which it was pretrained.
"""
return super().list(request)
def retrieve(self, request, *args, **kwargs):
"""Retrieve a single modelweight
        This API returns the modelweight with the requested `{id}`.
"""
return super().retrieve(request, *args, **kwargs)
def get_obj(self, id):
try:
return models.ModelWeights.objects.get(id=id)
except models.ModelWeights.DoesNotExist:
return None
def put(self, request, *args, **kwargs):
"""Update an existing weight
This method updates an existing model weight (e.g. change the name).
"""
weight = self.get_obj(request.data['id'])
if not weight:
error = {"Error": f"Weight {request.data['id']} does not exist"}
return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
serializer = self.serializer_class(weight, data=request.data)
if serializer.is_valid():
serializer.save()
# Returns all the elements with model_id in request
queryset = models.ModelWeights.objects.filter(model_id=weight.model_id)
serializer = self.get_serializer(queryset, many=True)
# serializer = self.serializer_class(queryset, many=True)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
"""Update an existing weight
This method updates an existing model weight (e.g. change the name).
"""
return super().update(request, *args, **kwargs)
@swagger_auto_schema(auto_schema=None)
def partial_update(self, request, *args, **kwargs):
return super().partial_update(request, *args, **kwargs)
class OutputViewSet(views.APIView):
@staticmethod
def trunc(values, decs=0):
return np.trunc(values * 10 ** decs) / (10 ** decs)
@swagger_auto_schema(
manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
"Pass a required UUID representing a finished process.",
type=openapi.TYPE_STRING, format=openapi.FORMAT_UUID, required=False)],
responses=swagger.OutputViewSet_get_responses
)
def get(self, request, *args, **kwargs):
"""Retrieve results about an inference process
        This API provides information about an `inference` process. In a classification task it returns the list \
        of images and an array composed of the class prediction scores.
        In a segmentation task it returns the URLs of the segmented images.
"""
if not self.request.query_params.get('process_id'):
error = {'Error': f'Missing required parameter `process_id`'}
return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
process_id = self.request.query_params.get('process_id')
infer = models.Inference.objects.filter(celery_id=process_id)
if not infer:
# already deleted weight/training or inference
return Response({"result": "Process stopped before finishing or non existing."},
status=status.HTTP_404_NOT_FOUND)
if AsyncResult(process_id).status == 'PENDING':
return Response({"result": "Process in execution. Try later for output results."},
status=status.HTTP_200_OK)
infer = infer.first()
if not os.path.exists(opjoin(settings.OUTPUTS_DIR, infer.outputfile)):
return Response({"result": "Output file not found"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
outputs = open(opjoin(settings.OUTPUTS_DIR, infer.outputfile), 'r')
# Differentiate classification and segmentation
if infer.modelweights_id.model_id.task_id.name.lower() == 'classification':
lines = outputs.read().splitlines()
lines = [line.split(';') for line in lines]
# preds = self.trunc(preds, decs=8)
else:
# Segmentation
# output file contains path of files
uri = request.build_absolute_uri(settings.MEDIA_URL)
lines = outputs.read().splitlines()
lines = [l.replace(settings.OUTPUTS_DIR, uri) for l in lines]
response = {'outputs': lines}
return Response(response, status=status.HTTP_200_OK)
class ProjectViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset = models.Project.objects.all()
serializer_class = serializers.ProjectSerializer
def get_obj(self, id):
try:
return models.Project.objects.get(id=id)
except models.Project.DoesNotExist:
return None
def list(self, request, *args, **kwargs):
"""Loads all the projects
This method lists all the available projects.
"""
return super().list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""Retrieve a single project
Returns a project by `{id}`.
"""
return super().retrieve(request, *args, **kwargs)
@swagger_auto_schema(responses=swagger.ProjectViewSet_create_response)
def create(self, request, *args, **kwargs):
"""Create a new project
Create a new project.
"""
return super().create(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
project = self.get_obj(request.data['id'])
if not project:
error = {"Error": f"Project {request.data['id']} does not exist"}
return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.ProjectSerializer(project, data=request.data)
if serializer.is_valid():
serializer.save()
# Returns all the elements
return self.list(request)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
"""Update an existing project
Update a project instance by providing its `{id}`.
"""
return super().update(request, *args, **kwargs)
@swagger_auto_schema(auto_schema=None)
def partial_update(self, request, *args, **kwargs):
return super().partial_update(request, *args, **kwargs)
class PropertyViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = models.Property.objects.all()
serializer_class = serializers.PropertyListSerializer
def get_queryset(self):
name = self.request.query_params.get('name')
# Substitute underscore with space if present
if name:
name = [name, name.replace('_', ' ')]
self.queryset = models.Property.objects.filter(Q(name__icontains=name[0]) | Q(name__icontains=name[1]))
return self.queryset
def list(self, request, *args, **kwargs):
"""Return the Properties supported by backend
This API allows the client to know which properties are "globally" supported by the backend.
A model can have different default value and allowed values if the `/allowedProperties` return an entry.
"""
return super().list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""Retrieve a single property
Return a property by `{id}`.
"""
return super().retrieve(request, *args, **kwargs)
class StatusView(views.APIView):
@swagger_auto_schema(manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
"UUID representing a process",
required=True, type=openapi.TYPE_STRING,
format=openapi.FORMAT_UUID)],
responses=swagger.StatusView_get_response
)
def get(self, request):
"""Return the status of an training or inference process
This API allows the frontend to query the status of a training or inference, identified by a `process_id` \
(which is returned by `/train` or `/inference` APIs).
"""
if not self.request.query_params.get('process_id'):
error = {'Error': f'Missing required parameter `process_id`'}
return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
process_id = self.request.query_params.get('process_id')
if models.ModelWeights.objects.filter(celery_id=process_id).exists():
process_type = 'training'
process = models.ModelWeights.objects.filter(celery_id=process_id).first()
elif models.Inference.objects.filter(celery_id=process_id).exists():
process_type = 'inference'
process = models.Inference.objects.filter(celery_id=process_id).first()
else:
res = {
"result": "error",
"error": "Process not found."
}
return Response(data=res, status=status.HTTP_404_NOT_FOUND)
try:
with open(process.logfile, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
except:
res = {
"result": "error",
"error": "Log file not found"
}
return Response(data=res, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if last_line == '<done>':
process_status = 'finished'
last_line = lines[-2]
else:
process_status = 'running'
res = {
'result': 'ok',
'status': {
'process_type': process_type,
'process_status': process_status,
'process_data': last_line,
}
}
return Response(data=res, status=status.HTTP_200_OK)
class StopProcessViewSet(views.APIView):
@swagger_auto_schema(request_body=serializers.StopProcessSerializer,
responses=swagger.StopProcessViewSet_post_response
)
def post(self, request):
"""Kill a training or inference process
Stop a training process specifying a `process_id` (which is returned by `/train` or `/inference` APIs).
"""
serializer = serializers.StopProcessSerializer(data=request.data)
if serializer.is_valid():
process_id = serializer.data['process_id']
weights = models.ModelWeights.objects.filter(celery_id=process_id)
infer = models.Inference.objects.filter(celery_id=process_id)
response = {"result": "Process stopped"}
if not weights.exists() and not infer.exists():
# already deleted weight/training or inference
return Response({"result": "Process already stopped or non existing"}, status=status.HTTP_404_NOT_FOUND)
elif weights:
weights = weights.first()
celery_id = weights.celery_id
celery_app.control.revoke(celery_id, terminate=True, signal='SIGUSR1')
response = {"result": "Training stopped"}
# delete the ModelWeights entry from db
# also delete ModelWeights fk in project
weights.delete()
elif infer:
infer = infer.first()
celery_id = infer.celery_id
celery_app.control.revoke(celery_id, terminate=True, signal='SIGUSR1')
response = {"result": "Inference stopped"}
# delete the ModelWeights entry from db
infer.delete()
# todo delete log file? delete weight file?
return Response(response, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TaskViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = models.Task.objects.all()
serializer_class = serializers.TaskSerializer
def list(self, request, *args, **kwargs):
"""Return the tasks supported by backend
        This API allows the client to know which tasks this platform supports, e.g. classification or segmentation tasks.
"""
return super().list(request, *args, **kwargs)
class TrainViewSet(views.APIView):
@swagger_auto_schema(request_body=serializers.TrainSerializer,
responses=swagger.TrainViewSet_post_response
)
def post(self, request):
"""Starts the training of a (possibly pre-trained) model on a dataset
This is the main entry point to start the training of a model on a dataset. \
It is mandatory to specify a model to be trained and a dataset.
When providing a `weights_id`, the training starts from the pre-trained model.
"""
serializer = serializers.TrainSerializer(data=request.data)
if serializer.is_valid():
# Create a new modelweights and start training
weight = models.ModelWeights()
weight.dataset_id_id = serializer.data['dataset_id']
weight.model_id_id = serializer.data['model_id']
if not models.Dataset.objects.filter(id=weight.dataset_id_id, is_single_image=False).exists():
error = {"Error": f"Dataset with id `{weight.dataset_id_id}` does not exist"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
if not models.Model.objects.filter(id=weight.model_id_id).exists():
error = {"Error": f"Model with id `{weight.model_id_id}` does not exist"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
if not models.Project.objects.filter(id=serializer.data['project_id']).exists():
error = {"Error": f"Project with id `{serializer.data['project_id']}` does not exist"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
# Check if dataset and model are both for same task
if weight.model_id.task_id != weight.dataset_id.task_id:
error = {"Error": f"Model and dataset must belong to the same task"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
project = models.Project.objects.get(id=serializer.data['project_id'])
task_name = project.task_id.name.lower()
weight.task_id = project.task_id
weight.name = f'{weight.model_id.name}_{weight.dataset_id.name}_' \
f'{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'
if serializer.data['weights_id']:
weight.pretrained_on_id = serializer.data['weights_id']
if not models.ModelWeights.objects.filter(id=weight.pretrained_on_id).exists():
error = {"Error": f"Model weight with id `{weight.pretrained_on_id}` does not exist"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
weight.save() # Generate an id for the weight
ckpts_dir = opjoin(settings.TRAINING_DIR, 'ckpts')
weight.location = Path(opjoin(ckpts_dir, f'{weight.id}.bin')).absolute()
# Create a logfile
weight.logfile = models.generate_file_path(f'{uuid.uuid4().hex}.log', settings.TRAINING_DIR, 'logs')
weight.save()
hyperparams = {}
# Check if current model has some custom properties and load them
props_allowed = models.AllowedProperty.objects.filter(model_id=weight.model_id_id)
if props_allowed:
for p in props_allowed:
hyperparams[p.property_id.name] = p.default_value
# Load default values for those properties not in props_allowed
props_general = models.Property.objects.all()
for p in props_general:
if hyperparams.get(p.name) is None:
hyperparams[p.name] = p.default
# Overwrite hyperparams with ones provided by the user
props = serializer.data['properties']
for p in props:
ts = models.TrainingSetting()
# Get the property by name
name = p['name']
name = [name, name.replace('_', ' ')]
queryset = models.Property.objects.filter(Q(name__icontains=name[0]) | Q(name__icontains=name[1]))
if len(queryset) == 0:
# Property does not exist, delete the weight and its associated properties (cascade)
weight.delete()
error = {"Error": f"Property `{p['name']}` does not exist"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
property = queryset[0]
ts.property_id = property
ts.modelweights_id = weight
ts.value = str(p['value'])
ts.save()
hyperparams[property.name] = ts.value
config = nn_settings(modelweight=weight, hyperparams=hyperparams)
if not config:
return Response({"Error": "Properties error"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# Differentiate the task and start training
if task_name == 'classification':
celery_id = classification.classificate.delay(config)
# celery_id = classification.classificate(config)
elif task_name == 'segmentation':
celery_id = segmentation.segment.delay(config)
# celery_id = segmentation.segment(config)
else:
return Response({'error': 'error on task'}, status=status.HTTP_400_BAD_REQUEST)
weight = models.ModelWeights.objects.get(id=weight.id)
weight.celery_id = celery_id.id
weight.save()
# todo what if project already has a modelweight?
# Training started, store the training in project
project.modelweights_id = weight
project.save()
response = {
"result": "ok",
"process_id": celery_id.id,
"weight_id": weight.id
}
return Response(response, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
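# Illustrative client sketch (base URL, ids and the property name are hypothetical;
# field names follow the TrainSerializer usage above):
#   payload = {
#       "dataset_id": 1, "model_id": 2, "project_id": 3,
#       "weights_id": None,  # set to an existing weight id to fine-tune
#       "properties": [{"name": "Learning rate", "value": "0.0001"}],
#   }
#   requests.post(f"{BASE_URL}/train", json=payload)
#   # -> 201 {"result": "ok", "process_id": "<celery uuid>", "weight_id": <new id>}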
class TrainingSettingViewSet(BAMixins.ParamListModelMixin,
viewsets.GenericViewSet):
queryset = models.TrainingSetting.objects.all()
serializer_class = serializers.TrainingSettingSerializer
params = ['modelweights_id', 'property_id']
def get_queryset(self):
modelweights_id = self.request.query_params.get('modelweights_id')
property_id = self.request.query_params.get('property_id')
self.queryset = models.TrainingSetting.objects.filter(modelweights_id=modelweights_id, property_id=property_id)
return self.queryset
@swagger_auto_schema(
manual_parameters=[openapi.Parameter('modelweights_id', openapi.IN_QUERY, "Integer representing a ModelWeights",
required=True, type=openapi.TYPE_INTEGER),
openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a Property",
required=True, type=openapi.TYPE_INTEGER)]
)
def list(self, request, *args, **kwargs):
"""Returns settings used for a training
This API returns the value used for a property in a specific training (a modelweights).
It requires a `modelweights_id`, indicating a training process, and a `property_id`.
"""
return super().list(request, *args, **kwargs)
|
[
"backend_app.models.Model.objects.filter",
"backend_app.models.Project.objects.filter",
"numpy.trunc",
"drf_yasg.utils.swagger_auto_schema",
"backend_app.models.Project.objects.get",
"yaml.load",
"backend_app.models.AllowedProperty.objects.all",
"backend_app.models.TrainingSetting",
"backend_app.models.Inference.objects.filter",
"backend_app.models.ModelWeights.objects.get",
"backend_app.models.Dataset",
"pathlib.Path",
"backend_app.models.Task.objects.all",
"deeplearning.tasks.segmentation.segment.delay",
"backend_app.utils.do_inference",
"django.db.models.Q",
"backend.celery_app.control.revoke",
"backend_app.models.TrainingSetting.objects.all",
"backend_app.models.ModelWeights.objects.filter",
"yaml.dump",
"deeplearning.utils.nn_settings",
"celery.result.AsyncResult",
"backend_app.serializers.InferenceSerializer",
"requests.get",
"uuid.uuid4",
"backend_app.models.ModelWeights.objects.all",
"backend_app.models.ModelWeights",
"backend_app.models.Project.objects.all",
"drf_yasg.openapi.Parameter",
"backend_app.models.Model.objects.all",
"backend_app.models.AllowedProperty.objects.filter",
"backend_app.serializers.TrainSerializer",
"backend_app.models.Property.objects.all",
"os.path.join",
"backend_app.models.Dataset.objects.filter",
"datetime.datetime.now",
"rest_framework.response.Response",
"backend_app.models.TrainingSetting.objects.filter",
"backend_app.serializers.ProjectSerializer",
"backend_app.serializers.StopProcessSerializer",
"deeplearning.tasks.classification.classificate.delay",
"backend_app.serializers.InferenceSingleSerializer"
] |
[((816, 852), 'backend_app.models.AllowedProperty.objects.all', 'models.AllowedProperty.objects.all', ([], {}), '()\n', (850, 852), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((2601, 2653), 'backend_app.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'is_single_image': '(False)'}), '(is_single_image=False)\n', (2630, 2653), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((3624, 3693), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'responses': 'swagger.DatasetViewSet_create_response'}), '(responses=swagger.DatasetViewSet_create_response)\n', (3643, 3693), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((5697, 5812), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.InferenceSerializer', 'responses': 'swagger.inferences_post_responses'}), '(request_body=serializers.InferenceSerializer, responses\n =swagger.inferences_post_responses)\n', (5716, 5812), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((6373, 6493), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.InferenceSingleSerializer', 'responses': 'swagger.inferences_post_responses'}), '(request_body=serializers.InferenceSingleSerializer,\n responses=swagger.inferences_post_responses)\n', (6392, 6493), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((8546, 8572), 'backend_app.models.Model.objects.all', 'models.Model.objects.all', ([], {}), '()\n', (8570, 8572), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((9741, 9774), 'backend_app.models.ModelWeights.objects.all', 'models.ModelWeights.objects.all', ([], {}), '()\n', (9772, 9774), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((12362, 12399), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'auto_schema': 'None'}), '(auto_schema=None)\n', (12381, 12399), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((15424, 15452), 'backend_app.models.Project.objects.all', 'models.Project.objects.all', ([], {}), '()\n', (15450, 15452), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((16072, 16141), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'responses': 'swagger.ProjectViewSet_create_response'}), '(responses=swagger.ProjectViewSet_create_response)\n', (16091, 16141), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((17116, 17153), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'auto_schema': 'None'}), '(auto_schema=None)\n', (17135, 17153), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((17433, 17462), 'backend_app.models.Property.objects.all', 'models.Property.objects.all', ([], {}), '()\n', (17460, 17462), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((20989, 21112), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.StopProcessSerializer', 'responses': 'swagger.StopProcessViewSet_post_response'}), '(request_body=serializers.StopProcessSerializer,\n responses=swagger.StopProcessViewSet_post_response)\n', (21008, 21112), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((23022, 23047), 'backend_app.models.Task.objects.all', 'models.Task.objects.all', ([], {}), '()\n', (23045, 23047), False, 'from backend_app import mixins as BAMixins, 
models, serializers, swagger\n'), ((23424, 23536), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'request_body': 'serializers.TrainSerializer', 'responses': 'swagger.TrainViewSet_post_response'}), '(request_body=serializers.TrainSerializer, responses=\n swagger.TrainViewSet_post_response)\n', (23443, 23536), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((29642, 29678), 'backend_app.models.TrainingSetting.objects.all', 'models.TrainingSetting.objects.all', ([], {}), '()\n', (29676, 29678), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((1136, 1222), 'backend_app.models.AllowedProperty.objects.filter', 'models.AllowedProperty.objects.filter', ([], {'model_id': 'model_id', 'property_id': 'property_id'}), '(model_id=model_id, property_id=\n property_id)\n', (1173, 1222), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((5578, 5650), 'rest_framework.response.Response', 'Response', (["{'error': 'URL malformed'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)\n", (5586, 5650), False, 'from rest_framework.response import Response\n'), ((6106, 6156), 'backend_app.serializers.InferenceSerializer', 'serializers.InferenceSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (6137, 6156), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((6257, 6320), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (6265, 6320), False, 'from rest_framework.response import Response\n'), ((6845, 6901), 'backend_app.serializers.InferenceSingleSerializer', 'serializers.InferenceSingleSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (6882, 6901), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((8378, 8441), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (8386, 8441), False, 'from rest_framework.response import Response\n'), ((12060, 12123), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (12068, 12123), False, 'from rest_framework.response import Response\n'), ((13714, 13767), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (13745, 13767), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((15130, 15175), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (15138, 15175), False, 'from rest_framework.response import Response\n'), ((16617, 16674), 'backend_app.serializers.ProjectSerializer', 'serializers.ProjectSerializer', (['project'], {'data': 'request.data'}), '(project, data=request.data)\n', (16646, 16674), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((16831, 16894), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (16839, 16894), False, 'from rest_framework.response 
import Response\n'), ((20895, 20940), 'rest_framework.response.Response', 'Response', ([], {'data': 'res', 'status': 'status.HTTP_200_OK'}), '(data=res, status=status.HTTP_200_OK)\n', (20903, 20940), False, 'from rest_framework.response import Response\n'), ((21383, 21435), 'backend_app.serializers.StopProcessSerializer', 'serializers.StopProcessSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (21416, 21435), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22856, 22919), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (22864, 22919), False, 'from rest_framework.response import Response\n'), ((23969, 24015), 'backend_app.serializers.TrainSerializer', 'serializers.TrainSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (23996, 24015), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((29447, 29510), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (29455, 29510), False, 'from rest_framework.response import Response\n'), ((29983, 30082), 'backend_app.models.TrainingSetting.objects.filter', 'models.TrainingSetting.objects.filter', ([], {'modelweights_id': 'modelweights_id', 'property_id': 'property_id'}), '(modelweights_id=modelweights_id,\n property_id=property_id)\n', (30020, 30082), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((2843, 2912), 'backend_app.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'task_id': 'task_id', 'is_single_image': '(False)'}), '(task_id=task_id, is_single_image=False)\n', (2872, 2912), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((4207, 4315), 'rest_framework.response.Response', 'Response', (["{'error': 'Validation error. Request data is malformed.'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'Validation error. 
Request data is malformed.'}, status=\n status.HTTP_400_BAD_REQUEST)\n", (4215, 4315), False, 'from rest_framework.response import Response\n'), ((4649, 4757), 'rest_framework.response.Response', 'Response', (["{'error': f'The dataset `{dataset_name}` already exists'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': f'The dataset `{dataset_name}` already exists'}, status=\n status.HTTP_400_BAD_REQUEST)\n", (4657, 4757), False, 'from rest_framework.response import Response\n'), ((4810, 4849), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)'}), '(url, allow_redirects=True)\n', (4822, 4849), False, 'import requests\n'), ((6211, 6241), 'backend_app.utils.do_inference', 'utils.do_inference', (['serializer'], {}), '(serializer)\n', (6229, 6241), False, 'from backend_app import utils\n'), ((7540, 7636), 'backend_app.models.Dataset', 'models.Dataset', ([], {'name': 'f"""single-image-dataset"""', 'task_id': 'task_id', 'path': '""""""', 'is_single_image': '(True)'}), "(name=f'single-image-dataset', task_id=task_id, path='',\n is_single_image=True)\n", (7554, 7636), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((8332, 8362), 'backend_app.utils.do_inference', 'utils.do_inference', (['serializer'], {}), '(serializer)\n', (8350, 8362), False, 'from backend_app import utils\n'), ((8760, 8804), 'backend_app.models.Model.objects.filter', 'models.Model.objects.filter', ([], {'task_id': 'task_id'}), '(task_id=task_id)\n', (8787, 8804), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((10015, 10068), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'model_id': 'model_id'}), '(model_id=model_id)\n', (10049, 10068), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((11071, 11109), 'backend_app.models.ModelWeights.objects.get', 'models.ModelWeights.objects.get', ([], {'id': 'id'}), '(id=id)\n', (11102, 11109), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((11525, 11581), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (11533, 11581), False, 'from rest_framework.response import Response\n'), ((11803, 11863), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'model_id': 'weight.model_id'}), '(model_id=weight.model_id)\n', (11837, 11863), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((12019, 12044), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (12027, 12044), False, 'from rest_framework.response import Response\n'), ((12622, 12651), 'numpy.trunc', 'np.trunc', (['(values * 10 ** decs)'], {}), '(values * 10 ** decs)\n', (12630, 12651), True, 'import numpy as np\n'), ((13576, 13632), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (13584, 13632), False, 'from rest_framework.response import Response\n'), ((13868, 13979), 'rest_framework.response.Response', 'Response', (["{'result': 'Process stopped before finishing or non existing.'}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'result': 'Process stopped before finishing or non existing.'},\n status=status.HTTP_404_NOT_FOUND)\n", (13876, 13979), False, 'from 
rest_framework.response import Response\n'), ((14080, 14186), 'rest_framework.response.Response', 'Response', (["{'result': 'Process in execution. Try later for output results.'}"], {'status': 'status.HTTP_200_OK'}), "({'result': 'Process in execution. Try later for output results.'},\n status=status.HTTP_200_OK)\n", (14088, 14186), False, 'from rest_framework.response import Response\n'), ((14340, 14436), 'rest_framework.response.Response', 'Response', (["{'result': 'Output file not found'}"], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), "({'result': 'Output file not found'}, status=status.\n HTTP_500_INTERNAL_SERVER_ERROR)\n", (14348, 14436), False, 'from rest_framework.response import Response\n'), ((14455, 14501), 'os.path.join', 'opjoin', (['settings.OUTPUTS_DIR', 'infer.outputfile'], {}), '(settings.OUTPUTS_DIR, infer.outputfile)\n', (14461, 14501), True, 'from os.path import join as opjoin\n'), ((15566, 15599), 'backend_app.models.Project.objects.get', 'models.Project.objects.get', ([], {'id': 'id'}), '(id=id)\n', (15592, 15599), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((16539, 16595), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (16547, 16595), False, 'from rest_framework.response import Response\n'), ((19397, 19453), 'rest_framework.response.Response', 'Response', ([], {'data': 'error', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data=error, status=status.HTTP_400_BAD_REQUEST)\n', (19405, 19453), False, 'from rest_framework.response import Response\n'), ((21547, 21603), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (21581, 21603), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((21624, 21677), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (21655, 21677), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22795, 22840), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (22803, 22840), False, 'from rest_framework.response import Response\n'), ((24131, 24152), 'backend_app.models.ModelWeights', 'models.ModelWeights', ([], {}), '()\n', (24150, 24152), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((25391, 25451), 'backend_app.models.Project.objects.get', 'models.Project.objects.get', ([], {'id': "serializer.data['project_id']"}), "(id=serializer.data['project_id'])\n", (25417, 25451), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26199, 26237), 'os.path.join', 'opjoin', (['settings.TRAINING_DIR', '"""ckpts"""'], {}), "(settings.TRAINING_DIR, 'ckpts')\n", (26205, 26237), True, 'from os.path import join as opjoin\n'), ((26630, 26696), 'backend_app.models.AllowedProperty.objects.filter', 'models.AllowedProperty.objects.filter', ([], {'model_id': 'weight.model_id_id'}), '(model_id=weight.model_id_id)\n', (26667, 26696), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26942, 26971), 'backend_app.models.Property.objects.all', 'models.Property.objects.all', ([], {}), '()\n', (26969, 26971), False, 'from backend_app import mixins as 
BAMixins, models, serializers, swagger\n'), ((28158, 28214), 'deeplearning.utils.nn_settings', 'nn_settings', ([], {'modelweight': 'weight', 'hyperparams': 'hyperparams'}), '(modelweight=weight, hyperparams=hyperparams)\n', (28169, 28214), False, 'from deeplearning.utils import nn_settings\n'), ((28894, 28939), 'backend_app.models.ModelWeights.objects.get', 'models.ModelWeights.objects.get', ([], {'id': 'weight.id'}), '(id=weight.id)\n', (28925, 28939), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((29381, 29431), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_201_CREATED'}), '(response, status=status.HTTP_201_CREATED)\n', (29389, 29431), False, 'from rest_framework.response import Response\n'), ((1301, 1426), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""model_id"""', 'openapi.IN_QUERY', '"""Integer representing a model"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('model_id', openapi.IN_QUERY,\n 'Integer representing a model', required=True, type=openapi.TYPE_INTEGER)\n", (1318, 1426), False, 'from drf_yasg import openapi\n'), ((1496, 1632), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""property_id"""', 'openapi.IN_QUERY', '"""Integer representing a property"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('property_id', openapi.IN_QUERY,\n 'Integer representing a property', required=True, type=openapi.TYPE_INTEGER\n )\n", (1513, 1632), False, 'from drf_yasg import openapi\n'), ((3073, 3166), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""task_id"""', 'openapi.IN_QUERY'], {'type': 'openapi.TYPE_INTEGER', 'required': '(False)'}), "('task_id', openapi.IN_QUERY, type=openapi.TYPE_INTEGER,\n required=False)\n", (3090, 3166), False, 'from drf_yasg import openapi\n'), ((4568, 4619), 'pathlib.Path', 'Path', (['f"""{settings.DATASETS_DIR}/{dataset_name}.yml"""'], {}), "(f'{settings.DATASETS_DIR}/{dataset_name}.yml')\n", (4572, 4619), False, 'from pathlib import Path\n'), ((4918, 4962), 'yaml.load', 'yaml.load', (['r.content'], {'Loader': 'yaml.FullLoader'}), '(r.content, Loader=yaml.FullLoader)\n', (4927, 4962), False, 'import yaml\n'), ((5315, 5389), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED', 'headers': 'headers'}), '(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n', (5323, 5389), False, 'from rest_framework.response import Response\n'), ((5490, 5562), 'rest_framework.response.Response', 'Response', (["{'error': 'URL malformed'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)\n", (5498, 5562), False, 'from rest_framework.response import Response\n'), ((7087, 7128), 'backend_app.models.Project.objects.get', 'models.Project.objects.get', ([], {'id': 'project_id'}), '(id=project_id)\n', (7113, 7128), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((7702, 7750), 'yaml.load', 'yaml.load', (['dummy_dataset'], {'Loader': 'yaml.FullLoader'}), '(dummy_dataset, Loader=yaml.FullLoader)\n', (7711, 7750), False, 'import yaml\n'), ((8057, 8123), 'yaml.dump', 'yaml.dump', (['yaml_content', 'f'], {'Dumper': 'utils.MyDumper', 'sort_keys': '(False)'}), '(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)\n', (8066, 8123), False, 'import yaml\n'), ((8888, 9037), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""task_id"""', 'openapi.IN_QUERY', '"""Integer for filtering 
the models based on task."""'], {'type': 'openapi.TYPE_INTEGER', 'required': '(False)'}), "('task_id', openapi.IN_QUERY,\n 'Integer for filtering the models based on task.', type=openapi.\n TYPE_INTEGER, required=False)\n", (8905, 9037), False, 'from drf_yasg import openapi\n'), ((10237, 10393), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""model_id"""', 'openapi.IN_QUERY', '"""Return the modelweights obtained on `model_id` model."""'], {'type': 'openapi.TYPE_INTEGER', 'required': '(False)'}), "('model_id', openapi.IN_QUERY,\n 'Return the modelweights obtained on `model_id` model.', type=openapi.\n TYPE_INTEGER, required=False)\n", (10254, 10393), False, 'from drf_yasg import openapi\n'), ((14016, 14039), 'celery.result.AsyncResult', 'AsyncResult', (['process_id'], {}), '(process_id)\n', (14027, 14039), False, 'from celery.result import AsyncResult\n'), ((14272, 14318), 'os.path.join', 'opjoin', (['settings.OUTPUTS_DIR', 'infer.outputfile'], {}), '(settings.OUTPUTS_DIR, infer.outputfile)\n', (14278, 14318), True, 'from os.path import join as opjoin\n'), ((12721, 12906), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""process_id"""', 'openapi.IN_QUERY', '"""Pass a required UUID representing a finished process."""'], {'type': 'openapi.TYPE_STRING', 'format': 'openapi.FORMAT_UUID', 'required': '(False)'}), "('process_id', openapi.IN_QUERY,\n 'Pass a required UUID representing a finished process.', type=openapi.\n TYPE_STRING, format=openapi.FORMAT_UUID, required=False)\n", (12738, 12906), False, 'from drf_yasg import openapi\n'), ((19531, 19587), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19565, 19587), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((20071, 20123), 'rest_framework.response.Response', 'Response', ([], {'data': 'res', 'status': 'status.HTTP_404_NOT_FOUND'}), '(data=res, status=status.HTTP_404_NOT_FOUND)\n', (20079, 20123), False, 'from rest_framework.response import Response\n'), ((20422, 20486), 'rest_framework.response.Response', 'Response', ([], {'data': 'res', 'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), '(data=res, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n', (20430, 20486), False, 'from rest_framework.response import Response\n'), ((18527, 18684), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""process_id"""', 'openapi.IN_QUERY', '"""UUID representing a process"""'], {'required': '(True)', 'type': 'openapi.TYPE_STRING', 'format': 'openapi.FORMAT_UUID'}), "('process_id', openapi.IN_QUERY,\n 'UUID representing a process', required=True, type=openapi.TYPE_STRING,\n format=openapi.FORMAT_UUID)\n", (18544, 18684), False, 'from drf_yasg import openapi\n'), ((21877, 21979), 'rest_framework.response.Response', 'Response', (["{'result': 'Process already stopped or non existing'}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'result': 'Process already stopped or non existing'}, status=\n status.HTTP_404_NOT_FOUND)\n", (21885, 21979), False, 'from rest_framework.response import Response\n'), ((24504, 24555), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (24512, 24555), False, 'from rest_framework.response import Response\n'), ((24750, 24801), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, 
status=status.HTTP_400_BAD_REQUEST)\n', (24758, 24801), False, 'from rest_framework.response import Response\n'), ((25022, 25073), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (25030, 25073), False, 'from rest_framework.response import Response\n'), ((25316, 25367), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (25324, 25367), False, 'from rest_framework.response import Response\n'), ((27279, 27303), 'backend_app.models.TrainingSetting', 'models.TrainingSetting', ([], {}), '()\n', (27301, 27303), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((28265, 28355), 'rest_framework.response.Response', 'Response', (["{'Error': 'Properties error'}"], {'status': 'status.HTTP_500_INTERNAL_SERVER_ERROR'}), "({'Error': 'Properties error'}, status=status.\n HTTP_500_INTERNAL_SERVER_ERROR)\n", (28273, 28355), False, 'from rest_framework.response import Response\n'), ((28482, 28523), 'deeplearning.tasks.classification.classificate.delay', 'classification.classificate.delay', (['config'], {}), '(config)\n', (28515, 28523), False, 'from deeplearning.tasks import classification, segmentation\n'), ((30162, 30306), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""modelweights_id"""', 'openapi.IN_QUERY', '"""Integer representing a ModelWeights"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('modelweights_id', openapi.IN_QUERY,\n 'Integer representing a ModelWeights', required=True, type=openapi.\n TYPE_INTEGER)\n", (30179, 30306), False, 'from drf_yasg import openapi\n'), ((30371, 30507), 'drf_yasg.openapi.Parameter', 'openapi.Parameter', (['"""property_id"""', 'openapi.IN_QUERY', '"""Integer representing a Property"""'], {'required': '(True)', 'type': 'openapi.TYPE_INTEGER'}), "('property_id', openapi.IN_QUERY,\n 'Integer representing a Property', required=True, type=openapi.TYPE_INTEGER\n )\n", (30388, 30507), False, 'from drf_yasg import openapi\n'), ((5067, 5133), 'yaml.dump', 'yaml.dump', (['yaml_content', 'f'], {'Dumper': 'utils.MyDumper', 'sort_keys': '(False)'}), '(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)\n', (5076, 5133), False, 'import yaml\n'), ((7866, 7951), 'rest_framework.response.Response', 'Response', (["{'error': 'Error in YAML parsing'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'Error in YAML parsing'}, status=status.HTTP_400_BAD_REQUEST\n )\n", (7874, 7951), False, 'from rest_framework.response import Response\n'), ((17783, 17809), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[0]'}), '(name__icontains=name[0])\n', (17784, 17809), False, 'from django.db.models import Q\n'), ((17812, 17838), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[1]'}), '(name__icontains=name[1])\n', (17813, 17838), False, 'from django.db.models import Q\n'), ((19658, 19714), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19692, 19714), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((19736, 19789), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19767, 19789), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22105, 
22175), 'backend.celery_app.control.revoke', 'celery_app.control.revoke', (['celery_id'], {'terminate': '(True)', 'signal': '"""SIGUSR1"""'}), "(celery_id, terminate=True, signal='SIGUSR1')\n", (22130, 22175), False, 'from backend import celery_app, settings\n'), ((26063, 26114), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (26071, 26114), False, 'from rest_framework.response import Response\n'), ((27836, 27887), 'rest_framework.response.Response', 'Response', (['error'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(error, status=status.HTTP_400_BAD_REQUEST)\n', (27844, 27887), False, 'from rest_framework.response import Response\n'), ((28664, 28698), 'deeplearning.tasks.segmentation.segment.delay', 'segmentation.segment.delay', (['config'], {}), '(config)\n', (28690, 28698), False, 'from deeplearning.tasks import classification, segmentation\n'), ((28799, 28871), 'rest_framework.response.Response', 'Response', (["{'error': 'error on task'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'error on task'}, status=status.HTTP_400_BAD_REQUEST)\n", (28807, 28871), False, 'from rest_framework.response import Response\n'), ((19861, 19914), 'backend_app.models.Inference.objects.filter', 'models.Inference.objects.filter', ([], {'celery_id': 'process_id'}), '(celery_id=process_id)\n', (19892, 19914), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((22502, 22572), 'backend.celery_app.control.revoke', 'celery_app.control.revoke', (['celery_id'], {'terminate': '(True)', 'signal': '"""SIGUSR1"""'}), "(celery_id, terminate=True, signal='SIGUSR1')\n", (22527, 22572), False, 'from backend import celery_app, settings\n'), ((24299, 24376), 'backend_app.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'id': 'weight.dataset_id_id', 'is_single_image': '(False)'}), '(id=weight.dataset_id_id, is_single_image=False)\n', (24328, 24376), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((24576, 24626), 'backend_app.models.Model.objects.filter', 'models.Model.objects.filter', ([], {'id': 'weight.model_id_id'}), '(id=weight.model_id_id)\n', (24603, 24626), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((24822, 24885), 'backend_app.models.Project.objects.filter', 'models.Project.objects.filter', ([], {'id': "serializer.data['project_id']"}), "(id=serializer.data['project_id'])\n", (24851, 24885), False, 'from backend_app import mixins as BAMixins, models, serializers, swagger\n'), ((26273, 26310), 'os.path.join', 'opjoin', (['ckpts_dir', 'f"""{weight.id}.bin"""'], {}), "(ckpts_dir, f'{weight.id}.bin')\n", (26279, 26310), True, 'from os.path import join as opjoin\n'), ((27492, 27518), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[0]'}), '(name__icontains=name[0])\n', (27493, 27518), False, 'from django.db.models import Q\n'), ((27521, 27547), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'name[1]'}), '(name__icontains=name[1])\n', (27522, 27547), False, 'from django.db.models import Q\n'), ((25659, 25682), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25680, 25682), False, 'import datetime\n'), ((25857, 25919), 'backend_app.models.ModelWeights.objects.filter', 'models.ModelWeights.objects.filter', ([], {'id': 'weight.pretrained_on_id'}), '(id=weight.pretrained_on_id)\n', (25891, 25919), False, 'from backend_app import mixins 
as BAMixins, models, serializers, swagger\n'), ((26412, 26424), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26422, 26424), False, 'import uuid\n')]
|
# Copyright (c) 2018 IoTeX
# This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
# warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
# permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
# License 2.0 that can be found in the LICENSE file.
"""This module defines the Player class, which represents a player in the network and contains functionality to make transactions, propose blocks, and validate blocks.
"""
import random
import grpc
import numpy as np
from proto import simulator_pb2_grpc
from proto import simulator_pb2
import solver
import consensus_client
import consensus_failurestop
# enum for defining the consensus types
class CTypes:
Honest = 0
FailureStop = 1
ByzantineFault = 2
class Player:
id = 0 # player id
MEAN_TX_FEE = 0.2 # mean transaction fee
STD_TX_FEE = 0.05 # std of transaction fee
DUMMY_MSG_TYPE = 1999 # if there are no messages to process, dummy message is sent to consensus engine
msgMap = {(DUMMY_MSG_TYPE, bytes()): "dummy msg"} # maps message to message name for printing
correctHashes = [] # hashes of blocks proposed by HONEST consensus types
def __init__(self, consensusType):
"""Creates a new Player object"""
self.id = Player.id # the player's id
Player.id += 1
self.blockchain = [] # blockchain (simplified to a list)
self.connections = [] # list of connected players
self.inbound = [] # inbound messages from other players in the network at heartbeat r
self.outbound = [] # outbound messages to other players in the network at heartbeat r
self.seenMessages = set() # set of seen messages
if consensusType == CTypes.Honest or consensusType == CTypes.ByzantineFault:
self.consensus = consensus_client.Consensus()
elif consensusType == CTypes.FailureStop:
self.consensus = consensus_failurestop.ConsensusFS()
self.consensusType = consensusType
self.consensus.playerID = self.id
self.consensus.player = self
        self.nMsgsPassed = [0] # number of messages sent per committed block (current block last)
        self.timeCreated = []  # timestamps at which blocks were committed
def action(self, heartbeat):
"""Executes the player's actions for heartbeat r"""
print("player %d action started at heartbeat %f" % (self.id, heartbeat))
# self.inbound: [sender, (msgType, msgBody), timestamp]
# print messages
for sender, msg, timestamp in self.inbound:
print("received %s from %s with timestamp %f" % (Player.msgMap[msg], sender, timestamp))
# if the Player received no messages before the current heartbeat, add a `dummy` message so the Player still pings the consensus in case the consensus has something it wants to return
# an example of when this is useful is in the first round when consensus proposes a message; the Player needs to ping it to receive the proposal
if len(list(filter(lambda x: x[2] <= heartbeat, self.inbound))) == 0:
self.inbound += [[-1, (Player.DUMMY_MSG_TYPE, bytes()), heartbeat]]
# process each inbound message
for sender, msg, timestamp in self.inbound:
# note: msg is a tuple: (msgType, msgBody)
# cannot see the message yet if the heartbeat is less than the timestamp
if timestamp > heartbeat: continue
if msg[0] != Player.DUMMY_MSG_TYPE and (msg, sender) in self.seenMessages: continue
self.seenMessages.add((msg, sender))
print("sent %s to consensus engine" % Player.msgMap[msg])
received = self.consensus.processMessage(sender, msg)
for recipient, mt, v in received:
# if mt = 2, the msgBody is comprised of message|blockHash
# recipient is the recipient of the message the consensus sends outwards
# mt is the message type (0 = view state change message, 1 = block committed, 2 = special case)
# v is message value
# TODO: fix this
'''if "|" in v[1]:
separator = v[1].index("|")
blockHash = v[1][separator+1:]
v = (v[0], v[1][:separator])'''
if v not in Player.msgMap:
Player.msgMap[v] = "msg "+str(len(Player.msgMap))
print("received %s from consensus engine" % Player.msgMap[v])
if mt == 0: # view state change message
self.outbound.append([recipient, v, timestamp])
elif mt == 1: # block to be committed
self.blockchain.append(v[1]) # append block hash
self.nMsgsPassed.append(0)
self.timeCreated.append(timestamp)
print("committed %s to blockchain" % Player.msgMap[v])
else: # newly proposed block
self.outbound.append([recipient, v, timestamp])
print("PROPOSED %s BLOCK"%("HONEST" if self.consensusType == CTypes.Honest else "BYZANTINE"))
if self.consensusType != CTypes.ByzantineFault:
Player.correctHashes.append(blockHash)
else:
Player.correctHashes.append("")
# also gossip the current message
# I think this is not necessary for tendermint so I am commenting it out
'''if msg[0] != Player.DUMMY_MSG_TYPE and self.consensusType != CTypes.FailureStop:
self.outbound.append([msg, timestamp])'''
self.inbound = list(filter(lambda x: x[2] > heartbeat, self.inbound)) # get rid of processed messages
return self.sendOutbound()
def sendOutbound(self):
"""Send all outbound connections to connected nodes.
Returns list of nodes messages have been sent to"""
if len(self.outbound) == 0:
sentMsgs = False
else:
sentMsgs = True
ci = set()
for recipient, message, timestamp in self.outbound:
if recipient == -1: # -1 means message is broadcast to all connections
for c in self.connections:
self.outbound.append([c.id, message, timestamp])
continue
recipient = list(filter(lambda x: x.id == recipient, self.connections))[0] # recipient is an id; we want to find the player which corresponds to this id in the connections
ci.add(recipient)
sender = self.id
self.nMsgsPassed[-1] += 1
dt = np.random.lognormal(self.NORMAL_MEAN, self.NORMAL_STD) # add propagation time to timestamp
print("sent %s to %s" % (Player.msgMap[message], recipient.id))
recipient.inbound.append([sender, message, timestamp+dt])
self.outbound.clear()
print()
return list(ci), sentMsgs
def __str__(self):
return "player %s" % (self.id)
def __repr__(self):
return "player %s" % (self.id)
def __hash__(self):
return self.id
|
[
"consensus_failurestop.ConsensusFS",
"consensus_client.Consensus",
"numpy.random.lognormal"
] |
[((2124, 2152), 'consensus_client.Consensus', 'consensus_client.Consensus', ([], {}), '()\n', (2150, 2152), False, 'import consensus_client\n'), ((6945, 6999), 'numpy.random.lognormal', 'np.random.lognormal', (['self.NORMAL_MEAN', 'self.NORMAL_STD'], {}), '(self.NORMAL_MEAN, self.NORMAL_STD)\n', (6964, 6999), True, 'import numpy as np\n'), ((2232, 2267), 'consensus_failurestop.ConsensusFS', 'consensus_failurestop.ConsensusFS', ([], {}), '()\n', (2265, 2267), False, 'import consensus_failurestop\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import math
TIME_SLEEP = 0.000000001
def train_sgd(X, y, alpha, w=None):
"""Trains a linear regression model using stochastic gradient descent.
Parameters
----------
X : numpy.ndarray
Numpy array of data
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of rows
in `X`.
alpha : float
Describes the learning rate.
w : numpy.ndarray, optional
The initial w vector (the default is zero).
Returns
-------
w : numpy.ndarray
Trained vector with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
previous_error = -1
error = -1
stop = False
num_iters = 0
if w is None:
        w = np.zeros((X.shape[1] + 1, 1))
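    # Each pass over the data applies the per-sample update
    # w <- w - (alpha / n) * (w^T x_i - y_i) * x_i, and training stops once the
    # mean squared error changes by less than 1% between consecutive checks and
    # more than 10000 updates have been performed.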
while not stop:
for i in range(0, len(X)):
w = w - alpha / len(X) * (np.dot(np.transpose(w),
X_b[i].reshape(X_b.shape[1], 1)) -
y[i]) * X_b[i].reshape(X_b.shape[1], 1)
error = evaluate_error(X, y, w)
if previous_error == -1:
previous_error = error
elif (math.fabs(error - previous_error) < 0.01 * previous_error and
num_iters > 10000):
stop = True
break
previous_error = error
num_iters += 1
return w
def train(X, y):
"""Trains a linear regression model using linear algebra.
Parameters
----------
X : numpy.ndarray
Numpy array of data
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of rows
in `X`.
Returns
-------
w : numpy.ndarray
Trained vector with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
"""
# Add bias term
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
# Compute pseudo-inverse
X_inverse = (np.linalg.inv(np.transpose(X_b).dot(X_b)).dot(
np.transpose(X_b)))
# Compute w
w = X_inverse.dot(y)
return w
# Plot data
def plot(X, y, w):
"""Plot X data, the actual y output, and the prediction line.
Parameters
----------
X : numpy.ndarray
Numpy array of data with 1 column.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions 2 * 1.
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
plt.clf()
plt.plot(X[:, 0], y_predict, 'r-', X[:, 0], y, 'o')
plt.pause(TIME_SLEEP)
def init_plot(figsize=(15, 8)):
"""Initializes the plot.
Parameters
----------
figsize : tuple, optional
A tuple containing the width and height of the plot (the default is
(15, 8)).
"""
plt.ion()
f = plt.figure(figsize=figsize)
plt.show()
def evaluate_error(X, y, w):
"""Returns the mean squared error.
X : numpy.ndarray
Numpy array of data.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
float
The mean squared error
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
dist = (y - y_predict) ** 2
return float(np.sum(dist)) / X.shape[0]
def predict(X, w):
"""Returns the prediction for one data point.
Parameters
----------
X : numpy.ndarray
Numpy array of data
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
    numpy.ndarray
        The predicted output for the given data
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
return X_b.dot(w)
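# A minimal usage sketch (not part of the original module): it fits both solvers on
# synthetic, noisy 1-D data and compares their errors. The data, the learning rate,
# and the initial weight vector below are illustrative assumptions.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(0, 10, size=(100, 1))
    y_demo = 3.0 * X_demo + 2.0 + rng.normal(0.0, 1.0, size=(100, 1))
    w_exact = train(X_demo, y_demo)                              # closed-form solution
    w_sgd = train_sgd(X_demo, y_demo, 0.01, w=np.zeros((2, 1)))  # bias + one feature
    print("closed-form MSE:", evaluate_error(X_demo, y_demo, w_exact))
    print("SGD MSE:", evaluate_error(X_demo, y_demo, w_sgd))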
|
[
"numpy.ones",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"math.fabs",
"matplotlib.pyplot.pause",
"numpy.transpose",
"matplotlib.pyplot.ion"
] |
[((2701, 2710), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2708, 2710), True, 'import matplotlib.pyplot as plt\n'), ((2715, 2766), 'matplotlib.pyplot.plot', 'plt.plot', (['X[:, 0]', 'y_predict', '"""r-"""', 'X[:, 0]', 'y', '"""o"""'], {}), "(X[:, 0], y_predict, 'r-', X[:, 0], y, 'o')\n", (2723, 2766), True, 'import matplotlib.pyplot as plt\n'), ((2771, 2792), 'matplotlib.pyplot.pause', 'plt.pause', (['TIME_SLEEP'], {}), '(TIME_SLEEP)\n', (2780, 2792), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3033), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3031, 3033), True, 'import matplotlib.pyplot as plt\n'), ((3042, 3069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3052, 3069), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3082, 3084), True, 'import matplotlib.pyplot as plt\n'), ((860, 889), 'numpy.zeros', 'np.zeros', (['(x.shape[1] + 1, 1)'], {}), '((x.shape[1] + 1, 1))\n', (868, 889), True, 'import numpy as np\n'), ((2149, 2166), 'numpy.transpose', 'np.transpose', (['X_b'], {}), '(X_b)\n', (2161, 2166), True, 'import numpy as np\n'), ((723, 747), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (730, 747), True, 'import numpy as np\n'), ((2008, 2032), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2015, 2032), True, 'import numpy as np\n'), ((2638, 2662), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2645, 2662), True, 'import numpy as np\n'), ((3546, 3570), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (3553, 3570), True, 'import numpy as np\n'), ((3653, 3665), 'numpy.sum', 'np.sum', (['dist'], {}), '(dist)\n', (3659, 3665), True, 'import numpy as np\n'), ((4048, 4072), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (4055, 4072), True, 'import numpy as np\n'), ((1298, 1331), 'math.fabs', 'math.fabs', (['(error - previous_error)'], {}), '(error - previous_error)\n', (1307, 1331), False, 'import math\n'), ((2099, 2116), 'numpy.transpose', 'np.transpose', (['X_b'], {}), '(X_b)\n', (2111, 2116), True, 'import numpy as np\n'), ((991, 1006), 'numpy.transpose', 'np.transpose', (['w'], {}), '(w)\n', (1003, 1006), True, 'import numpy as np\n')]
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Separation via Time-Frequency Masking
# =====================================
#
# One of the most effective ways to separate sounds from a mixture is by
# *masking*. Consider the following mixture, which we will download via
# one of the dataset hooks in *nussl*.
# +
import nussl
import matplotlib.pyplot as plt
import numpy as np
import copy
import time
start_time = time.time()
musdb = nussl.datasets.MUSDB18(download=True)
item = musdb[40]
mix = item['mix']
sources = item['sources']
# -
# Let's listen to the mixture. Note that it contains 4 sources: drums, bass,
# vocals, and all other sounds (considered as one source: other).
mix.embed_audio()
print(mix)
# Let's now consider the time-frequency representation of this mixture:
plt.figure(figsize=(10, 3))
plt.title('Mixture spectrogram')
nussl.utils.visualize_spectrogram(mix, y_axis='mel')
plt.tight_layout()
plt.show()
# Masking means to assign each of these time-frequency bins to one of the four
# sources, either in part or in whole. Assigning a bin in part corresponds to a
# *soft* mask on the time-frequency representation, while assigning it in whole
# corresponds to a *binary* mask. How
# do we assign each time-frequency bin to each source? This is a very hard problem,
# in general. For now, let's consider that we *know* the actual assignment of each
# time-frequency bin. If we know that, how do we separate the sounds?
#
# First let's look at one of the sources, say the drums:
plt.figure(figsize=(10, 3))
plt.title('Drums')
nussl.utils.visualize_spectrogram(sources['drums'], y_axis='mel')
plt.tight_layout()
plt.show()
# Looking at this versus the mixture spectrogram, one can see which time-frequency
# bins belong to the drum. Now, let's build a *mask* on the mixture spectrogram
# using a soft mask. We construct the soft mask using the drum STFT data and the
# mixture STFT data, like so:
mask_data = np.abs(sources['drums'].stft()) / np.abs(mix.stft())
# Hmm, this may not be a safe way to do this. What if there's a `0` in both the source
# and the mix? Then we would get `0/0`, which would result in NaN in the mask. Or
# what if the source STFT is louder than the mix at some time-frequency bin due to
# cancellation between sources when mixed? Let's do things a bit more safely by
# using the maximum and some checking...
mask_data = (
np.abs(sources['drums'].stft()) /
np.maximum(
np.abs(mix.stft()),
np.abs(sources['drums'].stft())
) + nussl.constants.EPSILON
)
# Great, some peace of mind. Now let's apply the soft mask to the mixture to
# separate the drums. We can do this by element-wise multiplying the STFT and
# adding the mixture phase.
# +
magnitude, phase = np.abs(mix.stft_data), np.angle(mix.stft_data)
masked_abs = magnitude * mask_data
masked_stft = masked_abs * np.exp(1j * phase)
drum_est = mix.make_copy_with_stft_data(masked_stft)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# Cool! Sounds pretty good! But it'd be a drag if we had to type all of
# that every time we wanted to separate something. Lucky for you, we
# built this stuff into the core functionality of *nussl*!
#
# `SoftMask` and `BinaryMask`
# ---------------------------
#
# At the core of *nussl*'s separation functionality are the classes
# `SoftMask` and `BinaryMask`. These are classes that contain some logic
# for masking and can be used with AudioSignal objects. We have a soft mask
# already, so let's build a `SoftMask` object.
soft_mask = nussl.core.masks.SoftMask(mask_data)
# `soft_mask` contains our mask here:
soft_mask.mask.shape
# We can apply the soft mask to our mix and return the separated drums easily,
# using the `apply_mask` method:
# +
drum_est = mix.apply_mask(soft_mask)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
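# As a quick aside (not part of the original tutorial): because a soft mask keeps only
# a fraction of each bin's energy, the complementary mask `1 - mask_data` estimates
# everything that is *not* drums. A minimal sketch reusing the drum mask from above
# (clipping guards against the epsilon pushing values slightly outside [0, 1]):
# +
accompaniment_mask = nussl.core.masks.SoftMask(np.clip(1 - mask_data, 0, 1))
accompaniment_est = mix.apply_mask(accompaniment_mask)
accompaniment_est.istft()
accompaniment_est.embed_audio()
# -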
# Sometimes masks are *binary* instead of *soft*. To apply a binary mask, we can do this:
# +
binary_mask = nussl.core.masks.BinaryMask(mask_data > .5)
drum_est = mix.apply_mask(binary_mask)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# Playing around with the threshold will result in more or less leakage of other sources:
# +
binary_mask = nussl.core.masks.BinaryMask(mask_data > .05)
drum_est = mix.apply_mask(binary_mask)
drum_est.istft()
drum_est.embed_audio()
plt.figure(figsize=(10, 3))
plt.title('Separated drums')
nussl.utils.visualize_spectrogram(drum_est, y_axis='mel')
plt.tight_layout()
plt.show()
# -
# You can hear the vocals slightly in the background as well as the
# other sources.
#
# Finally, given a list of separated sources, we can use some handy nussl
# functionality to easily visualize the masks and listen to the original
# sources that make up the mixture.
# +
plt.figure(figsize=(10, 7))
plt.subplot(211)
nussl.utils.visualize_sources_as_masks(
sources, db_cutoff=-60, y_axis='mel')
plt.subplot(212)
nussl.utils.visualize_sources_as_waveform(
sources, show_legend=False)
plt.tight_layout()
plt.show()
nussl.play_utils.multitrack(sources, ext='.wav')
# -
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
|
[
"numpy.abs",
"nussl.core.masks.BinaryMask",
"nussl.play_utils.multitrack",
"nussl.utils.visualize_spectrogram",
"nussl.utils.visualize_sources_as_waveform",
"nussl.utils.visualize_sources_as_masks",
"matplotlib.pyplot.subplot",
"numpy.angle",
"matplotlib.pyplot.figure",
"numpy.exp",
"nussl.core.masks.SoftMask",
"matplotlib.pyplot.tight_layout",
"nussl.datasets.MUSDB18",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.show"
] |
[((671, 682), 'time.time', 'time.time', ([], {}), '()\n', (680, 682), False, 'import time\n'), ((692, 729), 'nussl.datasets.MUSDB18', 'nussl.datasets.MUSDB18', ([], {'download': '(True)'}), '(download=True)\n', (714, 729), False, 'import nussl\n'), ((1045, 1072), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (1055, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1105), 'matplotlib.pyplot.title', 'plt.title', (['"""Mixture spectrogram"""'], {}), "('Mixture spectrogram')\n", (1082, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1158), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['mix'], {'y_axis': '"""mel"""'}), "(mix, y_axis='mel')\n", (1139, 1158), False, 'import nussl\n'), ((1159, 1177), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1175, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1188), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1186, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1759), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (1742, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1778), 'matplotlib.pyplot.title', 'plt.title', (['"""Drums"""'], {}), "('Drums')\n", (1769, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1844), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (["sources['drums']"], {'y_axis': '"""mel"""'}), "(sources['drums'], y_axis='mel')\n", (1812, 1844), False, 'import nussl\n'), ((1845, 1863), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1861, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1872, 1874), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (3212, 3229), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3258), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (3239, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3316), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (3292, 3316), False, 'import nussl\n'), ((3317, 3335), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3333, 3335), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3344, 3346), True, 'import matplotlib.pyplot as plt\n'), ((3898, 3934), 'nussl.core.masks.SoftMask', 'nussl.core.masks.SoftMask', (['mask_data'], {}), '(mask_data)\n', (3923, 3934), False, 'import nussl\n'), ((4193, 4220), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (4203, 4220), True, 'import matplotlib.pyplot as plt\n'), ((4221, 4249), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (4230, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4307), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (4283, 4307), False, 'import nussl\n'), ((4308, 4326), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4324, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4337), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4335, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4452, 4496), 'nussl.core.masks.BinaryMask', 'nussl.core.masks.BinaryMask', (['(mask_data > 0.5)'], {}), '(mask_data > 0.5)\n', (4479, 4496), False, 'import nussl\n'), ((4576, 4603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (4586, 4603), True, 'import matplotlib.pyplot as plt\n'), ((4604, 4632), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (4613, 4632), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4690), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (4666, 4690), False, 'import nussl\n'), ((4691, 4709), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4707, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4718, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4835, 4880), 'nussl.core.masks.BinaryMask', 'nussl.core.masks.BinaryMask', (['(mask_data > 0.05)'], {}), '(mask_data > 0.05)\n', (4862, 4880), False, 'import nussl\n'), ((4960, 4987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (4970, 4987), True, 'import matplotlib.pyplot as plt\n'), ((4988, 5016), 'matplotlib.pyplot.title', 'plt.title', (['"""Separated drums"""'], {}), "('Separated drums')\n", (4997, 5016), True, 'import matplotlib.pyplot as plt\n'), ((5017, 5074), 'nussl.utils.visualize_spectrogram', 'nussl.utils.visualize_spectrogram', (['drum_est'], {'y_axis': '"""mel"""'}), "(drum_est, y_axis='mel')\n", (5050, 5074), False, 'import nussl\n'), ((5075, 5093), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5091, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5094, 5104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5102, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5389, 5416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (5399, 5416), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5433), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (5428, 5433), True, 'import matplotlib.pyplot as plt\n'), ((5434, 5510), 'nussl.utils.visualize_sources_as_masks', 'nussl.utils.visualize_sources_as_masks', (['sources'], {'db_cutoff': '(-60)', 'y_axis': '"""mel"""'}), "(sources, db_cutoff=-60, y_axis='mel')\n", (5472, 5510), False, 'import nussl\n'), ((5516, 5532), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (5527, 5532), True, 'import matplotlib.pyplot as plt\n'), ((5533, 5602), 'nussl.utils.visualize_sources_as_waveform', 'nussl.utils.visualize_sources_as_waveform', (['sources'], {'show_legend': '(False)'}), '(sources, show_legend=False)\n', (5574, 5602), False, 'import nussl\n'), ((5608, 5626), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5624, 5626), True, 'import matplotlib.pyplot as plt\n'), ((5627, 5637), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5635, 5637), True, 'import matplotlib.pyplot as plt\n'), ((5639, 5687), 'nussl.play_utils.multitrack', 'nussl.play_utils.multitrack', (['sources'], {'ext': '""".wav"""'}), "(sources, ext='.wav')\n", (5666, 5687), False, 'import nussl\n'), ((5704, 5715), 'time.time', 'time.time', ([], {}), '()\n', (5713, 5715), False, 'import 
time\n'), ((2979, 3000), 'numpy.abs', 'np.abs', (['mix.stft_data'], {}), '(mix.stft_data)\n', (2985, 3000), True, 'import numpy as np\n'), ((3002, 3025), 'numpy.angle', 'np.angle', (['mix.stft_data'], {}), '(mix.stft_data)\n', (3010, 3025), True, 'import numpy as np\n'), ((3088, 3108), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (3094, 3108), True, 'import numpy as np\n')]
|
from course_lib.Base.BaseRecommender import BaseRecommender
import numpy as np
import scipy.sparse as sps
class SearchFieldWeightICMRecommender(BaseRecommender):
""" Search Field Weight ICM Recommender """
RECOMMENDER_NAME = "SearchFieldWeightICMRecommender"
def __init__(self, URM_train, ICM_train, recommender_class: classmethod, recommender_par: dict,
item_feature_to_range_mapper: dict, verbose=True):
super(SearchFieldWeightICMRecommender, self).__init__(URM_train, verbose=verbose)
self.recommender_class = recommender_class
self.recommender_par = recommender_par
self.item_feature_to_range_mapper = item_feature_to_range_mapper
self.ICM_train: sps.csr_matrix = ICM_train
self.model = None
def fit(self, **field_weights):
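        # Each keyword argument names a feature field from `item_feature_to_range_mapper`;
        # its value scales the ICM columns in that field's [start, end) range.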
item_feature_weights = np.ones(shape=self.ICM_train.shape[1])
for feature_name, weight in field_weights.items():
start, end = self.item_feature_to_range_mapper[feature_name]
item_feature_weights[start:end] = item_feature_weights[start:end]*weight
        item_feature_weights_diag = sps.diags(item_feature_weights)
        self.ICM_train = self.ICM_train.dot(item_feature_weights_diag)
self.model = self.recommender_class(self.URM_train, self.ICM_train)
self.model.fit(**self.recommender_par)
def _compute_item_score(self, user_id_array, items_to_compute=None):
return self.model._compute_item_score(user_id_array=user_id_array, items_to_compute=items_to_compute)
def save_model(self, folder_path, file_name=None):
pass
|
[
"numpy.ones",
"scipy.sparse.diags"
] |
[((846, 884), 'numpy.ones', 'np.ones', ([], {'shape': 'self.ICM_train.shape[1]'}), '(shape=self.ICM_train.shape[1])\n', (853, 884), True, 'import numpy as np\n'), ((1138, 1169), 'scipy.sparse.diags', 'sps.diags', (['item_feature_weights'], {}), '(item_feature_weights)\n', (1147, 1169), True, 'import scipy.sparse as sps\n')]
|
from typing import Dict, List, Optional, Union
import numpy as np
import torch
MOLECULAR_ATOMS = (
"H,He,Li,Be,B,C,N,O,F,Ne,Na,Mg,Al,Si,P,S,Cl,Ar,K,Ca,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,"
"Ga,Ge,As,Se,Br,Kr,Rb,Sr,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,In,Sn,Sb,Te,I,Xe,Cs,Ba,La,Ce,"
"Pr,Nd,Pm,Sm,Eu,Gd,Tb,Dy,Ho,Er,Tm,Yb,Lu,Hf,Ta,W,Re,Os,Ir,Pt,Au,Hg,Tl,Pb,Bi,Po,At,"
"Rn,Fr,Ra,Ac,Th,Pa,U,Np,Pu,Am,Cm,Bk,Cf,Es,Fm,Md,No,Lr,Rf,Db,Sg,Bh,Hs,Mt,Ds,Rg,Cn,"
"Nh,Fl,Mc,Lv,Ts,Og"
).split(",")
MOLECULAR_CHARGES = list(range(-15, 16)) + [":", "^", "^^"]
MOLECULAR_BOND_TYPES = [1, 2, 3, 4, 5, 6, 7, 8]
class MolecularEncoder:
"""Molecular structure encoder class.
    This class acts as a tokenizer for the MoT model. Every transformer model has
    its own subword tokenizer (which even creates the attention masks), and of course
    MoT needs its own input encoder. Since 3D-molecular structure data is not as simple
    as the sentences used by common transformer models, we provide a new input encoder
    which creates input encodings from the 3D-molecular structure data. Using this, you
    can simply encode the structure data and pass it to the MoT model.
Args:
cls_token: The name of classification token. Default is `[CLS]`.
pad_token: The name of padding token. Default is `[PAD]`.
"""
    # This field is part of the MoT configuration. If you are using the MoT model with
    # this encoder class, you can simply define the number of embeddings and attention
    # types using this field. The vocabularies are predefined, so you do not need to
    # handle the vocabulary sizes yourself.
mot_config = dict(
num_embeddings=[len(MOLECULAR_ATOMS) + 2, len(MOLECULAR_CHARGES) + 2],
num_attention_types=len(MOLECULAR_BOND_TYPES) + 2,
)
def __init__(
self,
cls_token: str = "[CLS]",
pad_token: str = "[PAD]",
):
self.vocab1 = [pad_token, cls_token] + MOLECULAR_ATOMS
self.vocab2 = [pad_token, cls_token] + MOLECULAR_CHARGES
self.vocab3 = [pad_token, cls_token] + MOLECULAR_BOND_TYPES
self.cls_token = cls_token
self.pad_token = pad_token
def collect_input_sequences(self, molecular: Dict[str, List]) -> Dict[str, List]:
"""Collect input sequences from the molecular structure data.
Args:
            molecular: The molecular data which contains 3D atoms and their bonding
                information.
Returns:
A dictionary which contains the input tokens and 3d positions of the atoms.
"""
input_ids = [
[self.vocab1.index(self.cls_token)],
[self.vocab2.index(self.cls_token)],
]
position_ids = [[0.0, 0.0, 0.0]]
attention_mask = [1] * (len(molecular["atoms"]) + 1)
for atom in molecular["atoms"]:
input_ids[0].append(self.vocab1.index(atom[3]))
input_ids[1].append(self.vocab2.index(atom[4]))
position_ids.append(atom[:3])
return {
"input_ids": input_ids,
"position_ids": position_ids,
"attention_mask": attention_mask,
}
def create_attention_type_ids(self, molecular: Dict) -> np.ndarray:
"""Create an attention types from the molecular structure data.
MoT supports attention types which are applied to the attention scores
relatively. Using this, you can give attention weights (bond types) directly to
the self-attention module. This method creates the attention type array by using
the bond informations in the molecular structure.
Args:
molecular: The molecular data which contains 3D atoms and their bonding
informations.
Returns:
The attention type array from the bond informations.
"""
max_seq_len = len(molecular["atoms"]) + 1
attention_type_ids = np.empty((max_seq_len, max_seq_len), dtype=np.int64)
attention_type_ids.fill(self.vocab3.index(self.pad_token))
attention_type_ids[0, :] = self.vocab3.index(self.cls_token)
attention_type_ids[:, 0] = self.vocab3.index(self.cls_token)
for first, second, bond_type in molecular["bonds"]:
attention_type_ids[first + 1, second + 1] = self.vocab3.index(bond_type)
attention_type_ids[second + 1, first + 1] = self.vocab3.index(bond_type)
return attention_type_ids
def encode(self, molecular: Dict[str, List]) -> Dict[str, Union[List, np.ndarray]]:
"""Encode the molecular structure data to the model inputs.
Args:
            molecular: The molecular data which contains 3D atoms and their bonding
                information.
        Returns:
            An encoded output which contains input ids, 3D positions, attention mask,
                and attention types.
"""
return {
**self.collect_input_sequences(molecular),
"attention_type_ids": self.create_attention_type_ids(molecular),
}
def collate(
self,
encodings: List[Dict[str, Union[List, np.ndarray]]],
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
) -> Dict[str, Union[List, np.ndarray, torch.Tensor]]:
"""Collate the encodings of which lengths are different to each other.
The lengths of encoded molecular structure data are not exactly same. To group
the sequences into the batch requires equal lengths. To resolve the problem,
this class supports sequence and attention mask paddings. Using this, you can
pad the encodings to desired lengths or match to the longest sequences. In
addition, this method automatically converts the sequences to torch tensors.
Args:
encodings: The batch of encodings.
max_length: The desired maximum length of sequences. Default is `None`.
            pad_to_multiple_of: Pad so that the sequence length becomes a multiple of
                this factor. Default is `None`.
Returns:
The collated batched encodings which contain converted tensors.
"""
longest_length = max(len(enc["input_ids"][0]) for enc in encodings)
max_length = min(max_length or longest_length, longest_length)
if pad_to_multiple_of is not None:
max_length = max_length + pad_to_multiple_of - 1
max_length = max_length // pad_to_multiple_of * pad_to_multiple_of
padding_id_1 = self.vocab1.index(self.pad_token)
padding_id_2 = self.vocab2.index(self.pad_token)
for enc in encodings:
num_paddings = max_length - len(enc["input_ids"][0])
if num_paddings >= 0:
enc["input_ids"][0] += [padding_id_1] * num_paddings
enc["input_ids"][1] += [padding_id_2] * num_paddings
enc["position_ids"] += [[0.0, 0.0, 0.0]] * num_paddings
enc["attention_mask"] += [0] * num_paddings
enc["attention_type_ids"] = np.pad(
enc["attention_type_ids"],
pad_width=((0, num_paddings), (0, num_paddings)),
constant_values=self.vocab3.index(self.pad_token),
)
else:
# If the encoded sequences are longer than the maximum length, then
# truncate the sequences and attention mask.
enc["input_ids"][0] = enc["input_ids"][0][:max_length]
enc["input_ids"][1] = enc["input_ids"][1][:max_length]
enc["position_ids"] = enc["position_ids"][:max_length]
enc["attention_mask"] = enc["attention_mask"][:max_length]
enc["attention_type_ids"] = enc["attention_type_ids"][
:max_length, :max_length
]
        # Collect all sequences into a batch and convert them to torch tensors. After
        # that, the sequences can be fed to the model because all inputs are tensors.
        # Since we keep the two `input_ids` sequences in a list, they are converted
        # individually.
encodings = {k: [enc[k] for enc in encodings] for k in encodings[0]}
encodings["input_ids"] = [
torch.tensor([x[0] for x in encodings["input_ids"]]),
torch.tensor([x[1] for x in encodings["input_ids"]]),
]
encodings["position_ids"] = torch.tensor(encodings["position_ids"])
encodings["attention_mask"] = torch.tensor(encodings["attention_mask"])
encodings["attention_type_ids"] = torch.tensor(encodings["attention_type_ids"])
if "labels" in encodings:
encodings["labels"] = torch.tensor(encodings["labels"])
return encodings
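
# Illustrative sketch (not part of the original code): the `pad_to_multiple_of`
# arithmetic in `collate` above is a standard ceil-to-multiple rounding, e.g. a
# longest sequence of 21 tokens with pad_to_multiple_of=8 is padded out to 24.
def _round_up_to_multiple(length: int, multiple: int) -> int:
    # (length + multiple - 1) // multiple * multiple == ceil(length / multiple) * multiple
    return (length + multiple - 1) // multiple * multiple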
|
[
"torch.tensor",
"numpy.empty"
] |
[((3918, 3970), 'numpy.empty', 'np.empty', (['(max_seq_len, max_seq_len)'], {'dtype': 'np.int64'}), '((max_seq_len, max_seq_len), dtype=np.int64)\n', (3926, 3970), True, 'import numpy as np\n'), ((8457, 8496), 'torch.tensor', 'torch.tensor', (["encodings['position_ids']"], {}), "(encodings['position_ids'])\n", (8469, 8496), False, 'import torch\n'), ((8535, 8576), 'torch.tensor', 'torch.tensor', (["encodings['attention_mask']"], {}), "(encodings['attention_mask'])\n", (8547, 8576), False, 'import torch\n'), ((8619, 8664), 'torch.tensor', 'torch.tensor', (["encodings['attention_type_ids']"], {}), "(encodings['attention_type_ids'])\n", (8631, 8664), False, 'import torch\n'), ((8291, 8343), 'torch.tensor', 'torch.tensor', (["[x[0] for x in encodings['input_ids']]"], {}), "([x[0] for x in encodings['input_ids']])\n", (8303, 8343), False, 'import torch\n'), ((8357, 8409), 'torch.tensor', 'torch.tensor', (["[x[1] for x in encodings['input_ids']]"], {}), "([x[1] for x in encodings['input_ids']])\n", (8369, 8409), False, 'import torch\n'), ((8734, 8767), 'torch.tensor', 'torch.tensor', (["encodings['labels']"], {}), "(encodings['labels'])\n", (8746, 8767), False, 'import torch\n')]
|
# importing libraries
import warnings
warnings.filterwarnings("ignore")
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
from catboost import CatBoostRegressor
import lightgbm as lgb
from sqlalchemy import create_engine
import pickle
from sklearn.metrics import r2_score,mean_absolute_error
train_period = 7
predict_period = 1
n_day_later_predict= 7
def get_rolling_data(X,y,train_period,predict_period=1,n_day_later_predict=1):
"""
Generating Timeseries Input And Output Data.
Parameters:
        X,y (DataFrame): Features, Labels
        train_period (int): Number Of Timesteps In Each Training Window
        predict_period (int): Number Of Days Predicted Per Window
        n_day_later_predict (int): Predict The nth Day After The End Of The Training Window
    Returns:
        rolling_X (ndarray): Features
        rolling_y (ndarray): Labels
"""
assert X.shape[0] == y.shape[0], (
'X.shape: %s y.shape: %s' % (X.shape, y.shape))
rolling_X, rolling_y = [],[]
for i in range(len(X)-train_period-predict_period-(n_day_later_predict)):
curr_X=X.iloc[i:i+train_period,:]
curr_y=y.iloc[i+train_period+n_day_later_predict:i+train_period+predict_period+n_day_later_predict]
rolling_X.append(curr_X.values.tolist())
if predict_period == 1:
rolling_y.append(curr_y.values.tolist()[0])
else:
rolling_y.append(curr_y.values.tolist())
rolling_X = np.array(rolling_X)
rolling_y = np.array(rolling_y)
return rolling_X, rolling_y
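
# Illustrative check (not part of the original script, and never called): with
# train_period=3, predict_period=1 and n_day_later_predict=1, a 10-row frame yields
# 10 - 3 - 1 - 1 = 5 windows, so rolling_X has shape (5, 3, n_features) and
# rolling_y has shape (5,).
def _rolling_shape_example():
    X = pd.DataFrame(np.arange(20).reshape(10, 2))
    y = X[0]
    rolling_X, rolling_y = get_rolling_data(X, y, train_period=3)
    assert rolling_X.shape == (5, 3, 2) and rolling_y.shape == (5,)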
def load_data(database_filepath):
"""
Loading Data From Database.
Splitting X And Y Columns As TimeSeries Data By Calling get_rolling_data Method.
Parameters:
database_filepath (str): Filepath Where Database Is Located.
Returns:
        rolling_X (ndarray): Features
        rolling_y (ndarray): Labels
"""
# loading data from database
db_name = 'sqlite:///{}'.format(database_filepath)
engine = create_engine(db_name)
# using pandas to read table from database
df = pd.read_sql_table('Stock',engine)
rolling_X, rolling_y = get_rolling_data(df, df.loc[:,'Stock_Adj Close'], train_period=train_period,
predict_period=predict_period,
n_day_later_predict=n_day_later_predict)
return rolling_X , rolling_y
class ModelData():
'''Data Class For Model Train, Predict And Validate.'''
def __init__(self,X,y,seed=None,shuffle=True):
self._seed = seed
np.random.seed(self._seed)
assert X.shape[0] == y.shape[0], (
'X.shape: %s y.shape: %s' % (X.shape, y.shape))
self._num_examples = X.shape[0]
# If shuffle
if shuffle:
np.random.seed(self._seed)
randomList = np.arange(X.shape[0])
np.random.shuffle(randomList)
            X, y = X[randomList], y[randomList]
        self._X = X
        self._y = y
self._epochs_completed = 0
self._index_in_epoch = 0
def train_validate_test_split(self,validate_size=0.10,test_size=0.10):
'''Train, Predict And Validate Splitting Function'''
validate_start = int(self._num_examples*(1-validate_size-test_size)) + 1
test_start = int(self._num_examples*(1-test_size)) + 1
if validate_start > len(self._X) or test_start > len(self._X):
pass
train_X,train_y = self._X[:validate_start],self._y[:validate_start]
validate_X, validate_y = self._X[validate_start:test_start],self._y[validate_start:test_start]
test_X,test_y = self._X[test_start:],self._y[test_start:]
if test_size == 0:
return ModelData(train_X,train_y,self._seed), ModelData(validate_X,validate_y,self._seed)
else:
return ModelData(train_X,train_y,self._seed), ModelData(validate_X,validate_y,self._seed), ModelData(test_X,test_y,self._seed)
@property
def X(self):
return self._X
@property
def y(self):
return self._y
def build_model():
"""
Build Model Function.
This Function's Output Is A Dictionary Of 3 Best Regressor Models i.e. XGB Regressor,
Catboost Regressor And LGBM Regressor.
Returns:
model (Dict) : A Dictionary Of Regressor Models
"""
# xgb regressor
xgb_reg = xgb.XGBRegressor(n_estimators=10000,min_child_weight= 40,learning_rate=0.01,colsample_bytree = 1,subsample = 0.9)
# catboost regressor
cat_reg = CatBoostRegressor(iterations=10000,learning_rate=0.005,loss_function = 'RMSE')
# lgbm regressor
lgbm_reg = lgb.LGBMRegressor(num_leaves=31,learning_rate=0.001, max_bin = 30,n_estimators=10000)
model = {'xgb':xgb_reg,'cat':cat_reg,'lgbm':lgbm_reg}
return model
def evaluate_model(model, X_test, Y_test):
"""
Model Evaluation Function.
Evaluating The Models On Test Set And Computing R2 Score And Mean Absolute Error.
Parameters:
model (Dict) : A Dictionary Of Trained Regressor Models
X_test (DataFrame) : Test Features
Y_test (DataFrame) : Test Labels
"""
# predict on test data
pred = (model['xgb'].predict(X_test) + model['cat'].predict(X_test) + model['lgbm'].predict(X_test)) / 3
    # rescaling the targets and predictions
real = np.exp(Y_test)
pred = np.exp(pred)
# computing the r2 score
print('R2 Score :')
print(r2_score(real,pred))
# computing the mean absolute error
print('Mean Absolute Error :')
print(mean_absolute_error(real,pred))
def save_model(model, model_filepath):
"""
Save Model function
This Function Saves Trained Models As Pickle File, To Be Loaded Later.
Parameters:
model (Dict) : A Dictionary Of Trained Regressor Models
model_filepath (str) : Destination Path To Save .pkl File
"""
filename = model_filepath
pickle.dump(model, open(filename, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y = load_data(database_filepath)
model_data = ModelData(X, Y,seed=666,shuffle=False)
model_train_data, model_validate_data = model_data.train_validate_test_split(validate_size=0.10,test_size=0)
y_train = model_train_data.y[:,np.newaxis]
y_validate = model_validate_data.y[:,np.newaxis]
X_train = model_train_data.X
X_validate = model_validate_data.X
X_train = X_train.reshape((X_train.shape[0],X_train.shape[1]*X_train.shape[2]))
X_validate = X_validate.reshape((X_validate.shape[0],X_validate.shape[1]*X_validate.shape[2]))
print('Building model...')
model = build_model()
print('Training XGB model...')
model['xgb'].fit(X_train, y_train,eval_set = [(X_validate[:300],y_validate[:300])],early_stopping_rounds = 50,verbose = False)
print('Training Catboost model...')
model['cat'].fit(X_train, y_train,eval_set = [(X_validate[:300],y_validate[:300])],early_stopping_rounds = 50,verbose = False)
print('Training Lgbm model...')
model['lgbm'].fit(X_train, y_train,eval_set = [(X_validate[:300],y_validate[:300].ravel())],early_stopping_rounds = 50,verbose = False)
print('Evaluating Combined model...')
evaluate_model(model, X_validate, y_validate)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the Stock database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_regressor.py ../data/Stock.db regressor.pkl')
if __name__ == '__main__':
main()
|
[
"sqlalchemy.create_engine",
"lightgbm.LGBMRegressor",
"catboost.CatBoostRegressor",
"numpy.array",
"xgboost.XGBRegressor",
"numpy.exp",
"numpy.random.seed",
"pandas.read_sql_table",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.r2_score",
"warnings.filterwarnings",
"numpy.arange",
"numpy.random.shuffle"
] |
[((38, 71), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (61, 71), False, 'import warnings\n'), ((1432, 1451), 'numpy.array', 'np.array', (['rolling_X'], {}), '(rolling_X)\n', (1440, 1451), True, 'import numpy as np\n'), ((1468, 1487), 'numpy.array', 'np.array', (['rolling_y'], {}), '(rolling_y)\n', (1476, 1487), True, 'import numpy as np\n'), ((1959, 1981), 'sqlalchemy.create_engine', 'create_engine', (['db_name'], {}), '(db_name)\n', (1972, 1981), False, 'from sqlalchemy import create_engine\n'), ((2039, 2073), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""Stock"""', 'engine'], {}), "('Stock', engine)\n", (2056, 2073), True, 'import pandas as pd\n'), ((4411, 4528), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {'n_estimators': '(10000)', 'min_child_weight': '(40)', 'learning_rate': '(0.01)', 'colsample_bytree': '(1)', 'subsample': '(0.9)'}), '(n_estimators=10000, min_child_weight=40, learning_rate=\n 0.01, colsample_bytree=1, subsample=0.9)\n', (4427, 4528), True, 'import xgboost as xgb\n'), ((4564, 4642), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'iterations': '(10000)', 'learning_rate': '(0.005)', 'loss_function': '"""RMSE"""'}), "(iterations=10000, learning_rate=0.005, loss_function='RMSE')\n", (4581, 4642), False, 'from catboost import CatBoostRegressor\n'), ((4679, 4768), 'lightgbm.LGBMRegressor', 'lgb.LGBMRegressor', ([], {'num_leaves': '(31)', 'learning_rate': '(0.001)', 'max_bin': '(30)', 'n_estimators': '(10000)'}), '(num_leaves=31, learning_rate=0.001, max_bin=30,\n n_estimators=10000)\n', (4696, 4768), True, 'import lightgbm as lgb\n'), ((5439, 5453), 'numpy.exp', 'np.exp', (['Y_test'], {}), '(Y_test)\n', (5445, 5453), True, 'import numpy as np\n'), ((5465, 5477), 'numpy.exp', 'np.exp', (['pred'], {}), '(pred)\n', (5471, 5477), True, 'import numpy as np\n'), ((2543, 2569), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (2557, 2569), True, 'import numpy as np\n'), ((5541, 5561), 'sklearn.metrics.r2_score', 'r2_score', (['real', 'pred'], {}), '(real, pred)\n', (5549, 5561), False, 'from sklearn.metrics import r2_score, mean_absolute_error\n'), ((5647, 5678), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['real', 'pred'], {}), '(real, pred)\n', (5666, 5678), False, 'from sklearn.metrics import r2_score, mean_absolute_error\n'), ((2784, 2810), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (2798, 2810), True, 'import numpy as np\n'), ((2836, 2857), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (2845, 2857), True, 'import numpy as np\n'), ((2870, 2899), 'numpy.random.shuffle', 'np.random.shuffle', (['randomList'], {}), '(randomList)\n', (2887, 2899), True, 'import numpy as np\n')]
|
import base64
import io
from matplotlib import pyplot
import numpy as np
import rasterio
def read_raster_file(input_fn, band = 1):
with rasterio.open(input_fn) as src:
return src.read(band)
def plot_raster_layer(input_fn, band = 1, from_logits = True):
pyplot.figure(figsize = (10,10))
data = read_raster_file(input_fn, band)
if from_logits:
data = np.exp(data)
pyplot.imshow(data, cmap='viridis')
pyplot.show()
def plot_histogram(input_fn, band = 1, from_logits = True):
pyplot.figure(figsize = (10,10))
data = read_raster_file(input_fn, band)
pyplot.hist(np.rint(data), bins='auto')
pyplot.show()
def get_base64_image(input_fn, band = 1, from_logits = True):
pyplot.figure(figsize = (10,10))
data = read_raster_file(input_fn, band)
pyplot.imshow(data, cmap='viridis')
pic_IObytes = io.BytesIO()
pyplot.savefig(pic_IObytes, format='png')
pic_IObytes.seek(0)
pic_hash = base64.b64encode(pic_IObytes.read())
    # strip the leading b and the surrounding quotes added by str()
pic_hash = str(pic_hash)[2:-1]
pyplot.close()
return pic_hash
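
# Illustrative sketch (not part of the original module): the base64 string returned by
# get_base64_image can be written back out as a PNG file; the output file name below
# is only an example.
def save_base64_png(pic_hash, output_fn='preview.png'):
    with open(output_fn, 'wb') as f:
        f.write(base64.b64decode(pic_hash))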
|
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"rasterio.open",
"io.BytesIO",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.rint",
"matplotlib.pyplot.show"
] |
[((278, 309), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (291, 309), False, 'from matplotlib import pyplot\n'), ((407, 442), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['data'], {'cmap': '"""viridis"""'}), "(data, cmap='viridis')\n", (420, 442), False, 'from matplotlib import pyplot\n'), ((447, 460), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (458, 460), False, 'from matplotlib import pyplot\n'), ((531, 562), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (544, 562), False, 'from matplotlib import pyplot\n'), ((657, 670), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (668, 670), False, 'from matplotlib import pyplot\n'), ((739, 770), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (752, 770), False, 'from matplotlib import pyplot\n'), ((820, 855), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['data'], {'cmap': '"""viridis"""'}), "(data, cmap='viridis')\n", (833, 855), False, 'from matplotlib import pyplot\n'), ((875, 887), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (885, 887), False, 'import io\n'), ((892, 933), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['pic_IObytes'], {'format': '"""png"""'}), "(pic_IObytes, format='png')\n", (906, 933), False, 'from matplotlib import pyplot\n'), ((1104, 1118), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (1116, 1118), False, 'from matplotlib import pyplot\n'), ((143, 166), 'rasterio.open', 'rasterio.open', (['input_fn'], {}), '(input_fn)\n', (156, 166), False, 'import rasterio\n'), ((390, 402), 'numpy.exp', 'np.exp', (['data'], {}), '(data)\n', (396, 402), True, 'import numpy as np\n'), ((624, 637), 'numpy.rint', 'np.rint', (['data'], {}), '(data)\n', (631, 637), True, 'import numpy as np\n')]
|
#!/bin/python
#sca_test.py
import matplotlib.pyplot as plt
import coevo2 as ce
import itertools as it
import numpy as np
import copy
import time
reload(ce)
names = ['glgA', 'glgC', 'cydA', 'cydB']
algPath = 'TestSet/eggNOG_aligns/slice_0.9/'
prots = ce.prots_from_scratch(names,path2alg=algPath)
ps = ce.ProtSet(prots,names)
phylo_names = ['aspS','ffh','lepA','pgk','recN','rho','rpoA','ruvB','tig','uvrB']
phylo_prots = ce.prots_from_scratch(phylo_names,path2alg='TestSet/eggNOG_aligns/phylogenes/')
phylo2 = ce.PhyloSet(phylo_prots)
phylo2.set_indexer(thresh=7)
for pt in phylo2.prots: # temporary fix for duplicated locus ids in the same msa
pt.msa = pt.msa[~pt.msa.index.duplicated(keep='first')]
phylo2.set_sim_mat()
protsmats,pairmats,pairrandmats,sca_score,sca_score2 = ce.sca(ps,phylo2,delta=0.0001)
for pt,sca in it.izip(ps.prots,protsmats): pt.sca_mat = sca
for pair,sca_cat in it.izip(ps.pairs,pairmats): pair.sca_mat = sca_cat
np.save('GettingStartedSCACalcs.npy',ps)
print(sca_score2)
|
[
"coevo2.ProtSet",
"itertools.izip",
"coevo2.PhyloSet",
"coevo2.sca",
"numpy.save",
"coevo2.prots_from_scratch"
] |
[((253, 299), 'coevo2.prots_from_scratch', 'ce.prots_from_scratch', (['names'], {'path2alg': 'algPath'}), '(names, path2alg=algPath)\n', (274, 299), True, 'import coevo2 as ce\n'), ((304, 328), 'coevo2.ProtSet', 'ce.ProtSet', (['prots', 'names'], {}), '(prots, names)\n', (314, 328), True, 'import coevo2 as ce\n'), ((425, 510), 'coevo2.prots_from_scratch', 'ce.prots_from_scratch', (['phylo_names'], {'path2alg': '"""TestSet/eggNOG_aligns/phylogenes/"""'}), "(phylo_names, path2alg='TestSet/eggNOG_aligns/phylogenes/'\n )\n", (446, 510), True, 'import coevo2 as ce\n'), ((515, 539), 'coevo2.PhyloSet', 'ce.PhyloSet', (['phylo_prots'], {}), '(phylo_prots)\n', (526, 539), True, 'import coevo2 as ce\n'), ((788, 820), 'coevo2.sca', 'ce.sca', (['ps', 'phylo2'], {'delta': '(0.0001)'}), '(ps, phylo2, delta=0.0001)\n', (794, 820), True, 'import coevo2 as ce\n'), ((834, 862), 'itertools.izip', 'it.izip', (['ps.prots', 'protsmats'], {}), '(ps.prots, protsmats)\n', (841, 862), True, 'import itertools as it\n'), ((900, 927), 'itertools.izip', 'it.izip', (['ps.pairs', 'pairmats'], {}), '(ps.pairs, pairmats)\n', (907, 927), True, 'import itertools as it\n'), ((951, 992), 'numpy.save', 'np.save', (['"""GettingStartedSCACalcs.npy"""', 'ps'], {}), "('GettingStartedSCACalcs.npy', ps)\n", (958, 992), True, 'import numpy as np\n')]
|
# coding: utf-8
""" MIT License """
'''
<NAME> & <NAME>
<NAME> & <NAME>
---
Description:
Script designed to evaluate all parameters provided to the GP and identify the best ones.
Saves the fitness of all individuals by logging them to csv files, which are then evaluated in plots.py.
---
Copyright (c) 2018
'''
# libraries and dependencies
# ---------------------------------------------------------------------------- #
from evolution import Evolution
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import classifier
import random
import utils
import csv
import os
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
# import data
X, y = utils.load_data(
filename='data_trimmed.csv',
clean=False,
normalize=True,
resample=2 # (2) to downsample the negative cases
)
# concatenate selected features with their target values
dataset = np.column_stack((X, y))
popsize = [100, 250, 500, 1000]
GenMax = [50, 100, 250, 500]
mutRate = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
crRate = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]# 0.7, 0.8, 0.9]
reps = 5
states = 1
i = 6
evo = Evolution(
dataset = dataset.tolist(), # data samples
popsize = popsize[3], # initial population size
hofsize = 10, # the number of best individual to track
cx = crRate[i], # crossover rate
mut = mutRate[6], # mutation rate
maxgen = GenMax[2], # max number of generations
)
logs = pd.DataFrame()
gen = np.zeros((reps, GenMax[2]+1))
nevals = np.zeros((reps, GenMax[2]+1))
avg = np.zeros((reps, GenMax[2]+1))
mini = np.zeros((reps, GenMax[2]+1))
maxi = np.zeros((reps, GenMax[2]+1))
for l in range(reps):
np.random.seed(reps)
pop, logbook, hof= evo.run()
gen[l][:] = logbook.select('gen')
nevals[l][:] = logbook.select('nevals')
avg[l][:] = logbook.select('avg')
mini[l][:] = logbook.select('min')
maxi[l][:] = logbook.select('max')
AvgEval = []
Avg = []
AvgMin = []
AvgMax = []
for n in range(GenMax[2]+1):
totalEval = 0
totalAvg = 0
totalMin = 0
totalMax = 0
for m in range(reps):
totalEval += nevals[m][n]
totalAvg += avg[m][n]
totalMin += mini[m][n]
totalMax += maxi[m][n]
AvgEval.append(totalEval/reps)
Avg.append(totalAvg/reps)
AvgMin.append(totalMin/reps)
AvgMax.append(totalMax/reps)
logs['gen'] = gen[l][:]
logs['nEval'] = AvgEval
logs['Avg Fitness'] = Avg
logs['Avg Min'] = AvgMin
logs['Avg Max'] = AvgMax
#print(logs)
cwd = os.getcwd()
pth_to_save = cwd + "/results/mutEphemeralAll.6_cxOnePoint_.6_selDoubleTournament_codeBloatOn.csv"
logs.to_csv(pth_to_save)
print('Done')
|
[
"utils.load_data",
"numpy.column_stack",
"os.getcwd",
"numpy.zeros",
"numpy.random.seed",
"pandas.DataFrame"
] |
[((777, 866), 'utils.load_data', 'utils.load_data', ([], {'filename': '"""data_trimmed.csv"""', 'clean': '(False)', 'normalize': '(True)', 'resample': '(2)'}), "(filename='data_trimmed.csv', clean=False, normalize=True,\n resample=2)\n", (792, 866), False, 'import utils\n'), ((1019, 1042), 'numpy.column_stack', 'np.column_stack', (['(X, y)'], {}), '((X, y))\n', (1034, 1042), True, 'import numpy as np\n'), ((1733, 1747), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1745, 1747), True, 'import pandas as pd\n'), ((1759, 1790), 'numpy.zeros', 'np.zeros', (['(reps, GenMax[2] + 1)'], {}), '((reps, GenMax[2] + 1))\n', (1767, 1790), True, 'import numpy as np\n'), ((1802, 1833), 'numpy.zeros', 'np.zeros', (['(reps, GenMax[2] + 1)'], {}), '((reps, GenMax[2] + 1))\n', (1810, 1833), True, 'import numpy as np\n'), ((1842, 1873), 'numpy.zeros', 'np.zeros', (['(reps, GenMax[2] + 1)'], {}), '((reps, GenMax[2] + 1))\n', (1850, 1873), True, 'import numpy as np\n'), ((1883, 1914), 'numpy.zeros', 'np.zeros', (['(reps, GenMax[2] + 1)'], {}), '((reps, GenMax[2] + 1))\n', (1891, 1914), True, 'import numpy as np\n'), ((1928, 1959), 'numpy.zeros', 'np.zeros', (['(reps, GenMax[2] + 1)'], {}), '((reps, GenMax[2] + 1))\n', (1936, 1959), True, 'import numpy as np\n'), ((2991, 3002), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3000, 3002), False, 'import os\n'), ((1997, 2017), 'numpy.random.seed', 'np.random.seed', (['reps'], {}), '(reps)\n', (2011, 2017), True, 'import numpy as np\n')]
|
"""
Most recently tested against PySAM 2.1.4
"""
from pathlib import Path
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import PySAM.Singleowner as Singleowner
import time
import multiprocessing
from itertools import product
import PySAM.Pvsamv1 as Pvsamv1
solar_resource_file = Path(__file__).parent / "tests" / "blythe_ca_33.617773_-114.588261_psmv3_60_tmy.csv"
def gcr_func(gcr, cost_per_land_area):
"""
Returns the Internal Rate of Return of a default PV single owner project given modified ground-coverage-ratio (GCR)
and cost per land area
Args:
gcr: ratio, between 0.1 - 1
cost_per_land_area: $
Returns: IRR
"""
# set up base
a = Pvsamv1.default("FlatPlatePVSingleowner")
a.SolarResource.solar_resource_file = solar_resource_file
b = Singleowner.default("FlatPlatePVSingleowner")
# set up shading
a.Shading.subarray1_shade_mode = 1
a.Layout.subarray1_nmodx = 12
a.Layout.subarray1_nmody = 2
a.SystemDesign.subarray1_gcr = float(gcr)
land_area = a.CECPerformanceModelWithModuleDatabase.cec_area * (a.SystemDesign.subarray1_nstrings
* a.SystemDesign.subarray1_modules_per_string) / gcr * 0.0002471
a.execute(0)
# total_installed_cost = total_direct_cost + permitting_total + engr_total + grid_total + landprep_total + sales_tax_total + land_total
b.SystemCosts.total_installed_cost += cost_per_land_area * land_area * 1000
b.SystemOutput.system_pre_curtailment_kwac = a.Outputs.gen
b.SystemOutput.gen = a.Outputs.gen
b.execute(0)
return b.Outputs.analysis_period_irr
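
# Note (added, not part of the original script): `land_area` above is the module area
# in m^2 times the number of modules, divided by the ground coverage ratio and
# converted at 0.0002471 acres per m^2; the `* 1000` applied to the added cost
# suggests `cost_per_land_area` is in thousands of dollars per acre (an assumption).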
gcrs = np.arange(1, 10)
costs = np.arange(1, 10)
multi1 = time.process_time()
if __name__ == '__main__':
with multiprocessing.Pool(processes=4) as pool:
results = pool.starmap(gcr_func, product(gcrs / 10, repeat=2))
multi2 = time.process_time()
print("multi process time:", multi2 - multi1, "\n")
results = np.array([results])
results = np.reshape(results, (-1, 9))
X, Y = np.meshgrid(gcrs, costs)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(X, Y, results)
plt.title("Internal Rate of Return")
plt.xlabel("GCR")
plt.ylabel("$ / land area")
plt.show()
plt.contour(X, Y, results)
plt.title("Internal Rate of Return")
plt.xlabel("GCR")
plt.ylabel("$ / land area")
plt.show()
|
[
"PySAM.Pvsamv1.default",
"PySAM.Singleowner.default",
"numpy.reshape",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"itertools.product",
"numpy.array",
"matplotlib.pyplot.figure",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.contour",
"multiprocessing.Pool",
"time.process_time",
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1671, 1687), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (1680, 1687), True, 'import numpy as np\n'), ((1696, 1712), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (1705, 1712), True, 'import numpy as np\n'), ((1723, 1742), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1740, 1742), False, 'import time\n'), ((1904, 1923), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1921, 1923), False, 'import time\n'), ((1987, 2006), 'numpy.array', 'np.array', (['[results]'], {}), '([results])\n', (1995, 2006), True, 'import numpy as np\n'), ((2017, 2045), 'numpy.reshape', 'np.reshape', (['results', '(-1, 9)'], {}), '(results, (-1, 9))\n', (2027, 2045), True, 'import numpy as np\n'), ((2054, 2078), 'numpy.meshgrid', 'np.meshgrid', (['gcrs', 'costs'], {}), '(gcrs, costs)\n', (2065, 2078), True, 'import numpy as np\n'), ((2085, 2097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2095, 2097), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2114), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (2109, 2114), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2146, 2182), 'matplotlib.pyplot.title', 'plt.title', (['"""Internal Rate of Return"""'], {}), "('Internal Rate of Return')\n", (2155, 2182), True, 'import matplotlib.pyplot as plt\n'), ((2183, 2200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""GCR"""'], {}), "('GCR')\n", (2193, 2200), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2228), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ / land area"""'], {}), "('$ / land area')\n", (2211, 2228), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2237, 2239), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2267), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'results'], {}), '(X, Y, results)\n', (2252, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2304), 'matplotlib.pyplot.title', 'plt.title', (['"""Internal Rate of Return"""'], {}), "('Internal Rate of Return')\n", (2277, 2304), True, 'import matplotlib.pyplot as plt\n'), ((2305, 2322), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""GCR"""'], {}), "('GCR')\n", (2315, 2322), True, 'import matplotlib.pyplot as plt\n'), ((2323, 2350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ / land area"""'], {}), "('$ / land area')\n", (2333, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2359, 2361), True, 'import matplotlib.pyplot as plt\n'), ((739, 780), 'PySAM.Pvsamv1.default', 'Pvsamv1.default', (['"""FlatPlatePVSingleowner"""'], {}), "('FlatPlatePVSingleowner')\n", (754, 780), True, 'import PySAM.Pvsamv1 as Pvsamv1\n'), ((852, 897), 'PySAM.Singleowner.default', 'Singleowner.default', (['"""FlatPlatePVSingleowner"""'], {}), "('FlatPlatePVSingleowner')\n", (871, 897), True, 'import PySAM.Singleowner as Singleowner\n'), ((1780, 1813), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (1800, 1813), False, 'import multiprocessing\n'), ((329, 343), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (333, 343), False, 'from pathlib import Path\n'), ((1864, 1892), 'itertools.product', 'product', (['(gcrs / 10)'], {'repeat': '(2)'}), '(gcrs / 10, repeat=2)\n', (1871, 1892), False, 'from itertools import product\n')]
|
from viroconcom.fitting import Fit
from viroconcom.contours import IFormContour
import numpy as np
prng = np.random.RandomState(42)
# Draw 1000 observations from a Weibull distribution with
# shape=1.5 and scale=3, which represents significant
# wave height.
sample_0 = prng.weibull(1.5, 1000) * 3
# Let the second sample, which represents spectral peak
# period, increase with significant wave height and follow
# a lognormal distribution with sigma=0.2.
sample_1 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_0]
# Define a bivariate probabilistic model that will be fitted to
# the samples. Set a parametric distribution for each variable and
# a dependence structure. Set the lognormal distribution's scale
# parameter to depend on the variable with index 0, which represents
# significant wave height by using the 'dependency' key-value pair.
# A 3-parameter exponential function is chosen to define the
# dependency by setting the function to 'exp3'. The dependency for
# the parameters must be given in the order shape, location, scale.
dist_description_0 = {'name': 'Weibull',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit based on maximum likelihood estimation.
my_fit = Fit((sample_0, sample_1),
(dist_description_0, dist_description_1))
# Compute an environmental contour with a return period of
# 25 years and a sea state duration of 3 hours. 100 data points
# along the contour shall be calculated.
iform_contour = IFormContour(my_fit.mul_var_dist, 25, 3, 100)
|
[
"numpy.exp",
"viroconcom.fitting.Fit",
"viroconcom.contours.IFormContour",
"numpy.random.RandomState"
] |
[((107, 132), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (128, 132), True, 'import numpy as np\n'), ((1458, 1525), 'viroconcom.fitting.Fit', 'Fit', (['(sample_0, sample_1)', '(dist_description_0, dist_description_1)'], {}), '((sample_0, sample_1), (dist_description_0, dist_description_1))\n', (1461, 1525), False, 'from viroconcom.fitting import Fit\n'), ((1720, 1765), 'viroconcom.contours.IFormContour', 'IFormContour', (['my_fit.mul_var_dist', '(25)', '(3)', '(100)'], {}), '(my_fit.mul_var_dist, 25, 3, 100)\n', (1732, 1765), False, 'from viroconcom.contours import IFormContour\n'), ((483, 502), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (489, 502), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.testing import assert_array_equal
from seai_deap import dim
def test_calculate_building_volume() -> None:
expected_output = np.array(4)
output = dim.calculate_building_volume(
ground_floor_area=np.array(1),
first_floor_area=np.array(1),
second_floor_area=np.array(1),
third_floor_area=np.array(1),
ground_floor_height=np.array(1),
first_floor_height=np.array(1),
second_floor_height=np.array(1),
third_floor_height=np.array(1),
)
assert_array_equal(output, expected_output)
def test_calculate_total_floor_area() -> None:
expected_output = np.array((4))
output = dim.calculate_total_floor_area(
ground_floor_area=np.array(1),
first_floor_area=np.array(1),
second_floor_area=np.array(1),
third_floor_area=np.array(1),
)
assert_array_equal(output, expected_output)
|
[
"numpy.array",
"numpy.testing.assert_array_equal"
] |
[((162, 173), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (170, 173), True, 'import numpy as np\n'), ((546, 589), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['output', 'expected_output'], {}), '(output, expected_output)\n', (564, 589), False, 'from numpy.testing import assert_array_equal\n'), ((662, 673), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (670, 673), True, 'import numpy as np\n'), ((887, 930), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['output', 'expected_output'], {}), '(output, expected_output)\n', (905, 930), False, 'from numpy.testing import assert_array_equal\n'), ((245, 256), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (253, 256), True, 'import numpy as np\n'), ((283, 294), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (291, 294), True, 'import numpy as np\n'), ((322, 333), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (330, 333), True, 'import numpy as np\n'), ((360, 371), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (368, 371), True, 'import numpy as np\n'), ((401, 412), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (409, 412), True, 'import numpy as np\n'), ((441, 452), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (449, 452), True, 'import numpy as np\n'), ((482, 493), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (490, 493), True, 'import numpy as np\n'), ((522, 533), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (530, 533), True, 'import numpy as np\n'), ((748, 759), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (756, 759), True, 'import numpy as np\n'), ((786, 797), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (794, 797), True, 'import numpy as np\n'), ((825, 836), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (833, 836), True, 'import numpy as np\n'), ((863, 874), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (871, 874), True, 'import numpy as np\n')]
|
import pdb
import numpy
import geometry.conversions
import geometry.helpers
import geometry.quaternion
import geodesy.conversions
import environments.earth
import spherical_geometry.vector
import spherical_geometry.great_circle_arc
def line_distance(point_1, point_2, ignore_alt=True):
"""Compute the straight line distance between two points on the earth
Arguments:
point_1: numpy (1,3) array giving lat/lon/alt of the first point
point_2: numpy (1,3) array giving lat/lon/alt of the second point
ignore_alt: optional, ignore the altitude component, defaults True
Returns:
r: distance between the points (m)
"""
dX = geodesy.conversions.lla_to_ned(point_1, point_2)
if ignore_alt:
dX *= numpy.array([1.0, 1.0, 0.0], ndmin=2)
return numpy.linalg.norm(dX)
def arc_length(point_1, point_2, ignore_alt=True):
"""Compute the great circle arc length between two points on a sphere
Arguments:
point_1: numpy (1,3) array giving lat/lon/alt of the first point
point_2: numpy (1,3) array giving lat/lon/alt of the second point
ignore_alt: optional, ignore the altitude component, defaults True
Returns:
arc_length: the great circle distance
"""
p1_X = geodesy.conversions.lla_to_vector(point_1)
p2_X = geodesy.conversions.lla_to_vector(point_2)
theta = spherical_geometry.great_circle_arc.length(
p1_X, p2_X, degrees=False)
return theta
def arc_distance(point_1, point_2, r=None, ignore_alt=True):
"""Compute the great circle distance between two points on a sphere
Arguments:
point_1: numpy (1,3) array giving lat/lon/alt of the first point
point_2: numpy (1,3) array giving lat/lon/alt of the second point
r: radius of the sphere we're on. Defaults to the earth
ignore_alt: optional, ignore the altitude component, defaults True
Returns:
arc_distance: the great circle distance
"""
theta = arc_length(point_1, point_2, ignore_alt=ignore_alt)
if r is None:
return theta * environments.earth.constants['r0']
return theta * r
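
# Illustrative cross-check (not part of the original module): for two lat/lon points
# given in radians, the arc distance above should agree with the classic haversine
# formula; the default radius below is an assumed mean earth radius in metres.
def _haversine_distance(lat1, lon1, lat2, lon2, r=6371000.0):
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = (numpy.sin(dlat / 2.0) ** 2
         + numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin(dlon / 2.0) ** 2)
    return 2.0 * r * numpy.arcsin(numpy.sqrt(a))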
def great_circle_direction(point_1, point_2):
"""Compute the direction for a great circle arc
Arguments:
point_1: the starting point of the great circle. The direction will be
given in a NED frame at this point. Numpy (3,) array in radians, lla
point_2: the other end of the great circle. can specify a numpy (3,)
array for a single computation or a numpy (n,3) array for a series
of computations.
Returns:
r_hat: the initial direction of the great circle starting from point_1
"""
if point_2.ndim > 1:
directions = numpy.zeros(point_2.shape)
for idx, coord in enumerate(point_2):
directions[idx] = great_circle_direction(point_1, coord)
return directions
xyz_1 = geodesy.conversions.lla_to_xyz(point_1)
xyz_2 = geodesy.conversions.lla_to_xyz(point_2)
khat_xyz = geometry.conversions.to_unit_vector(xyz_1)
delta = xyz_2 - xyz_1
delta_hat = geometry.conversions.to_unit_vector(delta)
r_xyz = numpy.cross(khat_xyz, numpy.cross(delta_hat, khat_xyz))
r_hat = geodesy.conversions.xyz_to_ned(
r_xyz + xyz_1, numpy.array(point_1, ndmin=2))[0]
return geometry.conversions.to_unit_vector(r_hat)
def distance_on_great_circle(start_point, direction, distance):
"""compute the location of a point a specified distance along a great circle
NOTE: This assumes a spherical earth. The error introduced in the location
    is pretty small (~15 km for a 13000 km path), but it totally screws with
    the altitude. YOU SHOULD NOT USE THE ALTITUDE COMING OUT OF THIS, ESPECIALLY
    IF YOU HAVE ANY MEANINGFUL DISTANCE
Arguments:
start_point: the starting point of the great circle. The direction is
given in a NED frame at this point. Numpy (3,) array in radians, lla
direction: a NED vector indicating the direction of the great circle
distance: the length of the great circle arc (m)
Returns:
end_point: the end of a great circle path of length <distance> from
<start_point> with initial <direction>
"""
start_xyz = geodesy.conversions.lla_to_xyz(start_point)
direction = geometry.conversions.to_unit_vector(direction)
delta_xyz = geodesy.conversions.ned_to_xyz(
direction, numpy.array(start_point, ndmin=2))
rotation_axis = -geometry.conversions.to_unit_vector(
numpy.cross(start_xyz, delta_xyz))
rotation_magnitude = distance / environments.earth.constants['r0']
rotation_quaternion = geometry.quaternion.Quaternion()
rotation_quaternion.from_axis_and_rotation(
rotation_axis, rotation_magnitude)
end_point_xyz = rotation_quaternion.rot(start_xyz)
end_point = geodesy.conversions.xyz_to_lla(end_point_xyz)
return end_point
|
[
"numpy.array",
"numpy.zeros",
"numpy.cross",
"numpy.linalg.norm"
] |
[((810, 831), 'numpy.linalg.norm', 'numpy.linalg.norm', (['dX'], {}), '(dX)\n', (827, 831), False, 'import numpy\n'), ((760, 797), 'numpy.array', 'numpy.array', (['[1.0, 1.0, 0.0]'], {'ndmin': '(2)'}), '([1.0, 1.0, 0.0], ndmin=2)\n', (771, 797), False, 'import numpy\n'), ((2757, 2783), 'numpy.zeros', 'numpy.zeros', (['point_2.shape'], {}), '(point_2.shape)\n', (2768, 2783), False, 'import numpy\n'), ((3209, 3241), 'numpy.cross', 'numpy.cross', (['delta_hat', 'khat_xyz'], {}), '(delta_hat, khat_xyz)\n', (3220, 3241), False, 'import numpy\n'), ((4469, 4502), 'numpy.array', 'numpy.array', (['start_point'], {'ndmin': '(2)'}), '(start_point, ndmin=2)\n', (4480, 4502), False, 'import numpy\n'), ((3310, 3339), 'numpy.array', 'numpy.array', (['point_1'], {'ndmin': '(2)'}), '(point_1, ndmin=2)\n', (3321, 3339), False, 'import numpy\n'), ((4571, 4604), 'numpy.cross', 'numpy.cross', (['start_xyz', 'delta_xyz'], {}), '(start_xyz, delta_xyz)\n', (4582, 4604), False, 'import numpy\n')]
|
# Copyright 2017 Neosapience, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import unittest
import darkon
import tensorflow as tf
import numpy as np
_classes = 2
def nn_graph(activation):
# create graph
x = tf.placeholder(tf.float32, (1, 2, 2, 3), 'x_placeholder')
y = tf.placeholder(tf.int32, name='y_placeholder', shape=[1, 2])
with tf.name_scope('conv1'):
conv_1 = tf.layers.conv2d(
inputs=x,
filters=10,
kernel_size=[2, 2],
padding="same",
activation=activation)
with tf.name_scope('fc2'):
flatten = tf.layers.flatten(conv_1)
top = tf.layers.dense(flatten, _classes)
logits = tf.nn.softmax(top)
return x
class GradcamGuidedBackprop(unittest.TestCase):
def setUp(self):
tf.reset_default_graph()
def tearDown(self):
x = nn_graph(activation=self.activation_fn)
image = np.random.uniform(size=(2, 2, 3))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
gradcam_ops = darkon.Gradcam.candidate_featuremap_op_names(sess)
if self.enable_guided_backprop:
_ = darkon.Gradcam(x, _classes, gradcam_ops[-1])
g = tf.get_default_graph()
from_ts = g.get_operation_by_name(gradcam_ops[-1]).outputs
to_ts = g.get_operation_by_name(gradcam_ops[-2]).outputs
max_output = tf.reduce_max(from_ts, axis=3)
y = tf.reduce_sum(-max_output * 1e2)
grad = tf.gradients(y, to_ts)[0]
grad_val = sess.run(grad, feed_dict={x: np.expand_dims(image, 0)})
if self.enable_guided_backprop:
self.assertTrue(not np.any(grad_val))
else:
self.assertTrue(np.any(grad_val))
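            # Rationale (added note): y sums the negated feature maps, so every
            # gradient reaching the activation is non-positive; guided backprop clips
            # negative gradients to zero, which is why grad_val is expected to be all
            # zeros exactly when enable_guided_backprop is True.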
def test_relu(self):
self.activation_fn = tf.nn.relu
self.enable_guided_backprop = False
def test_relu_guided(self):
self.activation_fn = tf.nn.relu
self.enable_guided_backprop = True
def test_tanh(self):
self.activation_fn = tf.nn.tanh
self.enable_guided_backprop = False
def test_tanh_guided(self):
self.activation_fn = tf.nn.tanh
self.enable_guided_backprop = True
def test_sigmoid(self):
self.activation_fn = tf.nn.sigmoid
self.enable_guided_backprop = False
def test_sigmoid_guided(self):
self.activation_fn = tf.nn.sigmoid
self.enable_guided_backprop = True
def test_relu6(self):
self.activation_fn = tf.nn.relu6
self.enable_guided_backprop = False
def test_relu6_guided(self):
self.activation_fn = tf.nn.relu6
self.enable_guided_backprop = True
def test_elu(self):
self.activation_fn = tf.nn.elu
self.enable_guided_backprop = False
def test_elu_guided(self):
self.activation_fn = tf.nn.elu
self.enable_guided_backprop = True
def test_selu(self):
self.activation_fn = tf.nn.selu
self.enable_guided_backprop = False
def test_selu_guided(self):
self.activation_fn = tf.nn.selu
self.enable_guided_backprop = True
def test_softplus(self):
self.activation_fn = tf.nn.softplus
self.enable_guided_backprop = False
    def test_softplus_guided(self):
self.activation_fn = tf.nn.softplus
self.enable_guided_backprop = True
def test_softsign(self):
self.activation_fn = tf.nn.softsign
self.enable_guided_backprop = False
def test_softsign_guided(self):
self.activation_fn = tf.nn.softsign
self.enable_guided_backprop = True
|
[
"tensorflow.reset_default_graph",
"tensorflow.layers.flatten",
"darkon.Gradcam.candidate_featuremap_op_names",
"darkon.Gradcam",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.any",
"tensorflow.reduce_max",
"tensorflow.global_variables_initializer",
"tensorflow.layers.conv2d",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.nn.softmax",
"numpy.random.uniform",
"numpy.expand_dims",
"tensorflow.layers.dense",
"tensorflow.get_default_graph"
] |
[((799, 856), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(1, 2, 2, 3)', '"""x_placeholder"""'], {}), "(tf.float32, (1, 2, 2, 3), 'x_placeholder')\n", (813, 856), True, 'import tensorflow as tf\n'), ((865, 925), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""y_placeholder"""', 'shape': '[1, 2]'}), "(tf.int32, name='y_placeholder', shape=[1, 2])\n", (879, 925), True, 'import tensorflow as tf\n'), ((1275, 1293), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['top'], {}), '(top)\n', (1288, 1293), True, 'import tensorflow as tf\n'), ((936, 958), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv1"""'], {}), "('conv1')\n", (949, 958), True, 'import tensorflow as tf\n'), ((977, 1078), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'x', 'filters': '(10)', 'kernel_size': '[2, 2]', 'padding': '"""same"""', 'activation': 'activation'}), "(inputs=x, filters=10, kernel_size=[2, 2], padding='same',\n activation=activation)\n", (993, 1078), True, 'import tensorflow as tf\n'), ((1146, 1166), 'tensorflow.name_scope', 'tf.name_scope', (['"""fc2"""'], {}), "('fc2')\n", (1159, 1166), True, 'import tensorflow as tf\n'), ((1186, 1211), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['conv_1'], {}), '(conv_1)\n', (1203, 1211), True, 'import tensorflow as tf\n'), ((1226, 1260), 'tensorflow.layers.dense', 'tf.layers.dense', (['flatten', '_classes'], {}), '(flatten, _classes)\n', (1241, 1260), True, 'import tensorflow as tf\n'), ((1386, 1410), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1408, 1410), True, 'import tensorflow as tf\n'), ((1504, 1537), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(2, 2, 3)'}), '(size=(2, 2, 3))\n', (1521, 1537), True, 'import numpy as np\n'), ((1552, 1564), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1562, 1564), True, 'import tensorflow as tf\n'), ((1657, 1707), 'darkon.Gradcam.candidate_featuremap_op_names', 'darkon.Gradcam.candidate_featuremap_op_names', (['sess'], {}), '(sess)\n', (1701, 1707), False, 'import darkon\n'), ((1835, 1857), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1855, 1857), True, 'import tensorflow as tf\n'), ((2024, 2054), 'tensorflow.reduce_max', 'tf.reduce_max', (['from_ts'], {'axis': '(3)'}), '(from_ts, axis=3)\n', (2037, 2054), True, 'import tensorflow as tf\n'), ((2071, 2105), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(-max_output * 100.0)'], {}), '(-max_output * 100.0)\n', (2084, 2105), True, 'import tensorflow as tf\n'), ((1595, 1628), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1626, 1628), True, 'import tensorflow as tf\n'), ((1773, 1817), 'darkon.Gradcam', 'darkon.Gradcam', (['x', '_classes', 'gradcam_ops[-1]'], {}), '(x, _classes, gradcam_ops[-1])\n', (1787, 1817), False, 'import darkon\n'), ((2124, 2146), 'tensorflow.gradients', 'tf.gradients', (['y', 'to_ts'], {}), '(y, to_ts)\n', (2136, 2146), True, 'import tensorflow as tf\n'), ((2378, 2394), 'numpy.any', 'np.any', (['grad_val'], {}), '(grad_val)\n', (2384, 2394), True, 'import numpy as np\n'), ((2202, 2226), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (2216, 2226), True, 'import numpy as np\n'), ((2310, 2326), 'numpy.any', 'np.any', (['grad_val'], {}), '(grad_val)\n', (2316, 2326), True, 'import numpy as np\n')]
|
import numpy as np
import sys
def micrograph2np(width,shift):
r = int(width/shift-1)
#I = np.load("../DATA_SETS/004773_ProtRelionRefine3D/kino.micrograph.numpy.npy")
I = np.load("../DATA_SETS/004773_ProtRelionRefine3D/full_micrograph.stack_0001.numpy.npy")
I = (I-I.mean())/I.std()
N = int(I.shape[0]/shift)
M = int(I.shape[1]/shift)
S=[]
for i in range(N-r):
for j in range(M-r):
x1 = i*shift
x2 = x1+width
y1 = j*shift
y2 = y1+width
w = I[x1:x2,y1:y2]
S.append(w)
S = np.array(S)
np.save("../DATA_SETS/004773_ProtRelionRefine3D/fraction_micrograph.numpy", S)
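
# Worked example (added note): with width=64 and shift=32, r = 64/32 - 1 = 1, so
# consecutive windows overlap by 50% and an (H, W) micrograph yields
# (H//32 - 1) * (W//32 - 1) crops of shape (64, 64) in S.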
|
[
"numpy.array",
"numpy.load",
"numpy.save"
] |
[((180, 276), 'numpy.load', 'np.load', (['"""../DATA_SETS/004773_ProtRelionRefine3D/full_micrograph.stack_0001.numpy.npy"""'], {}), "(\n '../DATA_SETS/004773_ProtRelionRefine3D/full_micrograph.stack_0001.numpy.npy'\n )\n", (187, 276), True, 'import numpy as np\n'), ((541, 552), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (549, 552), True, 'import numpy as np\n'), ((555, 633), 'numpy.save', 'np.save', (['"""../DATA_SETS/004773_ProtRelionRefine3D/fraction_micrograph.numpy"""', 'S'], {}), "('../DATA_SETS/004773_ProtRelionRefine3D/fraction_micrograph.numpy', S)\n", (562, 633), True, 'import numpy as np\n')]
|
import os
import warnings
import torch.backends.cudnn as cudnn
warnings.filterwarnings("ignore")
from torch.utils.data import DataLoader
from decaps import CapsuleNet
from torch.optim import Adam
import numpy as np
from config import options
import torch
import torch.nn.functional as F
from utils.eval_utils import binary_cls_compute_metrics
import torch.nn as nn
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
theta_c = 0.5 # crop region with attention values higher than this
theta_d = 0.5 # drop region with attention values higher than this
def log_string(out_str):
LOG_FOUT.write(out_str + '\n')
LOG_FOUT.flush()
print(out_str)
@torch.no_grad()
def evaluate():
capsule_net.eval()
test_loss = np.zeros(4)
targets, predictions_raw, predictions_crop, predictions_drop, predictions_combined = [], [], [], [], []
outputs_raw, outputs_crop, outputs_combined = [], [], []
with torch.no_grad():
for batch_id, (data, target) in enumerate(test_loader):
data, target = data.cuda(), target.cuda()
target_ohe = F.one_hot(target, options.num_classes)
y_pred_raw, x_reconst, output, attention_map, _, c_maps, out_vec_raw = capsule_net(data, target_ohe)
loss = capsule_loss(output, target)
targets += [target_ohe]
outputs_raw += [output]
predictions_raw += [y_pred_raw]
test_loss[0] += loss
##################################
# Object Localization and Refinement
##################################
bbox_coords = []
upsampled_attention_map = F.upsample_bilinear(attention_map, size=(data.size(2), data.size(3)))
crop_mask = upsampled_attention_map > theta_c
crop_images = []
for batch_index in range(crop_mask.size(0)):
nonzero_indices = torch.nonzero(crop_mask[batch_index, 0, ...])
height_min = nonzero_indices[:, 0].min()
height_max = nonzero_indices[:, 0].max()
width_min = nonzero_indices[:, 1].min()
width_max = nonzero_indices[:, 1].max()
bbox_coord = np.array([height_min, height_max, width_min, width_max])
bbox_coords.append(bbox_coord)
crop_images.append(F.upsample_bilinear(
data[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max],
size=options.img_h))
crop_images = torch.cat(crop_images, dim=0)
y_pred_crop, _, output_crop, _, _, c_maps_crop, out_vec_crop = capsule_net(crop_images, target_ohe)
loss = capsule_loss(output_crop, target)
predictions_crop += [y_pred_crop]
outputs_crop += [output_crop]
test_loss[1] += loss
# final prediction
output_combined = (output + output_crop) / 2
outputs_combined += [output_combined]
y_pred_combined = output_combined.argmax(dim=1)
y_pred_combined_ohe = F.one_hot(y_pred_combined, options.num_classes)
test_loss[3] += capsule_loss(output_combined, target)
predictions_combined += [y_pred_combined_ohe]
##################################
# Attention Dropping
##################################
drop_mask = F.upsample_bilinear(attention_map, size=(data.size(2), data.size(3))) <= theta_d
drop_images = data * drop_mask.float()
# drop images forward
y_pred_drop, _, output_drop, _, _, c_maps_drop, out_vec_drop = capsule_net(drop_images.cuda(), target_ohe)
loss = capsule_loss(output_crop, target)
predictions_drop += [y_pred_drop]
test_loss[2] += loss
test_loss /= (batch_id + 1)
metrics_raw = binary_cls_compute_metrics(torch.cat(outputs_raw).cpu(), torch.cat(targets).cpu())
metrics_crop = binary_cls_compute_metrics(torch.cat(outputs_crop).cpu(), torch.cat(targets).cpu())
metrics_combined = binary_cls_compute_metrics(torch.cat(outputs_combined).cpu(), torch.cat(targets).cpu())
# display
log_string(" - (Raw) loss: {0:.4f}, acc: {1:.02%}, auc: {2:.02%}"
.format(test_loss[0], metrics_raw['acc'], metrics_raw['auc']))
log_string(" - (Crop) loss: {0:.4f}, acc: {1:.02%}, auc: {2:.02%}"
.format(test_loss[1], metrics_crop['acc'], metrics_crop['auc']))
log_string(" - (Combined) loss: {0:.4f}, acc: {1:.02%}, auc: {2:.02%}"
.format(test_loss[2], metrics_combined['acc'], metrics_combined['auc']))
if __name__ == '__main__':
##################################
# Initialize saving directory
##################################
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
iter_num = options.load_model_path.split('/')[-1].split('.')[0]
save_dir = os.path.dirname(os.path.dirname(options.load_model_path))
img_dir = os.path.join(save_dir, 'imgs')
if not os.path.exists(img_dir):
os.makedirs(img_dir)
viz_dir = os.path.join(img_dir, iter_num+'_crop_{}'.format(theta_c))
if not os.path.exists(viz_dir):
os.makedirs(viz_dir)
LOG_FOUT = open(os.path.join(save_dir, 'log_inference.txt'), 'w')
LOG_FOUT.write(str(options) + '\n')
# bkp of inference
os.system('cp {}/inference.py {}'.format(BASE_DIR, save_dir))
##################################
# Create the model
##################################
capsule_net = CapsuleNet(options)
log_string('Model Generated.')
log_string("Number of trainable parameters: {}".format(sum(param.numel() for param in capsule_net.parameters())))
##################################
# Use cuda
##################################
cudnn.benchmark = True
capsule_net.cuda()
capsule_net = nn.DataParallel(capsule_net)
##################################
# Load the trained model
##################################
ckpt = options.load_model_path
checkpoint = torch.load(ckpt)
state_dict = checkpoint['state_dict']
# Load weights
capsule_net.load_state_dict(state_dict)
log_string('Model successfully loaded from {}'.format(ckpt))
if 'feature_center' in checkpoint:
feature_center = checkpoint['feature_center'].to(torch.device("cuda"))
log_string('feature_center loaded from {}'.format(ckpt))
##################################
# Loss and Optimizer
##################################
if options.loss_type == 'margin':
from utils.loss_utils import MarginLoss
capsule_loss = MarginLoss(options)
elif options.loss_type == 'spread':
from utils.loss_utils import SpreadLoss
capsule_loss = SpreadLoss(options)
elif options.loss_type == 'cross-entropy':
capsule_loss = nn.CrossEntropyLoss()
if options.add_decoder:
from utils.loss_utils import ReconstructionLoss
reconst_loss = ReconstructionLoss()
optimizer = Adam(capsule_net.parameters(), lr=options.lr, betas=(options.beta1, 0.999))
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
##################################
# Load dataset
##################################
if options.data_name == 'mnist':
from dataset.mnist import MNIST as data
os.system('cp {}/dataset/mnist.py {}'.format(BASE_DIR, save_dir))
elif options.data_name == 'fashion_mnist':
from dataset.fashion_mnist import FashionMNIST as data
os.system('cp {}/dataset/fashion_mnist.py {}'.format(BASE_DIR, save_dir))
elif options.data_name == 't_mnist':
from dataset.mnist_translate import MNIST as data
os.system('cp {}/dataset/mnist_translate.py {}'.format(BASE_DIR, save_dir))
elif options.data_name == 'c_mnist':
from dataset.mnist_clutter import MNIST as data
os.system('cp {}/dataset/mnist_clutter.py {}'.format(BASE_DIR, save_dir))
elif options.data_name == 'cub':
from dataset.dataset_CUB import CUB as data
os.system('cp {}/dataset/dataset_CUB.py {}'.format(BASE_DIR, save_dir))
elif options.data_name == 'chexpert':
from dataset.chexpert_dataset import CheXpertDataSet as data
os.system('cp {}/dataset/chexpert_dataset.py {}'.format(BASE_DIR, save_dir))
test_dataset = data(mode='test')
test_loader = DataLoader(test_dataset, batch_size=options.batch_size,
shuffle=False, num_workers=options.workers, drop_last=False)
##################################
# TESTING
##################################
log_string('')
log_string('Start Testing')
evaluate()
|
[
"torch.nn.CrossEntropyLoss",
"utils.loss_utils.ReconstructionLoss",
"numpy.array",
"torch.nn.functional.upsample_bilinear",
"utils.loss_utils.MarginLoss",
"os.path.exists",
"dataset.chexpert_dataset.CheXpertDataSet.cuda",
"config.options.load_model_path.split",
"dataset.chexpert_dataset.CheXpertDataSet.size",
"decaps.CapsuleNet",
"dataset.chexpert_dataset.CheXpertDataSet",
"os.path.dirname",
"torch.nn.functional.one_hot",
"utils.loss_utils.SpreadLoss",
"warnings.filterwarnings",
"torch.cat",
"torch.device",
"os.makedirs",
"torch.load",
"os.path.join",
"torch.nn.DataParallel",
"torch.nonzero",
"numpy.zeros",
"torch.utils.data.DataLoader",
"os.path.abspath",
"torch.no_grad"
] |
[((63, 96), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (86, 96), False, 'import warnings\n'), ((650, 665), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (663, 665), False, 'import torch\n'), ((722, 733), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (730, 733), True, 'import numpy as np\n'), ((5041, 5071), 'os.path.join', 'os.path.join', (['save_dir', '"""imgs"""'], {}), "(save_dir, 'imgs')\n", (5053, 5071), False, 'import os\n'), ((5596, 5615), 'decaps.CapsuleNet', 'CapsuleNet', (['options'], {}), '(options)\n', (5606, 5615), False, 'from decaps import CapsuleNet\n'), ((5931, 5959), 'torch.nn.DataParallel', 'nn.DataParallel', (['capsule_net'], {}), '(capsule_net)\n', (5946, 5959), True, 'import torch.nn as nn\n'), ((6120, 6136), 'torch.load', 'torch.load', (['ckpt'], {}), '(ckpt)\n', (6130, 6136), False, 'import torch\n'), ((8452, 8469), 'dataset.chexpert_dataset.CheXpertDataSet', 'data', ([], {'mode': '"""test"""'}), "(mode='test')\n", (8456, 8469), True, 'from dataset.chexpert_dataset import CheXpertDataSet as data\n'), ((8488, 8608), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'options.batch_size', 'shuffle': '(False)', 'num_workers': 'options.workers', 'drop_last': '(False)'}), '(test_dataset, batch_size=options.batch_size, shuffle=False,\n num_workers=options.workers, drop_last=False)\n', (8498, 8608), False, 'from torch.utils.data import DataLoader\n'), ((913, 928), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (926, 928), False, 'import torch\n'), ((4858, 4883), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4873, 4883), False, 'import os\n'), ((4985, 5025), 'os.path.dirname', 'os.path.dirname', (['options.load_model_path'], {}), '(options.load_model_path)\n', (5000, 5025), False, 'import os\n'), ((5083, 5106), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (5097, 5106), False, 'import os\n'), ((5116, 5136), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (5127, 5136), False, 'import os\n'), ((5221, 5244), 'os.path.exists', 'os.path.exists', (['viz_dir'], {}), '(viz_dir)\n', (5235, 5244), False, 'import os\n'), ((5254, 5274), 'os.makedirs', 'os.makedirs', (['viz_dir'], {}), '(viz_dir)\n', (5265, 5274), False, 'import os\n'), ((5296, 5339), 'os.path.join', 'os.path.join', (['save_dir', '"""log_inference.txt"""'], {}), "(save_dir, 'log_inference.txt')\n", (5308, 5339), False, 'import os\n'), ((6705, 6724), 'utils.loss_utils.MarginLoss', 'MarginLoss', (['options'], {}), '(options)\n', (6715, 6724), False, 'from utils.loss_utils import MarginLoss\n'), ((7057, 7077), 'utils.loss_utils.ReconstructionLoss', 'ReconstructionLoss', ([], {}), '()\n', (7075, 7077), False, 'from utils.loss_utils import ReconstructionLoss\n'), ((1073, 1111), 'torch.nn.functional.one_hot', 'F.one_hot', (['target', 'options.num_classes'], {}), '(target, options.num_classes)\n', (1082, 1111), True, 'import torch.nn.functional as F\n'), ((2515, 2544), 'torch.cat', 'torch.cat', (['crop_images'], {'dim': '(0)'}), '(crop_images, dim=0)\n', (2524, 2544), False, 'import torch\n'), ((3065, 3112), 'torch.nn.functional.one_hot', 'F.one_hot', (['y_pred_combined', 'options.num_classes'], {}), '(y_pred_combined, options.num_classes)\n', (3074, 3112), True, 'import torch.nn.functional as F\n'), ((6404, 6424), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6416, 6424), False, 'import torch\n'), ((6837, 6856), 'utils.loss_utils.SpreadLoss', 
'SpreadLoss', (['options'], {}), '(options)\n', (6847, 6856), False, 'from utils.loss_utils import SpreadLoss\n'), ((1021, 1032), 'dataset.chexpert_dataset.CheXpertDataSet.cuda', 'data.cuda', ([], {}), '()\n', (1030, 1032), True, 'from dataset.chexpert_dataset import CheXpertDataSet as data\n'), ((1881, 1926), 'torch.nonzero', 'torch.nonzero', (['crop_mask[batch_index, 0, ...]'], {}), '(crop_mask[batch_index, 0, ...])\n', (1894, 1926), False, 'import torch\n'), ((2184, 2240), 'numpy.array', 'np.array', (['[height_min, height_max, width_min, width_max]'], {}), '([height_min, height_max, width_min, width_max])\n', (2192, 2240), True, 'import numpy as np\n'), ((6927, 6948), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6946, 6948), True, 'import torch.nn as nn\n'), ((2324, 2450), 'torch.nn.functional.upsample_bilinear', 'F.upsample_bilinear', (['data[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max\n ]'], {'size': 'options.img_h'}), '(data[batch_index:batch_index + 1, :, height_min:\n height_max, width_min:width_max], size=options.img_h)\n', (2343, 2450), True, 'import torch.nn.functional as F\n'), ((3894, 3916), 'torch.cat', 'torch.cat', (['outputs_raw'], {}), '(outputs_raw)\n', (3903, 3916), False, 'import torch\n'), ((3924, 3942), 'torch.cat', 'torch.cat', (['targets'], {}), '(targets)\n', (3933, 3942), False, 'import torch\n'), ((4000, 4023), 'torch.cat', 'torch.cat', (['outputs_crop'], {}), '(outputs_crop)\n', (4009, 4023), False, 'import torch\n'), ((4031, 4049), 'torch.cat', 'torch.cat', (['targets'], {}), '(targets)\n', (4040, 4049), False, 'import torch\n'), ((4111, 4138), 'torch.cat', 'torch.cat', (['outputs_combined'], {}), '(outputs_combined)\n', (4120, 4138), False, 'import torch\n'), ((4146, 4164), 'torch.cat', 'torch.cat', (['targets'], {}), '(targets)\n', (4155, 4164), False, 'import torch\n'), ((4900, 4934), 'config.options.load_model_path.split', 'options.load_model_path.split', (['"""/"""'], {}), "('/')\n", (4929, 4934), False, 'from config import options\n'), ((1674, 1686), 'dataset.chexpert_dataset.CheXpertDataSet.size', 'data.size', (['(2)'], {}), '(2)\n', (1683, 1686), True, 'from dataset.chexpert_dataset import CheXpertDataSet as data\n'), ((1688, 1700), 'dataset.chexpert_dataset.CheXpertDataSet.size', 'data.size', (['(3)'], {}), '(3)\n', (1697, 1700), True, 'from dataset.chexpert_dataset import CheXpertDataSet as data\n'), ((3431, 3443), 'dataset.chexpert_dataset.CheXpertDataSet.size', 'data.size', (['(2)'], {}), '(2)\n', (3440, 3443), True, 'from dataset.chexpert_dataset import CheXpertDataSet as data\n'), ((3445, 3457), 'dataset.chexpert_dataset.CheXpertDataSet.size', 'data.size', (['(3)'], {}), '(3)\n', (3454, 3457), True, 'from dataset.chexpert_dataset import CheXpertDataSet as data\n')]
|
#!/usr/bin/env python
# coding: utf-8
"""
Classification Using Hidden Markov Model
========================================
This is a demonstration using the implemented Hidden Markov model to classify multiple targets.
We will attempt to classify 3 targets in an undefined region.
Our sensor will be all-seeing, and provide us with indirect observations of the targets such that,
using the implemented Hidden Markov Model (HMM), we should hopefully successfully classify exactly
3 targets correctly.
"""
# %%
# All Stone Soup imports will be given in order of usage.
from datetime import datetime, timedelta
import numpy as np
# %%
# Ground Truth
# ^^^^^^^^^^^^
# The targets may take one of three discrete hidden classes: 'bike', 'car' and 'bus'.
# It will be assumed that the targets cannot transition from one class to another, hence an
# identity transition matrix is given to the :class:`~.CategoricalTransitionModel`.
#
# A :class:`~.CategoricalState` class is used to store information on the classification/category
# of the targets. The state vector will define a categorical distribution over the 3 possible
# classes, whereby each component defines the probability that a target is of the corresponding
# class. For example, the state vector (0.2, 0.3, 0.5), with category names ('bike', 'car', 'bus')
# indicates that a target has a 20% probability of being class 'bike', a 30% probability of being
# class 'car' etc.
# It does not make sense for a true target to be a distribution over the possible classes, so
# the true categorical states have binary state vectors indicating a specific class
# (i.e. a '1' at one state vector index, and '0's elsewhere); a short illustrative snippet of both
# conventions follows this section.
# The :class:`~.CategoricalGroundTruthState` class inherits directly from the base
# :class:`~.CategoricalState` class.
#
# While the category will remain the same, a :class:`~.CategoricalTransitionModel` is used here
# for the sake of demonstration.
#
# The category and timings for one of the ground truth paths will be printed.
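# As a quick illustration of the state-vector convention described above (added for clarity and
# not part of the original pipeline), a categorical distribution expresses classification
# uncertainty, whereas a ground-truth state is one-hot:
example_categories = ('bike', 'car', 'bus')
example_distribution = np.array([0.2, 0.3, 0.5])  # uncertain classification
example_truth = np.array([0.0, 0.0, 1.0])         # the target is definitely a 'bus'
print(example_categories[np.argmax(example_distribution)])  # most probable class -> 'bus'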
from stonesoup.models.transition.categorical import CategoricalTransitionModel
from stonesoup.types.groundtruth import CategoricalGroundTruthState
from stonesoup.types.groundtruth import GroundTruthPath
category_transition = CategoricalTransitionModel(transition_matrix=np.eye(3),
transition_covariance=0.1 * np.eye(3))
start = datetime.now()
hidden_classes = ['bike', 'car', 'bus']
# Generating ground truth
ground_truths = list()
for i in range(1, 4):
state_vector = np.zeros(3) # create a vector with 3 zeroes
state_vector[np.random.choice(3, 1, p=[1/3, 1/3, 1/3])] = 1 # pick a random class out of the 3
ground_truth_state = CategoricalGroundTruthState(state_vector,
timestamp=start,
category_names=hidden_classes)
ground_truth = GroundTruthPath([ground_truth_state], id=f"GT{i}")
for _ in range(10):
new_vector = category_transition.function(ground_truth[-1],
noise=True,
time_interval=timedelta(seconds=1))
new_state = CategoricalGroundTruthState(
new_vector,
timestamp=ground_truth[-1].timestamp + timedelta(seconds=1),
category_names=hidden_classes
)
ground_truth.append(new_state)
ground_truths.append(ground_truth)
for states in np.vstack(ground_truths).T:
print(f"{states[0].timestamp:%H:%M:%S}", end="")
for state in states:
print(f" -- {state.category}", end="")
print()
# %%
# Measurement
# ^^^^^^^^^^^
# Using a Hidden Markov Model, it is assumed that the hidden class of a target cannot be directly
# observed, and instead indirect observations are taken. In this instance, observations of the
# targets' sizes are taken ('small' or 'large'), which have direct implications as to the targets'
# hidden classes, and this relationship is modelled by the `emission matrix` of the
# :class:`~.CategoricalMeasurementModel`, which is used by the :class:`~.CategoricalSensor` to
# provide :class:`~.CategoricalDetection` types.
# We will model this such that a 'bike' has a very small chance of being observed as a 'large'
# target. Similarly, a 'bus' will tend to appear as 'large', whereas a 'car' has an equal chance
# of being observed as either.
from stonesoup.models.measurement.categorical import CategoricalMeasurementModel
from stonesoup.sensor.categorical import CategoricalSensor
E = np.array([[0.99, 0.01], # P(small | bike) P(large | bike)
[0.5, 0.5],
[0.01, 0.99]])
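# Quick sanity check (added for illustration): each row of the emission matrix is a conditional
# distribution over the observed sizes, so every row should sum to one.
assert np.allclose(E.sum(axis=1), 1.0)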
model = CategoricalMeasurementModel(ndim_state=3,
emission_matrix=E,
emission_covariance=0.1 * np.eye(2),
mapping=[0, 1, 2])
eo = CategoricalSensor(measurement_model=model,
category_names=['small', 'large'])
# Generating measurements
measurements = list()
for index, states in enumerate(np.vstack(ground_truths).T):
if index == 5:
measurements_at_time = set() # Give tracker chance to use prediction instead
else:
measurements_at_time = eo.measure(states)
timestamp = next(iter(states)).timestamp
measurements.append((timestamp, measurements_at_time))
print(f"{timestamp:%H:%M:%S} -- {[meas.category for meas in measurements_at_time]}")
# %%
# Tracking Components
# ^^^^^^^^^^^^^^^^^^^
# %%
# Predictor
# ---------
# A :class:`~.HMMPredictor` specifically uses :class:`~.CategoricalTransitionModel` types to
# predict.
from stonesoup.predictor.categorical import HMMPredictor
predictor = HMMPredictor(category_transition)
# %%
# Updater
# -------
from stonesoup.updater.categorical import HMMUpdater
updater = HMMUpdater()
# %%
# Hypothesiser
# ------------
# A :class:`~.CategoricalHypothesiser` is used for calculating categorical hypotheses.
# It utilises the :class:`~.ObservationAccuracy` measure: a multi-dimensional extension of an
# 'accuracy' score, essentially providing a measure of the similarity between two categorical
# distributions.
from stonesoup.hypothesiser.categorical import CategoricalHypothesiser
hypothesiser = CategoricalHypothesiser(predictor=predictor, updater=updater)
# %%
# Data Associator
# ---------------
# We will use a standard :class:`~.GNNWith2DAssignment` data associator.
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
data_associator = GNNWith2DAssignment(hypothesiser)
# %%
# Prior
# -----
# As we are tracking in a categorical state space, we should initiate with a categorical state for
# the prior. Equal probability is given to all 3 of the possible hidden classes that a target
# might take (the category names are also provided here).
from stonesoup.types.state import CategoricalState
prior = CategoricalState([1 / 3, 1 / 3, 1 / 3], category_names=hidden_classes)
# %%
# Initiator
# ---------
# For each unassociated detection, a new track will be initiated. In this instance we use a
# :class:`~.SimpleCategoricalInitiator`, which specifically handles categorical state priors.
from stonesoup.initiator.categorical import SimpleCategoricalInitiator
initiator = SimpleCategoricalInitiator(prior_state=prior, measurement_model=None)
# %%
# Deleter
# -------
# We can use a standard :class:`~.UpdateTimeStepsDeleter`.
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(2)
# %%
# Tracker
# -------
# We can use a standard :class:`~.MultiTargetTracker`.
from stonesoup.tracker.simple import MultiTargetTracker
tracker = MultiTargetTracker(initiator, deleter, measurements, data_associator, updater)
# %%
# Tracking
# ^^^^^^^^
tracks = set()
for time, ctracks in tracker:
tracks.update(ctracks)
print(f"Number of tracks: {len(tracks)}")
for track in tracks:
certainty = track.state_vector[np.argmax(track.state_vector)][0] * 100
print(f"id: {track.id} -- category: {track.category} -- certainty: {certainty}%")
for state in track:
_time = state.timestamp.strftime('%H:%M')
_type = str(type(state)).replace("class 'stonesoup.types.", "").strip("<>'. ")
state_string = f"{_time} -- {_type} -- {state.category}"
try:
meas_string = f"associated measurement: {state.hypothesis.measurement.category}"
except AttributeError:
pass
else:
state_string += f" -- {meas_string}"
print(state_string)
print()
# %%
# Metric
# ^^^^^^
# Determining tracking accuracy.
# In calculating how many targets were classified correctly, only tracks with the highest
# classification certainty are considered. In the situation where probabilities are equal, a
# random classification is chosen.
excess_tracks = len(tracks) - len(ground_truths) # target value = 0
sorted_tracks = sorted(tracks,
key=lambda track: track.state_vector[np.argmax(track.state_vector)][0],
reverse=True)
best_tracks = sorted_tracks[:3]
true_classifications = [ground_truth.category for ground_truth in ground_truths]
track_classifications = [track.category for track in best_tracks]
num_correct_classifications = 0 # target value = num ground truths
for true_classification in true_classifications:
for i in range(len(track_classifications)):
if track_classifications[i] == true_classification:
num_correct_classifications += 1
del track_classifications[i]
break
print(f"Excess tracks: {excess_tracks}")
print(f"No. correct classifications: {num_correct_classifications}")
|
[
"numpy.array",
"stonesoup.hypothesiser.categorical.CategoricalHypothesiser",
"datetime.timedelta",
"numpy.vstack",
"stonesoup.initiator.categorical.SimpleCategoricalInitiator",
"stonesoup.tracker.simple.MultiTargetTracker",
"stonesoup.types.state.CategoricalState",
"stonesoup.predictor.categorical.HMMPredictor",
"stonesoup.dataassociator.neighbour.GNNWith2DAssignment",
"numpy.eye",
"stonesoup.sensor.categorical.CategoricalSensor",
"numpy.random.choice",
"stonesoup.types.groundtruth.GroundTruthPath",
"numpy.argmax",
"stonesoup.types.groundtruth.CategoricalGroundTruthState",
"stonesoup.deleter.time.UpdateTimeStepsDeleter",
"stonesoup.updater.categorical.HMMUpdater",
"datetime.datetime.now",
"numpy.zeros"
] |
[((2407, 2421), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2419, 2421), False, 'from datetime import datetime, timedelta\n'), ((4603, 4653), 'numpy.array', 'np.array', (['[[0.99, 0.01], [0.5, 0.5], [0.01, 0.99]]'], {}), '([[0.99, 0.01], [0.5, 0.5], [0.01, 0.99]])\n', (4611, 4653), True, 'import numpy as np\n'), ((4957, 5034), 'stonesoup.sensor.categorical.CategoricalSensor', 'CategoricalSensor', ([], {'measurement_model': 'model', 'category_names': "['small', 'large']"}), "(measurement_model=model, category_names=['small', 'large'])\n", (4974, 5034), False, 'from stonesoup.sensor.categorical import CategoricalSensor\n'), ((5780, 5813), 'stonesoup.predictor.categorical.HMMPredictor', 'HMMPredictor', (['category_transition'], {}), '(category_transition)\n', (5792, 5813), False, 'from stonesoup.predictor.categorical import HMMPredictor\n'), ((5904, 5916), 'stonesoup.updater.categorical.HMMUpdater', 'HMMUpdater', ([], {}), '()\n', (5914, 5916), False, 'from stonesoup.updater.categorical import HMMUpdater\n'), ((6332, 6393), 'stonesoup.hypothesiser.categorical.CategoricalHypothesiser', 'CategoricalHypothesiser', ([], {'predictor': 'predictor', 'updater': 'updater'}), '(predictor=predictor, updater=updater)\n', (6355, 6393), False, 'from stonesoup.hypothesiser.categorical import CategoricalHypothesiser\n'), ((6595, 6628), 'stonesoup.dataassociator.neighbour.GNNWith2DAssignment', 'GNNWith2DAssignment', (['hypothesiser'], {}), '(hypothesiser)\n', (6614, 6628), False, 'from stonesoup.dataassociator.neighbour import GNNWith2DAssignment\n'), ((6962, 7032), 'stonesoup.types.state.CategoricalState', 'CategoricalState', (['[1 / 3, 1 / 3, 1 / 3]'], {'category_names': 'hidden_classes'}), '([1 / 3, 1 / 3, 1 / 3], category_names=hidden_classes)\n', (6978, 7032), False, 'from stonesoup.types.state import CategoricalState\n'), ((7333, 7402), 'stonesoup.initiator.categorical.SimpleCategoricalInitiator', 'SimpleCategoricalInitiator', ([], {'prior_state': 'prior', 'measurement_model': 'None'}), '(prior_state=prior, measurement_model=None)\n', (7359, 7402), False, 'from stonesoup.initiator.categorical import SimpleCategoricalInitiator\n'), ((7557, 7582), 'stonesoup.deleter.time.UpdateTimeStepsDeleter', 'UpdateTimeStepsDeleter', (['(2)'], {}), '(2)\n', (7579, 7582), False, 'from stonesoup.deleter.time import UpdateTimeStepsDeleter\n'), ((7731, 7809), 'stonesoup.tracker.simple.MultiTargetTracker', 'MultiTargetTracker', (['initiator', 'deleter', 'measurements', 'data_associator', 'updater'], {}), '(initiator, deleter, measurements, data_associator, updater)\n', (7749, 7809), False, 'from stonesoup.tracker.simple import MultiTargetTracker\n'), ((2554, 2565), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2562, 2565), True, 'import numpy as np\n'), ((2724, 2818), 'stonesoup.types.groundtruth.CategoricalGroundTruthState', 'CategoricalGroundTruthState', (['state_vector'], {'timestamp': 'start', 'category_names': 'hidden_classes'}), '(state_vector, timestamp=start, category_names=\n hidden_classes)\n', (2751, 2818), False, 'from stonesoup.types.groundtruth import CategoricalGroundTruthState\n'), ((2940, 2990), 'stonesoup.types.groundtruth.GroundTruthPath', 'GroundTruthPath', (['[ground_truth_state]'], {'id': 'f"""GT{i}"""'}), "([ground_truth_state], id=f'GT{i}')\n", (2955, 2990), False, 'from stonesoup.types.groundtruth import GroundTruthPath\n'), ((3524, 3548), 'numpy.vstack', 'np.vstack', (['ground_truths'], {}), '(ground_truths)\n', (3533, 3548), True, 'import numpy as np\n'), ((2299, 2308), 
'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2305, 2308), True, 'import numpy as np\n'), ((2616, 2663), 'numpy.random.choice', 'np.random.choice', (['(3)', '(1)'], {'p': '[1 / 3, 1 / 3, 1 / 3]'}), '(3, 1, p=[1 / 3, 1 / 3, 1 / 3])\n', (2632, 2663), True, 'import numpy as np\n'), ((5138, 5162), 'numpy.vstack', 'np.vstack', (['ground_truths'], {}), '(ground_truths)\n', (5147, 5162), True, 'import numpy as np\n'), ((2387, 2396), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2393, 2396), True, 'import numpy as np\n'), ((4885, 4894), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4891, 4894), True, 'import numpy as np\n'), ((3210, 3230), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (3219, 3230), False, 'from datetime import datetime, timedelta\n'), ((8010, 8039), 'numpy.argmax', 'np.argmax', (['track.state_vector'], {}), '(track.state_vector)\n', (8019, 8039), True, 'import numpy as np\n'), ((3356, 3376), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (3365, 3376), False, 'from datetime import datetime, timedelta\n'), ((9055, 9084), 'numpy.argmax', 'np.argmax', (['track.state_vector'], {}), '(track.state_vector)\n', (9064, 9084), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import numpy as np
import os
import sys
import argparse
import glob
import time
import onnx
import onnxruntime
import cv2
import caffe
from cvi_toolkit.model import OnnxModel
from cvi_toolkit.utils.yolov3_util import preprocess, postprocess_v2, postprocess_v3, postprocess_v4_tiny, draw
def check_files(args):
if not os.path.isfile(args.model_def):
print("cannot find the file %s", args.model_def)
sys.exit(1)
if not os.path.isfile(args.input_file):
print("cannot find the file %s", args.input_file)
sys.exit(1)
# python/cvi_toolkit/inference/onnx/run_onnx_detector_yolo.py
def parse_args():
parser = argparse.ArgumentParser(description='Eval YOLO networks.')
parser.add_argument('--model_def', type=str, default='',
help="Model definition file")
parser.add_argument("--net_input_dims", default='416,416',
help="'height,width' dimensions of net input tensors.")
parser.add_argument("--input_file", type=str, default='',
help="Input image for testing")
parser.add_argument("--label_file", type=str, default='',
help="coco lable file in txt format")
parser.add_argument("--draw_image", type=str, default='',
help="Draw results on image")
parser.add_argument("--dump_blobs",
help="Dump all blobs into a file in npz format")
parser.add_argument("--obj_threshold", type=float, default=0.3,
help="Object confidence threshold")
parser.add_argument("--nms_threshold", type=float, default=0.5,
help="NMS threshold")
parser.add_argument("--batch_size", type=int, default=1,
help="Set batch size")
parser.add_argument("--yolov3", type=int, default=1,
help="yolov2 or yolov3")
parser.add_argument("--yolov4-tiny", type=int, default=0,
help="set to yolov4")
args = parser.parse_args()
check_files(args)
return args
def main(argv):
args = parse_args()
# Make Detector
net_input_dims = [int(s) for s in args.net_input_dims.split(',')]
obj_threshold = float(args.obj_threshold)
nms_threshold = float(args.nms_threshold)
yolov3 = True if args.yolov3 else False
yolov4_tiny = True if args.yolov4_tiny else False
print("net_input_dims", net_input_dims)
print("obj_threshold", obj_threshold)
print("nms_threshold", nms_threshold)
print("yolov3", yolov3)
print("yolov4_tiny", yolov4_tiny)
image = cv2.imread(args.input_file)
image_x = preprocess(image, net_input_dims)
image_x = np.expand_dims(image_x, axis=0)
inputs = image_x
for i in range(1, args.batch_size):
inputs = np.append(inputs, image_x, axis=0)
input_shape = np.array([net_input_dims[0], net_input_dims[1]], dtype=np.float32).reshape(1, 2)
ort_session = onnxruntime.InferenceSession(args.model_def)
ort_inputs = {'input': inputs}
ort_outs = ort_session.run(None, ort_inputs)
out_feat = {}
if yolov4_tiny:
batched_predictions = postprocess_v4_tiny(ort_outs, image.shape, net_input_dims,
obj_threshold, nms_threshold, args.batch_size)
else:
out_feat['layer82-conv'] = ort_outs[0]
out_feat['layer94-conv'] = ort_outs[1]
out_feat['layer106-conv'] = ort_outs[2]
batched_predictions = postprocess_v3(out_feat, image.shape, net_input_dims,
obj_threshold, nms_threshold, False, args.batch_size)
print(batched_predictions[0])
if args.draw_image:
image = draw(image, batched_predictions[0], args.label_file)
cv2.imwrite(args.draw_image, image)
if args.dump_blobs:
# second pass for dump all output
# plz refre https://github.com/microsoft/onnxruntime/issues/1455
output_keys = []
for i in range(len(ort_outs)):
output_keys.append('output_{}'.format(i))
model = onnx.load(args.model_def)
        # tested against commit c3cea486d https://github.com/microsoft/onnxruntime.git
for x in model.graph.node:
_intermediate_tensor_name = list(x.output)
intermediate_tensor_name = ",".join(_intermediate_tensor_name)
intermediate_layer_value_info = onnx.helper.ValueInfoProto()
intermediate_layer_value_info.name = intermediate_tensor_name
model.graph.output.append(intermediate_layer_value_info)
output_keys.append(intermediate_layer_value_info.name + '_' + x.op_type)
dump_all_onnx = "dump_all.onnx"
if not os.path.exists(dump_all_onnx):
onnx.save(model, dump_all_onnx)
else:
print("{} is exitsed!".format(dump_all_onnx))
print("dump multi-output onnx all tensor at ", dump_all_onnx)
# dump all inferneced tensor
ort_session = onnxruntime.InferenceSession(dump_all_onnx)
ort_outs = ort_session.run(None, ort_inputs)
tensor_all_dict = dict(zip(output_keys, map(np.ndarray.flatten, ort_outs)))
tensor_all_dict['input'] = inputs
np.savez(args.dump_blobs, **tensor_all_dict)
print("dump all tensor at ", args.dump_blobs)
if __name__ == '__main__':
main(sys.argv)
|
[
"cv2.imwrite",
"cvi_toolkit.utils.yolov3_util.draw",
"numpy.savez",
"os.path.exists",
"onnx.save",
"argparse.ArgumentParser",
"onnxruntime.InferenceSession",
"os.path.isfile",
"cvi_toolkit.utils.yolov3_util.preprocess",
"numpy.append",
"numpy.array",
"onnx.helper.ValueInfoProto",
"onnx.load",
"numpy.expand_dims",
"sys.exit",
"cvi_toolkit.utils.yolov3_util.postprocess_v4_tiny",
"cv2.imread",
"cvi_toolkit.utils.yolov3_util.postprocess_v3"
] |
[((669, 727), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Eval YOLO networks."""'}), "(description='Eval YOLO networks.')\n", (692, 727), False, 'import argparse\n'), ((2620, 2647), 'cv2.imread', 'cv2.imread', (['args.input_file'], {}), '(args.input_file)\n', (2630, 2647), False, 'import cv2\n'), ((2662, 2695), 'cvi_toolkit.utils.yolov3_util.preprocess', 'preprocess', (['image', 'net_input_dims'], {}), '(image, net_input_dims)\n', (2672, 2695), False, 'from cvi_toolkit.utils.yolov3_util import preprocess, postprocess_v2, postprocess_v3, postprocess_v4_tiny, draw\n'), ((2711, 2742), 'numpy.expand_dims', 'np.expand_dims', (['image_x'], {'axis': '(0)'}), '(image_x, axis=0)\n', (2725, 2742), True, 'import numpy as np\n'), ((2971, 3015), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['args.model_def'], {}), '(args.model_def)\n', (2999, 3015), False, 'import onnxruntime\n'), ((346, 376), 'os.path.isfile', 'os.path.isfile', (['args.model_def'], {}), '(args.model_def)\n', (360, 376), False, 'import os\n'), ((443, 454), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (451, 454), False, 'import sys\n'), ((467, 498), 'os.path.isfile', 'os.path.isfile', (['args.input_file'], {}), '(args.input_file)\n', (481, 498), False, 'import os\n'), ((566, 577), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (574, 577), False, 'import sys\n'), ((2819, 2853), 'numpy.append', 'np.append', (['inputs', 'image_x'], {'axis': '(0)'}), '(inputs, image_x, axis=0)\n', (2828, 2853), True, 'import numpy as np\n'), ((3169, 3278), 'cvi_toolkit.utils.yolov3_util.postprocess_v4_tiny', 'postprocess_v4_tiny', (['ort_outs', 'image.shape', 'net_input_dims', 'obj_threshold', 'nms_threshold', 'args.batch_size'], {}), '(ort_outs, image.shape, net_input_dims, obj_threshold,\n nms_threshold, args.batch_size)\n', (3188, 3278), False, 'from cvi_toolkit.utils.yolov3_util import preprocess, postprocess_v2, postprocess_v3, postprocess_v4_tiny, draw\n'), ((3489, 3600), 'cvi_toolkit.utils.yolov3_util.postprocess_v3', 'postprocess_v3', (['out_feat', 'image.shape', 'net_input_dims', 'obj_threshold', 'nms_threshold', '(False)', 'args.batch_size'], {}), '(out_feat, image.shape, net_input_dims, obj_threshold,\n nms_threshold, False, args.batch_size)\n', (3503, 3600), False, 'from cvi_toolkit.utils.yolov3_util import preprocess, postprocess_v2, postprocess_v3, postprocess_v4_tiny, draw\n'), ((3707, 3759), 'cvi_toolkit.utils.yolov3_util.draw', 'draw', (['image', 'batched_predictions[0]', 'args.label_file'], {}), '(image, batched_predictions[0], args.label_file)\n', (3711, 3759), False, 'from cvi_toolkit.utils.yolov3_util import preprocess, postprocess_v2, postprocess_v3, postprocess_v4_tiny, draw\n'), ((3768, 3803), 'cv2.imwrite', 'cv2.imwrite', (['args.draw_image', 'image'], {}), '(args.draw_image, image)\n', (3779, 3803), False, 'import cv2\n'), ((4079, 4104), 'onnx.load', 'onnx.load', (['args.model_def'], {}), '(args.model_def)\n', (4088, 4104), False, 'import onnx\n'), ((4987, 5030), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['dump_all_onnx'], {}), '(dump_all_onnx)\n', (5015, 5030), False, 'import onnxruntime\n'), ((5218, 5262), 'numpy.savez', 'np.savez', (['args.dump_blobs'], {}), '(args.dump_blobs, **tensor_all_dict)\n', (5226, 5262), True, 'import numpy as np\n'), ((2872, 2938), 'numpy.array', 'np.array', (['[net_input_dims[0], net_input_dims[1]]'], {'dtype': 'np.float32'}), '([net_input_dims[0], net_input_dims[1]], dtype=np.float32)\n', (2880, 2938), True, 'import 
numpy as np\n'), ((4397, 4425), 'onnx.helper.ValueInfoProto', 'onnx.helper.ValueInfoProto', ([], {}), '()\n', (4423, 4425), False, 'import onnx\n'), ((4710, 4739), 'os.path.exists', 'os.path.exists', (['dump_all_onnx'], {}), '(dump_all_onnx)\n', (4724, 4739), False, 'import os\n'), ((4753, 4784), 'onnx.save', 'onnx.save', (['model', 'dump_all_onnx'], {}), '(model, dump_all_onnx)\n', (4762, 4784), False, 'import onnx\n')]
|
import pickle
import time
import numpy as np
from macrel import graphs
from macrel import vast11data as vast
INTERVAL_SEC = 900.0
N = len(vast.NODES)
SERVICES = {
1: "Mux",
17: "Quote",
21: "FTP",
22: "SSH",
23: "Telnet",
25: "SMTP",
53: "DNS",
80: "HTTP",
88: "Kerberos",
123: "NTP",
135: "DCE",
139: "NETBIOS",
255: "Reserved",
389: "LDAP",
443: "HTTPS",
445: "Microsoft-DS",
464: "kpasswd",
481: "ph",
}
tally_map = {port: graphs.ConnectionTally(N) for port in SERVICES.keys()}
other_tally = graphs.ConnectionTally(N)
def add_tally(event):
src = vast.NODE_BY_IP.get(event.source_ip)
dest = vast.NODE_BY_IP.get(event.dest_ip)
if not src or not dest: return
port = event.dest_port
tally = tally_map.get(port, other_tally)
tally.connect(src.id, dest.id, event.conn_built)
start_times = []
snapshots = {port: [] for port in SERVICES}
snapshots["other"] = []
snapshots["all"] = []
def take_snapshot():
start_times.append(start_time)
all_totals = None
for port, tally in tally_map.items():
totals = tally.to_sparse_matrix()
snapshots[port].append(totals)
if all_totals is None:
all_totals = totals.copy()
else:
all_totals += totals
snapshots["other"].append(other_tally.to_sparse_matrix())
snapshots["all"].append(all_totals)
parser = vast.FWEventParser()
events = parser.parse_all_fw_events()
first_event = next(events)
start_time = first_event.time
tt = time.gmtime(start_time)
start_time -= (tt.tm_min * 60) # align to hour
end_time = start_time + INTERVAL_SEC
t = first_event.time
while t > end_time:
take_snapshot()
start_time = end_time
end_time = start_time + INTERVAL_SEC
add_tally(first_event)
for event in events:
t = event.time
while t > end_time:
take_snapshot()
start_time = end_time
end_time = start_time + INTERVAL_SEC
add_tally(event)
take_snapshot()
data = dict(
services = SERVICES,
start_times = np.asarray(start_times),
snapshots = snapshots,
)
pickle.dump(data, open("vast11-connections-by-port.pickle", "wb"))
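# Illustrative note: the pickle written above can be reloaded later for analysis, e.g.
#   data = pickle.load(open("vast11-connections-by-port.pickle", "rb"))
#   print(data["services"], len(data["start_times"]), len(data["snapshots"]["all"]))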
|
[
"macrel.vast11data.FWEventParser",
"numpy.asarray",
"macrel.vast11data.NODE_BY_IP.get",
"time.gmtime",
"macrel.graphs.ConnectionTally"
] |
[((575, 600), 'macrel.graphs.ConnectionTally', 'graphs.ConnectionTally', (['N'], {}), '(N)\n', (597, 600), False, 'from macrel import graphs\n'), ((1344, 1364), 'macrel.vast11data.FWEventParser', 'vast.FWEventParser', ([], {}), '()\n', (1362, 1364), True, 'from macrel import vast11data as vast\n'), ((1466, 1489), 'time.gmtime', 'time.gmtime', (['start_time'], {}), '(start_time)\n', (1477, 1489), False, 'import time\n'), ((506, 531), 'macrel.graphs.ConnectionTally', 'graphs.ConnectionTally', (['N'], {}), '(N)\n', (528, 531), False, 'from macrel import graphs\n'), ((631, 667), 'macrel.vast11data.NODE_BY_IP.get', 'vast.NODE_BY_IP.get', (['event.source_ip'], {}), '(event.source_ip)\n', (650, 667), True, 'from macrel import vast11data as vast\n'), ((676, 710), 'macrel.vast11data.NODE_BY_IP.get', 'vast.NODE_BY_IP.get', (['event.dest_ip'], {}), '(event.dest_ip)\n', (695, 710), True, 'from macrel import vast11data as vast\n'), ((1944, 1967), 'numpy.asarray', 'np.asarray', (['start_times'], {}), '(start_times)\n', (1954, 1967), True, 'import numpy as np\n')]
|
import sys
from pathlib import Path
import os
import torch
from torch.optim import Adam
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from networks.critic import Critic
from networks.actor import NoisyActor, CategoricalActor, GaussianActor
base_dir = Path(__file__).resolve().parent.parent.parent
sys.path.append(str(base_dir))
from common.buffer import Replay_buffer as buffer
def get_trajectory_property(): #for adding terms to the memory buffer
return ["action"]
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
def update_params(optim, loss, clip=False, param_list=False,retain_graph=False):
optim.zero_grad()
loss.backward(retain_graph=retain_graph)
if clip is not False:
for i in param_list:
torch.nn.utils.clip_grad_norm_(i, clip)
optim.step()
class SAC(object):
def __init__(self, args):
self.state_dim = args.obs_space
self.action_dim = args.action_space
self.gamma = args.gamma
self.tau = args.tau
self.action_continuous = args.action_continuous
self.batch_size = args.batch_size
self.hidden_size = args.hidden_size
self.actor_lr = args.a_lr
self.critic_lr = args.c_lr
self.alpha_lr = args.alpha_lr
self.buffer_size = args.buffer_capacity
self.policy_type = 'discrete' if (not self.action_continuous) else args.policy_type #deterministic or gaussian policy
self.device = 'cpu'
given_critic = Critic #need to set a default value
self.preset_alpha = args.alpha
if self.policy_type == 'deterministic':
self.tune_entropy = False
hid_layer = args.num_hid_layer
self.policy = NoisyActor(state_dim = self.state_dim, hidden_dim=self.hidden_size, out_dim=1,
num_hidden_layer=hid_layer).to(self.device)
self.policy_target = NoisyActor(state_dim = self.state_dim, hidden_dim=self.hidden_size, out_dim=1,
num_hidden_layer=hid_layer).to(self.device)
self.policy_target.load_state_dict(self.policy.state_dict())
self.q1 = given_critic(self.state_dim+self.action_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q1.apply(weights_init_)
self.q1_target = given_critic(self.state_dim+self.action_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q1_target.load_state_dict(self.q1.state_dict())
self.critic_optim = Adam(self.q1.parameters(), lr = self.critic_lr)
elif self.policy_type == 'discrete':
self.tune_entropy = args.tune_entropy
self.target_entropy_ratio = args.target_entropy_ratio
self.policy = CategoricalActor(self.state_dim, self.hidden_size, self.action_dim).to(self.device)
hid_layer = args.num_hid_layer
self.q1 = given_critic(self.state_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q1.apply(weights_init_)
self.q2 = given_critic(self.state_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q2.apply(weights_init_)
self.q1_target = given_critic(self.state_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q2_target = given_critic(self.state_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q1_target.load_state_dict(self.q1.state_dict())
self.q2_target.load_state_dict(self.q2.state_dict())
self.critic_optim = Adam(list(self.q1.parameters()) + list(self.q2.parameters()), lr=self.critic_lr)
elif self.policy_type == 'gaussian':
self.tune_entropy = args.tune_entropy
self.target_entropy_ratio = args.target_entropy_ratio
self.policy = GaussianActor(self.state_dim, self.hidden_size, 1, tanh = False).to(self.device)
#self.policy_target = GaussianActor(self.state_dim, self.hidden_size, 1, tanh = False).to(self.device)
hid_layer = args.num_hid_layer
self.q1 = given_critic(self.state_dim+self.action_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q1.apply(weights_init_)
self.critic_optim = Adam(self.q1.parameters(), lr = self.critic_lr)
self.q1_target = given_critic(self.state_dim+self.action_dim, self.action_dim, self.hidden_size, hid_layer).to(self.device)
self.q1_target.load_state_dict(self.q1.state_dict())
else:
raise NotImplementedError
self.eps = args.epsilon
self.eps_end = args.epsilon_end
self.eps_delay = 1 / (args.max_episodes * 100)
self.learn_step_counter = 0
self.target_replace_iter = args.target_replace
self.policy_optim = Adam(self.policy.parameters(), lr = self.actor_lr)
trajectory_property = get_trajectory_property()
self.memory = buffer(self.buffer_size, trajectory_property)
self.memory.init_item_buffers()
if self.tune_entropy:
self.target_entropy = -np.log(1./self.action_dim) * self.target_entropy_ratio
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
#self.alpha = self.log_alpha.exp()
self.alpha = torch.tensor([self.preset_alpha])
self.alpha_optim = Adam([self.log_alpha], lr=self.alpha_lr)
else:
            self.alpha = torch.tensor([self.preset_alpha]) # coefficient for the entropy term
def choose_action(self, state, train = True):
state = torch.tensor(state, dtype=torch.float).view(1, -1)
if self.policy_type == 'discrete':
if train:
action, _, _, _ = self.policy.sample(state)
action = action.item()
self.add_experience({"action": action})
else:
_, _, _, action = self.policy.sample(state)
action = action.item()
return {'action': action}
elif self.policy_type == 'deterministic':
if train:
_,_,_,action = self.policy.sample(state)
action = action.item()
self.add_experience({"action": action})
else:
_,_,_,action = self.policy.sample(state)
action = action.item()
return {'action':action}
elif self.policy_type == 'gaussian':
if train:
action, _, _ = self.policy.sample(state)
action = action.detach().numpy().squeeze(1)
self.add_experience({"action": action})
else:
_, _, action = self.policy.sample(state)
action = action.item()
return {'action':action}
else:
raise NotImplementedError
def add_experience(self, output):
agent_id = 0
for k, v in output.items():
self.memory.insert(k, agent_id, v)
def critic_loss(self, current_state, batch_action, next_state, reward, mask):
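        # Soft Bellman target for the discrete case:
        # r + mask * gamma * sum_a' pi(a'|s') * (min(Q1_target, Q2_target)(s', a') - alpha * log pi(a'|s'))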
with torch.no_grad():
next_state_action, next_state_pi, next_state_log_pi, _ = self.policy.sample(next_state)
#qf1_next_target, qf2_next_target = self.critic_target(next_state)
qf1_next_target = self.q1_target(next_state)
qf2_next_target = self.q2_target(next_state)
min_qf_next_target = next_state_pi * (torch.min(qf1_next_target, qf2_next_target) - self.alpha
* next_state_log_pi) # V function
min_qf_next_target = min_qf_next_target.sum(dim=1, keepdim=True)
next_q_value = reward + mask * self.gamma * (min_qf_next_target)
#qf1, qf2 = self.critic(current_state) # Two Q-functions to mitigate positive bias in the policy improvement step, [batch, action_num]
qf1 = self.q1(current_state)
qf2 = self.q2(current_state)
qf1 = qf1.gather(1, batch_action.long())
        qf2 = qf2.gather(1, batch_action.long()) #[batch, 1] , pick the action-value for the given batched actions
qf1_loss = torch.mean((qf1 - next_q_value).pow(2))
qf2_loss = torch.mean((qf2 - next_q_value).pow(2))
return qf1_loss, qf2_loss
def policy_loss(self, current_state):
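        # Policy objective for the discrete case: E_s[ sum_a pi(a|s) * (alpha * log pi(a|s) - min(Q1, Q2)(s, a)) ],
        # minimised with the critics held fixed (no gradient flows through the Q-values here).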
with torch.no_grad():
#qf1_pi, qf2_pi = self.critic(current_state)
qf1_pi = self.q1(current_state)
qf2_pi = self.q2(current_state)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
pi, prob, log_pi, _ = self.policy.sample(current_state)
inside_term = self.alpha.detach() * log_pi - min_qf_pi # [batch, action_dim]
policy_loss = ((prob * inside_term).sum(1)).mean()
return policy_loss, prob.detach(), log_pi.detach()
def alpha_loss(self, action_prob, action_logprob):
if self.tune_entropy:
entropies = -torch.sum(action_prob * action_logprob, dim=1, keepdim=True) #[batch, 1]
entropies = entropies.detach()
alpha_loss = -torch.mean(self.log_alpha * (self.target_entropy - entropies))
alpha_logs = self.log_alpha.exp().detach()
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_logs = self.alpha.detach().clone()
return alpha_loss, alpha_logs
def learn(self):
data = self.memory.sample(self.batch_size)
transitions = {
"o_0": np.array(data['states']),
"o_next_0": np.array(data['states_next']),
"r_0": np.array(data['rewards']).reshape(-1, 1),
"u_0": np.array(data['action']),
"d_0": np.array(data['dones']).reshape(-1, 1),
}
obs = torch.tensor(transitions["o_0"], dtype=torch.float)
obs_ = torch.tensor(transitions["o_next_0"], dtype=torch.float)
action = torch.tensor(transitions["u_0"], dtype=torch.long).view(self.batch_size, -1)
reward = torch.tensor(transitions["r_0"], dtype=torch.float)
done = torch.tensor(transitions["d_0"], dtype=torch.float)
if self.policy_type == 'discrete':
qf1_loss, qf2_loss = self.critic_loss(obs, action, obs_, reward, (1-done))
policy_loss, prob, log_pi = self.policy_loss(obs)
alpha_loss, alpha_logs = self.alpha_loss(prob, log_pi)
qf_loss = qf1_loss + qf2_loss
update_params(self.critic_optim,qf_loss)
update_params(self.policy_optim, policy_loss)
if self.tune_entropy:
update_params(self.alpha_optim, alpha_loss)
self.alpha = self.log_alpha.exp().detach()
if self.learn_step_counter % self.target_replace_iter == 0:
#self.critic_target.load_state_dict(self.critic.state_dict())
self.q1_target.load_state_dict(self.q1.state_dict())
self.q2_target.load_state_dict(self.q2.state_dict())
self.learn_step_counter += 1
elif self.policy_type == 'deterministic':
current_q = self.q1(torch.cat([obs, action], 1))
target_next_action = self.policy_target(obs_)
target_next_q = self.q1_target(torch.cat([obs_, target_next_action], 1))
next_q_value = reward + (1-done) * self.gamma * target_next_q
qf_loss = F.mse_loss(current_q, next_q_value.detach())
self.critic_optim.zero_grad()
qf_loss.backward()
self.critic_optim.step()
_, _, _, current_action = self.policy.sample(obs)
qf_pi = self.q1(torch.cat([obs, current_action], 1))
policy_loss = -qf_pi.mean()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
if self.learn_step_counter % self.target_replace_iter == 0:
for param, target_param in zip(self.q1.parameters(), self.q1_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1.-self.tau) * target_param.data)
for param, target_param in zip(self.policy.parameters(), self.policy_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1.-self.tau) * target_param.data)
elif self.policy_type == 'gaussian':
action = torch.tensor(transitions["u_0"], dtype=torch.float).view(self.batch_size, -1)
with torch.no_grad():
# next_action, next_action_logprob, _ = self.policy_target.sample(obs_)
next_action, next_action_logprob, _ = self.policy.sample(obs_)
target_next_q = self.q1_target(
torch.cat([obs_, next_action], 1)) - self.alpha * next_action_logprob
next_q_value = reward + (1 - done) * self.gamma * target_next_q
qf1 = self.q1(torch.cat([obs, action], 1))
qf_loss = F.mse_loss(qf1, next_q_value)
self.critic_optim.zero_grad()
qf_loss.backward()
self.critic_optim.step()
pi, log_pi, _ = self.policy.sample(obs)
qf_pi = self.q1(torch.cat([obs, pi], 1))
policy_loss = ((self.alpha * log_pi) - qf_pi).mean()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
if self.tune_entropy:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
else:
pass
if self.learn_step_counter % self.target_replace_iter == 0:
for param, target_param in zip(self.q1.parameters(), self.q1_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1. - self.tau) * target_param.data)
# for param, target_param in zip(self.policy.parameters(), self.policy_target.parameters()):
# target_param.data.copy_(self.tau * param.data + (1.-self.tau) * target_param.data)
else:
raise NotImplementedError
def save(self, save_path, episode):
base_path = os.path.join(save_path, 'trained_model')
if not os.path.exists(base_path):
os.makedirs(base_path)
model_actor_path = os.path.join(base_path, "actor_" + str(episode) + ".pth")
torch.save(self.policy.state_dict(), model_actor_path)
def load(self, file):
self.policy.load_state_dict(torch.load(file))
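# Minimal usage sketch (illustrative only; the hyper-parameter names mirror the attributes read
# in __init__ above, but the values below are assumptions rather than a recommended configuration):
#
#   from argparse import Namespace
#   args = Namespace(
#       obs_space=4, action_space=2, gamma=0.99, tau=0.005, action_continuous=False,
#       batch_size=64, hidden_size=128, a_lr=3e-4, c_lr=3e-4, alpha_lr=3e-4,
#       buffer_capacity=100000, policy_type='gaussian', alpha=0.2, num_hid_layer=2,
#       tune_entropy=True, target_entropy_ratio=0.98, epsilon=1.0, epsilon_end=0.05,
#       max_episodes=1000, target_replace=100)
#   agent = SAC(args)
#   action = agent.choose_action(state, train=True)['action']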
|
[
"torch.nn.init.constant_",
"torch.nn.utils.clip_grad_norm_",
"numpy.log",
"torch.min",
"numpy.array",
"torch.sum",
"networks.actor.NoisyActor",
"os.path.exists",
"networks.actor.CategoricalActor",
"pathlib.Path",
"torch.nn.init.xavier_uniform_",
"torch.mean",
"networks.actor.GaussianActor",
"torch.nn.functional.mse_loss",
"torch.cat",
"torch.optim.Adam",
"os.makedirs",
"torch.load",
"os.path.join",
"common.buffer.Replay_buffer",
"torch.tensor",
"torch.no_grad",
"torch.zeros"
] |
[((569, 616), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight'], {'gain': '(1)'}), '(m.weight, gain=1)\n', (598, 616), False, 'import torch\n'), ((625, 659), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (648, 659), False, 'import torch\n'), ((5158, 5203), 'common.buffer.Replay_buffer', 'buffer', (['self.buffer_size', 'trajectory_property'], {}), '(self.buffer_size, trajectory_property)\n', (5164, 5203), True, 'from common.buffer import Replay_buffer as buffer\n'), ((9964, 10015), 'torch.tensor', 'torch.tensor', (["transitions['o_0']"], {'dtype': 'torch.float'}), "(transitions['o_0'], dtype=torch.float)\n", (9976, 10015), False, 'import torch\n'), ((10031, 10087), 'torch.tensor', 'torch.tensor', (["transitions['o_next_0']"], {'dtype': 'torch.float'}), "(transitions['o_next_0'], dtype=torch.float)\n", (10043, 10087), False, 'import torch\n'), ((10199, 10250), 'torch.tensor', 'torch.tensor', (["transitions['r_0']"], {'dtype': 'torch.float'}), "(transitions['r_0'], dtype=torch.float)\n", (10211, 10250), False, 'import torch\n'), ((10266, 10317), 'torch.tensor', 'torch.tensor', (["transitions['d_0']"], {'dtype': 'torch.float'}), "(transitions['d_0'], dtype=torch.float)\n", (10278, 10317), False, 'import torch\n'), ((14511, 14551), 'os.path.join', 'os.path.join', (['save_path', '"""trained_model"""'], {}), "(save_path, 'trained_model')\n", (14523, 14551), False, 'import os\n'), ((876, 915), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['i', 'clip'], {}), '(i, clip)\n', (906, 915), False, 'import torch\n'), ((5394, 5448), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)', 'device': 'self.device'}), '(1, requires_grad=True, device=self.device)\n', (5405, 5448), False, 'import torch\n'), ((5521, 5554), 'torch.tensor', 'torch.tensor', (['[self.preset_alpha]'], {}), '([self.preset_alpha])\n', (5533, 5554), False, 'import torch\n'), ((5586, 5626), 'torch.optim.Adam', 'Adam', (['[self.log_alpha]'], {'lr': 'self.alpha_lr'}), '([self.log_alpha], lr=self.alpha_lr)\n', (5590, 5626), False, 'from torch.optim import Adam\n'), ((5666, 5699), 'torch.tensor', 'torch.tensor', (['[self.preset_alpha]'], {}), '([self.preset_alpha])\n', (5678, 5699), False, 'import torch\n'), ((7291, 7306), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7304, 7306), False, 'import torch\n'), ((8549, 8564), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8562, 8564), False, 'import torch\n'), ((8736, 8761), 'torch.min', 'torch.min', (['qf1_pi', 'qf2_pi'], {}), '(qf1_pi, qf2_pi)\n', (8745, 8761), False, 'import torch\n'), ((9693, 9717), 'numpy.array', 'np.array', (["data['states']"], {}), "(data['states'])\n", (9701, 9717), True, 'import numpy as np\n'), ((9743, 9772), 'numpy.array', 'np.array', (["data['states_next']"], {}), "(data['states_next'])\n", (9751, 9772), True, 'import numpy as np\n'), ((9854, 9878), 'numpy.array', 'np.array', (["data['action']"], {}), "(data['action'])\n", (9862, 9878), True, 'import numpy as np\n'), ((14567, 14592), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (14581, 14592), False, 'import os\n'), ((14606, 14628), 'os.makedirs', 'os.makedirs', (['base_path'], {}), '(base_path)\n', (14617, 14628), False, 'import os\n'), ((14840, 14856), 'torch.load', 'torch.load', (['file'], {}), '(file)\n', (14850, 14856), False, 'import torch\n'), ((5801, 5839), 'torch.tensor', 'torch.tensor', (['state'], {'dtype': 'torch.float'}), '(state, 
dtype=torch.float)\n', (5813, 5839), False, 'import torch\n'), ((9144, 9204), 'torch.sum', 'torch.sum', (['(action_prob * action_logprob)'], {'dim': '(1)', 'keepdim': '(True)'}), '(action_prob * action_logprob, dim=1, keepdim=True)\n', (9153, 9204), False, 'import torch\n'), ((9292, 9354), 'torch.mean', 'torch.mean', (['(self.log_alpha * (self.target_entropy - entropies))'], {}), '(self.log_alpha * (self.target_entropy - entropies))\n', (9302, 9354), False, 'import torch\n'), ((10105, 10155), 'torch.tensor', 'torch.tensor', (["transitions['u_0']"], {'dtype': 'torch.long'}), "(transitions['u_0'], dtype=torch.long)\n", (10117, 10155), False, 'import torch\n'), ((281, 295), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (285, 295), False, 'from pathlib import Path\n'), ((1849, 1957), 'networks.actor.NoisyActor', 'NoisyActor', ([], {'state_dim': 'self.state_dim', 'hidden_dim': 'self.hidden_size', 'out_dim': '(1)', 'num_hidden_layer': 'hid_layer'}), '(state_dim=self.state_dim, hidden_dim=self.hidden_size, out_dim=1,\n num_hidden_layer=hid_layer)\n', (1859, 1957), False, 'from networks.actor import NoisyActor, CategoricalActor, GaussianActor\n'), ((2042, 2150), 'networks.actor.NoisyActor', 'NoisyActor', ([], {'state_dim': 'self.state_dim', 'hidden_dim': 'self.hidden_size', 'out_dim': '(1)', 'num_hidden_layer': 'hid_layer'}), '(state_dim=self.state_dim, hidden_dim=self.hidden_size, out_dim=1,\n num_hidden_layer=hid_layer)\n', (2052, 2150), False, 'from networks.actor import NoisyActor, CategoricalActor, GaussianActor\n'), ((5310, 5339), 'numpy.log', 'np.log', (['(1.0 / self.action_dim)'], {}), '(1.0 / self.action_dim)\n', (5316, 5339), True, 'import numpy as np\n'), ((7652, 7695), 'torch.min', 'torch.min', (['qf1_next_target', 'qf2_next_target'], {}), '(qf1_next_target, qf2_next_target)\n', (7661, 7695), False, 'import torch\n'), ((9450, 9467), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (9462, 9467), False, 'import torch\n'), ((9793, 9818), 'numpy.array', 'np.array', (["data['rewards']"], {}), "(data['rewards'])\n", (9801, 9818), True, 'import numpy as np\n'), ((9899, 9922), 'numpy.array', 'np.array', (["data['dones']"], {}), "(data['dones'])\n", (9907, 9922), True, 'import numpy as np\n'), ((11299, 11326), 'torch.cat', 'torch.cat', (['[obs, action]', '(1)'], {}), '([obs, action], 1)\n', (11308, 11326), False, 'import torch\n'), ((11431, 11471), 'torch.cat', 'torch.cat', (['[obs_, target_next_action]', '(1)'], {}), '([obs_, target_next_action], 1)\n', (11440, 11471), False, 'import torch\n'), ((11816, 11851), 'torch.cat', 'torch.cat', (['[obs, current_action]', '(1)'], {}), '([obs, current_action], 1)\n', (11825, 11851), False, 'import torch\n'), ((13135, 13164), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['qf1', 'next_q_value'], {}), '(qf1, next_q_value)\n', (13145, 13164), True, 'import torch.nn.functional as F\n'), ((2924, 2991), 'networks.actor.CategoricalActor', 'CategoricalActor', (['self.state_dim', 'self.hidden_size', 'self.action_dim'], {}), '(self.state_dim, self.hidden_size, self.action_dim)\n', (2940, 2991), False, 'from networks.actor import NoisyActor, CategoricalActor, GaussianActor\n'), ((12656, 12671), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12669, 12671), False, 'import torch\n'), ((13084, 13111), 'torch.cat', 'torch.cat', (['[obs, action]', '(1)'], {}), '([obs, action], 1)\n', (13093, 13111), False, 'import torch\n'), ((13357, 13380), 'torch.cat', 'torch.cat', (['[obs, pi]', '(1)'], {}), '([obs, pi], 1)\n', (13366, 13380), False, 
'import torch\n'), ((4033, 4095), 'networks.actor.GaussianActor', 'GaussianActor', (['self.state_dim', 'self.hidden_size', '(1)'], {'tanh': '(False)'}), '(self.state_dim, self.hidden_size, 1, tanh=False)\n', (4046, 4095), False, 'from networks.actor import NoisyActor, CategoricalActor, GaussianActor\n'), ((12560, 12611), 'torch.tensor', 'torch.tensor', (["transitions['u_0']"], {'dtype': 'torch.float'}), "(transitions['u_0'], dtype=torch.float)\n", (12572, 12611), False, 'import torch\n'), ((12908, 12941), 'torch.cat', 'torch.cat', (['[obs_, next_action]', '(1)'], {}), '([obs_, next_action], 1)\n', (12917, 12941), False, 'import torch\n')]
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pickle
import numpy as np
from IPython import embed
from matplotlib import pylab as plt
import shared
import mmd_experiment
def process_mmd_experiment(width_class):
results_file_name = mmd_experiment.results_file_stub + "_" + width_class + ".pickle"
results = pickle.load( open(results_file_name,'rb' ) )
callibration_mmds = np.loadtxt('results/callibration_mmds.csv')
mean_callibration = np.mean(callibration_mmds)
mmd_squareds = results['mmd_squareds']
hidden_layer_numbers = results['hidden_layer_numbers']
hidden_unit_numbers = results['hidden_unit_numbers']
num_repeats = mmd_squareds.shape[2]
mean_mmds = np.mean( mmd_squareds, axis = 2 )
std_mmds = np.std( mmd_squareds, axis = 2 ) / np.sqrt(num_repeats)
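    # std_mmds is the standard deviation over repeats divided by sqrt(num_repeats), i.e. the
    # standard error of the mean; the error bars plotted below span 2 standard errors (~95% interval).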
plt.figure()
for hidden_layer_number, index in zip(hidden_layer_numbers,range(len(hidden_layer_numbers))):
if hidden_layer_number==1:
layer_string = ' hidden layer'
else:
layer_string = ' hidden layers'
line_name = str(hidden_layer_number) + layer_string
plt.errorbar( hidden_unit_numbers, mean_mmds[:,index], yerr = 2.*std_mmds[:,index], label = line_name)
plt.xlabel('Number of hidden units per layer')
plt.xlim([0,60])
plt.ylabel('MMD SQUARED(GP, NN)')
plt.ylim([0.,0.02])
plt.axhline(y=mean_callibration, color='r', linestyle='--')
plt.legend()
output_file_name = "../figures/mmds_" + width_class + ".pdf"
plt.savefig(output_file_name)
embed()
plt.show()
if __name__ == '__main__':
if len(sys.argv)!=2 or sys.argv[1] not in shared.valid_width_classes:
print("Usage: ", sys.argv[0], " <width_class>")
sys.exit(-1)
process_mmd_experiment(sys.argv[1])
|
[
"numpy.mean",
"matplotlib.pylab.xlim",
"matplotlib.pylab.savefig",
"numpy.sqrt",
"matplotlib.pylab.errorbar",
"sys.exit",
"matplotlib.pylab.figure",
"matplotlib.pylab.axhline",
"matplotlib.pylab.ylim",
"matplotlib.pylab.legend",
"IPython.embed",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.show",
"numpy.std",
"numpy.loadtxt",
"matplotlib.pylab.ylabel"
] |
[((930, 973), 'numpy.loadtxt', 'np.loadtxt', (['"""results/callibration_mmds.csv"""'], {}), "('results/callibration_mmds.csv')\n", (940, 973), True, 'import numpy as np\n'), ((998, 1024), 'numpy.mean', 'np.mean', (['callibration_mmds'], {}), '(callibration_mmds)\n', (1005, 1024), True, 'import numpy as np\n'), ((1250, 1279), 'numpy.mean', 'np.mean', (['mmd_squareds'], {'axis': '(2)'}), '(mmd_squareds, axis=2)\n', (1257, 1279), True, 'import numpy as np\n'), ((1364, 1376), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (1374, 1376), True, 'from matplotlib import pylab as plt\n'), ((1791, 1837), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Number of hidden units per layer"""'], {}), "('Number of hidden units per layer')\n", (1801, 1837), True, 'from matplotlib import pylab as plt\n'), ((1842, 1859), 'matplotlib.pylab.xlim', 'plt.xlim', (['[0, 60]'], {}), '([0, 60])\n', (1850, 1859), True, 'from matplotlib import pylab as plt\n'), ((1863, 1896), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""MMD SQUARED(GP, NN)"""'], {}), "('MMD SQUARED(GP, NN)')\n", (1873, 1896), True, 'from matplotlib import pylab as plt\n'), ((1901, 1922), 'matplotlib.pylab.ylim', 'plt.ylim', (['[0.0, 0.02]'], {}), '([0.0, 0.02])\n', (1909, 1922), True, 'from matplotlib import pylab as plt\n'), ((1925, 1984), 'matplotlib.pylab.axhline', 'plt.axhline', ([], {'y': 'mean_callibration', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(y=mean_callibration, color='r', linestyle='--')\n", (1936, 1984), True, 'from matplotlib import pylab as plt\n'), ((1989, 2001), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (1999, 2001), True, 'from matplotlib import pylab as plt\n'), ((2071, 2100), 'matplotlib.pylab.savefig', 'plt.savefig', (['output_file_name'], {}), '(output_file_name)\n', (2082, 2100), True, 'from matplotlib import pylab as plt\n'), ((2105, 2112), 'IPython.embed', 'embed', ([], {}), '()\n', (2110, 2112), False, 'from IPython import embed\n'), ((2117, 2127), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2125, 2127), True, 'from matplotlib import pylab as plt\n'), ((1299, 1327), 'numpy.std', 'np.std', (['mmd_squareds'], {'axis': '(2)'}), '(mmd_squareds, axis=2)\n', (1305, 1327), True, 'import numpy as np\n'), ((1334, 1354), 'numpy.sqrt', 'np.sqrt', (['num_repeats'], {}), '(num_repeats)\n', (1341, 1354), True, 'import numpy as np\n'), ((1684, 1791), 'matplotlib.pylab.errorbar', 'plt.errorbar', (['hidden_unit_numbers', 'mean_mmds[:, index]'], {'yerr': '(2.0 * std_mmds[:, index])', 'label': 'line_name'}), '(hidden_unit_numbers, mean_mmds[:, index], yerr=2.0 * std_mmds[\n :, index], label=line_name)\n', (1696, 1791), True, 'from matplotlib import pylab as plt\n'), ((2294, 2306), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2302, 2306), False, 'import sys\n')]
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file (assumed to be provided by the execution environment)
path
#Code starts here
data=pd.read_csv(path)
data.rename(columns={'Total':'Total_Medals'},inplace=True)
data.head(10)
# --------------
#Code starts here
data['Better_Event']=np.where(data['Total_Summer']==data['Total_Winter'],'Both',(np.where(data['Total_Summer']>data['Total_Winter'],'Summer','Winter')) )
better_event=data['Better_Event'].value_counts().idxmax()
# --------------
#Code starts here
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries.drop(top_countries.index[-1],inplace=True)
def top_ten(variable1,variable2):
country_list=[]
country_list=variable1.nlargest(10,variable2).iloc[:,0]
return country_list
top_10_summer=list(top_ten(top_countries,'Total_Summer'))
top_10_winter=list(top_ten(top_countries,'Total_Winter'))
top_10=list(top_ten(top_countries,'Total_Medals'))
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
# --------------
#Code starts here
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df=data[data['Country_Name'].isin(top_10_winter)]
print(winter_df)
top_df=data[data['Country_Name'].isin(top_10)]
print(top_df)
fig, (ax_1,ax_2,ax_3)=plt.subplots(3,1)
summer_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax_1)
winter_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax_2)
top_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax_3)
# --------------
#Code starts here
summer_df['Golden_Ratio']=summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio =summer_df['Golden_Ratio'].max()
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio']==summer_max_ratio,'Country_Name'].iloc[0]
winter_df['Golden_Ratio']=winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio =winter_df['Golden_Ratio'].max()
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio']==winter_max_ratio,'Country_Name'].iloc[0]
top_df['Golden_Ratio']=top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio =top_df['Golden_Ratio'].max()
top_country_gold=top_df.loc[top_df['Golden_Ratio']==top_max_ratio,'Country_Name'].iloc[0]
# --------------
#Code starts here
data_1=data.drop(data.index[-1])
data_1['Total_Points']=(data_1['Gold_Total']*3) + (data_1['Silver_Total']*2)+data_1['Bronze_Total']
most_points=data_1['Total_Points'].max()
best_country=data_1.loc[data_1['Total_Points']==most_points,'Country_Name'].iloc[0]
# --------------
#Code starts here
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar(stacked=True)
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
|
[
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] |
[((169, 186), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (180, 186), True, 'import pandas as pd\n'), ((1371, 1389), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (1383, 1389), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (2817, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2836, 2862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (2846, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2887), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2874, 2887), True, 'import matplotlib.pyplot as plt\n'), ((387, 460), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (395, 460), True, 'import numpy as np\n')]
|
from numpy import ndarray, array
from electripy.physics.charges import PointCharge
class _ChargesSet:
"""
A _ChargesSet instance is a group of charges. The electric
field at a given point can be calculated as the sum of each
electric field at that point for every charge in the charge
set.
"""
def __init__(self, charges: list[PointCharge]) -> None:
self.charges = charges
def electric_field(self, point: ndarray) -> ndarray:
"""
Returns the electric field at the specified point.
"""
ef = array([0.0, 0.0])
for charge in self.charges:
ef += charge.electric_field(point)
return ef
def electric_force(self, charge: PointCharge) -> ndarray:
"""
Returns the force of the electric field exerted
on the charge.
"""
ef = self.electric_field(charge.position)
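        # Coulomb force on the charge: F = q * E, with E evaluated at the charge's position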
return ef * charge.charge
def __getitem__(self, index):
return self.charges[index]
class ChargeDistribution:
def __init__(self):
"""
There is one group for each charge in charges.
        Each group is a two-element list. The first element is
        a charge, and the second element is the _ChargesSet instance
        containing all charges in charges except the charge itself.
"""
self.groups = []
self.charges_set = _ChargesSet([])
def add_charge(self, charge: PointCharge) -> None:
"""
Adds the charge to charges_set and updates the groups.
"""
self.charges_set.charges.append(charge)
self._update_groups(self.charges_set.charges)
def remove_charge(self, charge: PointCharge) -> None:
"""
        Removes the charge from charges_set and updates the groups.
"""
self.charges_set.charges.remove(charge)
self._update_groups(self.charges_set.charges)
def _update_groups(self, charges: list[PointCharge]) -> None:
"""
        Let X be a charge from the charge distribution. Computing X's electric
        force involves computing the electric force exerted on X by all
        the other charges in the charge distribution.
        This means that, in order to compute the electric force on X,
        we need a two-element group where the first component is
        the charge X itself and the second component is a _ChargesSet
        instance containing all charges in the charge distribution except
        X. This pair is called a 'group'.
"""
self.groups = []
for charge in charges:
self.groups.append(
[
charge,
_ChargesSet([c for c in charges if c is not charge]),
]
)
def get_electric_forces(self) -> list[tuple[PointCharge, ndarray]]:
"""
Returns a list of electric forces. There is one electric force for
each charge in charges. Each electric force is a two dimensional
vector. The first element is the charge and the second element is
the electric force the other charges make on it.
"""
electric_forces = []
for group in self.groups:
electric_forces.append((group[0], group[1].electric_force(group[0])))
return electric_forces
def get_electric_field(self, position: ndarray) -> ndarray:
"""
        Returns the electric field at the given position.
"""
return self.charges_set.electric_field(position)
def __len__(self):
return len(self.charges_set.charges)
def __getitem__(self, index):
return self.charges_set[index]
|
[
"numpy.array"
] |
[((566, 583), 'numpy.array', 'array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (571, 583), False, 'from numpy import ndarray, array\n')]
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, TrafficLightArray , TrafficLight
from std_msgs.msg import Int32
import numpy as np
from threading import Thread, Lock
from copy import deepcopy
class GT_TL_Pub(object):
def __init__(self):
rospy.init_node('gt_TL_Publisher')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.gt_traffic_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.gt_TL_pub = rospy.Publisher('traffic_waypoint', Int32, queue_size=1)
self.mutex = Lock()
self.base_waypoints = None
self.current_pose = None
self.next_waypoint_id = None
self.traffic_light_waypoint_id = None
self.gt_tl_waypoint_id = -1
self.t_0 = rospy.get_time()
# Loop Event for updating final_waypoints
rate = rospy.Rate(40)
while not rospy.is_shutdown():
self.mutex.acquire()
self.publish_gt_TL_waypoint()
self.mutex.release()
rate.sleep()
def publish_gt_TL_waypoint(self):
if self.gt_tl_waypoint_id is not None:
self.gt_TL_pub.publish(data=self.gt_tl_waypoint_id)
# rospy.loginfo("tl waypoint id = %d", self.gt_tl_waypoint_id)
def nearest_waypoint(self,x,y,waypoints_list):
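        # Linear scan: return the index of the waypoint with the smallest squared Euclidean distance to (x, y)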
min_dist = float('inf')
nearest_point_id = -1
for id , waypoint in enumerate(waypoints_list.waypoints):
waypoint_x = waypoint.pose.pose.position.x
waypoint_y = waypoint.pose.pose.position.y
dist = (waypoint_x-x)**2 + (waypoint_y-y)**2
if dist < min_dist:
min_dist = dist
nearest_point_id = id
return nearest_point_id
def gt_traffic_cb(self,msg):
# t_0 = rospy.get_time()
self.mutex.acquire()
        # process ground truth information to get the nearest traffic light and its corresponding waypoint id
self.gt_tl_waypoint_id = -1
trafficlight_array = msg.lights
# rospy.loginfo("state = {}".format(np.uint8(trafficlight_array[0].state)))
if self.base_waypoints is not None and self.current_pose is not None: #and not trafficlight_array[0].state:
current_pose_x = self.current_pose.pose.position.x
current_pose_y = self.current_pose.pose.position.y
min_dist = float('inf')
nearest_point_id = -1
for id in range(len(trafficlight_array)):
tl_x = trafficlight_array[id].pose.pose.position.x
tl_y = trafficlight_array[id].pose.pose.position.y
dist = (current_pose_x - tl_x) ** 2 + (current_pose_y - tl_y) ** 2
if dist < min_dist:
min_dist = dist
nearest_point_id = id
if nearest_point_id != -1 and not np.uint8(trafficlight_array[0].state):
self.gt_tl_waypoint_id = self.nearest_waypoint(
trafficlight_array[nearest_point_id].pose.pose.position.x,
trafficlight_array[nearest_point_id].pose.pose.position.y,
self.base_waypoints)
elif np.uint8(trafficlight_array[0].state):
self.gt_tl_waypoint_id = -1
self.mutex.release()
# rospy.loginfo("processig time = {}".format(t_0 - rospy.get_time()))
def pose_cb(self, msg):
self.current_pose = msg
def waypoints_cb(self, waypoints):
self.base_waypoints = waypoints
if __name__ == '__main__':
try:
GT_TL_Pub()
except rospy.ROSInterruptException:
rospy.logerr('Could not start GT_TL_Pub node.')
|
[
"rospy.logerr",
"numpy.uint8",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_time",
"threading.Lock",
"rospy.Rate",
"rospy.Publisher"
] |
[((312, 346), 'rospy.init_node', 'rospy.init_node', (['"""gt_TL_Publisher"""'], {}), "('gt_TL_Publisher')\n", (327, 346), False, 'import rospy\n'), ((356, 416), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (372, 416), False, 'import rospy\n'), ((425, 485), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (441, 485), False, 'import rospy\n'), ((494, 581), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/vehicle/traffic_lights"""', 'TrafficLightArray', 'self.gt_traffic_cb'], {}), "('/vehicle/traffic_lights', TrafficLightArray, self.\n gt_traffic_cb)\n", (510, 581), False, 'import rospy\n'), ((688, 744), 'rospy.Publisher', 'rospy.Publisher', (['"""traffic_waypoint"""', 'Int32'], {'queue_size': '(1)'}), "('traffic_waypoint', Int32, queue_size=1)\n", (703, 744), False, 'import rospy\n'), ((767, 773), 'threading.Lock', 'Lock', ([], {}), '()\n', (771, 773), False, 'from threading import Thread, Lock\n'), ((980, 996), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (994, 996), False, 'import rospy\n'), ((1063, 1077), 'rospy.Rate', 'rospy.Rate', (['(40)'], {}), '(40)\n', (1073, 1077), False, 'import rospy\n'), ((1096, 1115), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1113, 1115), False, 'import rospy\n'), ((3831, 3878), 'rospy.logerr', 'rospy.logerr', (['"""Could not start GT_TL_Pub node."""'], {}), "('Could not start GT_TL_Pub node.')\n", (3843, 3878), False, 'import rospy\n'), ((3392, 3429), 'numpy.uint8', 'np.uint8', (['trafficlight_array[0].state'], {}), '(trafficlight_array[0].state)\n', (3400, 3429), True, 'import numpy as np\n'), ((3073, 3110), 'numpy.uint8', 'np.uint8', (['trafficlight_array[0].state'], {}), '(trafficlight_array[0].state)\n', (3081, 3110), True, 'import numpy as np\n')]
|
import numpy as np
import torch
class ModuleMixin(object):
"""
    Adds convenience functions to a torch module
"""
def number_of_parameters(self, trainable=True):
return number_of_parameters(self, trainable)
def number_of_parameters(model, trainable=True):
"""
Returns number of trainable parameters in a torch module
Example:
>>> import netharn as nh
>>> model = nh.models.ToyNet2d()
>>> number_of_parameters(model)
824
"""
if trainable:
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
else:
model_parameters = model.parameters()
n_params = sum([np.prod(p.size()) for p in model_parameters])
return n_params
class grad_context(object):
"""
Context manager for controlling if autograd is enabled.
"""
def __init__(self, flag):
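        # torch.is_grad_enabled / set_grad_enabled only exist from torch 0.4 onwards,
        # so on older versions this context manager degrades to a no-op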
if tuple(map(int, torch.__version__.split('.')[0:2])) < (0, 4):
self.prev = None
self.flag = flag
else:
self.prev = torch.is_grad_enabled()
self.flag = flag
def __enter__(self):
if self.prev is not None:
torch.set_grad_enabled(self.flag)
def __exit__(self, *args):
if self.prev is not None:
torch.set_grad_enabled(self.prev)
return False
class DisableBatchNorm(object):
def __init__(self, model, enabled=True):
self.model = model
self.enabled = enabled
self.previous_state = None
def __enter__(self):
if self.enabled:
self.previous_state = {}
for name, layer in trainable_layers(self.model, names=True):
if isinstance(layer, torch.nn.modules.batchnorm._BatchNorm):
self.previous_state[name] = layer.training
layer.training = False
return self
def __exit__(self, *args):
if self.previous_state:
for name, layer in trainable_layers(self.model, names=True):
if name in self.previous_state:
layer.training = self.previous_state[name]
def trainable_layers(model, names=False):
"""
Example:
>>> import torchvision
>>> model = torchvision.models.AlexNet()
>>> list(trainable_layers(model, names=True))
"""
if names:
stack = [('', '', model)]
while stack:
prefix, basename, item = stack.pop()
name = '.'.join([p for p in [prefix, basename] if p])
if isinstance(item, torch.nn.modules.conv._ConvNd):
yield name, item
elif isinstance(item, torch.nn.modules.batchnorm._BatchNorm):
yield name, item
elif hasattr(item, 'reset_parameters'):
yield name, item
child_prefix = name
for child_basename, child_item in list(item.named_children())[::-1]:
stack.append((child_prefix, child_basename, child_item))
else:
queue = [model]
while queue:
item = queue.pop(0)
# TODO: need to put all trainable layer types here
# (I think this is just everything with reset_parameters)
if isinstance(item, torch.nn.modules.conv._ConvNd):
yield item
elif isinstance(item, torch.nn.modules.batchnorm._BatchNorm):
yield item
elif hasattr(item, 'reset_parameters'):
yield item
# if isinstance(input, torch.nn.modules.Linear):
# yield item
# if isinstance(input, torch.nn.modules.Bilinear):
# yield item
# if isinstance(input, torch.nn.modules.Embedding):
# yield item
# if isinstance(input, torch.nn.modules.EmbeddingBag):
# yield item
for child in item.children():
queue.append(child)
def one_hot_embedding(labels, num_classes, dtype=None):
"""
Embedding labels to one-hot form.
Args:
labels: (LongTensor) class labels, sized [N,].
num_classes: (int) number of classes.
Returns:
(tensor) encoded labels, sized [N,#classes].
References:
https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/4
CommandLine:
python -m netharn.loss one_hot_embedding
Example:
>>> # each element in target has to have 0 <= value < C
>>> labels = torch.LongTensor([0, 0, 1, 4, 2, 3])
>>> num_classes = max(labels) + 1
>>> t = one_hot_embedding(labels, num_classes)
>>> assert all(row[y] == 1 for row, y in zip(t.numpy(), labels.numpy()))
>>> import ubelt as ub
>>> print(ub.repr2(t.numpy().tolist()))
[
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
]
>>> t2 = one_hot_embedding(labels.numpy(), num_classes)
>>> assert np.all(t2 == t.numpy())
>>> if torch.cuda.is_available():
>>> t3 = one_hot_embedding(labels.to(0), num_classes)
>>> assert np.all(t3.cpu().numpy() == t.numpy())
"""
if isinstance(labels, np.ndarray):
dtype = dtype or np.float
y = np.eye(num_classes, dtype=dtype)
y_onehot = y[labels]
else: # if torch.is_tensor(labels):
dtype = dtype or torch.float
y = torch.eye(num_classes, device=labels.device, dtype=dtype)
y_onehot = y[labels]
return y_onehot
def one_hot_lookup(probs, labels):
"""
    Return the probability of a particular label (usually the true label) for each item.
    Each item in labels corresponds to a row in probs. Returns the index
specified at each row.
Example:
>>> probs = np.array([
>>> [0, 1, 2],
>>> [3, 4, 5],
>>> [6, 7, 8],
>>> [9, 10, 11],
>>> ])
>>> labels = np.array([0, 1, 2, 1])
>>> one_hot_lookup(probs, labels)
array([ 0, 4, 8, 10])
"""
    # Boolean one-hot mask: row i is True only at column labels[i], so indexing
    # probs with it picks out probs[i, labels[i]] for every row.
    return probs[np.eye(probs.shape[1], dtype=np.bool)[labels]]
|
[
"numpy.eye",
"torch.__version__.split",
"torch.eye",
"torch.set_grad_enabled",
"torch.is_grad_enabled"
] |
[((5383, 5415), 'numpy.eye', 'np.eye', (['num_classes'], {'dtype': 'dtype'}), '(num_classes, dtype=dtype)\n', (5389, 5415), True, 'import numpy as np\n'), ((5535, 5592), 'torch.eye', 'torch.eye', (['num_classes'], {'device': 'labels.device', 'dtype': 'dtype'}), '(num_classes, device=labels.device, dtype=dtype)\n', (5544, 5592), False, 'import torch\n'), ((1043, 1066), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (1064, 1066), False, 'import torch\n'), ((1168, 1201), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['self.flag'], {}), '(self.flag)\n', (1190, 1201), False, 'import torch\n'), ((1280, 1313), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['self.prev'], {}), '(self.prev)\n', (1302, 1313), False, 'import torch\n'), ((6180, 6217), 'numpy.eye', 'np.eye', (['probs.shape[1]'], {'dtype': 'np.bool'}), '(probs.shape[1], dtype=np.bool)\n', (6186, 6217), True, 'import numpy as np\n'), ((901, 929), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (924, 929), False, 'import torch\n')]
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for pyvo.dal.datalink
"""
from functools import partial
from urllib.parse import parse_qsl
from pyvo.dal.adhoc import DatalinkResults
from pyvo.dal.params import find_param_by_keyword, get_converter
from pyvo.dal.exceptions import DALServiceError
import pytest
import numpy as np
import astropy.units as u
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj
get_pkg_data_contents = partial(
get_pkg_data_contents, package=__package__, encoding='binary')
get_pkg_data_fileobj = partial(
get_pkg_data_fileobj, package=__package__, encoding='binary')
@pytest.fixture()
def proc(mocker):
def callback(request, context):
return get_pkg_data_contents('data/datalink/proc.xml')
with mocker.register_uri(
'GET', 'http://example.com/proc', content=callback
) as matcher:
yield matcher
@pytest.fixture()
def proc_ds(mocker):
def callback(request, context):
return b''
with mocker.register_uri(
'GET', 'http://example.com/proc', content=callback
) as matcher:
yield matcher
@pytest.fixture()
def proc_units(mocker):
def callback(request, context):
return get_pkg_data_contents('data/datalink/proc_units.xml')
with mocker.register_uri(
'GET', 'http://example.com/proc_units', content=callback
) as matcher:
yield matcher
@pytest.fixture()
def proc_units_ds(mocker):
def callback(request, context):
data = dict(parse_qsl(request.query))
if 'band' in data:
assert data['band'] == (
'6.000000000000001e-07 8.000000000000001e-06')
return b''
with mocker.register_uri(
'GET', 'http://example.com/proc_units_ds', content=callback
) as matcher:
yield matcher
@pytest.fixture()
def proc_inf(mocker):
def callback(request, context):
return get_pkg_data_contents('data/datalink/proc_inf.xml')
with mocker.register_uri(
'GET', 'http://example.com/proc_inf', content=callback
) as matcher:
yield matcher
@pytest.fixture()
def proc_inf_ds(mocker):
def callback(request, context):
data = dict(parse_qsl(request.query))
if 'band' in data:
assert data['band'] == (
'6.000000000000001e-07 +Inf')
return b''
with mocker.register_uri(
'GET', 'http://example.com/proc_inf_ds', content=callback
) as matcher:
yield matcher
@pytest.mark.usefixtures('proc')
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W06")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W48")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.E02")
def test_find_param_by_keyword():
datalink = DatalinkResults.from_result_url('http://example.com/proc')
proc = datalink[0]
input_params = {param.name: param for param in proc.input_params}
polygon_lower = find_param_by_keyword('polygon', input_params)
polygon_upper = find_param_by_keyword('POLYGON', input_params)
circle_lower = find_param_by_keyword('circle', input_params)
circle_upper = find_param_by_keyword('CIRCLE', input_params)
assert polygon_lower == polygon_upper
assert circle_lower == circle_upper
@pytest.mark.usefixtures('proc')
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W06")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W48")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.E02")
def test_serialize():
datalink = DatalinkResults.from_result_url('http://example.com/proc')
proc = datalink[0]
input_params = {param.name: param for param in proc.input_params}
polygon_conv = get_converter(
find_param_by_keyword('polygon', input_params))
circle_conv = get_converter(
find_param_by_keyword('circle', input_params))
scale_conv = get_converter(
find_param_by_keyword('scale', input_params))
kind_conv = get_converter(
find_param_by_keyword('kind', input_params))
assert polygon_conv.serialize((1, 2, 3)) == "1 2 3"
assert polygon_conv.serialize(np.array((1, 2, 3))) == "1 2 3"
assert circle_conv.serialize((1.1, 2.2, 3.3)) == "1.1 2.2 3.3"
assert circle_conv.serialize(np.array((1.1, 2.2, 3.3))) == "1.1 2.2 3.3"
assert scale_conv.serialize(1) == "1"
assert kind_conv.serialize("DATA") == "DATA"
@pytest.mark.usefixtures('proc')
@pytest.mark.usefixtures('proc_ds')
def test_serialize_exceptions():
datalink = DatalinkResults.from_result_url('http://example.com/proc')
proc = datalink[0]
input_params = {param.name: param for param in proc.input_params}
polygon_conv = get_converter(
find_param_by_keyword('polygon', input_params))
circle_conv = get_converter(
find_param_by_keyword('circle', input_params))
band_conv = get_converter(
find_param_by_keyword('band', input_params))
with pytest.raises(DALServiceError):
polygon_conv.serialize((1, 2, 3, 4))
with pytest.raises(DALServiceError):
circle_conv.serialize((1, 2, 3, 4))
with pytest.raises(DALServiceError):
band_conv.serialize((1, 2, 3))
@pytest.mark.usefixtures('proc_units')
@pytest.mark.usefixtures('proc_units_ds')
def test_units():
datalink = DatalinkResults.from_result_url('http://example.com/proc_units')
proc = datalink[0]
proc.process(band=(6000*u.Angstrom, 80000*u.Angstrom))
@pytest.mark.usefixtures('proc_inf')
@pytest.mark.usefixtures('proc_inf_ds')
def test_inf():
datalink = DatalinkResults.from_result_url('http://example.com/proc_inf')
proc = datalink[0]
proc.process(band=(6000, +np.inf) * u.Angstrom)
|
[
"pytest.mark.filterwarnings",
"pyvo.dal.params.find_param_by_keyword",
"pyvo.dal.adhoc.DatalinkResults.from_result_url",
"astropy.utils.data.get_pkg_data_contents",
"numpy.array",
"functools.partial",
"pytest.mark.usefixtures",
"pytest.raises",
"pytest.fixture",
"urllib.parse.parse_qsl"
] |
[((505, 575), 'functools.partial', 'partial', (['get_pkg_data_contents'], {'package': '__package__', 'encoding': '"""binary"""'}), "(get_pkg_data_contents, package=__package__, encoding='binary')\n", (512, 575), False, 'from functools import partial\n'), ((605, 674), 'functools.partial', 'partial', (['get_pkg_data_fileobj'], {'package': '__package__', 'encoding': '"""binary"""'}), "(get_pkg_data_fileobj, package=__package__, encoding='binary')\n", (612, 674), False, 'from functools import partial\n'), ((683, 699), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (697, 699), False, 'import pytest\n'), ((950, 966), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (964, 966), False, 'import pytest\n'), ((1176, 1192), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1190, 1192), False, 'import pytest\n'), ((1461, 1477), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1475, 1477), False, 'import pytest\n'), ((1876, 1892), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1890, 1892), False, 'import pytest\n'), ((2155, 2171), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2169, 2171), False, 'import pytest\n'), ((2549, 2580), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc"""'], {}), "('proc')\n", (2572, 2580), False, 'import pytest\n'), ((2582, 2653), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W06"""'], {}), "('ignore::astropy.io.votable.exceptions.W06')\n", (2608, 2653), False, 'import pytest\n'), ((2655, 2726), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W48"""'], {}), "('ignore::astropy.io.votable.exceptions.W48')\n", (2681, 2726), False, 'import pytest\n'), ((2728, 2799), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.E02"""'], {}), "('ignore::astropy.io.votable.exceptions.E02')\n", (2754, 2799), False, 'import pytest\n'), ((3353, 3384), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc"""'], {}), "('proc')\n", (3376, 3384), False, 'import pytest\n'), ((3386, 3457), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W06"""'], {}), "('ignore::astropy.io.votable.exceptions.W06')\n", (3412, 3457), False, 'import pytest\n'), ((3459, 3530), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W48"""'], {}), "('ignore::astropy.io.votable.exceptions.W48')\n", (3485, 3530), False, 'import pytest\n'), ((3532, 3603), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.E02"""'], {}), "('ignore::astropy.io.votable.exceptions.E02')\n", (3558, 3603), False, 'import pytest\n'), ((4505, 4536), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc"""'], {}), "('proc')\n", (4528, 4536), False, 'import pytest\n'), ((4538, 4572), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_ds"""'], {}), "('proc_ds')\n", (4561, 4572), False, 'import pytest\n'), ((5293, 5330), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_units"""'], {}), "('proc_units')\n", (5316, 5330), False, 'import pytest\n'), ((5332, 5372), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_units_ds"""'], {}), "('proc_units_ds')\n", (5355, 5372), False, 'import pytest\n'), ((5557, 5592), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_inf"""'], {}), "('proc_inf')\n", (5580, 5592), 
False, 'import pytest\n'), ((5594, 5632), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_inf_ds"""'], {}), "('proc_inf_ds')\n", (5617, 5632), False, 'import pytest\n'), ((2849, 2907), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc"""'], {}), "('http://example.com/proc')\n", (2880, 2907), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((3022, 3068), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""polygon"""', 'input_params'], {}), "('polygon', input_params)\n", (3043, 3068), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3089, 3135), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""POLYGON"""', 'input_params'], {}), "('POLYGON', input_params)\n", (3110, 3135), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3156, 3201), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""circle"""', 'input_params'], {}), "('circle', input_params)\n", (3177, 3201), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3221, 3266), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""CIRCLE"""', 'input_params'], {}), "('CIRCLE', input_params)\n", (3242, 3266), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3641, 3699), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc"""'], {}), "('http://example.com/proc')\n", (3672, 3699), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((4621, 4679), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc"""'], {}), "('http://example.com/proc')\n", (4652, 4679), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((5406, 5470), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc_units"""'], {}), "('http://example.com/proc_units')\n", (5437, 5470), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((5664, 5726), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc_inf"""'], {}), "('http://example.com/proc_inf')\n", (5695, 5726), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((769, 816), 'astropy.utils.data.get_pkg_data_contents', 'get_pkg_data_contents', (['"""data/datalink/proc.xml"""'], {}), "('data/datalink/proc.xml')\n", (790, 816), False, 'from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj\n'), ((1268, 1321), 'astropy.utils.data.get_pkg_data_contents', 'get_pkg_data_contents', (['"""data/datalink/proc_units.xml"""'], {}), "('data/datalink/proc_units.xml')\n", (1289, 1321), False, 'from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj\n'), ((1966, 2017), 'astropy.utils.data.get_pkg_data_contents', 'get_pkg_data_contents', (['"""data/datalink/proc_inf.xml"""'], {}), "('data/datalink/proc_inf.xml')\n", (1987, 2017), False, 'from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj\n'), ((3836, 3882), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""polygon"""', 'input_params'], {}), "('polygon', input_params)\n", (3857, 3882), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3925, 3970), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""circle"""', 'input_params'], 
{}), "('circle', input_params)\n", (3946, 3970), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4012, 4056), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""scale"""', 'input_params'], {}), "('scale', input_params)\n", (4033, 4056), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4097, 4140), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""kind"""', 'input_params'], {}), "('kind', input_params)\n", (4118, 4140), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4816, 4862), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""polygon"""', 'input_params'], {}), "('polygon', input_params)\n", (4837, 4862), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4905, 4950), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""circle"""', 'input_params'], {}), "('circle', input_params)\n", (4926, 4950), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4991, 5034), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""band"""', 'input_params'], {}), "('band', input_params)\n", (5012, 5034), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((5046, 5076), 'pytest.raises', 'pytest.raises', (['DALServiceError'], {}), '(DALServiceError)\n', (5059, 5076), False, 'import pytest\n'), ((5133, 5163), 'pytest.raises', 'pytest.raises', (['DALServiceError'], {}), '(DALServiceError)\n', (5146, 5163), False, 'import pytest\n'), ((5219, 5249), 'pytest.raises', 'pytest.raises', (['DALServiceError'], {}), '(DALServiceError)\n', (5232, 5249), False, 'import pytest\n'), ((1561, 1585), 'urllib.parse.parse_qsl', 'parse_qsl', (['request.query'], {}), '(request.query)\n', (1570, 1585), False, 'from urllib.parse import parse_qsl\n'), ((2253, 2277), 'urllib.parse.parse_qsl', 'parse_qsl', (['request.query'], {}), '(request.query)\n', (2262, 2277), False, 'from urllib.parse import parse_qsl\n'), ((4233, 4252), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (4241, 4252), True, 'import numpy as np\n'), ((4366, 4391), 'numpy.array', 'np.array', (['(1.1, 2.2, 3.3)'], {}), '((1.1, 2.2, 3.3))\n', (4374, 4391), True, 'import numpy as np\n')]
|
import numpy as np
__copyright__ = 'Copyright (C) 2018 ICTP'
__author__ = '<NAME> <<EMAIL>>'
__credits__ = ["<NAME>", "<NAME>"]
def get_x(lon, clon, cone):
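    # Cone-scaled longitude difference: radians(clon - lon) * cone, shifting clon - lon by +/-360 degrees
    # when lon and clon have opposite signs and the shift gives a smaller absolute difference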
if clon >= 0.0 and lon >= 0.0 or clon < 0.0 and lon < 0.0:
return np.radians(clon - lon) * cone
elif clon >= 0.0:
if abs(clon - lon + 360.0) < abs(clon - lon):
return np.radians(clon - lon + 360) * cone
else:
return np.radians(clon - lon) * cone
elif abs(clon - lon - 360.0) < abs(clon - lon):
return np.radians(clon - lon - 360) * cone
else:
return np.radians(clon - lon) * cone
def grid_to_earth_uvrotate(proj, lon, lat, clon, clat, cone=None, plon=None,
plat=None):
if proj == 'NORMER':
return 1, 0
elif proj == 'ROTMER':
zphi = np.radians(lat)
zrla = np.radians(lon)
zrla = np.where(abs(lat) > 89.99999, 0.0, zrla)
if plat > 0.0:
pollam = plon + 180.0
polphi = 90.0 - plat
else:
pollam = plon
polphi = 90.0 + plat
if pollam > 180.0:
pollam = pollam - 360.0
polcphi = np.cos(np.radians(polphi))
polsphi = np.sin(np.radians(polphi))
zrlap = np.radians(pollam) - zrla
zarg1 = polcphi * np.sin(zrlap)
zarg2 = polsphi*np.cos(zphi) - polcphi*np.sin(zphi)*np.cos(zrlap)
znorm = 1.0/np.sqrt(zarg1**2+zarg2**2)
sindel = zarg1*znorm
cosdel = zarg2*znorm
return cosdel, sindel
else:
if np.isscalar(lon):
x = get_x(lon, clon, cone)
else:
c = np.vectorize(get_x, excluded=['clon', 'cone'])
x = c(lon, clon, cone)
xc = np.cos(x)
xs = np.sin(x)
if clat >= 0:
xs *= -1
return xc, xs
|
[
"numpy.radians",
"numpy.sqrt",
"numpy.isscalar",
"numpy.cos",
"numpy.sin",
"numpy.vectorize"
] |
[((238, 260), 'numpy.radians', 'np.radians', (['(clon - lon)'], {}), '(clon - lon)\n', (248, 260), True, 'import numpy as np\n'), ((829, 844), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (839, 844), True, 'import numpy as np\n'), ((860, 875), 'numpy.radians', 'np.radians', (['lon'], {}), '(lon)\n', (870, 875), True, 'import numpy as np\n'), ((1564, 1580), 'numpy.isscalar', 'np.isscalar', (['lon'], {}), '(lon)\n', (1575, 1580), True, 'import numpy as np\n'), ((1746, 1755), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1752, 1755), True, 'import numpy as np\n'), ((1769, 1778), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1775, 1778), True, 'import numpy as np\n'), ((1185, 1203), 'numpy.radians', 'np.radians', (['polphi'], {}), '(polphi)\n', (1195, 1203), True, 'import numpy as np\n'), ((1230, 1248), 'numpy.radians', 'np.radians', (['polphi'], {}), '(polphi)\n', (1240, 1248), True, 'import numpy as np\n'), ((1266, 1284), 'numpy.radians', 'np.radians', (['pollam'], {}), '(pollam)\n', (1276, 1284), True, 'import numpy as np\n'), ((1318, 1331), 'numpy.sin', 'np.sin', (['zrlap'], {}), '(zrlap)\n', (1324, 1331), True, 'import numpy as np\n'), ((1426, 1458), 'numpy.sqrt', 'np.sqrt', (['(zarg1 ** 2 + zarg2 ** 2)'], {}), '(zarg1 ** 2 + zarg2 ** 2)\n', (1433, 1458), True, 'import numpy as np\n'), ((1651, 1697), 'numpy.vectorize', 'np.vectorize', (['get_x'], {'excluded': "['clon', 'cone']"}), "(get_x, excluded=['clon', 'cone'])\n", (1663, 1697), True, 'import numpy as np\n'), ((364, 392), 'numpy.radians', 'np.radians', (['(clon - lon + 360)'], {}), '(clon - lon + 360)\n', (374, 392), True, 'import numpy as np\n'), ((433, 455), 'numpy.radians', 'np.radians', (['(clon - lon)'], {}), '(clon - lon)\n', (443, 455), True, 'import numpy as np\n'), ((531, 559), 'numpy.radians', 'np.radians', (['(clon - lon - 360)'], {}), '(clon - lon - 360)\n', (541, 559), True, 'import numpy as np\n'), ((593, 615), 'numpy.radians', 'np.radians', (['(clon - lon)'], {}), '(clon - lon)\n', (603, 615), True, 'import numpy as np\n'), ((1356, 1368), 'numpy.cos', 'np.cos', (['zphi'], {}), '(zphi)\n', (1362, 1368), True, 'import numpy as np\n'), ((1392, 1405), 'numpy.cos', 'np.cos', (['zrlap'], {}), '(zrlap)\n', (1398, 1405), True, 'import numpy as np\n'), ((1379, 1391), 'numpy.sin', 'np.sin', (['zphi'], {}), '(zphi)\n', (1385, 1391), True, 'import numpy as np\n')]
|
import logging
import os
import shutil
import tempfile
from urllib import request as request
from urllib.error import HTTPError, URLError
from ase import Atoms
import numpy as np
from schnetpack.data import AtomsData
from schnetpack.environment import SimpleEnvironmentProvider
class MD17(AtomsData):
"""
MD17 benchmark data set for molecular dynamics of small molecules containing molecular forces.
Args:
path (str): path to database
dataset (str): Name of molecule to load into database. Allowed are:
aspirin
benzene
ethanol
malonaldehyde
naphthalene
salicylic_acid
toluene
uracil
subset (list): indices of subset. Set to None for entire dataset
(default: None)
download (bool): set true if dataset should be downloaded
(default: True)
calculate_triples (bool): set true if triples for angular functions
should be computed (default: False)
parse_all (bool): set true to generate the ase dbs of all molecules in
the beginning (default: False)
See: http://quantum-machine.org/datasets/
"""
energies = 'energy'
forces = 'forces'
datasets_dict = dict(aspirin='aspirin_dft.npz',
#aspirin_ccsd='aspirin_ccsd.zip',
azobenzene='azobenzene_dft.npz',
benzene='benzene_dft.npz',
ethanol='ethanol_dft.npz',
#ethanol_ccsdt='ethanol_ccsd_t.zip',
malonaldehyde='malonaldehyde_dft.npz',
#malonaldehyde_ccsdt='malonaldehyde_ccsd_t.zip',
naphthalene='naphthalene_dft.npz',
paracetamol='paracetamol_dft.npz',
salicylic_acid='salicylic_dft.npz',
toluene='toluene_dft.npz',
#toluene_ccsdt='toluene_ccsd_t.zip',
uracil='uracil_dft.npz'
)
existing_datasets = datasets_dict.keys()
def __init__(self, dbdir, dataset, subset=None, download=True, collect_triples=False, parse_all=False,
properties=None):
self.load_all = parse_all
if dataset not in self.datasets_dict.keys():
raise ValueError("Unknown dataset specification {:s}".format(dataset))
self.dbdir = dbdir
self.dataset = dataset
self.database = dataset + ".db"
dbpath = os.path.join(self.dbdir, self.database)
self.collect_triples = collect_triples
environment_provider = SimpleEnvironmentProvider()
if properties is None:
properties = ["energy", "forces"]
super(MD17, self).__init__(dbpath, subset, properties, environment_provider,
collect_triples)
if download:
self.download()
def create_subset(self, idx):
idx = np.array(idx)
subidx = idx if self.subset is None else np.array(self.subset)[idx]
return MD17(self.dbdir, self.dataset, subset=subidx, download=False, collect_triples=self.collect_triples)
def download(self):
"""
download data if not already on disk.
"""
success = True
if not os.path.exists(self.dbdir):
os.makedirs(self.dbdir)
if not os.path.exists(self.dbpath):
success = success and self._load_data()
return success
def _load_data(self):
for molecule in self.datasets_dict.keys():
# if requested, convert only the required molecule
if not self.load_all:
if molecule != self.dataset:
continue
logging.info("Downloading {} data".format(molecule))
tmpdir = tempfile.mkdtemp("MD")
rawpath = os.path.join(tmpdir, self.datasets_dict[molecule])
url = "http://www.quantum-machine.org/gdml/data/npz/" + self.datasets_dict[molecule]
try:
request.urlretrieve(url, rawpath)
except HTTPError as e:
logging.error("HTTP Error:", e.code, url)
return False
except URLError as e:
logging.error("URL Error:", e.reason, url)
return False
logging.info("Parsing molecule {:s}".format(molecule))
data = np.load(rawpath)
numbers = data['z']
atoms_list = []
properties_list = []
for positions, energies, forces in zip(data['R'], data['E'], data['F']):
properties_list.append(dict(energy=energies, forces=forces))
atoms_list.append(Atoms(positions=positions, numbers=numbers))
self.add_systems(atoms_list, properties_list)
            logging.info("Cleaning up the mess...")
logging.info('{} molecule done'.format(molecule))
shutil.rmtree(tmpdir)
return True
|
[
"os.path.exists",
"os.makedirs",
"urllib.request.urlretrieve",
"ase.Atoms",
"os.path.join",
"numpy.array",
"tempfile.mkdtemp",
"shutil.rmtree",
"numpy.load",
"logging.info",
"logging.error",
"schnetpack.environment.SimpleEnvironmentProvider"
] |
[((2703, 2742), 'os.path.join', 'os.path.join', (['self.dbdir', 'self.database'], {}), '(self.dbdir, self.database)\n', (2715, 2742), False, 'import os\n'), ((2822, 2849), 'schnetpack.environment.SimpleEnvironmentProvider', 'SimpleEnvironmentProvider', ([], {}), '()\n', (2847, 2849), False, 'from schnetpack.environment import SimpleEnvironmentProvider\n'), ((3166, 3179), 'numpy.array', 'np.array', (['idx'], {}), '(idx)\n', (3174, 3179), True, 'import numpy as np\n'), ((5039, 5080), 'logging.info', 'logging.info', (['"""Cleanining up the mess..."""'], {}), "('Cleanining up the mess...')\n", (5051, 5080), False, 'import logging\n'), ((5147, 5168), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (5160, 5168), False, 'import shutil\n'), ((3504, 3530), 'os.path.exists', 'os.path.exists', (['self.dbdir'], {}), '(self.dbdir)\n', (3518, 3530), False, 'import os\n'), ((3544, 3567), 'os.makedirs', 'os.makedirs', (['self.dbdir'], {}), '(self.dbdir)\n', (3555, 3567), False, 'import os\n'), ((3584, 3611), 'os.path.exists', 'os.path.exists', (['self.dbpath'], {}), '(self.dbpath)\n', (3598, 3611), False, 'import os\n'), ((4026, 4048), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""MD"""'], {}), "('MD')\n", (4042, 4048), False, 'import tempfile\n'), ((4071, 4121), 'os.path.join', 'os.path.join', (['tmpdir', 'self.datasets_dict[molecule]'], {}), '(tmpdir, self.datasets_dict[molecule])\n', (4083, 4121), False, 'import os\n'), ((4619, 4635), 'numpy.load', 'np.load', (['rawpath'], {}), '(rawpath)\n', (4626, 4635), True, 'import numpy as np\n'), ((3229, 3250), 'numpy.array', 'np.array', (['self.subset'], {}), '(self.subset)\n', (3237, 3250), True, 'import numpy as np\n'), ((4253, 4286), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'rawpath'], {}), '(url, rawpath)\n', (4272, 4286), True, 'from urllib import request as request\n'), ((4338, 4379), 'logging.error', 'logging.error', (['"""HTTP Error:"""', 'e.code', 'url'], {}), "('HTTP Error:', e.code, url)\n", (4351, 4379), False, 'import logging\n'), ((4459, 4501), 'logging.error', 'logging.error', (['"""URL Error:"""', 'e.reason', 'url'], {}), "('URL Error:', e.reason, url)\n", (4472, 4501), False, 'import logging\n'), ((4926, 4969), 'ase.Atoms', 'Atoms', ([], {'positions': 'positions', 'numbers': 'numbers'}), '(positions=positions, numbers=numbers)\n', (4931, 4969), False, 'from ase import Atoms\n')]
|
"""
Aggregator
====================================
*Aggregators* are used to combine multiple matrices to a single matrix.
This is used to combine similarity and dissimilarity matrices of multiple attributes to a single one.
Thus, an *Aggregator* :math:`\\mathcal{A}` is a mapping of the form
:math:`\\mathcal{A} : \\mathbb{R}^{n \\times n \\times k} \\rightarrow \\mathbb{R}^{n \\times n}`,
with :math:`n` being the amount of features and :math:`k` being the number of similarity or dissimilarity matrices
of type :math:`D \\in \\mathbb{R}^{n \\times n}`, i.e. the amount of attributes/columns of the dataset.
Currently, the following *Aggregators* are implement:
=========== ===========
Name Formula
----------- -----------
mean :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\frac{1}{k} \\sum_{i=1}^{k} D^i`
median      :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\left\\{ \\begin{array}{ll} D^{\\frac{k+1}{2}} & \\mbox{, if } k \\mbox{ is odd} \\\\ \\frac{1}{2} \\left( D^{\\frac{k}{2}} + D^{\\frac{k}{2}+1} \\right) & \\mbox{, if } k \\mbox{ is even} \\end{array} \\right.`
max :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = max_{ l} \\; D_{i,j}^l`
min :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = min_{ l} \\; D_{i,j}^l`
=========== ===========
"""
import numpy as np
from abc import ABC, abstractmethod
class Aggregator(ABC):
"""
An abstract base class for *Aggregators*.
If custom *Aggregators* are created,
it is enough to derive from this class
and use it whenever an *Aggregator* is needed.
"""
@abstractmethod
def aggregate(self, matrices):
"""
The abstract method that is implemented by the concrete *Aggregators*.
:param matrices: a list of similarity or dissimilarity matrices as 2D numpy arrays.
:return: a single 2D numpy array.
"""
pass
class AggregatorFactory:
"""
The factory class for creating concrete instances of the implemented *Aggregators* with default values.
"""
@staticmethod
def create(aggregator):
"""
Creates an instance of the given *Aggregator* name.
:param aggregator: The name of the *Aggregator*, which can be ``mean``, ``median``, ``max`` or ``min``.
:return: An instance of the *Aggregator*.
:raise ValueError: The given *Aggregator* does not exist.
"""
if aggregator == "mean":
return MeanAggregator()
elif aggregator == "median":
return MedianAggregator()
elif aggregator == "max":
return MaxAggregator()
elif aggregator == "min":
return MinAggregator()
else:
raise ValueError(f"An aggregator of type {aggregator} does not exist.")
class MeanAggregator(Aggregator):
"""
This class aggregates similarity or dissimilarity matrices using the ``mean``.
Given :math:`k` similarity or dissimilarity matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}`,
the *MeanAggregator* calculates
.. centered::
:math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\frac{1}{k} \\sum_{i=1}^{k} D^i`.
"""
def aggregate(self, matrices):
"""
Calculates the mean of all given matrices along the zero axis.
:param matrices: A list of 2D numpy arrays.
:return: A 2D numpy array.
"""
return np.mean(matrices, axis=0)
class MedianAggregator(Aggregator):
"""
This class aggregates similarity or dissimilarity matrices using the ``median``.
Given :math:`k` similarity or dissimilarity matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}`,
the *MedianAggregator* calculates
.. centered::
        :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\left\\{ \\begin{array}{ll} D^{\\frac{k+1}{2}} & \\mbox{, if } k \\mbox{ is odd} \\\\ \\frac{1}{2} \\left( D^{\\frac{k}{2}} + D^{\\frac{k}{2}+1} \\right) & \\mbox{, if } k \\mbox{ is even} \\end{array} \\right.`
"""
def aggregate(self, matrices):
"""
Calculates the median of all given matrices along the zero axis.
:param matrices: A list of 2D numpy arrays.
:return: A 2D numpy array.
"""
return np.median(matrices, axis=0)
class MaxAggregator(Aggregator):
"""
This class aggregates similarity or dissimilarity matrices using the ``max``.
Given :math:`k` similarity or dissimilarity matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}`,
the *MaxAggregator* calculates
.. centered::
:math:`\\mathcal{A} (D^1, D^2, ..., D^k) = max_{ l} \\; D_{i,j}^l`.
"""
def aggregate(self, matrices):
"""
Calculates the max of all given matrices along the zero axis.
:param matrices: A list of 2D numpy arrays.
:return: A 2D numpy array.
"""
return np.max(matrices, axis=0)
class MinAggregator(Aggregator):
"""
This class aggregates similarity or dissimilarity matrices using the ``min``.
Given :math:`k` similarity or dissimilarity matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}`,
the *MinAggregator* calculates
.. centered::
:math:`\\mathcal{A} (D^1, D^2, ..., D^k) = min_{ l} \\; D_{i,j}^l`.
"""
def aggregate(self, matrices):
"""
Calculates the min of all given matrices along the zero axis.
:param matrices: A list of 2D numpy arrays.
:return: A 2D numpy array.
"""
return np.min(matrices, axis=0)
|
[
"numpy.mean",
"numpy.median",
"numpy.min",
"numpy.max"
] |
[((3369, 3394), 'numpy.mean', 'np.mean', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (3376, 3394), True, 'import numpy as np\n'), ((4191, 4218), 'numpy.median', 'np.median', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (4200, 4218), True, 'import numpy as np\n'), ((4816, 4840), 'numpy.max', 'np.max', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (4822, 4840), True, 'import numpy as np\n'), ((5438, 5462), 'numpy.min', 'np.min', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (5444, 5462), True, 'import numpy as np\n')]
|
""" suggest a sensible tolerance for a matrix and coverage-rate (default 0.66).
"""
from typing import Optional
import numpy as np
from tqdm import trange
from logzero import logger
from .coverage_rate import coverage_rate
# fmt: off
def suggest_tolerance(
mat: np.ndarray,
c_rate: float = 0.66,
limit: Optional[int] = None,
) -> int:
# fmt: on
""" suggest a sensible tolerance for a matrix and coverage-rate (default 0.66).
"""
mat = np.asarray(mat)
try:
_, col = mat.shape
except Exception as exc:
        logger.error(exc)
raise
if limit is None:
limit = max(col // 2, 6)
tolerance = 3
if coverage_rate(mat, tolerance) >= c_rate:
return tolerance
# may try binary tree to speed up
for tol in trange(tolerance + 1, limit + 1):
_ = coverage_rate(mat, tol)
if _ >= c_rate:
logger.info(" search succeeded for mat of size %s", mat.size)
return tol
logger.warning(" mat of size %s most likely not a score matrix", mat.shape)
    logger.warning(" we searched hard but were unable to find a sensible tolerance, setting to max(half of %s, 6): %s", col, max(col // 2, 6))
return max(col // 2, 6)
|
[
"numpy.asarray",
"logzero.logger.warning",
"logzero.logger.info",
"tqdm.trange",
"logzero.logger.erorr"
] |
[((480, 495), 'numpy.asarray', 'np.asarray', (['mat'], {}), '(mat)\n', (490, 495), True, 'import numpy as np\n'), ((804, 836), 'tqdm.trange', 'trange', (['(tolerance + 1)', '(limit + 1)'], {}), '(tolerance + 1, limit + 1)\n', (810, 836), False, 'from tqdm import trange\n'), ((999, 1074), 'logzero.logger.warning', 'logger.warning', (['""" mat of size %s most likely not a score matrix"""', 'mat.shape'], {}), "(' mat of size %s most likely not a score matrix', mat.shape)\n", (1013, 1074), False, 'from logzero import logger\n'), ((570, 587), 'logzero.logger.erorr', 'logger.erorr', (['exc'], {}), '(exc)\n', (582, 587), False, 'from logzero import logger\n'), ((910, 971), 'logzero.logger.info', 'logger.info', (['""" search succeeded for mat of size %s"""', 'mat.size'], {}), "(' search succeeded for mat of size %s', mat.size)\n", (921, 971), False, 'from logzero import logger\n')]
|
import gym
from gym import spaces
import numpy as np
import os
import sys
from m_gym.envs.createsim import CreateSimulation
from m_gym.envs.meveahandle import MeveaHandle
from time import sleep
from math import exp
class ExcavatorDiggingSparseEnv(gym.Env):
def __init__(self):
super(ExcavatorDiggingSparseEnv, self).__init__()
self.config= {
"model_name": "Excavator",
"model_file_location": "Excavator",
"debug": False,
"episode_duration": 45,
"excluded": ["Input_Reset"],
"render": True,
"service_inputs": ["Input_Reset","Input_Ready"],
"service_outputs": ["Output_Reset_Done"],
"reset_input_block": 12,
"reset_done_output_block": 1,
}
#"workers_directory":"..\\Workers"
self.sim = CreateSimulation(self.config)
self.new_folder = self.sim.new_folder
self.model_file_path = self.sim.model_file_path
# Get amount of the parameters in the observation vector
self.obs_len = self.sim.observation_len
# Get amount of the parameters in the action vector
self.act_len = self.sim.action_len
# Create observation and action numpy array
self.observation = np.zeros(self.obs_len, dtype=np.float32)
self.action = np.zeros(self.act_len, dtype=np.float32)
self.action_high = np.ones(self.act_len, dtype=np.float32)
self.action_low = -self.action_high
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.observation.shape)
self.action_space = spaces.Box(low=self.action_low, high=self.action_high, shape=self.action.shape)
self.mh = MeveaHandle(self.sim.worker_number, self.sim.analog_inputs_blocks , self.sim.digital_inputs_blocks, self.sim.analog_outputs_blocks, self.sim.digital_outputs_blocks)
self.mh.start_process(os.path.abspath(self.model_file_path), self.config['render'])
self.bucket_trigger_mass = 100
self.dumpster_trigger_mass = 100
del self.sim
# Returns Box observation space
def get_observation_space(self):
return spaces.Box(low=-np.inf, high=np.inf, shape=self.observation.shape)
# Returns Box action space
def get_action_space(self):
return spaces.Box(low=self.action_low, high=self.action_high, shape=self.action.shape)
def step(self, action):
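        # Apply the action, let the simulation run for one second of wall-clock time, then read the resulting outputs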
self.mh.set_inputs(action)
sleep(1)
obs = self.mh.get_outputs()
print(self.get_action_space())
reward = 1
done = False
print(obs[11], obs[11] >= self.config['episode_duration'])
if obs[11] >= self.config['episode_duration']:
done = True
return obs, reward, done, {}
def reset(self):
'''
print("restart")
self.mh.set_single_digital_input(self.config['reset_input_block'], self.mh.worker_number, 1)
sleep(1)
self.mh.set_single_digital_input(self.config['reset_input_block'], self.mh.worker_number, 0)
obs = self.mh.get_outputs()
while round(obs[11]) != 0:
self.mh.set_single_digital_input(self.config['reset_input_block'], self.mh.worker_number, 0)
obs = self.mh.get_outputs()
sleep(0.1)
'''
return self.mh.get_outputs()
def render(self, mode='', close=False):
pass
def close(self):
self.mh.terminate()
self.mh.delete_folder(self.new_folder)
print('Simulation environment closed!')
|
[
"numpy.ones",
"m_gym.envs.meveahandle.MeveaHandle",
"m_gym.envs.createsim.CreateSimulation",
"time.sleep",
"gym.spaces.Box",
"numpy.zeros",
"os.path.abspath"
] |
[((754, 783), 'm_gym.envs.createsim.CreateSimulation', 'CreateSimulation', (['self.config'], {}), '(self.config)\n', (770, 783), False, 'from m_gym.envs.createsim import CreateSimulation\n'), ((1156, 1196), 'numpy.zeros', 'np.zeros', (['self.obs_len'], {'dtype': 'np.float32'}), '(self.obs_len, dtype=np.float32)\n', (1164, 1196), True, 'import numpy as np\n'), ((1215, 1255), 'numpy.zeros', 'np.zeros', (['self.act_len'], {'dtype': 'np.float32'}), '(self.act_len, dtype=np.float32)\n', (1223, 1255), True, 'import numpy as np\n'), ((1279, 1318), 'numpy.ones', 'np.ones', (['self.act_len'], {'dtype': 'np.float32'}), '(self.act_len, dtype=np.float32)\n', (1286, 1318), True, 'import numpy as np\n'), ((1394, 1460), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': 'self.observation.shape'}), '(low=-np.inf, high=np.inf, shape=self.observation.shape)\n', (1404, 1460), False, 'from gym import spaces\n'), ((1485, 1564), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self.action_low', 'high': 'self.action_high', 'shape': 'self.action.shape'}), '(low=self.action_low, high=self.action_high, shape=self.action.shape)\n', (1495, 1564), False, 'from gym import spaces\n'), ((1580, 1753), 'm_gym.envs.meveahandle.MeveaHandle', 'MeveaHandle', (['self.sim.worker_number', 'self.sim.analog_inputs_blocks', 'self.sim.digital_inputs_blocks', 'self.sim.analog_outputs_blocks', 'self.sim.digital_outputs_blocks'], {}), '(self.sim.worker_number, self.sim.analog_inputs_blocks, self.sim\n .digital_inputs_blocks, self.sim.analog_outputs_blocks, self.sim.\n digital_outputs_blocks)\n', (1591, 1753), False, 'from m_gym.envs.meveahandle import MeveaHandle\n'), ((2011, 2077), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': 'self.observation.shape'}), '(low=-np.inf, high=np.inf, shape=self.observation.shape)\n', (2021, 2077), False, 'from gym import spaces\n'), ((2149, 2228), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self.action_low', 'high': 'self.action_high', 'shape': 'self.action.shape'}), '(low=self.action_low, high=self.action_high, shape=self.action.shape)\n', (2159, 2228), False, 'from gym import spaces\n'), ((2292, 2300), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2297, 2300), False, 'from time import sleep\n'), ((1771, 1808), 'os.path.abspath', 'os.path.abspath', (['self.model_file_path'], {}), '(self.model_file_path)\n', (1786, 1808), False, 'import os\n')]
|
#!/usr/bin/env python
"""PyDEC: Software and Algorithms for Discrete Exterior Calculus
"""
DOCLINES = __doc__.split("\n")
import os
import sys
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
Intended Audience :: Education
License :: OSI Approved :: BSD License
Programming Language :: Python
Topic :: Education
Topic :: Software Development
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Mathematics
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('pydec')
config.add_data_files(('pydec','*.txt'))
config.get_version(os.path.join('pydec','version.py')) # sets config.version
return config
def setup_package():
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
old_path = os.getcwd()
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(local_path)
sys.path.insert(0,local_path)
    sys.path.insert(0,os.path.join(local_path,'pydec')) # to retrieve version
try:
setup(
name = 'pydec',
maintainer = "PyDEC Developers",
maintainer_email = "<EMAIL>",
description = DOCLINES[0],
long_description = "\n".join(DOCLINES[2:]),
url = "http://www.graphics.cs.uiuc.edu/~wnbell/",
download_url = "http://code.google.com/p/pydec/downloads/list",
license = 'BSD',
classifiers=filter(None, CLASSIFIERS.split('\n')),
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
configuration=configuration )
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
|
[
"os.path.exists",
"sys.path.insert",
"os.path.join",
"numpy.distutils.misc_util.Configuration",
"os.getcwd",
"os.chdir",
"os.path.abspath",
"os.remove"
] |
[((762, 788), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (776, 788), False, 'import os\n'), ((790, 811), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (799, 811), False, 'import os\n'), ((934, 979), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (947, 979), False, 'from numpy.distutils.misc_util import Configuration\n'), ((1481, 1492), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1490, 1492), False, 'import os\n'), ((1560, 1580), 'os.chdir', 'os.chdir', (['local_path'], {}), '(local_path)\n', (1568, 1580), False, 'import os\n'), ((1585, 1615), 'sys.path.insert', 'sys.path.insert', (['(0)', 'local_path'], {}), '(0, local_path)\n', (1600, 1615), False, 'import sys\n'), ((1263, 1298), 'os.path.join', 'os.path.join', (['"""pydec"""', '"""version.py"""'], {}), "('pydec', 'version.py')\n", (1275, 1298), False, 'import os\n'), ((1526, 1554), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1541, 1554), False, 'import os\n'), ((1637, 1670), 'os.path.join', 'os.path.join', (['local_path', '"""pydec"""'], {}), "(local_path, 'pydec')\n", (1649, 1670), False, 'import os\n'), ((2321, 2339), 'os.chdir', 'os.chdir', (['old_path'], {}), '(old_path)\n', (2329, 2339), False, 'import os\n')]
|