"""
Apply cluster correction for independent-samples T-test based on spatial proximity and cluster size.
Inspired by MNE tutorial.
Created on Fri Feb 22 13:21:40 2019
@author: <NAME> <<EMAIL>>
"""
import numpy as np
from scipy import stats
from scipy.io import loadmat
import matplotlib.pyplot as plt
import os
from permutation_cluster_test_AT import _permutation_cluster_test_AT
print(__doc__)
#%% file paths
conn = '/media/cbru/SMEDY/scripts_speech_rest/stats/mantel/connectivity.npy'
results_dir = '/media/cbru/SMEDY/results/dys_con_contrast/2020_02_redo_subject_perm/'
read_dir = '/media/cbru/SMEDY/DATA/group_fake_iscs/'
#%% read connectivity
print('Read connectivity.')
connectivity = np.load(conn)
connectivity_sparse = connectivity[()]
#%% cluster correction
# for each permutation:
# 1. Compute the test statistic for each voxel individually.
# 2. Threshold the test statistic values.
# 3. Cluster voxels that exceed this threshold (with the same sign) based on adjacency.
# 4. Retain the size of the largest cluster (measured, e.g., by a simple voxel count,
# or by the sum of voxel t-values within the cluster) to build the null distribution.
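# A minimal illustrative sketch (hypothetical helper, not used below): given the
# null distribution of per-permutation maximum cluster sizes, the corrected
# p-value of an observed cluster is the fraction of permutations whose largest
# cluster is at least as big.
def _cluster_corrected_pvals(observed_sizes, null_max_sizes):
    null_max_sizes = np.asarray(null_max_sizes, dtype=float)
    return np.array([np.mean(null_max_sizes >= size) for size in observed_sizes])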
# define conditions
cons = '_1' # '_1' listening to speech
freqs = {'5.000000e-01-4Hz', '4-8Hz', '8-12Hz', '12-25Hz', '25-45Hz', '55-90Hz'}
if cons == '_1':
window = '_613'
elif cons == '_2':
window = '_579'
else:
print('Check condition!')
for freq in freqs:
if os.path.isfile(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat'):
print(cons + ' ' + freq)
# read in fake and actual T-test results
fake_values = loadmat(read_dir + 'fake_t_vals_' + freq +
window + cons + '.mat')['fake_t_vals']
real_values = loadmat(read_dir + 'real_t_vals_' + freq + window + cons +
'.mat')['real_t_vals']
# get threshold
threshold = loadmat(read_dir + 'tthreshold_uncorrected_' + freq +
window + cons + '.mat')['tthreshold_uncorrected']
print(threshold)
# reshape fake_values to (n_observations, n_times, n_vertices)
fake_values = fake_values[:, :, np.newaxis]
fake_values = fake_values.reshape((5000, 1, 20484))
# reshape real_values
real_values = real_values[:, :, np.newaxis]
real_values = real_values.reshape((1, 1, 20484))
# search for clusters (only once)
# max_clu_lens, clusters = _permutation_cluster_test_AT(fake_values,
# threshold=threshold[0][0],
# n_permutations=5000,
# tail=0,
# connectivity=connectivity_sparse,
# n_jobs=4, seed=10,
# max_step=1, t_power=1,
# out_type='indices',
# exclude=None,
# step_down_p=0,
# check_disjoint=False,
# buffer_size=1000)
#
# np.save(results_dir + 'max_clu_lens_' + freq + window + cons, max_clu_lens)
max_clu_lens = np.load(results_dir + 'max_clu_lens_' + freq + window + cons + '.npy')
# null distribution
plt.figure(0)
plt.hist(max_clu_lens)
kde = stats.gaussian_kde(max_clu_lens)
x = np.linspace(max_clu_lens.min(), max_clu_lens.max(), 100)
p = kde(x)
# cutoff for a cluster size that is significant
plt.figure(1)
plt.plot(x, p)
plt.hlines(0.095, 0, 14) # visualization of cutoff
        # use the maximum of the null distribution as the cutoff
        cutoff = np.max(max_clu_lens)
        print('cutoff length is', cutoff)
max_clu_lens2, clusters = _permutation_cluster_test_AT(real_values,
threshold=threshold[0][0],
n_permutations=1,
tail=0,
connectivity=connectivity_sparse,
n_jobs=4, seed=10,
max_step=1,
t_power=1,
out_type='indices',
exclude=None,
step_down_p=0,
check_disjoint=False,
buffer_size=1000)
# length of all initial clusters
clu_lens = np.zeros(len(clusters))
for j in range(0, len(clusters)):
clu_lens[j] = len(clusters[j][0])
# hists
plt.figure(1)
plt.hist(max_clu_lens)
plt.hist(clu_lens)
        # output in the format required by the MNE cluster function (for visualization)
t_out = real_values.reshape(1, 20484)
clusters_new = clusters
for c, l, i in zip(clusters, clu_lens, range(0, len(clusters))):
            clusters_new[i] = np.zeros(int(l), dtype='int'), c[0]
clu = t_out, clusters_new
np.save(results_dir + 'clu_' + freq + window + cons, clu)
        # see how many clusters exceed the cluster-size cutoff (i.e. survive the correction)
ids = np.where(clu_lens > cutoff)[0]
clu_sig = clusters[0:len(ids)]
for i in range(0, len(ids)):
clu_sig[i] = clusters[ids[i]]
sig_clu_lens = np.zeros(len(clu_sig))
for j in range(0, len(clu_sig)):
sig_clu_lens[j] = len(clu_sig[j][0])
else: print('No uncorrected p-vals < 0.05 for ' + freq)
"""Plotting function for birdsonganalysis."""
import numpy as np
import seaborn as sns
import matplotlib.patches as p
import matplotlib.pyplot as plt
from .songfeatures import spectral_derivs
from .constants import FREQ_RANGE
def spectral_derivs_plot(spec_der, contrast=0.1, ax=None, freq_range=None,
fft_step=None, fft_size=None):
"""
Plot the spectral derivatives of a song in a grey scale.
spec_der - The spectral derivatives of the song (computed with
`spectral_derivs`) or the song itself
contrast - The contrast of the plot
ax - The matplotlib axis where the plot must be drawn, if None, a new axis
is created
    freq_range - The range of frequencies to plot, useful only if `spec_der` is
                 a song. Given to `spectral_derivs`
    fft_step, fft_size - FFT parameters, given to `spectral_derivs` when
                 `spec_der` is a song
"""
if spec_der.ndim == 1:
spec_der = spectral_derivs(spec_der, freq_range, fft_step, fft_size)
ax = sns.heatmap(spec_der.T, yticklabels=50, xticklabels=50,
vmin=-contrast, vmax=contrast, ax=ax, cmap='Greys',
cbar=False)
ax.invert_yaxis()
return ax
def plot_over_spec(data, ax, freq_range=FREQ_RANGE, zoom=1, **plot_params):
"""
Plot the feature over a spectral derivatives plot.
The data are first normalized then rescale to fit the ylim of the axis.
"""
# Normalize the data so that they fit in the graph
ndata = (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data))
    # The baseline (abscissa) of the curve is placed at 5% of freq_range
    # and the data are rescaled to span 75% of the graph height
ax.plot(zoom * (5/100 * freq_range + 75/100 * freq_range * ndata),
**plot_params)
return ax
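# A minimal usage sketch (hypothetical helper; `song` is assumed to be a 1-D
# waveform array and `pitch` a per-frame feature trace): draw the spectral
# derivatives of the song and overlay the feature on the same axis.
def _example_feature_plot(song, pitch):
    ax = spectral_derivs_plot(song, contrast=0.1)
    return plot_over_spec(pitch, ax, label='pitch')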
def similarity_plot(sim, song, refsong):
"""Do a similarity plot with the result of `bsa.similarity`."""
fig, ax = plt.subplots(2, 2, figsize=(13, 13),
gridspec_kw={'width_ratios': [1, 4],
'height_ratios': [1, 4]})
ax[0, 0].axis('off')
sds = spectral_derivs(song)
sdr = spectral_derivs(refsong)
ax[0, 1] = spectral_derivs_plot(sds, 0.05, ax[0, 1])
ax[0, 1].set_title('Song')
ax[1, 0] = spectral_derivs_plot(np.flip(sdr.T, 1), 0.05,
ax[1, 0])
ax[1, 0].set_title('Reference Song')
ax[1, 1] = sns.heatmap(sim['glob_matrix'], ax=ax[1, 1], cbar=False,
vmin=0, vmax=1)
for section in sim['sections']:
xy = (section['beg'][0],
sim['glob_matrix'].shape[1] - section['end'][1])
width = section['end'][0] - section['beg'][0]
height = section['end'][1] - section['beg'][1]
ax[1, 1].add_patch(p.Rectangle(xy, width, height, fill=False,
edgecolor='y', linewidth=3))
return fig
import numpy as np
test = np.load('/home/ubuntu/hzy/pythia/data/m4c_textvqa_ocr_en_frcn_features/train_images/f441f29812b385ad_info.npy', encoding="latin1", allow_pickle=True)  # load the file
doc = open('contrast9.txt', 'a')  # open an output file and append to it
print(test, file=doc)  # write the printed content into the file
import os
import shutil
import zipfile
import networkx as nx
import numpy as np
import pandas as pd
import requests
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from spektral.utils import nx_to_numpy
DATASET_URL = 'https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets'
DATASET_CLEAN_URL = 'https://raw.githubusercontent.com/nd7141/graph_datasets/master/datasets'
DATA_PATH = os.path.expanduser('~/.spektral/datasets/')
AVAILABLE_DATASETS = [
d[:-4]
for d in pd.read_html(DATASET_URL)[0].Name[2:-1].values.tolist()
]
def load_data(dataset_name, normalize_features=None, clean=False):
"""
Loads one of the Benchmark Data Sets for Graph Kernels from TU Dortmund
([link](https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets)).
The node features are computed by concatenating the following features for
each node:
- node attributes, if available, normalized as specified in `normalize_features`;
- clustering coefficient, normalized with z-score;
- node degrees, normalized as specified in `normalize_features`;
- node labels, if available, one-hot encoded.
:param dataset_name: name of the dataset to load (see `spektral.datasets.tud.AVAILABLE_DATASETS`).
:param normalize_features: `None`, `'zscore'` or `'ohe'`, how to normalize
the node features (only works for node attributes).
:param clean: if True, return a version of the dataset with no isomorphic
graphs.
:return:
- a list of adjacency matrices;
- a list of node feature matrices;
- a numpy array containing the one-hot encoded targets.
"""
if dataset_name not in AVAILABLE_DATASETS:
raise ValueError('Available datasets: {}'.format(AVAILABLE_DATASETS))
if clean:
dataset_name += '_clean'
if not os.path.exists(DATA_PATH + dataset_name):
_download_data(dataset_name)
# Read data
nx_graphs, y = _read_graphs(dataset_name)
# Preprocessing
y = np.array(y)[..., None]
y = OneHotEncoder(sparse=False, categories='auto').fit_transform(y)
# Get node attributes
try:
A, X_attr, _ = nx_to_numpy(nx_graphs, nf_keys=['attributes'], auto_pad=False)
X_attr = _normalize_node_features(X_attr, normalize_features)
except KeyError:
print('Featureless nodes')
A, X_attr, _ = nx_to_numpy(nx_graphs, auto_pad=False)
# Get clustering coefficients (always zscore norm)
clustering_coefficients = [np.array(list(nx.clustering(g).values()))[..., None] for g in nx_graphs]
clustering_coefficients = _normalize_node_features(clustering_coefficients, 'zscore')
# Get node degrees
node_degrees = np.array([np.sum(_, axis=-1, keepdims=True) for _ in A])
node_degrees = _normalize_node_features(node_degrees, 'zscore')
# Get node labels
try:
_, X_labs, _ = nx_to_numpy(nx_graphs, nf_keys=['label'], auto_pad=False)
X_labs = _normalize_node_features(X_labs, 'ohe')
except KeyError:
print('Label-less nodes')
X_labs = None
# Concatenate features
Xs = [node_degrees, clustering_coefficients]
if X_attr is not None:
Xs.append(X_attr)
if X_labs is not None:
Xs.append(X_labs)
X = [np.concatenate(x_, axis=-1) for x_ in zip(*Xs)]
X = np.array(X)
return A, X, y
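# A minimal usage sketch (hypothetical helper; 'PROTEINS' is assumed to be one
# of the names listed in AVAILABLE_DATASETS): load a benchmark and report the
# shapes of the returned objects.
def _example_load(dataset_name='PROTEINS'):
    A, X, y = load_data(dataset_name, normalize_features='zscore')
    print('{} graphs | first X: {} | y: {}'.format(len(A), X[0].shape, y.shape))
    return A, X, y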
def _read_graphs(dataset_name):
file_prefix = DATA_PATH + dataset_name + '/' + dataset_name
with open(file_prefix + "_graph_indicator.txt", "r") as f:
graph_indicator = [int(i) - 1 for i in list(f)]
# Nodes
num_graphs = max(graph_indicator)
node_indices = []
offset = []
c = 0
for i in range(num_graphs + 1):
offset.append(c)
c_i = graph_indicator.count(i)
node_indices.append((c, c + c_i - 1))
c += c_i
graph_list = []
vertex_list = []
for i in node_indices:
g = nx.Graph(directed=False)
vertex_list_g = []
for j in range(i[1] - i[0] + 1):
vertex_list_g.append(g.add_node(j))
graph_list.append(g)
vertex_list.append(vertex_list_g)
# Edges
with open(file_prefix + "_A.txt", "r") as f:
edges = [i.strip().split(',') for i in list(f)]
edges = [(int(e[0].strip()) - 1, int(e[1].strip()) - 1) for e in edges]
edge_indicator = []
edge_list = []
for e in edges:
g_id = graph_indicator[e[0]]
edge_indicator.append(g_id)
g = graph_list[g_id]
off = offset[g_id]
# Avoid multigraph
edge_list.append(g.add_edge(e[0] - off, e[1] - off))
# Node labels
if os.path.exists(file_prefix + "_node_labels.txt"):
with open(file_prefix + "_node_labels.txt", "r") as f:
node_labels = [int(i.strip()) for i in list(f)]
i = 0
for g in graph_list:
for n in g.nodes():
g.nodes[n]['label'] = node_labels[i]
i += 1
# Node Attributes
if os.path.exists(file_prefix + "_node_attributes.txt"):
with open(file_prefix + "_node_attributes.txt", "r") as f:
node_attributes = [map(float, i.strip().split(',')) for i in list(f)]
i = 0
for g in graph_list:
for n in g.nodes():
g.nodes[n]['attributes'] = list(node_attributes[i])
i += 1
# Classes
with open(file_prefix + "_graph_labels.txt", "r") as f:
classes = [int(float(i.strip())) for i in list(f)]
return graph_list, classes
def _download_data(dataset_name):
    print('Downloading ' + dataset_name + ' dataset.')
if dataset_name.endswith('_clean'):
true_name = dataset_name[:-6]
url = DATASET_CLEAN_URL
else:
true_name = dataset_name
url = DATASET_URL
data_url = '{}/{}.zip'.format(url, true_name)
req = requests.get(data_url)
os.makedirs(DATA_PATH, exist_ok=True)
with open(DATA_PATH + dataset_name + '.zip', 'wb') as out_file:
out_file.write(req.content)
with zipfile.ZipFile(DATA_PATH + dataset_name + '.zip', 'r') as zip_ref:
zip_ref.extractall(DATA_PATH + dataset_name + '/')
os.remove(DATA_PATH + dataset_name + '.zip')
subfolder = os.path.join(DATA_PATH, dataset_name, true_name)
parentfolder = os.path.join(DATA_PATH, dataset_name)
for filename in os.listdir(subfolder):
try:
suffix = filename.split(true_name)[1]
except IndexError:
# Probably the README
continue
shutil.move(
os.path.join(subfolder, filename),
os.path.join(parentfolder, dataset_name + suffix)
)
shutil.rmtree(subfolder)
def _normalize_node_features(feat_list, norm=None):
"""
Apply one-hot encoding or z-score to a list of node features
"""
if norm == 'ohe':
fnorm = OneHotEncoder(sparse=False, categories='auto')
elif norm == 'zscore':
fnorm = StandardScaler()
else:
return feat_list
fnorm.fit(np.vstack(feat_list))
feat_list = [fnorm.transform(feat_.astype(np.float32)) for feat_ in feat_list]
return feat_list
import os
import errno
import copy
import json
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from .ccd import CCD
from Stele.processing.processing_hsg.helper_functions import gauss
from .helper_functions import calc_laser_frequencies
np.set_printoptions(linewidth=500)
class HighSidebandCCD(CCD):
def __init__(
self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is
off by, should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
        self.parameters = string with all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
        self.proc_data = processed data that has been converted to frequency
                         vs counts/pulse
self.dark_stdev = this is not currently handled appropriately
self.addenda = the list of things that have been added to the file, in
                       the form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
        :param hsg_thing: file name for the file to be opened. OR the actual
hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition
GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(
hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
# Probably shouldn't shoehorn this in this way
self.parameters = parameter_dict.copy()
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
# This data won't have an error column, so attached a column of 1s
self.ccd_data = np.column_stack((
self.ccd_data, np.ones_like(self.ccd_data[:, 1])))
# Because turning into eV switches direction
self.ccd_data = np.flipud(self.ccd_data)
self.fname = "Live Data"
else:
raise Exception(
"I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV),
# signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84
# / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get(
"nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 *
# float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get(
"fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get(
"nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(
self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(
self.parameters["pulseEnergies"]["std"])
except Exception: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get(
"fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
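    # A minimal usage sketch (hypothetical file name and settings, not executed
    # here): load a spectrum, guess the sideband locations, then fit them.
    #     spec = HighSidebandCCD('hsg_spectrum.txt', spectrometer_offset=0.0)
    #     spec.guess_sidebands(cutoff=4.5, verbose=False)
    #     spec.fit_sidebands(plot=False)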
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD
object, then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a
HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'],
other.parameters['center_lambda']):
ret.proc_data[:, 1] = (
self.proc_data[:, 1] + other.proc_data[:, 1])
ret.proc_data[:, 2] = np.sqrt(
self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception(
'Source: Spectrum.__add__:\n' +
'These are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a
HighSidebandCCD object
:type other: int/float or HighSidebandCCD
        :return: Difference of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
# Need to choose a name
ret.proc_data[:, 1] = self.proc_data[:, 1] - other
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = (
self.proc_data[:, 1] - other.proc_data[:, 1])
ret.proc_data[:, 2] = np.sqrt(
self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception(
'Source: Spectrum.__sub__:\n' +
'These are not from the same grating settings')
return ret
def __repr__(self):
"""
This returns a string of filename, series, spectrometer step,
and the wavelengths of FEL and NIR lasers.
"""
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(
os.path.basename(self.fname), **self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
        This method returns a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq:
thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
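        # For illustration (assumed values): with nir_freq = 1.6000 eV and
        # thz_freq = 0.0022 eV, a sideband near 1.6220 eV gives
        # (1.6220 - 1.6000) / 0.0022 = 10, i.e. approximately the 10th order.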
return approx_order
# TODO: break the following definition into multiple parts, possibly files
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum
data value in the array and guessing what sideband it is. It creates
an array that includes this information. It will then step down,
initially by one THz frequency, then by twos after it hasn't found any
odd ones. It then goes up from the max and finds everything above in
much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know
what it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately.
# Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
        # Find the strongest sideband and its order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(
check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave,
check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum(
[i ** 2 for i in error[global_max - 2:global_max + 3]]))
/ (check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(
sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes
# one's noisy or something, so we keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order
# dependent because higher orders get wider, so we need to look at
# more. Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
            # Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound)
& (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(
check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound], [check_y[check_max_index]]
* 2, 'b', label="{} Box".format(order))
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
# get slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[
found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose:
print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index is False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index is False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index is False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
# This assumes that two floats won't be identical
check_max_index = np.argmax(check_y)
# To be able to break down check_y into eighths
octant = len(check_y) // 8
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:
check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound],
[check_y[check_max_index]] * 2, 'b', label=order)
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (
check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
# print "\tI found", order, "at index", found_index, "at freq", last_sb
if verbose:
print(
"\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb),
end=' ')
sb_freq_guess.append(
x_axis[found_index])
sb_amp_guess.append(
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
error_est = (
np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]]))
/ (check_max_area - (2 * octant + 1) * check_ave))
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
# self.sb_guess = [frequency guess, amplitude guess,
# relative error of amplitude] for each sideband.
self.sb_guess = np.array([np.asarray(sb_freq_guess),
np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
    # TODO: alter guess_sidebands and guess_sidebandsOld to share functions
def guess_sidebandsOld(
self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
05/24/18
Old code from Hunter's days (or nearly, I've already started cleaning
some stuff up). keeping it around in case I break too much stuff
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum
data value in the array and guessing what sideband it is. It creates
an array that includes this information. It will then step down,
initially by one THz frequency, then by twos after it hasn't found any
odd ones. It then goes up from the max and finds everything above in
much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know
what it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately.
# Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
error = np.array(self.proc_data[:, 2])
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
        # Find the strongest sideband and its order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(
check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave,
check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[
global_max - 2:global_max + 3]]))
/ (check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(
sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes
# one's noisy or something, so we'd keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order
# dependent because higher orders get wider, so we need to look at
# more. Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound)
& (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(
check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound], [check_y[check_max_index]]
* 2, 'b', label="{} Box".format(order))
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
# get slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[
found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose:
print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds is True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index is False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index is False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index is False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# To be able to break down check_y into eighths
octant = len(check_y) // 8
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:
check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot(
[lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot(
[lo_freq_bound, hi_freq_bound],
[check_y[check_max_index]] * 2, 'b', label=order)
plt.text(
(lo_freq_bound + hi_freq_bound) / 2,
check_y[check_max_index], order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = ((check_max_area - (2 * octant + 1) * check_ave)
/ check_stdev)
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave",
"check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
# This raises the barrier for odd sideband detection
if order % 2 == 1:
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
# print "\tI found", order, "at index", found_index, "at freq", last_sb
if verbose:
print(
"\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(
check_max_area - (2 * octant + 1) * check_ave)
error_est = (
np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]]))
/ (check_max_area - (2 * octant + 1) * check_ave))
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds is False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array(
[np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess,
# relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
"""
This takes self.sb_guess and fits to each maxima to get the details of
each sideband. It's really ugly, but it works. The error of the
sideband area is approximated from the data, not the curve fit. All
else is from the curve fit. Which is definitely underestimating the
error, but we don't care too much about those errors (at this point).
self.sb_guess = [frequency guess, amplitude guess, relative error of
amplitude] for each sideband.
Temporary stuff:
sb_fits = holder of the fitting results until all spectra have been fit
window = an integer that determines the "radius" of the fit window,
proportional to thz_freq.
Attributes created:
self.sb_results = the money maker. Column order:
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.),
Area error, Gauss linewidth (eV), Linewidth error (eV)]
            (column indices 0, 1, 2, 3, 4, 5, 6, respectively)
self.full_dict = a dictionary similar to sb_results, but now the keys
are the sideband orders. Column ordering is otherwise the same.
:param plot: Do you want to see the fits plotted with the data?
:type plot: bool
:param verbose: Do you want to see the details
AND the initial guess fits?
:type verbose: bool
:return: None
"""
# print "Trying to fit these"
sb_fits = []
if verbose:
print("=" * 15)
print()
print("Fitting CCD Sidebands")
print(os.path.basename(self.fname))
print()
print("=" * 15)
# pretty sure you want this up here so things don't break
# when no sidebands found
self.full_dict = {}
thz_freq = self.parameters["thz_freq"]
        # Adjust the fit window based on the sideband spacing. The 15's are based on
# empirical knowledge that for 540 GHz (2.23 meV), the best window size is 30
# and that it seems like the window size should grow slowly?
window = 15 + int(15 * thz_freq / 0.0022)
        # Have to do this because guess_sidebands doesn't output data in the
        # most optimized way
for elem, peakIdx in enumerate(self.sb_index):
if peakIdx < window:
data_temp = self.proc_data[:peakIdx + window, :]
elif (1600 - peakIdx) < window:
data_temp = self.proc_data[peakIdx - window:, :]
else:
data_temp = self.proc_data[
peakIdx - window:peakIdx + window, :]
# so the width guess gets wider as order goes up
width_guess = 0.0001 + 0.000001 * self.sb_list[elem]
p0 = np.array([self.sb_guess[elem, 0],
self.sb_guess[elem, 1] * width_guess,
width_guess,
0.1])
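            # Presumably the gauss helper takes (x, center, area, width, offset),
            # so p0 is [center-energy guess, integrated-area guess, width guess,
            # constant background]; the area guess is amplitude * width.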
# print "Let's fit this shit!"
if verbose:
# TODO: check that . operator can carry to next line
print(
"Fitting SB {}. Peak index: {}, {}th peak in spectra".
format(self.sb_list[elem], peakIdx, elem))
# print "\nnumber:", elem, num
# print "data_temp:", data_temp
# print "p0:", p0
print(' '*20 + "p0 = " + np.array_str(p0, precision=4))
# This is to disable plotting the guess function
# plot_guess = True
if verbose and plot:
plt.figure('CCD data')
linewidth = 3
x_vals = np.linspace(
data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *p0),
# I don't really know. Mostly
plt.gca().get_lines()[-1].get_color() + '--',
# just looked around at what functions
# matplotlib has...
linewidth=linewidth)
# to prevent weird mac issues with the matplotlib things?
except Exception:
plt.plot(
x_vals, gauss(x_vals, *p0), '--',
linewidth=linewidth)
else:
plt.plot(
x_vals, gauss(x_vals, *p0), '--',
linewidth=linewidth)
try:
# 11/1/16
# had to bump maxfev up to 2k since a sideband wasn't being fit
# Fix for sb 106
# 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
                # TODO: find new name for gauss parameter and correct code
coeff, var_list = curve_fit(
gauss, data_temp[:, 0], data_temp[:, 1],
p0=p0, maxfev=2000)
except Exception as e:
if verbose:
print("\tThe fit failed:")
print("\t\t", e)
print("\tFitting region: {}->{}".format(
peakIdx-window, peakIdx+window))
# print "I couldn't fit", elem
# print "It's sideband", num
# print "In file", self.fname
# print "because", e
# print "wanted to fit xindx", peakIdx, "+-", window
self.sb_list[elem] = None
# This will ensure the rest of the loop is not run without
# an actual fit.
continue
# The amplitude could be negative if the linewidth is negative
coeff[1] = abs(coeff[1])
# The linewidth shouldn't be negative
coeff[2] = abs(coeff[2])
if verbose:
print("\tFit successful: ", end=' ')
print("p = " + np.array_str(coeff, precision=4))
# print "coeffs:", coeff
# print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
if 10e-4 > coeff[2] > 10e-6:
try:
sb_fits.append(np.hstack((
self.sb_list[elem], coeff,
np.sqrt(np.diag(var_list)))))
except RuntimeWarning:
sb_fits.append(np.hstack((
self.sb_list[elem], coeff,
np.sqrt(np.abs(np.diag(var_list))))))
                # the var_list wasn't approximating the error well enough, even
                # when using sigma and absoluteSigma. self.sb_guess[elem, 2] is
                # the relative error as calculated by the guess_sidebands
                # method, and coeff[1] is the area from the fit. Therefore, the
                # product should be the absolute error of the integrated area
                # of the sideband. The other errors are still underestimated.
#
# 1/12/18 note: So it looks like what hunter did is calculate
# an error estimate for the strength/area by the quadrature sum
# of errors of the points in the peak
# (from like 813 in guess_sidebands:
# error_est = np.sqrt(sum([i ** 2 for i in error[
# found_index - 1:found_index + 2]])) / (
# Where the error is what comes from the CCD by averaging 4
# spectra. As far as I can tell, it doesn't currently pull in
# the dark counts or anything like that, except maybe
# indirectly since it'll cause the variations in the peaks
sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
if verbose:
print(
"\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
self.sb_guess[elem, 2],
coeff[1] * self.sb_guess[elem, 2]))
print()
# print "The rel. error guess is",
# self.sb_guess[elem, 2]
# print "The abs. error guess is",
# coeff[1] * self.sb_guess[elem, 2]
# The error from self.sb_guess[elem, 2] is a relative error
if plot and verbose:
plt.figure('CCD data')
linewidth = 5
x_vals = np.linspace(
data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *coeff),
plt.gca().get_lines()[-1].get_color() + '--',
# I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
linewidth=linewidth)
# to prevent weird mac issues with the matplotlib things?
except Exception:
plt.plot(
x_vals, gauss(x_vals, *coeff), '--',
linewidth=linewidth)
else:
plt.plot(
x_vals, gauss(x_vals, *coeff), '--',
linewidth=linewidth)
sb_fits_temp = np.asarray(sb_fits)
reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
# Reorder the list to put the error of the i-th parameter as the i+1th.
try:
sb_fits = sb_fits_temp[:, reorder]
# if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
except Exception:
raise RuntimeError("No sidebands to fit?")
# Going to label the appropriate row with the sideband
self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
sb_names = np.vstack(self.sb_list)
# Sort by SB order
sorter = np.argsort(sb_fits[:, 0])
self.sb_results = np.array(sb_fits[sorter, :7])
if verbose:
print("\tsb_results:")
print(
"\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).
format("SB", "Cen.En.", "", "Area", "", "Width", ""))
for line in self.sb_results:
print(
'\t\t[' + ("{:^5.0f}" + "{:<12.4g}"*(line.size-1)).format(
*line) + ']')
print('-'*19)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def infer_frequencies(
self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
"""
This guy tries to fit the results from fit_sidebands to a line to get
the relevant frequencies
:param nir_units: What units do you want this to output?
:type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
:param thz_units: What units do you want this to output for the THz?
:type thz_units: 'GHz', 'wavenumber', 'meV'
:param bad_points: How many more-positive order sidebands shall this
ignore?
:type bad_points: int
:return: freqNIR, freqTHz, the frequencies in the appropriate units
"""
# force same units for in dict
freqNIR, freqTHz = calc_laser_frequencies(
self, "wavenumber", "wavenumber", bad_points)
self.parameters["calculated NIR freq (cm-1)"] = "{}".format(
freqNIR, nir_units)
self.parameters["calculated THz freq (cm-1)"] = "{}".format(
freqTHz, freqTHz)
freqNIR, freqTHz = calc_laser_frequencies(
self, nir_units, thz_units, bad_points)
return freqNIR, freqTHz
def save_processing(
self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
        :param folder_str: The full name of the folder the file is saved in.
            The folder will be created if necessary
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the
self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when
marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
# But [:, 3] is already area?
ampli = np.array([temp[:, 3] / temp[:, 5]])
# (The old name was area)
# I think it must be amplitude
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
self.parameters['addenda'] = self.addenda
self.parameters['subtrahenda'] = self.subtrahenda
try:
parameter_str = json.dumps(
self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except Exception:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
# Make the number of lines constant so importing is easier
num_lines = parameter_str.count('#')
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = (
'\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.')
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = (
# TODO: ensure splitting lines with a + for concatenation works
'\nSideband,Center energy,error,Sideband strength,error,'
+ 'Linewidth,error,Amplitude')
origin_import_fits += '\norder,eV,,arb. u.,,meV,,arb. u.'
origin_import_fits += "\n{},,,{},,,".format(marker, marker)
fits_header = '#' + parameter_str + origin_import_fits
# print "DEBUG: in saving", folder_str, ",", spectra_fname
np.savetxt(
os.path.join(folder_str, spectra_fname), self.proc_data,
delimiter=',', header=spec_header, comments='', fmt='%0.6e')
np.savetxt(
os.path.join(folder_str, fit_fname), save_results,
delimiter=',', header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(
folder_str, spectra_fname)))
|
[
"os.mkdir",
"numpy.sum",
"numpy.argmax",
"numpy.array_str",
"json.dumps",
"numpy.argsort",
"numpy.isclose",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.diag",
"os.path.join",
"numpy.set_printoptions",
"numpy.std",
"numpy.append",
"numpy.linspace",
"copy.deepcopy",
"numpy.ones_like",
"os.path.basename",
"numpy.asarray",
"numpy.flipud",
"Stele.processing.processing_hsg.helper_functions.gauss",
"numpy.hstack",
"matplotlib.pyplot.text",
"scipy.optimize.curve_fit",
"numpy.vstack",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.sqrt"
] |
[((277, 311), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(500)'}), '(linewidth=500)\n', (296, 311), True, 'import numpy as np\n'), ((3686, 3709), 'numpy.array', 'np.array', (['self.ccd_data'], {}), '(self.ccd_data)\n', (3694, 3709), True, 'import numpy as np\n'), ((4968, 4980), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4976, 4980), True, 'import numpy as np\n'), ((5005, 5017), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5013, 5017), True, 'import numpy as np\n'), ((5070, 5082), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5078, 5082), True, 'import numpy as np\n'), ((6064, 6083), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (6077, 6083), False, 'import copy\n'), ((7849, 7868), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (7862, 7868), False, 'import copy\n'), ((12199, 12229), 'numpy.array', 'np.array', (['self.proc_data[:, 0]'], {}), '(self.proc_data[:, 0])\n', (12207, 12229), True, 'import numpy as np\n'), ((12247, 12277), 'numpy.array', 'np.array', (['self.proc_data[:, 1]'], {}), '(self.proc_data[:, 1])\n', (12255, 12277), True, 'import numpy as np\n'), ((12965, 12982), 'numpy.argmax', 'np.argmax', (['y_axis'], {}), '(y_axis)\n', (12974, 12982), True, 'import numpy as np\n'), ((13554, 13572), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (13563, 13572), True, 'import numpy as np\n'), ((13598, 13654), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 2:check_max_index + 3]'], {}), '(check_y[check_max_index - 2:check_max_index + 3])\n', (13604, 13654), True, 'import numpy as np\n'), ((13689, 13742), 'numpy.mean', 'np.mean', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (13696, 13742), True, 'import numpy as np\n'), ((13765, 13817), 'numpy.std', 'np.std', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (13771, 13817), True, 'import numpy as np\n'), ((28961, 28991), 'numpy.array', 'np.array', (['self.proc_data[:, 0]'], {}), '(self.proc_data[:, 0])\n', (28969, 28991), True, 'import numpy as np\n'), ((29009, 29039), 'numpy.array', 'np.array', (['self.proc_data[:, 1]'], {}), '(self.proc_data[:, 1])\n', (29017, 29039), True, 'import numpy as np\n'), ((29056, 29086), 'numpy.array', 'np.array', (['self.proc_data[:, 2]'], {}), '(self.proc_data[:, 2])\n', (29064, 29086), True, 'import numpy as np\n'), ((29539, 29556), 'numpy.argmax', 'np.argmax', (['y_axis'], {}), '(y_axis)\n', (29548, 29556), True, 'import numpy as np\n'), ((30128, 30146), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (30137, 30146), True, 'import numpy as np\n'), ((30172, 30228), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 2:check_max_index + 3]'], {}), '(check_y[check_max_index - 2:check_max_index + 3])\n', (30178, 30228), True, 'import numpy as np\n'), ((30263, 30316), 'numpy.mean', 'np.mean', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (30270, 30316), True, 'import numpy as np\n'), ((30339, 30391), 'numpy.std', 'np.std', (['check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]]'], {}), '(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n', (30345, 30391), True, 'import numpy as np\n'), ((53038, 53057), 'numpy.asarray', 'np.asarray', (['sb_fits'], {}), '(sb_fits)\n', (53048, 53057), True, 'import numpy as np\n'), ((53563, 53586), 'numpy.vstack', 'np.vstack', (['self.sb_list'], {}), '(self.sb_list)\n', (53572, 53586), True, 'import 
numpy as np\n'), ((53632, 53657), 'numpy.argsort', 'np.argsort', (['sb_fits[:, 0]'], {}), '(sb_fits[:, 0])\n', (53642, 53657), True, 'import numpy as np\n'), ((53684, 53713), 'numpy.array', 'np.array', (['sb_fits[sorter, :7]'], {}), '(sb_fits[sorter, :7])\n', (53692, 53713), True, 'import numpy as np\n'), ((57102, 57127), 'numpy.array', 'np.array', (['self.sb_results'], {}), '(self.sb_results)\n', (57110, 57127), True, 'import numpy as np\n'), ((57183, 57218), 'numpy.array', 'np.array', (['[temp[:, 3] / temp[:, 5]]'], {}), '([temp[:, 3] / temp[:, 5]])\n', (57191, 57218), True, 'import numpy as np\n'), ((57495, 57521), 'numpy.hstack', 'np.hstack', (['(temp, ampli.T)'], {}), '((temp, ampli.T))\n', (57504, 57521), True, 'import numpy as np\n'), ((6367, 6445), 'numpy.isclose', 'np.isclose', (["ret.parameters['center_lambda']", "other.parameters['center_lambda']"], {}), "(ret.parameters['center_lambda'], other.parameters['center_lambda'])\n", (6377, 6445), True, 'import numpy as np\n'), ((8202, 8256), 'numpy.isclose', 'np.isclose', (['ret.proc_data[0, 0]', 'other.proc_data[0, 0]'], {}), '(ret.proc_data[0, 0], other.proc_data[0, 0])\n', (8212, 8256), True, 'import numpy as np\n'), ((9193, 9221), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (9209, 9221), False, 'import os\n'), ((12311, 12341), 'numpy.array', 'np.array', (['self.proc_data[:, 2]'], {}), '(self.proc_data[:, 2])\n', (12319, 12341), True, 'import numpy as np\n'), ((16672, 16690), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (16681, 16690), True, 'import numpy as np\n'), ((16929, 16985), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 1:check_max_index + 2]'], {}), '(check_y[check_max_index - 1:check_max_index + 2])\n', (16935, 16985), True, 'import numpy as np\n'), ((17690, 17761), 'numpy.append', 'np.append', (['check_y[:check_max_index - 1]', 'check_y[check_max_index + 2:]'], {}), '(check_y[:check_max_index - 1], check_y[check_max_index + 2:])\n', (17699, 17761), True, 'import numpy as np\n'), ((22107, 22125), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (22116, 22125), True, 'import numpy as np\n'), ((22310, 22384), 'numpy.sum', 'np.sum', (['check_y[check_max_index - octant - 1:check_max_index + octant + 1]'], {}), '(check_y[check_max_index - octant - 1:check_max_index + octant + 1])\n', (22316, 22384), True, 'import numpy as np\n'), ((33253, 33271), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (33262, 33271), True, 'import numpy as np\n'), ((33510, 33566), 'numpy.sum', 'np.sum', (['check_y[check_max_index - 1:check_max_index + 2]'], {}), '(check_y[check_max_index - 1:check_max_index + 2])\n', (33516, 33566), True, 'import numpy as np\n'), ((34271, 34342), 'numpy.append', 'np.append', (['check_y[:check_max_index - 1]', 'check_y[check_max_index + 2:]'], {}), '(check_y[:check_max_index - 1], check_y[check_max_index + 2:])\n', (34280, 34342), True, 'import numpy as np\n'), ((38626, 38644), 'numpy.argmax', 'np.argmax', (['check_y'], {}), '(check_y)\n', (38635, 38644), True, 'import numpy as np\n'), ((38896, 38970), 'numpy.sum', 'np.sum', (['check_y[check_max_index - octant - 1:check_max_index + octant + 1]'], {}), '(check_y[check_max_index - octant - 1:check_max_index + octant + 1])\n', (38902, 38970), True, 'import numpy as np\n'), ((46294, 46388), 'numpy.array', 'np.array', (['[self.sb_guess[elem, 0], self.sb_guess[elem, 1] * width_guess, width_guess, 0.1\n ]'], {}), '([self.sb_guess[elem, 0], self.sb_guess[elem, 1] * width_guess,\n 
width_guess, 0.1])\n', (46302, 46388), True, 'import numpy as np\n'), ((54245, 54263), 'numpy.asarray', 'np.asarray', (['sb[1:]'], {}), '(sb[1:])\n', (54255, 54263), True, 'import numpy as np\n'), ((56936, 56956), 'os.mkdir', 'os.mkdir', (['folder_str'], {}), '(folder_str)\n', (56944, 56956), False, 'import os\n'), ((57867, 57944), 'json.dumps', 'json.dumps', (['self.parameters'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))\n", (57877, 57944), False, 'import json\n'), ((59144, 59183), 'os.path.join', 'os.path.join', (['folder_str', 'spectra_fname'], {}), '(folder_str, spectra_fname)\n', (59156, 59183), False, 'import os\n'), ((59306, 59341), 'os.path.join', 'os.path.join', (['folder_str', 'fit_fname'], {}), '(folder_str, fit_fname)\n', (59318, 59341), False, 'import os\n'), ((3071, 3090), 'numpy.array', 'np.array', (['hsg_thing'], {}), '(hsg_thing)\n', (3079, 3090), True, 'import numpy as np\n'), ((3432, 3456), 'numpy.flipud', 'np.flipud', (['self.ccd_data'], {}), '(self.ccd_data)\n', (3441, 3456), True, 'import numpy as np\n'), ((6617, 6680), 'numpy.sqrt', 'np.sqrt', (['(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)'], {}), '(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)\n', (6624, 6680), True, 'import numpy as np\n'), ((8402, 8465), 'numpy.sqrt', 'np.sqrt', (['(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)'], {}), '(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)\n', (8409, 8465), True, 'import numpy as np\n'), ((12054, 12082), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (12070, 12082), False, 'import os\n'), ((12492, 12512), 'numpy.ones_like', 'np.ones_like', (['x_axis'], {}), '(x_axis)\n', (12504, 12512), True, 'import numpy as np\n'), ((16345, 16406), 'numpy.where', 'np.where', (['((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))'], {}), '((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))\n', (16353, 16406), True, 'import numpy as np\n'), ((17053, 17075), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (17063, 17075), True, 'import matplotlib.pyplot as plt\n'), ((17092, 17157), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (17100, 17157), True, 'import matplotlib.pyplot as plt\n'), ((17195, 17260), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (17203, 17260), True, 'import matplotlib.pyplot as plt\n'), ((17463, 17541), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (17471, 17541), True, 'import matplotlib.pyplot as plt\n'), ((22477, 22499), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (22487, 22499), True, 'import matplotlib.pyplot as plt\n'), ((22516, 22581), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (22524, 22581), True, 'import matplotlib.pyplot as plt\n'), ((22619, 22684), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), 
"([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (22627, 22684), True, 'import matplotlib.pyplot as plt\n'), ((22722, 22816), 'matplotlib.pyplot.plot', 'plt.plot', (['[lo_freq_bound, hi_freq_bound]', '([check_y[check_max_index]] * 2)', '"""b"""'], {'label': 'order'}), "([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] * 2,\n 'b', label=order)\n", (22730, 22816), True, 'import matplotlib.pyplot as plt\n'), ((22870, 22948), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (22878, 22948), True, 'import matplotlib.pyplot as plt\n'), ((28816, 28844), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (28832, 28844), False, 'import os\n'), ((32926, 32987), 'numpy.where', 'np.where', (['((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))'], {}), '((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))\n', (32934, 32987), True, 'import numpy as np\n'), ((33634, 33656), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (33644, 33656), True, 'import matplotlib.pyplot as plt\n'), ((33673, 33738), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (33681, 33738), True, 'import matplotlib.pyplot as plt\n'), ((33776, 33841), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (33784, 33841), True, 'import matplotlib.pyplot as plt\n'), ((34044, 34122), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (34052, 34122), True, 'import matplotlib.pyplot as plt\n'), ((39063, 39085), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (39073, 39085), True, 'import matplotlib.pyplot as plt\n'), ((39102, 39167), 'matplotlib.pyplot.plot', 'plt.plot', (['([lo_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (39110, 39167), True, 'import matplotlib.pyplot as plt\n'), ((39205, 39270), 'matplotlib.pyplot.plot', 'plt.plot', (['([hi_freq_bound] * 2)', '[0, check_y[check_max_index]]', '"""b"""'], {}), "([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n", (39213, 39270), True, 'import matplotlib.pyplot as plt\n'), ((39308, 39402), 'matplotlib.pyplot.plot', 'plt.plot', (['[lo_freq_bound, hi_freq_bound]', '([check_y[check_max_index]] * 2)', '"""b"""'], {'label': 'order'}), "([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] * 2,\n 'b', label=order)\n", (39316, 39402), True, 'import matplotlib.pyplot as plt\n'), ((39456, 39534), 'matplotlib.pyplot.text', 'plt.text', (['((lo_freq_bound + hi_freq_bound) / 2)', 'check_y[check_max_index]', 'order'], {}), '((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index], order)\n', (39464, 39534), True, 'import matplotlib.pyplot as plt\n'), ((45142, 45170), 'os.path.basename', 'os.path.basename', (['self.fname'], {}), '(self.fname)\n', (45158, 45170), False, 'import os\n'), ((47106, 47128), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (47116, 47128), True, 'import 
matplotlib.pyplot as plt\n'), ((47184, 47239), 'numpy.linspace', 'np.linspace', (['data_temp[0, 0]', 'data_temp[-1, 0]'], {'num': '(500)'}), '(data_temp[0, 0], data_temp[-1, 0], num=500)\n', (47195, 47239), True, 'import numpy as np\n'), ((48449, 48519), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss', 'data_temp[:, 0]', 'data_temp[:, 1]'], {'p0': 'p0', 'maxfev': '(2000)'}), '(gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev=2000)\n', (48458, 48519), False, 'from scipy.optimize import curve_fit\n'), ((51997, 52019), 'matplotlib.pyplot.figure', 'plt.figure', (['"""CCD data"""'], {}), "('CCD data')\n", (52007, 52019), True, 'import matplotlib.pyplot as plt\n'), ((52075, 52130), 'numpy.linspace', 'np.linspace', (['data_temp[0, 0]', 'data_temp[-1, 0]'], {'num': '(500)'}), '(data_temp[0, 0], data_temp[-1, 0], num=500)\n', (52086, 52130), True, 'import numpy as np\n'), ((13258, 13283), 'numpy.zeros', 'np.zeros', (['(15 - global_max)'], {}), '(15 - global_max)\n', (13266, 13283), True, 'import numpy as np\n'), ((26735, 26760), 'numpy.asarray', 'np.asarray', (['sb_freq_guess'], {}), '(sb_freq_guess)\n', (26745, 26760), True, 'import numpy as np\n'), ((26796, 26820), 'numpy.asarray', 'np.asarray', (['sb_amp_guess'], {}), '(sb_amp_guess)\n', (26806, 26820), True, 'import numpy as np\n'), ((26856, 26880), 'numpy.asarray', 'np.asarray', (['sb_error_est'], {}), '(sb_error_est)\n', (26866, 26880), True, 'import numpy as np\n'), ((29832, 29857), 'numpy.zeros', 'np.zeros', (['(15 - global_max)'], {}), '(15 - global_max)\n', (29840, 29857), True, 'import numpy as np\n'), ((43164, 43189), 'numpy.asarray', 'np.asarray', (['sb_freq_guess'], {}), '(sb_freq_guess)\n', (43174, 43189), True, 'import numpy as np\n'), ((43191, 43215), 'numpy.asarray', 'np.asarray', (['sb_amp_guess'], {}), '(sb_amp_guess)\n', (43201, 43215), True, 'import numpy as np\n'), ((43233, 43257), 'numpy.asarray', 'np.asarray', (['sb_error_est'], {}), '(sb_error_est)\n', (43243, 43257), True, 'import numpy as np\n'), ((59504, 59543), 'os.path.join', 'os.path.join', (['folder_str', 'spectra_fname'], {}), '(folder_str, spectra_fname)\n', (59516, 59543), False, 'import os\n'), ((3311, 3344), 'numpy.ones_like', 'np.ones_like', (['self.ccd_data[:, 1]'], {}), '(self.ccd_data[:, 1])\n', (3323, 3344), True, 'import numpy as np\n'), ((13421, 13448), 'numpy.zeros', 'np.zeros', (['(global_max - 1585)'], {}), '(global_max - 1585)\n', (13429, 13448), True, 'import numpy as np\n'), ((29995, 30022), 'numpy.zeros', 'np.zeros', (['(global_max - 1585)'], {}), '(global_max - 1585)\n', (30003, 30022), True, 'import numpy as np\n'), ((46933, 46962), 'numpy.array_str', 'np.array_str', (['p0'], {'precision': '(4)'}), '(p0, precision=4)\n', (46945, 46962), True, 'import numpy as np\n'), ((48044, 48062), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*p0'], {}), '(x_vals, *p0)\n', (48049, 48062), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((49573, 49605), 'numpy.array_str', 'np.array_str', (['coeff'], {'precision': '(4)'}), '(coeff, precision=4)\n', (49585, 49605), True, 'import numpy as np\n'), ((52941, 52962), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*coeff'], {}), '(x_vals, *coeff)\n', (52946, 52962), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((23246, 23264), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (23255, 23264), True, 'import numpy as np\n'), ((23266, 23288), 'numpy.arange', 
'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (23275, 23288), True, 'import numpy as np\n'), ((23376, 23394), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (23385, 23394), True, 'import numpy as np\n'), ((23396, 23418), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (23405, 23418), True, 'import numpy as np\n'), ((39832, 39850), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (39841, 39850), True, 'import numpy as np\n'), ((39852, 39874), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (39861, 39874), True, 'import numpy as np\n'), ((39962, 39980), 'numpy.arange', 'np.arange', (['no_peak'], {}), '(no_peak)\n', (39971, 39980), True, 'import numpy as np\n'), ((39982, 40004), 'numpy.arange', 'np.arange', (['(-no_peak)', '(0)'], {}), '(-no_peak, 0)\n', (39991, 40004), True, 'import numpy as np\n'), ((47357, 47375), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*p0'], {}), '(x_vals, *p0)\n', (47362, 47375), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((52248, 52269), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*coeff'], {}), '(x_vals, *coeff)\n', (52253, 52269), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((47884, 47902), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*p0'], {}), '(x_vals, *p0)\n', (47889, 47902), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((52778, 52799), 'Stele.processing.processing_hsg.helper_functions.gauss', 'gauss', (['x_vals', '*coeff'], {}), '(x_vals, *coeff)\n', (52783, 52799), False, 'from Stele.processing.processing_hsg.helper_functions import gauss\n'), ((49920, 49937), 'numpy.diag', 'np.diag', (['var_list'], {}), '(var_list)\n', (49927, 49937), True, 'import numpy as np\n'), ((50118, 50135), 'numpy.diag', 'np.diag', (['var_list'], {}), '(var_list)\n', (50125, 50135), True, 'import numpy as np\n'), ((47473, 47482), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (47480, 47482), True, 'import matplotlib.pyplot as plt\n'), ((52304, 52313), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (52311, 52313), True, 'import matplotlib.pyplot as plt\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import os.path as osp
import argparse
import time
import numpy as np
from tqdm import tqdm
import json
import torch
import torch.backends.cudnn as cudnn
import cv2
import _init_paths
from _init_paths import get_path
from utils.utilitys import plot_keypoint, PreProcess, write, load_json
from config import cfg, update_config
from utils.transforms import *
from utils.inference import get_final_preds
import models
sys.path.pop(0)
pre_dir, cur_dir, chk_root, data_root, lib_root, output_root = get_path(__file__)
cfg_dir = pre_dir + '/experiments/coco/hrnet/'
model_dir = chk_root + 'hrnet/pose_coco/'
# Loading human detector model
sys.path.insert(0, lib_root)
from detector import load_model as yolo_model
from detector import yolo_human_det as yolo_det
from track.sort import Sort
sys.path.pop(0)
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, default=cfg_dir + 'w48_384x288_adam_lr1e-3.yaml',
help='experiment configure file name')
parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
help="Modify config options using the command-line")
parser.add_argument('--modelDir', type=str, default=model_dir + 'pose_hrnet_w48_384x288.pth',
help='The model directory')
parser.add_argument('--det-dim', type=int, default=416,
help='The input dimension of the detected image')
parser.add_argument('--thred-score', type=float, default=0.70,
help='The threshold of object Confidence')
parser.add_argument('-a', '--animation', action='store_true',
help='output animation')
parser.add_argument('-np', '--num-person', type=int, default=1,
help='The maximum number of estimated poses')
parser.add_argument("-v", "--video", type=str, default='camera',
help="input video file name")
args = parser.parse_args()
return args
def reset_config(args):
update_config(cfg, args)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# load model
def model_load(config):
print('Loading HRNet model ...')
# lib/models/pose_hrnet.py:get_pose_net
model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(config, is_train=False)
if torch.cuda.is_available():
model = model.cuda()
state_dict = torch.load(config.OUTPUT_DIR)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k # remove module.
# print(name,'\t')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
print('HRNet network successfully loaded')
return model
def load_default_model():
args = parse_args()
reset_config(args)
print('Loading HRNet model ...')
# lib/models/pose_hrnet.py:get_pose_net
model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)
if torch.cuda.is_available():
model = model.cuda()
state_dict = torch.load(cfg.OUTPUT_DIR)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k # remove module.
# print(name,'\t')
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
print('HRNet network successfully loaded')
return model
def gen_img_kpts(image, human_model, pose_model, human_sort, det_dim=416, num_peroson=2):
"""
:param image: Input image matrix instead of image path
:param human_model: The YOLOv3 model
:param pose_model: The HRNet model
:param human_sort: Input initialized sort tracker
:param det_dim: The input dimension of YOLOv3. [160, 320, 416]
:param num_peroson: The number of tracked people
:return:
kpts: (M, N, 2)
scores: (M, N, 1)
bboxs_track: (x1, y1, x2, y2, ID)
human_sort: Updated human_sort
"""
args = parse_args()
reset_config(args)
thred_score = args.thred_score
bboxs, bbox_scores = yolo_det(image, human_model, reso=det_dim, confidence=thred_score)
if bboxs is None or not bboxs.any():
return None, None, None
# Using Sort to track people
# people_track: Num_bbox × [x1, y1, x2, y2, ID]
people_track = human_sort.update(bboxs)
    # Keep the last num_peroson tracked people (the track ID stays in the last column)
if people_track.shape[0] == 1:
bboxs_track = people_track[-1].reshape(1, 5)
else:
people_track_ = people_track[-num_peroson:].reshape(num_peroson, 5)
bboxs_track = people_track_[::-1]
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(image, bboxs_track, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17, 1), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score
human_indexes = []
for i in range(len(bboxs_track)):
human_indexes.append(bboxs_track[i, -1])
return kpts, scores, human_indexes
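# Hedged usage sketch (not part of the original file): single-frame keypoint
# estimation with the helpers above; the image path is illustrative.
#   human_model = yolo_model(inp_dim=416)
#   pose_model = load_default_model()
#   human_sort = Sort()
#   frame = cv2.imread('frame.jpg')
#   kpts, scores, ids = gen_img_kpts(frame, human_model, pose_model, human_sort,
#                                    det_dim=416, num_peroson=1)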
def gen_video_kpts(video, det_dim=416, num_peroson=1, gen_output=False):
# Updating configuration
args = parse_args()
reset_config(args)
cap = cv2.VideoCapture(video)
assert cap.isOpened(), 'Cannot capture source'
# Loading detector and pose model, initialize sort for track
human_model = yolo_model(inp_dim=det_dim)
pose_model = model_load(cfg)
people_sort = Sort()
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# video_length = 1000
# collect keypoints coordinate
print('Generating 2D pose ...')
kpts_result = []
scores_result = []
for i in tqdm(range(video_length)):
ret, frame = cap.read()
if not ret:
continue
# start = time.time()
try:
bboxs, scores = yolo_det(frame, human_model, reso=det_dim, confidence=args.thred_score)
if bboxs is None or not bboxs.any():
print('No person detected!')
# print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
continue
# Using Sort to track people
people_track = people_sort.update(bboxs)
# Track the first two people in the video and remove the ID
if people_track.shape[0] == 1:
people_track_ = people_track[-1, :-1].reshape(1, 4)
elif people_track.shape[0] >= 2:
people_track_ = people_track[-num_peroson:, :-1].reshape(num_peroson, 4)
people_track_ = people_track_[::-1]
else:
continue
track_bboxs = []
for bbox in people_track_:
bbox = [round(i, 2) for i in list(bbox)]
track_bboxs.append(bbox)
except Exception as e:
print(e)
exit(0)
continue
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(frame, track_bboxs, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
if gen_output:
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score.squeeze()
kpts_result.append(kpts)
scores_result.append(scores)
else:
index_bboxs = [bbox + [i] for i, bbox in enumerate(track_bboxs)]
list(map(lambda x: write(x, frame), index_bboxs))
plot_keypoint(frame, preds, maxvals, 0.3)
# print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
if gen_output:
keypoints = np.array(kpts_result)
scores = np.array(scores_result)
keypoints = keypoints.transpose(1, 0, 2, 3) # (T, M, N, 2) --> (M, T, N, 2)
scores = scores.transpose(1, 0, 2) # (T, M, N) --> (M, T, N)
return keypoints, scores
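# Hedged usage sketch (not part of the original file): generate 2D keypoints for
# a whole clip and store them; the file names are illustrative.
#   keypoints, scores = gen_video_kpts('input.mp4', det_dim=416, num_peroson=1,
#                                      gen_output=True)
#   np.savez_compressed('keypoints.npz', keypoints=keypoints, scores=scores)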
def generate_ntu_kpts_json(video_path, kpts_file):
args = parse_args()
reset_config(args)
# Loading detector and pose model, initialize sort for track
human_model = yolo_model()
pose_model = model_load(cfg)
people_sort = Sort()
with torch.no_grad():
cap = cv2.VideoCapture(video_path)
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# collect keypoints information
kpts_info = dict()
data = []
for i in tqdm(range(video_length)):
frame_info = {'frame_index': i + 1}
ret, frame = cap.read()
try:
bboxs, scores = yolo_det(frame, human_model, confidence=args.thred_score)
if bboxs is None or not bboxs.any():
print('No person detected!')
continue
# Using Sort to track people
people_track = people_sort.update(bboxs)
# Track the first two people in the video and remove the ID
if people_track.shape[0] == 1:
people_track_ = people_track[-1, :-1].reshape(1, 4)
elif people_track.shape[0] >= 2:
people_track_ = people_track[-2:, :-1].reshape(2, 4)
people_track_ = people_track_[::-1]
else:
skeleton = {'skeleton': [{'pose': [], 'score': [], 'bbox': []}]}
frame_info.update(skeleton)
data.append(frame_info)
continue
track_bboxs = []
for bbox in people_track_:
bbox = [round(i, 3) for i in list(bbox)]
track_bboxs.append(bbox)
except Exception as e:
print(e)
continue
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(frame, bboxs, cfg, args.num_person)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
            output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center),
np.asarray(scale))
skeleton = []
for num, bbox in enumerate(track_bboxs):
pose = preds[num].tolist()
score = maxvals[num].tolist()
pose = round_list(pose)
score = round_list(score)
one_skeleton = {'pose': pose,
'score': score,
'bbox': bbox}
skeleton.append(one_skeleton)
frame_info.update({'skeleton': skeleton})
data.append(frame_info)
kpts_info.update({'data': data})
with open(kpts_file, 'w') as fw:
json.dump(kpts_info, fw)
print('Finishing!')
def round_list(input_list, decimals=3):
dim = len(input_list)
for i in range(dim):
for j in range(len(input_list[i])):
input_list[i][j] = round(input_list[i][j], decimals)
return input_list
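# Hedged usage sketch (not part of the original file): dump per-frame skeletons
# of one clip to JSON; the paths are illustrative.
#   generate_ntu_kpts_json('clip.avi', 'clip_kpts.json')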
|
[
"sys.path.pop",
"argparse.ArgumentParser",
"utils.utilitys.plot_keypoint",
"config.update_config",
"cv2.imshow",
"detector.load_model",
"torch.no_grad",
"detector.yolo_human_det",
"torch.load",
"json.dump",
"cv2.waitKey",
"numpy.asarray",
"torch.cuda.is_available",
"track.sort.Sort",
"utils.utilitys.write",
"_init_paths.get_path",
"numpy.zeros",
"sys.path.insert",
"utils.utilitys.PreProcess",
"cv2.VideoCapture",
"numpy.array",
"collections.OrderedDict"
] |
[((546, 561), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (558, 561), False, 'import sys\n'), ((626, 644), '_init_paths.get_path', 'get_path', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'from _init_paths import get_path\n'), ((766, 794), 'sys.path.insert', 'sys.path.insert', (['(0)', 'lib_root'], {}), '(0, lib_root)\n', (781, 794), False, 'import sys\n'), ((917, 932), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (929, 932), False, 'import sys\n'), ((966, 1028), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train keypoints network"""'}), "(description='Train keypoints network')\n", (989, 1028), False, 'import argparse\n'), ((2220, 2244), 'config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (2233, 2244), False, 'from config import cfg, update_config\n'), ((2651, 2676), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2674, 2676), False, 'import torch\n'), ((2725, 2754), 'torch.load', 'torch.load', (['config.OUTPUT_DIR'], {}), '(config.OUTPUT_DIR)\n', (2735, 2754), False, 'import torch\n'), ((2816, 2829), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2827, 2829), False, 'from collections import OrderedDict\n'), ((3333, 3358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3356, 3358), False, 'import torch\n'), ((3407, 3433), 'torch.load', 'torch.load', (['cfg.OUTPUT_DIR'], {}), '(cfg.OUTPUT_DIR)\n', (3417, 3433), False, 'import torch\n'), ((3495, 3508), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3506, 3508), False, 'from collections import OrderedDict\n'), ((4456, 4522), 'detector.yolo_human_det', 'yolo_det', (['image', 'human_model'], {'reso': 'det_dim', 'confidence': 'thred_score'}), '(image, human_model, reso=det_dim, confidence=thred_score)\n', (4464, 4522), True, 'from detector import yolo_human_det as yolo_det\n'), ((6043, 6066), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (6059, 6066), False, 'import cv2\n'), ((6202, 6229), 'detector.load_model', 'yolo_model', ([], {'inp_dim': 'det_dim'}), '(inp_dim=det_dim)\n', (6212, 6229), True, 'from detector import load_model as yolo_model\n'), ((6281, 6287), 'track.sort.Sort', 'Sort', ([], {}), '()\n', (6285, 6287), False, 'from track.sort import Sort\n'), ((9519, 9531), 'detector.load_model', 'yolo_model', ([], {}), '()\n', (9529, 9531), True, 'from detector import load_model as yolo_model\n'), ((9583, 9589), 'track.sort.Sort', 'Sort', ([], {}), '()\n', (9587, 9589), False, 'from track.sort import Sort\n'), ((5018, 5033), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5031, 5033), False, 'import torch\n'), ((5117, 5165), 'utils.utilitys.PreProcess', 'PreProcess', (['image', 'bboxs_track', 'cfg', 'num_peroson'], {}), '(image, bboxs_track, cfg, num_peroson)\n', (5127, 5165), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((5216, 5241), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5239, 5241), False, 'import torch\n'), ((5475, 5523), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17, 2)'], {'dtype': 'np.float32'}), '((num_peroson, 17, 2), dtype=np.float32)\n', (5483, 5523), True, 'import numpy as np\n'), ((5541, 5589), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17, 1)'], {'dtype': 'np.float32'}), '((num_peroson, 17, 1), dtype=np.float32)\n', (5549, 5589), True, 'import numpy as np\n'), ((9083, 9104), 'numpy.array', 'np.array', (['kpts_result'], {}), '(kpts_result)\n', (9091, 
9104), True, 'import numpy as np\n'), ((9122, 9145), 'numpy.array', 'np.array', (['scores_result'], {}), '(scores_result)\n', (9130, 9145), True, 'import numpy as np\n'), ((9600, 9615), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9613, 9615), False, 'import torch\n'), ((9631, 9659), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9647, 9659), False, 'import cv2\n'), ((5420, 5438), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (5430, 5438), True, 'import numpy as np\n'), ((5440, 5457), 'numpy.asarray', 'np.asarray', (['scale'], {}), '(scale)\n', (5450, 5457), True, 'import numpy as np\n'), ((6674, 6745), 'detector.yolo_human_det', 'yolo_det', (['frame', 'human_model'], {'reso': 'det_dim', 'confidence': 'args.thred_score'}), '(frame, human_model, reso=det_dim, confidence=args.thred_score)\n', (6682, 6745), True, 'from detector import yolo_human_det as yolo_det\n'), ((7738, 7753), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7751, 7753), False, 'import torch\n'), ((7845, 7893), 'utils.utilitys.PreProcess', 'PreProcess', (['frame', 'track_bboxs', 'cfg', 'num_peroson'], {}), '(frame, track_bboxs, cfg, num_peroson)\n', (7855, 7893), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((7952, 7977), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7975, 7977), False, 'import torch\n'), ((8254, 8302), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17, 2)'], {'dtype': 'np.float32'}), '((num_peroson, 17, 2), dtype=np.float32)\n', (8262, 8302), True, 'import numpy as np\n'), ((8324, 8369), 'numpy.zeros', 'np.zeros', (['(num_peroson, 17)'], {'dtype': 'np.float32'}), '((num_peroson, 17), dtype=np.float32)\n', (8332, 8369), True, 'import numpy as np\n'), ((8782, 8823), 'utils.utilitys.plot_keypoint', 'plot_keypoint', (['frame', 'preds', 'maxvals', '(0.3)'], {}), '(frame, preds, maxvals, 0.3)\n', (8795, 8823), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((8922, 8948), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (8932, 8948), False, 'import cv2\n'), ((8967, 8981), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8978, 8981), False, 'import cv2\n'), ((11242, 11288), 'utils.utilitys.PreProcess', 'PreProcess', (['frame', 'bboxs', 'cfg', 'args.num_person'], {}), '(frame, bboxs, cfg, args.num_person)\n', (11252, 11288), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n'), ((11346, 11371), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11369, 11371), False, 'import torch\n'), ((12280, 12304), 'json.dump', 'json.dump', (['kpts_info', 'fw'], {}), '(kpts_info, fw)\n', (12289, 12304), False, 'import json\n'), ((8172, 8190), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (8182, 8190), True, 'import numpy as np\n'), ((8192, 8209), 'numpy.asarray', 'np.asarray', (['scale'], {}), '(scale)\n', (8202, 8209), True, 'import numpy as np\n'), ((9987, 10044), 'detector.yolo_human_det', 'yolo_det', (['frame', 'human_model'], {'confidence': 'args.thred_score'}), '(frame, human_model, confidence=args.thred_score)\n', (9995, 10044), True, 'from detector import yolo_human_det as yolo_det\n'), ((11572, 11590), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (11582, 11590), True, 'import numpy as np\n'), ((11637, 11654), 'numpy.asarray', 'np.asarray', (['scale'], {}), '(scale)\n', (11647, 11654), True, 'import numpy as np\n'), ((8739, 8754), 
'utils.utilitys.write', 'write', (['x', 'frame'], {}), '(x, frame)\n', (8744, 8754), False, 'from utils.utilitys import plot_keypoint, PreProcess, write, load_json\n')]
|
import tensorflow as tf
import numpy as np
import gpflow
from gpflow.base import Parameter
from gpflow.utilities import positive
class ReLUKernel(gpflow.kernels.Kernel):
"""
Kernel such that the mean 0 GP with the corresponding covariance function is equal in distribution
    to an infinitely wide BNN prior with mean 0 and "Neal scaling" on the weights. The recursive equations used
are from https://arxiv.org/abs/1711.00165.
"""
def __init__(self, prior_weight_std, prior_bias_std, depth):
"""
Args:
            prior_weight_std: non-negative float or tuple of length depth+1 of
                floats; the corresponding BNN weights have prior standard
                deviation prior_weight_std / sqrt(num_inputs).
                If a tuple, a separate standard deviation is used per layer.
            prior_bias_std: non-negative float or tuple of length depth+1 of
                floats; the corresponding BNN biases have prior standard
                deviation prior_bias_std.
                If a tuple, a separate standard deviation is used per layer.
depth: int, number of hidden layers in corresponding BNN
"""
super(ReLUKernel, self).__init__()
if isinstance(prior_weight_std, float) or isinstance(prior_weight_std, int):
prior_weight_std = prior_weight_std * np.ones(depth + 1)
if isinstance(prior_bias_std, float) or isinstance(prior_bias_std, int):
prior_bias_std = prior_bias_std * np.ones(depth + 1)
assert len(prior_weight_std) == len(prior_bias_std) == depth + 1
self.weight_variance = Parameter(prior_weight_std ** 2, transform=positive(1e-5))
self.bias_variance = Parameter(prior_bias_std ** 2, transform=positive(1e-5))
self.depth = depth
def K(self, X, X2=None):
"""
Computes covariance matrix between k(X,X2), if X2 is None computes covariance matrix k(X,X)
Args:
X: [N,D] float
X2: None or [N,D] float, if None X2=X
Returns: [N,N] matrix k(X,X2)
"""
D = X.shape[1] # input dimension
jitter = 1e-15 # jitter for arccosine for numerical reasons
if X2 is None: # compute symmetric version
X2 = X
# base case for recursive formula
Ki = self.bias_variance[0] + self.weight_variance[0] * tf.matmul(X, X2, transpose_b=True) / D
KiX = self.bias_variance[0] + self.weight_variance[0] * tf.reduce_sum(tf.square(X), axis=1) / D
KiX2 = self.bias_variance[0] + self.weight_variance[0] * tf.reduce_sum(tf.square(X2), axis=1) / D
# flattened recursion
for i in range(1, self.depth + 1):
sqrt_term = tf.sqrt(KiX[:, None] * KiX2[None, :]) # outer product of norms
theta = tf.acos(jitter + (1 - 2 * jitter) * Ki/sqrt_term) # angle, 'squash' for numerical stability
J_term = tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
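            # Layer recursion (arc-cosine / ReLU kernel form):
            #   K_i(x, x') = bias_var[i]
            #       + weight_var[i] / (2*pi) * sqrt(K_{i-1}(x, x) * K_{i-1}(x', x'))
            #       * (sin(theta) + (pi - theta) * cos(theta))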
# update kernel matrices
Ki = self.bias_variance[i] + self.weight_variance[i] / (2 * np.pi) * sqrt_term * J_term
if i != self.depth: # these are only needed for the recursion, don't update on last call
KiX = self.bias_variance[i] + KiX * self.weight_variance[i] / 2.
KiX2 = self.bias_variance[i] + KiX2 * self.weight_variance[i] / 2.
return Ki
def K_diag(self, X):
"""
Computes diagonal entries of k(X,X)
Args:
X: [N,D] float
Returns: [N] float. diag(k(X,X))
"""
D = X.shape[1] # input dimension
KiX = self.bias_variance[0] + self.weight_variance[0] * tf.reduce_sum(tf.square(X), axis=1) / D
for i in range(1, self.depth + 1):
KiX = self.bias_variance[i] + KiX * self.weight_variance[i] / 2.
return KiX
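# Hedged usage sketch (not part of the original module): fit an exact GP
# regression model with this kernel. The data, depth, and prior scales below
# are illustrative values, not taken from the original code.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(20, 3))
    Y = np.sin(X[:, :1]) + 0.1 * rng.normal(size=(20, 1))
    kernel = ReLUKernel(prior_weight_std=1.0, prior_bias_std=0.5, depth=2)
    model = gpflow.models.GPR(data=(X, Y), kernel=kernel)
    gpflow.optimizers.Scipy().minimize(model.training_loss, model.trainable_variables)
    mean, var = model.predict_f(rng.normal(size=(5, 3)))
    print(mean.numpy().squeeze(), var.numpy().squeeze())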
|
[
"tensorflow.sin",
"gpflow.utilities.positive",
"numpy.ones",
"tensorflow.acos",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.sqrt",
"tensorflow.cos"
] |
[((2614, 2651), 'tensorflow.sqrt', 'tf.sqrt', (['(KiX[:, None] * KiX2[None, :])'], {}), '(KiX[:, None] * KiX2[None, :])\n', (2621, 2651), True, 'import tensorflow as tf\n'), ((2698, 2749), 'tensorflow.acos', 'tf.acos', (['(jitter + (1 - 2 * jitter) * Ki / sqrt_term)'], {}), '(jitter + (1 - 2 * jitter) * Ki / sqrt_term)\n', (2705, 2749), True, 'import tensorflow as tf\n'), ((1252, 1270), 'numpy.ones', 'np.ones', (['(depth + 1)'], {}), '(depth + 1)\n', (1259, 1270), True, 'import numpy as np\n'), ((1398, 1416), 'numpy.ones', 'np.ones', (['(depth + 1)'], {}), '(depth + 1)\n', (1405, 1416), True, 'import numpy as np\n'), ((1564, 1579), 'gpflow.utilities.positive', 'positive', (['(1e-05)'], {}), '(1e-05)\n', (1572, 1579), False, 'from gpflow.utilities import positive\n'), ((1650, 1665), 'gpflow.utilities.positive', 'positive', (['(1e-05)'], {}), '(1e-05)\n', (1658, 1665), False, 'from gpflow.utilities import positive\n'), ((2812, 2825), 'tensorflow.sin', 'tf.sin', (['theta'], {}), '(theta)\n', (2818, 2825), True, 'import tensorflow as tf\n'), ((2267, 2301), 'tensorflow.matmul', 'tf.matmul', (['X', 'X2'], {'transpose_b': '(True)'}), '(X, X2, transpose_b=True)\n', (2276, 2301), True, 'import tensorflow as tf\n'), ((2846, 2859), 'tensorflow.cos', 'tf.cos', (['theta'], {}), '(theta)\n', (2852, 2859), True, 'import tensorflow as tf\n'), ((2384, 2396), 'tensorflow.square', 'tf.square', (['X'], {}), '(X)\n', (2393, 2396), True, 'import tensorflow as tf\n'), ((2489, 2502), 'tensorflow.square', 'tf.square', (['X2'], {}), '(X2)\n', (2498, 2502), True, 'import tensorflow as tf\n'), ((3580, 3592), 'tensorflow.square', 'tf.square', (['X'], {}), '(X)\n', (3589, 3592), True, 'import tensorflow as tf\n')]
|
def dir_name(config, method):
if config.game.kind == "Breakthrough":
return "{}-breakthrough-{}".format(method, config.game.size)
elif config.game.kind == "Gym":
return "{}-gym-{}".format(method, config.game.name)
else:
print("Unknown game in config file.")
exit(-1)
def get_board_shape(config):
if config.game.kind == "Breakthrough":
return (config.game.history, config.game.size, config.game.size, 3)
elif config.game.kind == "Gym":
if config.game.name == "Breakout-v0":
return (config.game.history, 96, 96, 3)
else:
print("Gym not implemented for this game.")
exit(-1)
else:
print("Unknown game in config file.")
exit(-1)
def get_action_shape(config):
if config.game.kind == "Breakthrough":
return (config.game.size, config.game.size, 3)
elif config.game.kind == "Gym":
if config.game.name == "Breakout-v0":
return (4,)
else:
print("Gym not implemented for this game.")
exit(-1)
else:
print("Unknown game in config file.")
exit(-1)
import numpy as np
# scalar to categorical transformation.
def value_to_support(v, support_size):
# invertible transformation
scaled = np.sign(v) * ((np.sqrt(np.abs(v)+1)-1)) + 0.001*v
# clamp to support
clamped = np.clip(scaled, -support_size, support_size)
v1 = np.floor(clamped)
p1 = 1 - (clamped - v1)
v2 = v1 + 1
p2 = 1 - p1
result = np.zeros(shape=(support_size*2+1,))
result[int(v1) + support_size] = p1
if int(v2) + support_size < support_size*2+1:
result[int(v2) + support_size] = p2
return result
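# Hedged sketch (not part of the original file): the inverse of value_to_support.
# Take the expectation over the categorical support, then undo the invertible
# scaling above (standard MuZero form); the function name is illustrative.
def support_to_value(probs, support_size, eps=0.001):
    bins = np.arange(-support_size, support_size + 1)
    scaled = np.sum(probs * bins)
    # invert scaled = sign(v) * (sqrt(|v| + 1) - 1) + eps * v
    return np.sign(scaled) * (
        ((np.sqrt(1 + 4 * eps * (np.abs(scaled) + 1 + eps)) - 1) / (2 * eps)) ** 2 - 1)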
from tensorflow.keras import losses
def mu_loss_unrolled_cce(config):
def loss(y_true, y_pred):
policy_loss = 0.
for i in range(config.mu.unroll_steps):
policy_loss += losses.categorical_crossentropy(
y_true[:, i], y_pred[:, i]) / config.mu.unroll_steps
return policy_loss
return loss
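# Hedged usage sketch (not part of the original file): compile a Keras model with
# the unrolled policy loss; `model` and `config` are assumed to already exist.
#   model.compile(optimizer='adam', loss=mu_loss_unrolled_cce(config))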
def get_support_shape(x):
return (x or 0)*2+1
"""
## GAME SETTINGS, make sure this is coherent with the generator and evaluator
GAME = "breakthrough"
if GAME == "breakthrough":
BT_K = 5
HISTORY_LENGTH = 2
BOARD_SHAPE = (HISTORY_LENGTH, BT_K, BT_K, 3)
ACTION_PLANES = 3
ACTION_SHAPE = (BT_K, BT_K, ACTION_PLANES)
HIDDEN_PLANES = 16
HIDDEN_SHAPE = (BT_K, BT_K, HIDDEN_PLANES)
SUPPORT_SIZE = 1
elif GAME == "atari":
HISTORY_LENGTH = 8
BOARD_SHAPE = (HISTORY_LENGTH, 96, 96, 3)
ACTION_PLANES = 4 # breakout
ACTION_SHAPE = (ACTION_PLANES, )
HIDDEN_PLANES = 16
HIDDEN_SHAPE = (6, 6, HIDDEN_PLANES)
SUPPORT_SIZE = 300
SUPPORT_SHAPE = 2*SUPPORT_SIZE+1
# MUZERO SPECIFIC
N_UNROLL_STEPS = 5
N_TD_STEPS = 300
DISCOUNT = 0.997
WEIGHT_DECAY = 1e-4
REPLAY_BUFFER_SIZE = 5000 # SAVE THE LAST 5k GAMES
EPOCH_SIZE = 5*REPLAY_BUFFER_SIZE
BATCH_SIZE = 512
N_EPOCH = 50000
SAVE_REPLAY_BUFFER_FREQ = 64 # backup replay buffer every _ games
CHECKPOINT_FREQ = 5*EPOCH_SIZE # save model
EVALUATION_FREQ = 5*EPOCH_SIZE # evaluate model
"""
|
[
"numpy.abs",
"numpy.floor",
"numpy.zeros",
"numpy.clip",
"tensorflow.keras.losses.categorical_crossentropy",
"numpy.sign"
] |
[((1394, 1438), 'numpy.clip', 'np.clip', (['scaled', '(-support_size)', 'support_size'], {}), '(scaled, -support_size, support_size)\n', (1401, 1438), True, 'import numpy as np\n'), ((1449, 1466), 'numpy.floor', 'np.floor', (['clamped'], {}), '(clamped)\n', (1457, 1466), True, 'import numpy as np\n'), ((1541, 1580), 'numpy.zeros', 'np.zeros', ([], {'shape': '(support_size * 2 + 1,)'}), '(shape=(support_size * 2 + 1,))\n', (1549, 1580), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.sign', 'np.sign', (['v'], {}), '(v)\n', (1314, 1317), True, 'import numpy as np\n'), ((2050, 2109), 'tensorflow.keras.losses.categorical_crossentropy', 'losses.categorical_crossentropy', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (2081, 2109), False, 'from tensorflow.keras import losses\n'), ((1330, 1339), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (1336, 1339), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
Python3 class to work with Aravis/GenICam cameras, subclass of sdss-basecam.
.. module:: araviscam
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import sys
import math
import asyncio
import numpy
import astropy
from basecam.mixins import ImageAreaMixIn
from basecam import (
CameraSystem,
BaseCamera,
CameraEvent,
CameraConnectionError,
models,
ExposureError,
)
from lvmcam.actor import modules
# Since the aravis wrapper for GenICam cameras (such as the Blackfly)
# is using glib2 GObjects to represent cameras and streams, the
# PyGObject module allows to call the C functions of aravis in python.
# https://pygobject.readthedocs.io/en/latest/
from lvmcam.araviscam.aravis import Aravis
import basecam.models.card as card
from lvmcam.actor.commands import expose
# https://pypi.org/project/sdss-basecam/
# https://github.com/sdss/basecam/
# from sdsstools import read_yaml_file
__all__ = ["BlackflyCameraSystem", "BlackflyCamera", "BlackflyImageAreaMixIn"]
class BlackflyCameraSystem(CameraSystem):
"""A collection of GenICam cameras, possibly online
:param camera_class : `.BaseCamera` subclass
The subclass of `.BaseCamera` to use with this camera system.
:param camera_config :
A dictionary with the configuration parameters for the multiple
cameras that can be present in the system, or the path to a YAML file.
Refer to the documentation for details on the accepted format.
:type camera_config : dict or path
:param include : List of camera UIDs that can be connected.
:type include : list
:param exclude : list
List of camera UIDs that will be ignored.
:param logger : ~logging.Logger
The logger instance to use. If `None`, a new logger will be created.
:param log_header : A string to be prefixed to each message logged.
:type log_header : str
:param log_file : The path to which to log.
:type log_file : str
:param verbose : Whether to log to stdout.
:type verbose : bool
    :param ip_list: A list of IP addresses to be checked/pinged.
:type ip_list: List of strings.
"""
__version__ = "0.0.301"
# A list of ip addresses in the usual "xxx.yyy.zzz.ttt" or "name.subnet.net"
# format that have been added manually/explicitly and may not be found by the
    # usual broadcast auto-detection (i.e., possibly on some other global network).
ips_nonlocal = []
def __init__(
self,
camera_class=None,
camera_config=None,
include=None,
exclude=None,
logger=None,
log_header=None,
log_file=None,
verbose=False,
ip_list=None,
):
super().__init__(
camera_class=camera_class,
camera_config=camera_config,
include=include,
exclude=exclude,
logger=logger,
log_header=log_header,
log_file=log_file,
verbose=verbose,
)
# If the ctor is fed with an explicit list of IP addresses, add them to
# the scanner (with delayed inspection in list_available_cameras).
if ip_list is not None:
self.ips_nonlocal.extend(ip_list)
        # debugging: print yaml configuration
# print(self._config)
# @modules.timeit
def list_available_cameras(self):
"""Gather serial numbers of online Aravis/Genicam devices.
:return: a list of serial numbers (as strings). This list may be
empty if no cameras are online/switched on.
For cameras explicitly addressed by IP, the serial
numbers have the format sn@ip, with an @ between number and address.
:rtype: list
.. todo:: optionally implement a specific filter for Blackfly's if Basler
cameras should not be listed.
"""
# Start with (pessimistic) initially empty set of online devices
serialNums = []
addrs = []
# Broadcast ethernet/bus for recognized cameras.
# Warning/todo: this gathers also cameras that are not of the Blackfly class,
# and in conjunction with the SDSS may also recognize the Basler cameras..
Aravis.update_device_list()
Ndev = Aravis.get_n_devices()
# print(str(Ndev) + " cameras online")
# get_device_id returns a string of type, SN, MAC etc
for i in range(Ndev):
cam = Aravis.Camera.new(Aravis.get_device_id(i))
uid = cam.get_string("DeviceSerialNumber")
serialNums.append(uid)
addrs.append("")
# Try to ping cameras explicitly proposed with ctor.
for ip in self.ips_nonlocal:
try:
cam = Aravis.Camera.new(ip)
uid = cam.get_string("DeviceSerialNumber")
                # If this was already in the scan: discard, else add
if uid not in serialNums:
serialNums.append(uid)
addrs.append("@" + ip)
except:
# apparently no such camera at this address....
pass
# we zip the two lists to the format 'serialnumber{@ip}'
ids = []
for cam in range(len(serialNums)):
ids.append(serialNums[cam] + addrs[cam])
return ids
from basecam.models.builtin import basic_fz_fits_model
class BlackflyCamera(BaseCamera):
"""A FLIR (formerly Point Grey Research) Blackfly camera.
Given the pixel scale on the benches of LVMi and the assumption
of 9 um pixel sizes of the LVMi cameras, we assume that the
    cameras have roughly 1 arcsec per pixel, so they are used without binning.
    In addition we let the camera itself flip the standard image orientation of the
    data, assuming the values are written through a FITS interface (where the first
    values in the sequential data form the bottom row). This flip is therefore done
    by the camera, not in this Python code.
"""
# fits_model=basic_fz_fits_model
def __init__(
self,
uid,
camera_system,
name=None,
force=False,
image_namer=None,
camera_params={},
):
super().__init__(
uid=uid,
camera_system=camera_system,
name=name,
force=force,
image_namer=image_namer,
camera_params=camera_params,
)
self.header = []
@modules.atimeit
async def _connect_internal(self, **kwargs):
"""Connect to a camera and upload basic binning and ROI parameters.
:param kwargs: recognizes the key uid with integer value, the serial number
If the key uid is absent, tries to attach to the first camera.
                       This is a subdictionary of 'cameras' in practice.
"""
# print(self.name)
# search for an optional uid key in the arguments
try:
uid = kwargs["uid"]
except:
uid = None
# reverse lookup of the uid in the list of known cameras
cs = BlackflyCameraSystem(BlackflyCamera)
slist = cs.list_available_cameras()
if uid is None:
# uid was not specified: grab the first device that is found
# print("no uid provided, attaching to first camera")
idx = 0
else:
# print("searching " + uid + " in " + str(slist) )
idx = -1
for id in slist:
# remove the optional ip address of the id
slistuid = id.split("@")[0]
if slistuid == uid:
idx = slist.index(id)
# not found
if idx < 0:
raise CameraConnectionError("SN " + uid + " not connected")
cam = None
try:
if "@" in slist[idx]:
# if the camera was not on local network use the address part
cam = Aravis.Camera.new(slist[idx].split("@")[1])
else:
# otherwise the index is the same as the search order...
cam = Aravis.Camera.new(Aravis.get_device_id(idx))
except:
raise CameraConnectionError(" not connected")
# search for an optional gain key in the arguments
# todo: one could interpret gain=0 here as to call set_gain_auto(ARV_AUTO_ON)
try:
gain = kwargs["gain"]
if gain > 0.0:
# todo: it might make sense to squeeze this into the minimum
# and maximum range of the camera's gain if outside that range.
                cam.set_gain_auto(0)
cam.set_gain(gain)
except Exception as ex:
# print("failed to set gain " + str(ex))
pass
# see arvenums.h for the list of pixel formats. This is MONO_16 here, always
cam.set_pixel_format(0x01100007)
# search for an optional x and y binning factor
try:
var = kwargs["binning"]
cam.set_binning(var[0], var[1])
except Exception as ex:
# print("failed to set binning " + str(ex))
# horizontal and vertical binning set to 1
cam.set_binning(1, 1)
# scan the general list of genicam featured values
# of the four native types
for typp, arvLst in kwargs.items():
if arvLst is not None:
if typp == "bool":
for genkey, genval in arvLst.items():
try:
cam.set_boolean(genkey, int(genval))
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "int":
for genkey, genval in arvLst.items():
try:
cam.set_integer(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "float":
for genkey, genval in arvLst.items():
try:
cam.set_float(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "string":
for genkey, genval in arvLst.items():
try:
cam.set_string(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
dev = cam.get_device()
# Take full frames by default (maximizing probability of LVM guide camera
# to find guide stars in the field)
roiBounds = [-1, -1]
try:
roiBounds[0] = dev.get_integer_feature_value("WidthMax")
roiBounds[1] = dev.get_integer_feature_value("HeightMax")
# print(" ROI " + str(roiBounds[0]) + " x " + str(roiBounds[1]) )
cam.set_region(0, 0, roiBounds[0], roiBounds[1])
except Exception as ex:
# print("failed to set ROI " + str(ex))
pass
self.device = cam
self.regionBounds = roiBounds
@modules.atimeit
async def _disconnect_internal(self):
"""Close connection to camera."""
self.device = None
# @modules.atimeit
async def _expose_grabFrame(self, exposure):
"""Read a single unbinned full frame.
The class splits the parent class' exposure into this function and
the part which generates the FITS file, because applications in guiders
are usually only interested in the frame's data, and would not
take the detour of generating a FITS file and reading it back from
disk.
        :param exposure: On entry, exposure.exptime is the intended exposure time in [sec]
On exit, exposure.data is the numpy array of the 16bit data
arranged in FITS order (i.e., the data of the bottom row appear first...)
:return: The dictionary with the window location and size (x=,y=,width=,height=)
"""
# To avoid being left over by other programs with no change
# to set the exposure time, we switch the auto=0=off first
self.device.set_exposure_time_auto(0)
        # Aravis expects the exposure time as an integer number of microseconds
        exptime_us = int(0.5 + exposure.exptime * 1e6)
        self.device.set_exposure_time(exptime_us)
# timeout (factor 2: assuming there may be two frames in auto mode taken
# internally)
# And 5 seconds margin for any sort of transmission overhead over PoE
tout_ms = int(1.0e6 * (2.0 * exposure.exptime + 5))
self.notify(CameraEvent.EXPOSURE_INTEGRATING)
# the buffer allocated/created within the acquisition()
buf = await self.loop.run_in_executor(None, self.device.acquisition, tout_ms)
if buf is None:
raise ExposureError(
"Exposing for "
+ str(exposure.exptime)
+ " sec failed. Timout "
+ str(tout_ms / 1.0e6)
)
# Decipher which methods this aravis buffer has...
# print(dir(buf))
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = buf.get_image_region()
# print('region',reg)
data = buf.get_data()
exposure.data = numpy.ndarray(
buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)
)
# print("exposure data shape", exposure.data.shape)
return reg
@modules.atimeit
async def _expose_internal(self, exposure):
"""Read a single unbinned full frame and store in a FITS file.
        :param exposure: On entry exposure.exptime is the intended exposure time in [sec]
On exit, exposure.data contains the 16bit data of a single frame
:return: There is no return value
"""
# fill exposure.data with the frame's 16bit data
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = await self._expose_grabFrame(exposure)
# print('region',reg)
binxy = {}
try:
# becomes a dictionary with dx=... dy=... for the 2 horiz/vert binn fact
binxy = self.device.get_binning()
except Exception as ex:
binxy = None
# append FITS header cards
# For the x/y coordinates transform from X11 to FITS coordinates
# Todo: reports the camera y-flipped reg.y if ReversY=true above??
addHeaders = [
("BinX", binxy.dx, "[ct] Horizontal Bin Factor 1, 2 or 4"),
("BinY", binxy.dy, "[ct] Vertical Bin Factor 1, 2 or 4"),
("Width", reg.width, "[ct] Pixel Columns"),
("Height", reg.height, "[ct] Pixel Rows"),
("RegX", 1 + reg.x, "[ct] Pixel Region Horiz start"),
# The lower left FITS corner is the upper left X11 corner...
(
"RegY",
self.regionBounds[1] - (reg.y + reg.height - 1),
"[ct] Pixel Region Vert start",
),
]
dev = self.device.get_device()
# print(dir(dev))
# print(dir(self))
# print(self.camera_system.get_camera(self.name))
# print(self.camera_system._config[self.name])
try:
gain = dev.get_float_feature_value("Gain")
addHeaders.append(("Gain", gain, "Gain"))
except Exception as ex:
# print("failed to read gain" + str(ex))
pass
imgrev = [False, False]
try:
imgrev[0] = self.device.get_boolean("ReverseX")
addHeaders.append(("ReverseX", imgrev[0] != 0, " Flipped left-right"))
imgrev[1] = self.device.get_boolean("ReverseY")
addHeaders.append(("ReverseY", imgrev[1] != 0, " Flipped up-down"))
# print("reversed" + str(imgrev[0]) + str(imgrev[1]) )
except Exception as ex:
# print("failed to read ReversXY" + str(ex))
pass
# This is an enumeration in the GenICam. See features list of
# `arv-tool-0.8 --address=192.168.70.50 features`
binMod = [-1, -1]
try:
binMod[0] = dev.get_integer_feature_value("BinningHorizontalMode")
if binMod[0] == 0:
addHeaders.append(
("BinModeX", "Averag", "Horiz Bin Mode Sum or Averag")
)
else:
addHeaders.append(("BinModeX", "Sum", "Horiz Bin Mode Sum or Averag"))
binMod[1] = dev.get_integer_feature_value("BinningVerticalMode")
if binMod[1] == 0:
addHeaders.append(("BinModeY", "Averag", "Vert Bin Mode Sum or Averag"))
else:
addHeaders.append(("BinModeY", "Sum", "Vert Bin Mode Sum or Averag"))
except Exception as ex:
# print("failed to read binmode" + str(ex))
pass
tmp = False
try:
tmp = self.device.get_boolean("BlackLevelClampingEnable")
addHeaders.append(
("CAMBLCLM", tmp != 0, "Black Level Clamping en/disabled")
)
# print("BlackLevelClampingEnable" + str(imgrev[0]) + str(imgrev[1]) )
except Exception as ex:
# print("failed to read BlackLevelClampingEnable" + str(ex))
pass
try:
camtyp = self.device.get_model_name()
addHeaders.append(("CAMTYP", camtyp, "Camera model"))
except:
pass
# call _expose_wcs() to gather WCS header keywords
addHeaders.extend(self._expose_wcs(exposure, reg))
# for headr in addHeaders:
# exposure.fits_model[0].header_model.append(models.Card(headr))
self.header = addHeaders
# print(repr(exposure.to_hdu()[0].header))
        # unref() is currently unsupported in this GObject library.
# Hope that this does not lead to any memory leak....
# buf.unref()
return
# @modules.timeit
def _expose_wcs(self, exposure, reg):
"""Gather information for the WCS FITS keywords
        :param exposure: On entry exposure.exptime is the intended exposure time in [sec]
On exit, exposure.data contains the 16bit data of a single frame
:param reg The binning and region information
"""
# the section/dictionary of the yaml file for this camera
yamlconfig = self.camera_system._config[self.name]
wcsHeaders = []
# The distance from the long edge of the FLIR camera to the center
# of the focus (fiber) is 7.144+4.0 mm according to SDSS-V_0110 figure 6
# and 11.14471 according to figure 3-1 of LVMi-0081
# For the *w or *e cameras the pixel row 1 (in FITS) is that far
# away in the y-coordinate and in the middle of the x-coordinate.
# For the *c cameras at the fiber bundle we assume them to be in the beam center.
wcsHeaders.append(("CRPIX1", reg.width / 2, "[px] RA center along axis 1"))
if self.name[-1] == "c":
wcsHeaders.append(
("CRPIX2", reg.height / 2, "[px] DEC center along axis 2")
)
else:
            # convert 11.14471 mm to microns and then to pixels
crefy = 11.14471 * 1000.0 / yamlconfig["pixsize"]
wcsHeaders.append(("CRPIX2", -crefy, "[px] DEC center along axis 2"))
return wcsHeaders
class BlackflyImageAreaMixIn(ImageAreaMixIn):
"""Allows to select image region and binning factors"""
async def _get_image_area_internal(self):
pass
async def _set_image_area_internal(self, area=None):
pass
async def _get_binning_internal(self):
pass
async def _set_binning_internal(self, hbin, vbin):
pass
# async def singleFrame(
# exptim,
# name,
# verb=False,
# ip_add=None,
# config="cameras.yaml",
# targ=None,
# kmirr=0.0,
# flen=None,
# ):
# """Expose once and write the image to a FITS file.
# :param exptim: The exposure time in seconds. Non-negative.
# :type exptim: float
# :param verb: Verbosity on or off
# :type verb: boolean
# :param ip_add: list of explicit IP's (like 192.168.70.51 or lvmt.irws2.mpia.de)
# :type ip_add: list of strings
# :param config: Name of the YAML file with the cameras configuration
# :type config: string of the file name
# :param targ: alpha/delta ra/dec of the sidereal target
# :type targ: astropy.coordinates.SkyCoord
# :param kmirr: Kmirr angle in degrees (0 if up, positive with right hand rule along North on bench)
# :type kmirr: float
# :param flen: focal length of telescope/siderostat in mm
# If not provided it will be taken from the configuration file
# :type flen: float
# """
# cs = BlackflyCameraSystem(
# BlackflyCamera, camera_config=config, verbose=verb, ip_list=ip_add
# )
# cam = await cs.add_camera(name=name)
# # print("cameras", cs.cameras)
# # print("config" ,config)
# exp = await cam.expose(exptim, "LAB TEST")
# if targ is not None and kmirr is not None:
# # if there is already a (partial) header information, keep it,
# # otherwise create one ab ovo.
# if exp.wcs is None:
# wcshdr = astropy.io.fits.Header()
# else:
# wcshdr = exp.wcs.to_header()
# key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
# )
# wcshdr.append(key)
# # field angle: degrees, then radians
# # direction of NCP on the detectors (where we have already flipped pixels
# # on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# # With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# # so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# # Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# # and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# # todo: get starting value from a siderostat field rotation tracking model
# fieldrot = 0.0
# if name[-1] == "c":
# # without prism, assuming center camera placed horizontally
# if name[:4] == "spec":
# # without K-mirror
# pass
# else:
# # with K-mirror
# # in the configuration the y-axis of the image has been flipped,
# # the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# # and applied to the input vector this is (sin(2m+f), cos(2m+f))
# fieldrot += 2.0 * kmirr
# else:
# # with prism
# if name[:4] == "spec":
# # without K-mirror
# # Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# # had been undone by vertical flip in the FLIR image.
# pass
# else:
# # with K-mirror
# # Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# # Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
# fieldrot += 2.0 * kmirr + 180.0
# if name[-1] == "w":
# # Camera is vertically,
# # so up in the lab is right in the image
# fieldrot += 90
# else:
# # Camera is vertically,
# # so up in the lab is left in the image
# fieldrot -= 90
# fieldrot = math.radians(fieldrot)
# # the section/dictionary of the yaml file for this camera
# yamlconfig = cs._config[name]
# if flen is None:
# flen = yamlconfig["flen"]
# # pixel scale per arcseconds is focal length *pi/180 /3600
# # = flen * mm *pi/180 /3600
# # = flen * um *pi/180 /3.6, so in microns per arcsec...
# pixscal = math.radians(flen) / 3.6
# # degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
# degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# # for the right handed coordinates
# # (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
# # (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# # (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# # (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
# # Note that the det of the WCS matrix is negativ (because RA/DEC is left-handed...)
# cosperpix = degperpix * math.cos(fieldrot)
# sinperpix = degperpix * math.sin(fieldrot)
# key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
# )
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
# )
# wcshdr.append(key)
# exp.wcs = astropy.wcs.WCS(wcshdr)
# # print(exp.wcs.to_header_string())
# for headr in wcshdr.cards:
# exp.fits_model[0].header_model.append(models.Card(headr))
# await exp.write()
# if verb:
# print("wrote ", exp.filename)
# # A debugging aid, demonstrator and simple test run
# # This allows to call this file as an executable from the command line.
# # The last command line argument must be the name of the camera
# # as used in the configuration file.
# # Example
# # BlackflyCam.py [-e seconds] [-v] [-c ../etc/cameras.yaml] [-r 2h10m10s] [-d -20d10m3s]
# # [-K kmirrdegrees] [-s "LCO"|"MPIA"|"APO"|"KHU"] [-f focallengthmm] {spec.age|spec.agw|...}
# if __name__ == "__main__":
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument(
# "-e",
# "--exptime",
# type=float,
# default=5.0,
# help="Expose for for exptime seconds",
# )
# parser.add_argument(
# "-v", "--verbose", action="store_true", help="print some notes to stdout"
# )
# # With the -i switch we can add an explicit IP-Adress for a
# # camera if we want to read a camera that is not reachable
# # by the broadcast scanner.
# parser.add_argument("-i", "--ip", help="IP address of camera")
# # Name of an optional YAML file
# parser.add_argument(
# "-c", "--cfg", default="cameras.yaml", help="YAML file of lvmt cameras"
# )
# # right ascension in degrees
# parser.add_argument("-r", "--ra", help="RA J2000 in degrees or in xxhxxmxxs format")
# # declination in degrees
# parser.add_argument(
# "-d", "--dec", help="DEC J2000 in degrees or in +-xxdxxmxxs format"
# )
# # K-mirror angle in degrees
# # Note this is only relevant for 3 of the 4 tables/telescopes
# parser.add_argument("-K", "--Kmirr", type=float, help="K-mirror angle in degrees")
# # focal length of telescope in mm
# # Default is the LCO triple lens configuration of 1.8 meters
# parser.add_argument(
# "-f", "--flen", type=float, default=1839.8, help="focal length in mm"
# )
# # shortcut for site coordinates: observatory
# # parser.add_argument("-s", '--site', default="LCO", help="LCO or MPIA or APO or KHU")
# # the last argument is mandatory: must be the name of exactly one camera
# # as used in the configuration file
# parser.add_argument("camname", default="sci.agw")
# args = parser.parse_args()
# ip_cmdLine = []
# if args.ip is not None:
# ip_cmdLine.append(args.ip)
# # check ranges and combine ra/dec into a single SkyCoord
# if args.ra is not None and args.dec is not None:
# if args.ra.find("h") < 0:
# # apparently simple floating point representation
# targ = astropy.coordinates.SkyCoord(
# ra=float(args.ra), dec=float(args.dec), unit="deg"
# )
# else:
# targ = astropy.coordinates.SkyCoord(args.ra + " " + args.dec)
# else:
# targ = None
# # print(targ)
# # The following 2 lines test that listing the connected cameras works...
# # bsys = BlackflyCameraSystem(camera_class=BlackflyCamera)
# # bsys.list_available_cameras()
# asyncio.run(
# singleFrame(
# args.exptime,
# args.camname,
# verb=args.verbose,
# ip_add=ip_cmdLine,
# config=args.cfg,
# targ=targ,
# kmirr=args.Kmirr,
# flen=args.flen,
# )
# )
class WcsHdrCards(card.MacroCard):
def macro(self, exposure, context={}):
wcshdr = get_wcshdr(modules.variables.cs_list[0], modules.variables.camname, modules.variables.targ, modules.variables.kmirr, modules.variables.flen)
return wcshdr
# @modules.timeit
def get_wcshdr(
cs,
name,
targ,
kmirr,
flen,
):
if targ is not None and kmirr is not None:
# wcshdr = astropy.io.fits.Header()
wcshdr = []
key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
wcshdr.append(key)
key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
wcshdr.append(key)
key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
wcshdr.append(key)
key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
wcshdr.append(key)
key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
)
wcshdr.append(key)
# field angle: degrees, then radians
# direction of NCP on the detectors (where we have already flipped pixels
# on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# todo: get starting value from a siderostat field rotation tracking model
fieldrot = 0.0
if name[-1] == "c":
# without prism, assuming center camera placed horizontally
if name[:4] == "spec":
# without K-mirror
pass
else:
# with K-mirror
# in the configuration the y-axis of the image has been flipped,
# the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# and applied to the input vector this is (sin(2m+f), cos(2m+f))
fieldrot += 2.0 * kmirr
else:
# with prism
if name[:4] == "spec":
# without K-mirror
# Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# had been undone by vertical flip in the FLIR image.
pass
else:
# with K-mirror
# Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
fieldrot += 2.0 * kmirr + 180.0
if name[-1] == "w":
            # Camera is mounted vertically,
# so up in the lab is right in the image
fieldrot += 90
else:
            # Camera is mounted vertically,
# so up in the lab is left in the image
fieldrot -= 90
fieldrot = math.radians(fieldrot)
# the section/dictionary of the yaml file for this camera
yamlconfig = cs._config[name]
if flen is None:
flen = yamlconfig["flen"]
# pixel scale per arcseconds is focal length *pi/180 /3600
# = flen * mm *pi/180 /3600
# = flen * um *pi/180 /3.6, so in microns per arcsec...
pixscal = math.radians(flen) / 3.6
# degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# for the right handed coordinates
# (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
# (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
        # Note that the det of the WCS matrix is negative (because RA/DEC is left-handed...)
cosperpix = degperpix * math.cos(fieldrot)
sinperpix = degperpix * math.sin(fieldrot)
key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
return wcshdr
else:
return None
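# Illustrative only, not part of the original module: a minimal sketch showing how the
# Card list returned by get_wcshdr() could be assembled into an astropy Header/WCS
# object (assumes `cards` is a non-None return value of get_wcshdr).
def _demo_wcs_from_cards(cards):
    from astropy.io import fits
    from astropy import wcs
    hdr = fits.Header(cards)
    return wcs.WCS(hdr)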
|
[
"basecam.CameraConnectionError",
"math.radians",
"lvmcam.araviscam.aravis.Aravis.update_device_list",
"lvmcam.araviscam.aravis.Aravis.get_device_id",
"math.sin",
"math.cos",
"lvmcam.araviscam.aravis.Aravis.get_n_devices",
"astropy.io.fits.Card",
"numpy.ndarray",
"lvmcam.araviscam.aravis.Aravis.Camera.new"
] |
[((4241, 4268), 'lvmcam.araviscam.aravis.Aravis.update_device_list', 'Aravis.update_device_list', ([], {}), '()\n', (4266, 4268), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((4284, 4306), 'lvmcam.araviscam.aravis.Aravis.get_n_devices', 'Aravis.get_n_devices', ([], {}), '()\n', (4304, 4306), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((14024, 14109), 'numpy.ndarray', 'numpy.ndarray', ([], {'buffer': 'data', 'dtype': 'numpy.uint16', 'shape': '(1, reg.height, reg.width)'}), '(buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)\n )\n', (14037, 14109), False, 'import numpy\n'), ((30853, 30916), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CUNIT1"""', '"""deg"""', '"""WCS units along axis 1"""'], {}), "('CUNIT1', 'deg', 'WCS units along axis 1')\n", (30873, 30916), False, 'import astropy\n'), ((30958, 31021), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CUNIT2"""', '"""deg"""', '"""WCS units along axis 2"""'], {}), "('CUNIT2', 'deg', 'WCS units along axis 2')\n", (30978, 31021), False, 'import astropy\n'), ((31063, 31124), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CTYPE1"""', '"""RA---TAN"""', '"""WCS type axis 1"""'], {}), "('CTYPE1', 'RA---TAN', 'WCS type axis 1')\n", (31083, 31124), False, 'import astropy\n'), ((31166, 31227), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CTYPE2"""', '"""DEC--TAN"""', '"""WCS type axis 2"""'], {}), "('CTYPE2', 'DEC--TAN', 'WCS type axis 2')\n", (31186, 31227), False, 'import astropy\n'), ((31269, 31343), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CRVAL1"""', 'targ.ra.deg', '"""[deg] RA at reference pixel"""'], {}), "('CRVAL1', targ.ra.deg, '[deg] RA at reference pixel')\n", (31289, 31343), False, 'import astropy\n'), ((31385, 31461), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CRVAL2"""', 'targ.dec.deg', '"""[deg] DEC at reference pixel"""'], {}), "('CRVAL2', targ.dec.deg, '[deg] DEC at reference pixel')\n", (31405, 31461), False, 'import astropy\n'), ((33639, 33661), 'math.radians', 'math.radians', (['fieldrot'], {}), '(fieldrot)\n', (33651, 33661), False, 'import math\n'), ((34724, 34797), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD1_1"""', '(-cosperpix)', '"""[deg/px] WCS matrix diagonal"""'], {}), "('CD1_1', -cosperpix, '[deg/px] WCS matrix diagonal')\n", (34744, 34797), False, 'import astropy\n'), ((34839, 34911), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD2_2"""', 'cosperpix', '"""[deg/px] WCS matrix diagonal"""'], {}), "('CD2_2', cosperpix, '[deg/px] WCS matrix diagonal')\n", (34859, 34911), False, 'import astropy\n'), ((34953, 35031), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD1_2"""', 'sinperpix', '"""[deg/px] WCS matrix outer diagonal"""'], {}), "('CD1_2', sinperpix, '[deg/px] WCS matrix outer diagonal')\n", (34973, 35031), False, 'import astropy\n'), ((35095, 35173), 'astropy.io.fits.Card', 'astropy.io.fits.Card', (['"""CD2_1"""', 'sinperpix', '"""[deg/px] WCS matrix outer diagonal"""'], {}), "('CD2_1', sinperpix, '[deg/px] WCS matrix outer diagonal')\n", (35115, 35173), False, 'import astropy\n'), ((34017, 34035), 'math.radians', 'math.radians', (['flen'], {}), '(flen)\n', (34029, 34035), False, 'import math\n'), ((34640, 34658), 'math.cos', 'math.cos', (['fieldrot'], {}), '(fieldrot)\n', (34648, 34658), False, 'import math\n'), ((34691, 34709), 'math.sin', 'math.sin', (['fieldrot'], {}), '(fieldrot)\n', (34699, 34709), False, 'import math\n'), ((4483, 4506), 'lvmcam.araviscam.aravis.Aravis.get_device_id', 
'Aravis.get_device_id', (['i'], {}), '(i)\n', (4503, 4506), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((4765, 4786), 'lvmcam.araviscam.aravis.Aravis.Camera.new', 'Aravis.Camera.new', (['ip'], {}), '(ip)\n', (4782, 4786), False, 'from lvmcam.araviscam.aravis import Aravis\n'), ((7761, 7814), 'basecam.CameraConnectionError', 'CameraConnectionError', (["('SN ' + uid + ' not connected')"], {}), "('SN ' + uid + ' not connected')\n", (7782, 7814), False, 'from basecam import CameraSystem, BaseCamera, CameraEvent, CameraConnectionError, models, ExposureError\n'), ((8218, 8257), 'basecam.CameraConnectionError', 'CameraConnectionError', (['""" not connected"""'], {}), "(' not connected')\n", (8239, 8257), False, 'from basecam import CameraSystem, BaseCamera, CameraEvent, CameraConnectionError, models, ExposureError\n'), ((8157, 8182), 'lvmcam.araviscam.aravis.Aravis.get_device_id', 'Aravis.get_device_id', (['idx'], {}), '(idx)\n', (8177, 8182), False, 'from lvmcam.araviscam.aravis import Aravis\n')]
|
import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot as plt
import os
import VQAModel
from keras.applications.xception import decode_predictions, preprocess_input
# from keras.applications.inception_v3 import decode_predictions, preprocess_input
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
import math
from Environment import DATADIR
versionType = 'v2_' # this should be '' when using VQA v2.0 dataset
taskType = 'OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
dataType = 'mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
dataSubType = 'train2014'
saveDir = 'preprocessed_xcep_24'
annFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)
quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/' % (DATADIR, dataSubType)
i = 0
directory = os.fsencode(imgDir)
# 363, 555
# 427, 619
size1 = 299+64
size2 = 299+64
model = VQAModel.createModelXception((size1, size2, 3))
model.summary()
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
id = int(filename[-16:-4])
img = load_img(imgPath)
width, height = img.size
if(width >= height):
img = img.resize((size2, size1), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
# img_array = np.tile(img,(32,1,1,1))
img_array = np.expand_dims(img_array, axis=0)
predictions = model.predict(img_array)
pred = predictions[0].reshape(24,2048)
np.save(imgDir+saveDir+"/"+str(id), pred)
if i < 1000 and i%100 == 0:
print(i)
if i % 1000 == 0:
print(i)
i += 1
model = VQAModel.createModelXception((size2, size1, 3))
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
id = int(filename[-16:-4])
img = load_img(imgPath)
width, height = img.size
if(width < height):
img = img.resize((size1, size2), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
# img_array = np.tile(img,(32,1,1,1))
img_array = np.expand_dims(img_array, axis=0)
# plt.imshow((img_array[0] + 1)/2)
# plt.show()
predictions = model.predict(img_array)
pred = predictions[0].reshape(24,2048)
np.save(imgDir+saveDir+"/"+str(id), pred)
if i % 1000 == 0:
print(i)
i += 1
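# Illustrative only (not part of the original preprocessing script): a helper sketch for
# reloading one of the feature files written above; `image_id` is the integer COCO id
# parsed from the filename.
def load_features(image_id):
    # each saved array has shape (24, 2048), matching the reshape above
    return np.load(os.path.join(imgDir, saveDir, str(image_id) + ".npy"))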
|
[
"os.fsdecode",
"keras.applications.xception.preprocess_input",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"os.fsencode",
"os.path.join",
"os.listdir",
"VQAModel.createModelXception"
] |
[((1165, 1184), 'os.fsencode', 'os.fsencode', (['imgDir'], {}), '(imgDir)\n', (1176, 1184), False, 'import os\n'), ((1248, 1295), 'VQAModel.createModelXception', 'VQAModel.createModelXception', (['(size1, size2, 3)'], {}), '((size1, size2, 3))\n', (1276, 1295), False, 'import VQAModel\n'), ((1324, 1345), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1334, 1345), False, 'import os\n'), ((2167, 2214), 'VQAModel.createModelXception', 'VQAModel.createModelXception', (['(size2, size1, 3)'], {}), '((size2, size1, 3))\n', (2195, 2214), False, 'import VQAModel\n'), ((2227, 2248), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (2237, 2248), False, 'import os\n'), ((1362, 1379), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (1373, 1379), False, 'import os\n'), ((2265, 2282), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (2276, 2282), False, 'import os\n'), ((1432, 1462), 'os.path.join', 'os.path.join', (['imgDir', 'filename'], {}), '(imgDir, filename)\n', (1444, 1462), False, 'import os\n'), ((1512, 1529), 'keras.preprocessing.image.load_img', 'load_img', (['imgPath'], {}), '(imgPath)\n', (1520, 1529), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((2335, 2365), 'os.path.join', 'os.path.join', (['imgDir', 'filename'], {}), '(imgDir, filename)\n', (2347, 2365), False, 'import os\n'), ((2415, 2432), 'keras.preprocessing.image.load_img', 'load_img', (['imgPath'], {}), '(imgPath)\n', (2423, 2432), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((1685, 1702), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1697, 1702), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((1727, 1754), 'keras.applications.xception.preprocess_input', 'preprocess_input', (['img_array'], {}), '(img_array)\n', (1743, 1754), False, 'from keras.applications.xception import decode_predictions, preprocess_input\n'), ((1829, 1862), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (1843, 1862), True, 'import numpy as np\n'), ((2587, 2604), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (2599, 2604), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((2629, 2656), 'keras.applications.xception.preprocess_input', 'preprocess_input', (['img_array'], {}), '(img_array)\n', (2645, 2656), False, 'from keras.applications.xception import decode_predictions, preprocess_input\n'), ((2731, 2764), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (2745, 2764), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_lucas_kanade.html
import numpy as np
import cv2
import sys
feature_params = dict( maxCorners = 100, #100, # params for ShiTomasi corner detection
qualityLevel = 0.2, #0.3,,#0.2,
minDistance = 7, #12, #7,
blockSize = 7)# 7 ) #7 ) #12 )
lk_params = dict( winSize = (50, 50),#(200,200) #(15,15), # Parameters for lucas kanade optical flow
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03))
color = np.random.randint(0,255,(100,3)) # Create some random colors
#color = (0, 0, 255)
#color_of = (0, 255, 0)
pause = False
frameNumber = 0
i = 0
progressArray = ['-', '\\', '|', '/' ]
structures = []
corners = np.ndarray([])
#######################################################################################################
#capFileName = '../obtaningData/basin_DNS.avi'
#apFileName = '../obtaningData/simpson_1972_small.mpg'
#capFileName = '../obtaningData/simpson_1972_fast.mpg'
#capFileName = '../obtaningData/Simpson/frontSimpson.mpg'
capFileName = '../obtaningData/Neufeld/neufeld.mpg'
#capFileName = '../obtaningData/lockExchangeSimpson_filipi_Re2445/test.mp4'
#capFileName = '../frenteSimpson.mpg'
#capFileName = '../obtaningData/Mariana/mariana.mp4'
#######################################################################################################
cap = cv2.VideoCapture(capFileName)
# Take first frame and find corners in it
for i in range(0, 2):
ret, old_frame = cap.read()
ret, old_frame = cap.read()
mask = np.zeros_like(old_frame)
mask = (255-mask)
frame = old_frame
cv2.imshow('frame', frame)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
while(1):
k = cv2.waitKey(30) & 0xff
if k == 27:
break
if k == 32:
# (frameNumber)
frameNumber = frameNumber + 1
        # uncomment here to re-detect the corners
#old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
#p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
old_gray_test = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p3 = cv2.goodFeaturesToTrack(old_gray_test, mask = None, **feature_params)
frame_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
#p1 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
#print(p1)
#print(p0)
#break
#print(p3)
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
        # These re-detected corners come in a different number, so the conditional
        # index st cannot be used; the reshape turns the result into a single array
        # of (x, y) points rather than several arrays with the points.
        corner_new = p3.reshape(-1,2)
print(corner_new.size)
#print(good_old)
#print(corner_new)
#break
# draw the tracks
for k,(corner) in enumerate(corner_new):
e,f = corner.ravel()
frame2 = cv2.circle(old_frame,(e,f),5,color[k].tolist(),-1)
cv2.imshow('frame2', frame2)
for k,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
#print(a)
#quit()
mask = cv2.line(mask, (a,b),(c,d), color[k].tolist(), 2)
#mask = cv2.line(old_frame, (a,b),(c,d), color[k].tolist(), 5)
frame = cv2.circle(old_frame,(a,b),5,color[k].tolist(),-1)
#mask = cv2.line(mask, (a,b),(c,d), color_of, 5)
#frame = cv2.circle(old_frame,(a,b),10,color,-1)
#img = cv2.add(frame, mask)
#mask = mask + frame
mask = np.bitwise_and(mask, frame)#<<<<<<<<<<
cv2.imshow('mask', mask)
cv2.imshow('frame', frame)
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
#break
i = ( i + 1 ) % 4
#print(progressArray[i])
sys.stdout.write('\rprocessing frames...[{0}] - {1} {2} '.format(frameNumber, k, progressArray[i]))
sys.stdout.flush()
ret, old_frame = cap.read()
structures.append(k)
if old_frame is None:
#print(frameNumber)
break
cv2.destroyAllWindows()
cap.release()
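# Illustrative only (not in the original script): the per-frame feature counts collected
# in `structures` could be written out for later analysis, e.g.:
#   np.savetxt('structures.csv', np.asarray(structures), fmt='%d')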
|
[
"numpy.zeros_like",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.VideoCapture",
"numpy.random.randint",
"cv2.goodFeaturesToTrack",
"numpy.bitwise_and",
"sys.stdout.flush",
"cv2.calcOpticalFlowPyrLK",
"cv2.imshow",
"numpy.ndarray"
] |
[((611, 646), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(100, 3)'], {}), '(0, 255, (100, 3))\n', (628, 646), True, 'import numpy as np\n'), ((822, 836), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (832, 836), True, 'import numpy as np\n'), ((1490, 1519), 'cv2.VideoCapture', 'cv2.VideoCapture', (['capFileName'], {}), '(capFileName)\n', (1506, 1519), False, 'import cv2\n'), ((1653, 1677), 'numpy.zeros_like', 'np.zeros_like', (['old_frame'], {}), '(old_frame)\n', (1666, 1677), True, 'import numpy as np\n'), ((1715, 1741), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (1725, 1741), False, 'import cv2\n'), ((1754, 1797), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (1766, 1797), False, 'import cv2\n'), ((1804, 1866), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['old_gray'], {'mask': 'None'}), '(old_gray, mask=None, **feature_params)\n', (1827, 1866), False, 'import cv2\n'), ((5094, 5117), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5115, 5117), False, 'import cv2\n'), ((1903, 1918), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1914, 1918), False, 'import cv2\n'), ((2300, 2343), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (2312, 2343), False, 'import cv2\n'), ((2358, 2425), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['old_gray_test'], {'mask': 'None'}), '(old_gray_test, mask=None, **feature_params)\n', (2381, 2425), False, 'import cv2\n'), ((2482, 2525), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (2494, 2525), False, 'import cv2\n'), ((2581, 2650), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['old_gray', 'frame_gray', 'p0', 'None'], {}), '(old_gray, frame_gray, p0, None, **lk_params)\n', (2605, 2650), False, 'import cv2\n'), ((3626, 3654), 'cv2.imshow', 'cv2.imshow', (['"""frame2"""', 'frame2'], {}), "('frame2', frame2)\n", (3636, 3654), False, 'import cv2\n'), ((4349, 4376), 'numpy.bitwise_and', 'np.bitwise_and', (['mask', 'frame'], {}), '(mask, frame)\n', (4363, 4376), True, 'import numpy as np\n'), ((4400, 4424), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (4410, 4424), False, 'import cv2\n'), ((4437, 4463), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4447, 4463), False, 'import cv2\n'), ((4875, 4893), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4891, 4893), False, 'import sys\n')]
|
#!/usr/bin/python
# This script predicts body part in test dataset
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import tensorflow
from tensorflow import keras
from keras import optimizers
from keras.models import load_model
from keras.preprocessing import image
import csv
import re
#csv
csvFile = open('delta.csv', 'a', newline="")
csvWriter = csv.writer(csvFile)
# Loading and Compiling Model
MODEL = load_model('inception_v3_0.9635416865348816.h5')
MODEL.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='categorical_crossentropy',
metrics=['acc'])
# Path of image you want to predict
for imageFile in os.listdir('./tests/images/'):
# Find out real class
    realClass = re.sub(r"([a-zA-Z]+)\-(\d+).jpg", r"\1", imageFile)
# Convert Img to an appropriate numpy array
IMG = image.load_img('./tests/images/'+imageFile, target_size=(299, 299))
X = image.img_to_array(IMG)
X = np.expand_dims(X, axis=0)
IMAGES = np.vstack([X])
# The actual prediction
CLASSES = MODEL.predict(IMAGES, batch_size=10)
#if(CLASSES[0][CLASSES.argmax(axis=1)] < 0.1):
# print('Predicted Classes for Images: others')
#else:
# Converting result of prediction to readable categories
CATEGORIES = {0: 'anal', 1: 'arms', 2: 'armsAndHands',
3: 'face', 4: 'feet', 5: 'genitalsFemale',
6: 'genitalsMale', 7: 'hands', 8: 'head',
9: 'legs', 10: 'legsAndfeet', 11: 'torso'}
#RESPONSE = [CATEGORIES[i] for i in CLASSES[0]]
# delta: max value - mean value
maxV = CLASSES[0][CLASSES.argmax()]
newClassesWithoutMax = np.delete(CLASSES[0], CLASSES.argmax())
print('Predicted Classes for Images: {}'.format(CATEGORIES[CLASSES.argmax()]))
print("max prediction is", maxV)
print("delta is", maxV - newClassesWithoutMax.mean())
csvWriter.writerow([imageFile, realClass, CATEGORIES[CLASSES.argmax()], maxV, maxV - newClassesWithoutMax.mean()])
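# Not in the original script: explicitly close the CSV file so that buffered rows are
# flushed to delta.csv even if the interpreter exits right after the loop.
csvFile.close()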
|
[
"keras.models.load_model",
"csv.writer",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"keras.optimizers.RMSprop",
"re.sub",
"os.listdir",
"numpy.vstack"
] |
[((420, 439), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (430, 439), False, 'import csv\n'), ((480, 528), 'keras.models.load_model', 'load_model', (['"""inception_v3_0.9635416865348816.h5"""'], {}), "('inception_v3_0.9635416865348816.h5')\n", (490, 528), False, 'from keras.models import load_model\n'), ((714, 743), 'os.listdir', 'os.listdir', (['"""./tests/images/"""'], {}), "('./tests/images/')\n", (724, 743), False, 'import os\n'), ((787, 839), 're.sub', 're.sub', (['"""([a-zA-Z]+)\\\\-(\\\\d+).jpg"""', '"""\\\\1"""', 'imageFile'], {}), "('([a-zA-Z]+)\\\\-(\\\\d+).jpg', '\\\\1', imageFile)\n", (793, 839), False, 'import re\n'), ((898, 967), 'keras.preprocessing.image.load_img', 'image.load_img', (["('./tests/images/' + imageFile)"], {'target_size': '(299, 299)'}), "('./tests/images/' + imageFile, target_size=(299, 299))\n", (912, 967), False, 'from keras.preprocessing import image\n'), ((974, 997), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['IMG'], {}), '(IMG)\n', (992, 997), False, 'from keras.preprocessing import image\n'), ((1006, 1031), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1020, 1031), True, 'import numpy as np\n'), ((1045, 1059), 'numpy.vstack', 'np.vstack', (['[X]'], {}), '([X])\n', (1054, 1059), True, 'import numpy as np\n'), ((553, 581), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(2e-05)'}), '(lr=2e-05)\n', (571, 581), False, 'from keras import optimizers\n')]
|
import typing
import random
from pathlib import Path
import logging
from time import strftime, gmtime
from datetime import datetime
import os
import argparse
import contextlib
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import Dataset
import torch.distributed as dist
logger = logging.getLogger(__name__)
FloatOrTensor = typing.Union[float, torch.Tensor]
def int_or_str(arg: str) -> typing.Union[int, str]:
try:
return int(arg)
except ValueError:
return arg
def check_is_file(file_path: str) -> str:
if file_path is None or os.path.isfile(file_path):
return file_path
else:
raise argparse.ArgumentTypeError(f"File path: {file_path} is not a valid file")
def check_is_dir(dir_path: str) -> str:
if dir_path is None or os.path.isdir(dir_path):
return dir_path
else:
raise argparse.ArgumentTypeError(f"Directory path: {dir_path} is not a valid directory")
def path_to_datetime(path: Path) -> datetime:
name = path.name
datetime_string = name.split('_')[0]
try:
year, month, day, hour, minute, second = datetime_string.split('-')
except ValueError:
try:
# Deprecated datetime strings
year, month, day, time_str = datetime_string.split('-')
hour, minute, second = time_str.split(':')
except ValueError:
return datetime(1, 1, 1)
pathdatetime = datetime(
int(year), int(month), int(day), int(hour), int(minute), int(second))
return pathdatetime
def get_expname(exp_name: typing.Optional[str],
task: typing.Optional[str] = None,
model_type: typing.Optional[str] = None,
save_name: typing.Optional[str] = 'time') -> str:
if exp_name is None:
# add time_or_name param, to specify ckpt folder's name manually, instead of timestamp+randint
# reason to do so: to make eval script easy, load from the specified folder
if save_name == 'time':
time_stamp = strftime("%y-%m-%d-%H-%M-%S", gmtime())
exp_name = f"{task}_{model_type}_{time_stamp}_{random.randint(0, int(1e6)):0>6d}"
else:
exp_name = f"{task}_{model_type}_{save_name}"
return exp_name
def set_random_seeds(seed: int, n_gpu: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed) # type: ignore
def get_effective_num_gpus(local_rank: int, n_gpu: int) -> int:
if local_rank == -1:
num_gpus = n_gpu
else:
num_gpus = dist.get_world_size()
return num_gpus
def get_effective_batch_size(batch_size: int,
local_rank: int,
n_gpu: int,
gradient_accumulation_steps: int = 1) -> int:
eff_batch_size = float(batch_size)
eff_batch_size /= gradient_accumulation_steps
eff_batch_size /= get_effective_num_gpus(local_rank, n_gpu)
return int(eff_batch_size)
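# Worked example (illustrative): batch_size=512 with gradient_accumulation_steps=2 and
# 4 effective GPUs gives 512 / 2 / 4 = 64 samples per GPU per forward pass.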
def get_num_train_optimization_steps(dataset: Dataset,
batch_size: int,
num_train_epochs: int) -> int:
return int(len(dataset) / batch_size * num_train_epochs)
class MetricsAccumulator:
def __init__(self, smoothing: float = 0.95):
self._loss_tmp = 0.
self._smoothloss: typing.Optional[float] = None
self._totalloss = 0.
self._metricstmp: typing.Dict[str, float] = defaultdict(lambda: 0.0)
self._smoothmetrics: typing.Dict[str, float] = {}
self._totalmetrics: typing.Dict[str, float] = defaultdict(lambda: 0.0)
self._nacc_steps = 0
self._nupdates = 0
self._smoothing = smoothing
def update(self,
loss: FloatOrTensor,
metrics: typing.Dict[str, FloatOrTensor],
step: bool = True) -> None:
if isinstance(loss, torch.Tensor):
loss = loss.item()
self._loss_tmp += loss
for name, value in metrics.items():
if isinstance(value, torch.Tensor):
value = value.item()
self._metricstmp[name] += value
self._nacc_steps += 1
if step:
self.step()
def step(self) -> typing.Dict[str, float]:
loss_tmp = self._loss_tmp / self._nacc_steps
metricstmp = {name: value / self._nacc_steps
for name, value in self._metricstmp.items()}
if self._smoothloss is None:
self._smoothloss = loss_tmp
else:
self._smoothloss *= self._smoothing
self._smoothloss += (1 - self._smoothing) * loss_tmp
self._totalloss += loss_tmp
for name, value in metricstmp.items():
if name in self._smoothmetrics:
currvalue = self._smoothmetrics[name]
newvalue = currvalue * self._smoothing + value * (1 - self._smoothing)
else:
newvalue = value
self._smoothmetrics[name] = newvalue
self._totalmetrics[name] += value
self._nupdates += 1
self._nacc_steps = 0
self._loss_tmp = 0
self._metricstmp = defaultdict(lambda: 0.0)
metricstmp['loss'] = loss_tmp
return metricstmp
def loss(self) -> float:
if self._smoothloss is None:
raise RuntimeError("Trying to get the loss without any updates")
return self._smoothloss
def metrics(self) -> typing.Dict[str, float]:
if self._nupdates == 0:
raise RuntimeError("Trying to get metrics without any updates")
return dict(self._smoothmetrics)
def final_loss(self) -> float:
return self._totalloss / self._nupdates
def final_metrics(self) -> typing.Dict[str, float]:
return {name: value / self._nupdates
for name, value in self._totalmetrics.items()}
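# Illustrative only (not part of the original module): a tiny self-contained exercise of
# MetricsAccumulator with dummy numbers, showing the update/step/final_metrics flow.
def _demo_metrics_accumulator() -> typing.Dict[str, float]:
    acc = MetricsAccumulator(smoothing=0.9)
    for step in range(4):
        # one optimizer step per update (step=True), i.e. no gradient accumulation
        acc.update(loss=1.0 / (step + 1), metrics={'accuracy': 0.5 + 0.1 * step}, step=True)
    return acc.final_metrics()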
class wrap_cuda_oom_error(contextlib.ContextDecorator):
"""A context manager that wraps the Cuda OOM message so that you get some more helpful
context as to what you can/should change. Can also be used as a decorator.
Examples:
1) As a context manager:
with wrap_cuda_oom_error(local_rank, batch_size, n_gpu, gradient_accumulation):
loss = model.forward(batch)
loss.backward()
optimizer.step()
optimizer.zero_grad
2) As a decorator:
@wrap_cuda_oom_error(local_rank, batch_size, n_gpu, gradient_accumulation)
def run_train_epoch(args):
...
<code to run training epoch>
...
"""
def __init__(self,
local_rank: int,
batch_size: int,
n_gpu: int = 1,
gradient_accumulation_steps: typing.Optional[int] = None):
self._local_rank = local_rank
self._batch_size = batch_size
self._n_gpu = n_gpu
self._gradient_accumulation_steps = gradient_accumulation_steps
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
exc_args = exc_value.args if exc_value is not None else None
if exc_args and 'CUDA out of memory' in exc_args[0]:
eff_ngpu = get_effective_num_gpus(self._local_rank, self._n_gpu)
if self._gradient_accumulation_steps is not None:
eff_batch_size = get_effective_batch_size(
self._batch_size, self._local_rank, self._n_gpu,
self._gradient_accumulation_steps)
message = (f"CUDA out of memory. Reduce batch size or increase "
f"gradient_accumulation_steps to divide each batch over more "
f"forward passes.\n\n"
f"\tHyperparameters:\n"
f"\t\tbatch_size per backward-pass: {self._batch_size}\n"
f"\t\tgradient_accumulation_steps: "
f"{self._gradient_accumulation_steps}\n"
f"\t\tn_gpu: {eff_ngpu}\n"
f"\t\tbatch_size per (gpu * forward-pass): "
f"{eff_batch_size}")
else:
eff_batch_size = get_effective_batch_size(
self._batch_size, self._local_rank, self._n_gpu)
message = (f"CUDA out of memory. Reduce batch size to fit each "
f"iteration in memory.\n\n"
f"\tHyperparameters:\n"
f"\t\tbatch_size per forward-pass: {self._batch_size}\n"
f"\t\tn_gpu: {eff_ngpu}\n"
f"\t\tbatch_size per (gpu * forward-pass): "
f"{eff_batch_size}")
raise RuntimeError(message)
return False
def write_lmdb(filename: str, iterable: typing.Iterable, map_size: int = 2 ** 20):
"""Utility for writing a dataset to an LMDB file.
Args:
filename (str): Output filename to write to
        iterable (Iterable): An iterable dataset to write. Entries must be pickleable.
map_size (int, optional): Maximum allowable size of database in bytes. Required by LMDB.
You will likely have to increase this. Default: 1MB.
"""
import lmdb
import pickle as pkl
env = lmdb.open(filename, map_size=map_size)
with env.begin(write=True) as txn:
for i, entry in enumerate(iterable):
txn.put(str(i).encode(), pkl.dumps(entry))
txn.put(b'num_examples', pkl.dumps(i + 1))
env.close()
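# Usage sketch (added note, not part of the original module): the file name and
# entries below are hypothetical; any pickleable objects work as entries.
#
#     write_lmdb('toy.lmdb',
#                [{'seq': 'MKT', 'label': 0}, {'seq': 'GAV', 'label': 1}],
#                map_size=10 * 2 ** 20)  # 10 MB; increase for real datasets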
class IncrementalNPZ(object):
# Modified npz that allows incremental saving, from https://stackoverflow.com/questions/22712292/how-to-use-numpy-savez-in-a-loop-for-save-more-than-one-array # noqa: E501
def __init__(self, file):
import tempfile
import zipfile
import os
if isinstance(file, str):
if not file.endswith('.npz'):
file = file + '.npz'
compression = zipfile.ZIP_STORED
zipfile = self.zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
self.tmpfile = tmpfile
self.zip = zipfile
self._i = 0
def zipfile_factory(self, *args, **kwargs):
import zipfile
import sys
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
def savez(self, *args, **kwds):
import os
import numpy.lib.format as fmt
namedict = kwds
for val in args:
key = 'arr_%d' % self._i
if key in namedict.keys():
raise ValueError("Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
self._i += 1
try:
for key, val in namedict.items():
fname = key + '.npy'
                with open(self.tmpfile, 'wb') as fid:
fmt.write_array(fid, np.asanyarray(val), allow_pickle=True)
self.zip.write(self.tmpfile, arcname=fname)
finally:
os.remove(self.tmpfile)
def close(self):
self.zip.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
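# Usage sketch (added note, not part of the original class): arrays passed to
# savez are appended to the same .npz across calls as arr_0, arr_1, ...; the
# file name and `batches` iterable below are hypothetical.
#
#     with IncrementalNPZ('features.npz') as npz:
#         for batch in batches:      # any iterable of numpy arrays
#             npz.savez(batch)
#     loaded = np.load('features.npz')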
|
[
"os.remove",
"numpy.random.seed",
"collections.defaultdict",
"os.path.isfile",
"os.close",
"torch.distributed.get_world_size",
"argparse.ArgumentTypeError",
"random.seed",
"lmdb.open",
"pickle.dumps",
"torch.manual_seed",
"datetime.datetime",
"zipfile.ZipFile",
"tempfile.mkstemp",
"os.path.isdir",
"time.gmtime",
"numpy.asanyarray",
"torch.cuda.manual_seed_all",
"logging.getLogger"
] |
[((325, 352), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (342, 352), False, 'import logging\n'), ((2349, 2366), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2360, 2366), False, 'import random\n'), ((2371, 2391), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2385, 2391), True, 'import numpy as np\n'), ((2396, 2419), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2413, 2419), False, 'import torch\n'), ((9515, 9553), 'lmdb.open', 'lmdb.open', (['filename'], {'map_size': 'map_size'}), '(filename, map_size=map_size)\n', (9524, 9553), False, 'import lmdb\n'), ((604, 629), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (618, 629), False, 'import os\n'), ((680, 753), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""File path: {file_path} is not a valid file"""'], {}), "(f'File path: {file_path} is not a valid file')\n", (706, 753), False, 'import argparse\n'), ((823, 846), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (836, 846), False, 'import os\n'), ((896, 983), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""Directory path: {dir_path} is not a valid directory"""'], {}), "(\n f'Directory path: {dir_path} is not a valid directory')\n", (922, 983), False, 'import argparse\n'), ((2446, 2478), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2472, 2478), False, 'import torch\n'), ((2640, 2661), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2659, 2661), True, 'import torch.distributed as dist\n'), ((3559, 3584), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (3570, 3584), False, 'from collections import defaultdict\n'), ((3696, 3721), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (3707, 3721), False, 'from collections import defaultdict\n'), ((5278, 5303), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (5289, 5303), False, 'from collections import defaultdict\n'), ((10400, 10437), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '"""-numpy.npy"""'}), "(suffix='-numpy.npy')\n", (10416, 10437), False, 'import tempfile\n'), ((10446, 10458), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (10454, 10458), False, 'import os\n'), ((10723, 10755), 'zipfile.ZipFile', 'zipfile.ZipFile', (['*args'], {}), '(*args, **kwargs)\n', (10738, 10755), False, 'import zipfile\n'), ((9727, 9743), 'pickle.dumps', 'pkl.dumps', (['(i + 1)'], {}), '(i + 1)\n', (9736, 9743), True, 'import pickle as pkl\n'), ((11487, 11510), 'os.remove', 'os.remove', (['self.tmpfile'], {}), '(self.tmpfile)\n', (11496, 11510), False, 'import os\n'), ((2094, 2102), 'time.gmtime', 'gmtime', ([], {}), '()\n', (2100, 2102), False, 'from time import strftime, gmtime\n'), ((9676, 9692), 'pickle.dumps', 'pkl.dumps', (['entry'], {}), '(entry)\n', (9685, 9692), True, 'import pickle as pkl\n'), ((1421, 1438), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1429, 1438), False, 'from datetime import datetime\n'), ((11359, 11377), 'numpy.asanyarray', 'np.asanyarray', (['val'], {}), '(val)\n', (11372, 11377), True, 'import numpy as np\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PoincareNormalize layer."""
import numpy as np
import tensorflow as tf
from tensorflow_addons.layers.poincare import PoincareNormalize
from tensorflow_addons.utils import test_utils
@test_utils.run_all_in_graph_and_eager_modes
class PoincareNormalizeTest(tf.test.TestCase):
def _PoincareNormalize(self, x, dim, epsilon=1e-5):
if isinstance(dim, list):
norm = np.linalg.norm(x, axis=tuple(dim))
for d in dim:
norm = np.expand_dims(norm, d)
norm_x = ((1. - epsilon) * x) / norm
else:
norm = np.expand_dims(
np.apply_along_axis(np.linalg.norm, dim, x), dim)
norm_x = ((1. - epsilon) * x) / norm
return np.where(norm > 1.0 - epsilon, norm_x, x)
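    # Added note: the reference implementation above rescales any slice whose
    # norm along `dim` exceeds 1 - epsilon onto the ball boundary, e.g.
    # [3., 4.] (norm 5) becomes ~(1 - epsilon) * [0.6, 0.8], while [0.3, 0.4]
    # (norm 0.5) is returned unchanged.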
def testPoincareNormalize(self):
x_shape = [20, 7, 3]
epsilon = 1e-5
tol = 1e-6
np.random.seed(1)
inputs = np.random.random_sample(x_shape).astype(np.float32)
for dim in range(len(x_shape)):
outputs_expected = self._PoincareNormalize(inputs, dim, epsilon)
outputs = test_utils.layer_test(
PoincareNormalize,
kwargs={
'axis': dim,
'epsilon': epsilon
},
input_data=inputs,
expected_output=outputs_expected)
for y in outputs_expected, outputs:
norm = np.linalg.norm(y, axis=dim)
self.assertLessEqual(norm.max(), 1. - epsilon + tol)
def testPoincareNormalizeDimArray(self):
x_shape = [20, 7, 3]
epsilon = 1e-5
tol = 1e-6
np.random.seed(1)
inputs = np.random.random_sample(x_shape).astype(np.float32)
dim = [1, 2]
outputs_expected = self._PoincareNormalize(inputs, dim, epsilon)
outputs = test_utils.layer_test(
PoincareNormalize,
kwargs={
'axis': dim,
'epsilon': epsilon
},
input_data=inputs,
expected_output=outputs_expected)
for y in outputs_expected, outputs:
norm = np.linalg.norm(y, axis=tuple(dim))
self.assertLessEqual(norm.max(), 1. - epsilon + tol)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow_addons.utils.test_utils.layer_test",
"numpy.random.seed",
"numpy.random.random_sample",
"numpy.expand_dims",
"numpy.apply_along_axis",
"numpy.where",
"numpy.linalg.norm"
] |
[((2992, 3006), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3004, 3006), True, 'import tensorflow as tf\n'), ((1425, 1466), 'numpy.where', 'np.where', (['(norm > 1.0 - epsilon)', 'norm_x', 'x'], {}), '(norm > 1.0 - epsilon, norm_x, x)\n', (1433, 1466), True, 'import numpy as np\n'), ((1584, 1601), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1598, 1601), True, 'import numpy as np\n'), ((2364, 2381), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2378, 2381), True, 'import numpy as np\n'), ((2565, 2704), 'tensorflow_addons.utils.test_utils.layer_test', 'test_utils.layer_test', (['PoincareNormalize'], {'kwargs': "{'axis': dim, 'epsilon': epsilon}", 'input_data': 'inputs', 'expected_output': 'outputs_expected'}), "(PoincareNormalize, kwargs={'axis': dim, 'epsilon':\n epsilon}, input_data=inputs, expected_output=outputs_expected)\n", (2586, 2704), False, 'from tensorflow_addons.utils import test_utils\n'), ((1812, 1951), 'tensorflow_addons.utils.test_utils.layer_test', 'test_utils.layer_test', (['PoincareNormalize'], {'kwargs': "{'axis': dim, 'epsilon': epsilon}", 'input_data': 'inputs', 'expected_output': 'outputs_expected'}), "(PoincareNormalize, kwargs={'axis': dim, 'epsilon':\n epsilon}, input_data=inputs, expected_output=outputs_expected)\n", (1833, 1951), False, 'from tensorflow_addons.utils import test_utils\n'), ((1173, 1196), 'numpy.expand_dims', 'np.expand_dims', (['norm', 'd'], {}), '(norm, d)\n', (1187, 1196), True, 'import numpy as np\n'), ((1311, 1354), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.linalg.norm', 'dim', 'x'], {}), '(np.linalg.norm, dim, x)\n', (1330, 1354), True, 'import numpy as np\n'), ((1619, 1651), 'numpy.random.random_sample', 'np.random.random_sample', (['x_shape'], {}), '(x_shape)\n', (1642, 1651), True, 'import numpy as np\n'), ((2142, 2169), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {'axis': 'dim'}), '(y, axis=dim)\n', (2156, 2169), True, 'import numpy as np\n'), ((2399, 2431), 'numpy.random.random_sample', 'np.random.random_sample', (['x_shape'], {}), '(x_shape)\n', (2422, 2431), True, 'import numpy as np\n')]
|
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# See https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import floris.tools as wfct
import floris.tools.visualization as vis
import floris.tools.cut_plane as cp
from floris.utilities import Vec3
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
# Initialize FLORIS model
fi = wfct.floris_utilities.FlorisInterface("example_input.json")
# set turbine locations to 4 turbines in a row - demonstrate how to change coordinates
D = fi.floris.farm.flow_field.turbine_map.turbines[0].rotor_diameter
layout_x = [0, 7*D, 0, 7*D]
layout_y = [0, 0, 5*D, 5*D]
fi.reinitialize_flow_field(layout_array=(layout_x, layout_y))
# Calculate wake
fi.calculate_wake()
# ================================================================================
print('Plotting the FLORIS flowfield...')
# ================================================================================
# Initialize the horizontal cut
hor_plane = wfct.cut_plane.HorPlane(
fi.get_flow_data(),
fi.floris.farm.turbines[0].hub_height
)
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
# ================================================================================
print('Changing wind direction and wind speed...')
# ================================================================================
ws = np.linspace(6, 8, 3)
wd = [45.0, 170.0, 270.]
# Plot and show
fig, ax = plt.subplots(3, 3, figsize=(15, 15))
power = np.zeros((len(ws), len(wd)))
for i, speed in enumerate(ws):
for j, wdir in enumerate(wd):
print('Calculating wake: wind direction = ',
wdir, 'and wind speed = ', speed)
fi.reinitialize_flow_field(wind_speed=speed, wind_direction=wdir)
# recalculate the wake
fi.calculate_wake()
# record powers
power[i, j] = np.sum(fi.get_turbine_power())
# ============================================
# not necessary if you only want the powers
# ============================================
# Visualize the changes
# Initialize the horizontal cut
hor_plane = wfct.cut_plane.HorPlane(
fi.get_flow_data(),
fi.floris.farm.turbines[0].hub_height
)
im = wfct.visualization.visualize_cut_plane(hor_plane, ax=ax[i, j])
strTitle = 'Wind Dir = ' + \
str(wdir) + 'deg' + ' Speed = ' + str(speed) + 'm/s'
ax[i, j].set_title(strTitle)
fig.colorbar(im, ax=ax[i, j], fraction=0.025, pad=0.04)
# ================================================================================
# print('Set yaw angles...')
# ================================================================================
# assign yaw angles to turbines and calculate wake at 270
# initial power output
fi.calculate_wake()
power_initial = np.sum(fi.get_turbine_power())
# Set the yaw angles
yaw_angles = [25.0, 0, 25.0, 0]
fi.calculate_wake(yaw_angles=yaw_angles)
# Check the new power
power_yaw = np.sum(fi.get_turbine_power())
print('Power aligned: %.1f' % power_initial)
print('Power yawed: %.1f' % power_yaw)
# ================================================================================
print('Plotting the FLORIS flowfield with yaw...')
# ================================================================================
# Initialize the horizontal cut
hor_plane = wfct.cut_plane.HorPlane(
fi.get_flow_data(),
fi.floris.farm.turbines[0].hub_height
)
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
ax.set_title('Flow with yawed front turbines')
plt.show()
|
[
"floris.tools.visualization.visualize_cut_plane",
"matplotlib.pyplot.show",
"numpy.linspace",
"floris.tools.floris_utilities.FlorisInterface",
"matplotlib.pyplot.subplots"
] |
[((910, 969), 'floris.tools.floris_utilities.FlorisInterface', 'wfct.floris_utilities.FlorisInterface', (['"""example_input.json"""'], {}), "('example_input.json')\n", (947, 969), True, 'import floris.tools as wfct\n'), ((1658, 1672), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1670, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1729), 'floris.tools.visualization.visualize_cut_plane', 'wfct.visualization.visualize_cut_plane', (['hor_plane'], {'ax': 'ax'}), '(hor_plane, ax=ax)\n', (1711, 1729), True, 'import floris.tools as wfct\n'), ((1954, 1974), 'numpy.linspace', 'np.linspace', (['(6)', '(8)', '(3)'], {}), '(6, 8, 3)\n', (1965, 1974), True, 'import numpy as np\n'), ((2027, 2063), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(15, 15)'}), '(3, 3, figsize=(15, 15))\n', (2039, 2063), True, 'import matplotlib.pyplot as plt\n'), ((4104, 4118), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4116, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4175), 'floris.tools.visualization.visualize_cut_plane', 'wfct.visualization.visualize_cut_plane', (['hor_plane'], {'ax': 'ax'}), '(hor_plane, ax=ax)\n', (4157, 4175), True, 'import floris.tools as wfct\n'), ((4223, 4233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4231, 4233), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2927), 'floris.tools.visualization.visualize_cut_plane', 'wfct.visualization.visualize_cut_plane', (['hor_plane'], {'ax': 'ax[i, j]'}), '(hor_plane, ax=ax[i, j])\n', (2903, 2927), True, 'import floris.tools as wfct\n')]
|
import os
import numpy as np
import matplotlib.pyplot as plt
import yaml
from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils
# CONFIG
#-------------------------------------------------------------------------------
# plot options
wide_full_comp = True # wide is 2 rows by 5 cols, otherwise 5 by 2
font_size = 22
interval_percentile = 95
plot_width = 3.2
plot_height = 2.4
# data loading options
main_data_dir = '/home/trevor/data/paper-data/bc-view-agnostic/bc_results'
bc_folders_file = 'bc_result_folder_names.yaml'
full_comp_envs = ['LiftSim', 'StackSim', 'PickAndInsertSim', 'DoorSim', 'DoorReal']
conditions = ['mm', 'mf', 'fm', 'ff']
mult_only_envs = ['PickAndInsertReal', 'DrawerReal']
mult_only_envs_vertical = True
sim_bc_seeds = [1, 2, 3, 4, 5]
sim_num_dem = range(25, 201, 25)
real_bc_seeds = [1, 2, 3]
real_num_dem = range(50, 201, 50)
results_filename = 'all_results.npz'
main_exp_dir = '/media/trevor/Data/paper-data/bc-viewag'
exp_dir = main_exp_dir + '/figures/bc_results' # + '/' + datetime.now().strftime("%y-%m-%d_%H-%M-%S")
#--------------------------------------------------------------------------------
plot_utils.setup_pretty_plotting()
bc_folders = yaml.load(open(bc_folders_file, 'r'), yaml.Loader)
# Full comparison fig (4 conditions) -----------------------------------------------------------------
if wide_full_comp:
fig, axes = plt.subplots(nrows=2, ncols=len(full_comp_envs), # sharex=True, sharey=True,
figsize=[plot_width * len(full_comp_envs), plot_height * 2])
axes[0, 0].set_ylabel("Fixed-base Env", labelpad=20, fontsize=font_size - 6)
axes[1, 0].set_ylabel("Multiview Env", labelpad=20, fontsize=font_size - 6)
else:
fig, axes = plt.subplots(nrows=len(full_comp_envs), ncols=2, # sharex=True, sharey=True,
figsize=[plot_width * 2, plot_height * len(full_comp_envs)])
axes[0, 0].set_title("Fixed Env", labelpad=20, fontsize=font_size - 6)
axes[0, 1].set_title("Multiview Env", labelpad=20, fontsize=font_size - 6)
full_comp_data = dict()
for env_i, env in enumerate(full_comp_envs):
full_comp_data[env] = {c: 0 for c in conditions}
for cond_i, cond in enumerate(conditions):
data = np.load(main_data_dir + '/' + bc_folders[env][cond_i] + '/' + results_filename)['per_ep_group']
if 'Sim' in env:
seeds = sim_bc_seeds
num_dem = sim_num_dem
else:
seeds = real_bc_seeds
num_dem = real_num_dem
num_dem, means, uppers, lowers = bc_viewag_plot_utils.get_means_lowers_uppers(data, num_dem, seeds, interval_percentile)
full_comp_data[env][cond] = dict(means=means, lowers=lowers, uppers=uppers)
# plot now that all data collected
fcd = full_comp_data
f_line, m_line = bc_viewag_plot_utils.plot_four_conds(axes, env, env_i, wide_full_comp, font_size - 10, num_dem,
fcd[env]['mm']['means'], fcd[env]['mm']['lowers'], fcd[env]['mm']['uppers'],
fcd[env]['mf']['means'], fcd[env]['mf']['lowers'], fcd[env]['mf']['uppers'],
fcd[env]['fm']['means'], fcd[env]['fm']['lowers'], fcd[env]['fm']['uppers'],
fcd[env]['ff']['means'], fcd[env]['ff']['lowers'], fcd[env]['ff']['uppers'])
fig.legend([m_line, f_line],
labels=["Fixed-base Policy", "Multiview Policy"],
# labels=[r"$\pi_f$", r"$\pi_m$"],
ncol=2,
fancybox=True,
shadow=True,
fontsize=font_size - 6,
# loc="lower left", # on figure
# bbox_to_anchor=(0.1, 0.175), # on figure
loc="lower right", # bottom right -- this is the original one
# bbox_to_anchor=(0.05, 0.015), # bottom right
# loc="lower left", # bottom left
# bbox_to_anchor=(0.05, 0.015), # bottom left
# loc="lower center", # center under
# bbox_to_anchor=(0.535, -0.05) # center under
)
ax = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
# ax.xaxis.set_label_coords(0.6, -0.1)
plt.ylabel("Success Rate", labelpad=10, fontsize=font_size-6)
plt.tight_layout()
os.makedirs(exp_dir, exist_ok=True)
fig.savefig(exp_dir + '/full_comp_success.pdf', bbox_inches='tight')
# Multiview suc only fig -----------------------------------------------------------------
if mult_only_envs_vertical:
fig, axes = plt.subplots(nrows=len(mult_only_envs), ncols=1, # sharex=True, sharey=True,
figsize=[plot_width, (plot_height * len(mult_only_envs)) + .5])
else:
fig, axes = plt.subplots(nrows=1, ncols=len(mult_only_envs), # sharex=True, sharey=True,
figsize=[plot_width * len(mult_only_envs), plot_height + .5])
ax = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
# ax.xaxis.set_label_coords(0.6, -0.1)
plt.ylabel("Success Rate", labelpad=10, fontsize=font_size-6)
mult_only_data = dict()
cmap = plt.get_cmap("tab10")
for env_i, env in enumerate(mult_only_envs):
mult_only_data[env] = 0
data = np.load(main_data_dir + '/' + bc_folders[env][0] + '/' + results_filename)['per_ep_group']
if 'Sim' in env:
seeds = sim_bc_seeds
num_dem = sim_num_dem
else:
seeds = real_bc_seeds
num_dem = real_num_dem
num_dem, means, uppers, lowers = bc_viewag_plot_utils.get_means_lowers_uppers(data, num_dem, seeds, interval_percentile)
line = bc_viewag_plot_utils.plot_mean_and_std(axes[env_i], num_dem, means, lowers, uppers, cmap(1),
yticks=np.arange(0, 1.1, .25), xticks=np.arange(50, 210, 50),
ylim=[-.05, 1.05], labelsize=font_size-10, title=env)
# fig.legend([m_line, f_line],
# labels=["Multiview Policy"],
# ncol=1,
# fancybox=True,
# shadow=True,
# fontsize=font_size - 6,
# loc="right", # bottom right
# bbox_to_anchor=(0.96, 0.4), # bottom right
# # loc="lower left", # bottom left
# # bbox_to_anchor=(0.05, 0.015), # bottom left
# # loc="lower center", # center under
# # bbox_to_anchor=(0.535, -0.05) # center under
# )
plt.tight_layout()
fig.savefig(exp_dir + '/mult_only_envs.pdf', bbox_inches='tight')
# plt.show()
|
[
"numpy.load",
"os.makedirs",
"matplotlib.pyplot.get_cmap",
"multiview_manipulation.utils.get_means_lowers_uppers",
"multiview_manipulation.plotting.setup_pretty_plotting",
"multiview_manipulation.utils.plot_four_conds",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] |
[((1169, 1203), 'multiview_manipulation.plotting.setup_pretty_plotting', 'plot_utils.setup_pretty_plotting', ([], {}), '()\n', (1201, 1203), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((4194, 4282), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'top': '(False)', 'bottom': '(False)', 'left': '(False)', 'right': '(False)'}), "(labelcolor='none', top=False, bottom=False, left=False,\n right=False)\n", (4209, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4351, 4422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Training Demonstrations"""'], {'fontsize': '(font_size - 6)'}), "('Number of Training Demonstrations', fontsize=font_size - 6)\n", (4361, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4460, 4523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Success Rate"""'], {'labelpad': '(10)', 'fontsize': '(font_size - 6)'}), "('Success Rate', labelpad=10, fontsize=font_size - 6)\n", (4470, 4523), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4541), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4539, 4541), True, 'import matplotlib.pyplot as plt\n'), ((4543, 4578), 'os.makedirs', 'os.makedirs', (['exp_dir'], {'exist_ok': '(True)'}), '(exp_dir, exist_ok=True)\n', (4554, 4578), False, 'import os\n'), ((5187, 5275), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'top': '(False)', 'bottom': '(False)', 'left': '(False)', 'right': '(False)'}), "(labelcolor='none', top=False, bottom=False, left=False,\n right=False)\n", (5202, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5344, 5415), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Training Demonstrations"""'], {'fontsize': '(font_size - 6)'}), "('Number of Training Demonstrations', fontsize=font_size - 6)\n", (5354, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5453, 5516), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Success Rate"""'], {'labelpad': '(10)', 'fontsize': '(font_size - 6)'}), "('Success Rate', labelpad=10, fontsize=font_size - 6)\n", (5463, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5547, 5568), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (5559, 5568), True, 'import matplotlib.pyplot as plt\n'), ((6897, 6915), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6913, 6915), True, 'import matplotlib.pyplot as plt\n'), ((2836, 3264), 'multiview_manipulation.utils.plot_four_conds', 'bc_viewag_plot_utils.plot_four_conds', (['axes', 'env', 'env_i', 'wide_full_comp', '(font_size - 10)', 'num_dem', "fcd[env]['mm']['means']", "fcd[env]['mm']['lowers']", "fcd[env]['mm']['uppers']", "fcd[env]['mf']['means']", "fcd[env]['mf']['lowers']", "fcd[env]['mf']['uppers']", "fcd[env]['fm']['means']", "fcd[env]['fm']['lowers']", "fcd[env]['fm']['uppers']", "fcd[env]['ff']['means']", "fcd[env]['ff']['lowers']", "fcd[env]['ff']['uppers']"], {}), "(axes, env, env_i, wide_full_comp, \n font_size - 10, num_dem, fcd[env]['mm']['means'], fcd[env]['mm'][\n 'lowers'], fcd[env]['mm']['uppers'], fcd[env]['mf']['means'], fcd[env][\n 'mf']['lowers'], fcd[env]['mf']['uppers'], fcd[env]['fm']['means'], fcd\n [env]['fm']['lowers'], fcd[env]['fm']['uppers'], fcd[env]['ff']['means'\n ], fcd[env]['ff']['lowers'], fcd[env]['ff']['uppers'])\n", (2872, 3264), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((5933, 6024), 
'multiview_manipulation.utils.get_means_lowers_uppers', 'bc_viewag_plot_utils.get_means_lowers_uppers', (['data', 'num_dem', 'seeds', 'interval_percentile'], {}), '(data, num_dem, seeds,\n interval_percentile)\n', (5977, 6024), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((2578, 2669), 'multiview_manipulation.utils.get_means_lowers_uppers', 'bc_viewag_plot_utils.get_means_lowers_uppers', (['data', 'num_dem', 'seeds', 'interval_percentile'], {}), '(data, num_dem, seeds,\n interval_percentile)\n', (2622, 2669), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((5653, 5727), 'numpy.load', 'np.load', (["(main_data_dir + '/' + bc_folders[env][0] + '/' + results_filename)"], {}), "(main_data_dir + '/' + bc_folders[env][0] + '/' + results_filename)\n", (5660, 5727), True, 'import numpy as np\n'), ((2264, 2343), 'numpy.load', 'np.load', (["(main_data_dir + '/' + bc_folders[env][cond_i] + '/' + results_filename)"], {}), "(main_data_dir + '/' + bc_folders[env][cond_i] + '/' + results_filename)\n", (2271, 2343), True, 'import numpy as np\n'), ((6182, 6205), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.25)'], {}), '(0, 1.1, 0.25)\n', (6191, 6205), True, 'import numpy as np\n'), ((6213, 6235), 'numpy.arange', 'np.arange', (['(50)', '(210)', '(50)'], {}), '(50, 210, 50)\n', (6222, 6235), True, 'import numpy as np\n')]
|
"""
Clenshaw-Curtis quadrature method is a good all-around quadrature method
comparable to Gaussian quadrature, but typically limited to finite intervals
without a specific weight function. In addition to be quite accurate, the
weights and abscissas can be calculated quite fast.
Another thing to note is that Clenshaw-Curtis, with an appropriate growth rule
is fully nested. This means, if one applies a method that combines different
order of quadrature rules, the number of evaluations can often be reduced as
the abscissas can be used across levels.
Example usage
-------------
The first few orders with linear growth rule::
>>> distribution = chaospy.Uniform(0, 1)
>>> for order in [0, 1, 2, 3]:
... X, W = chaospy.generate_quadrature(
... order, distribution, rule="clenshaw_curtis")
... print(order, numpy.around(X, 3), numpy.around(W, 3))
0 [[0.5]] [1.]
1 [[0. 1.]] [0.5 0.5]
2 [[0. 0.5 1. ]] [0.167 0.667 0.167]
3 [[0. 0.25 0.75 1. ]] [0.056 0.444 0.444 0.056]
The first few orders with exponential growth rule::
>>> for order in [0, 1, 2]:
... X, W = chaospy.generate_quadrature(
... order, distribution, rule="clenshaw_curtis", growth=True)
... print(order, numpy.around(X, 3), numpy.around(W, 3))
0 [[0.5]] [1.]
1 [[0. 0.5 1. ]] [0.167 0.667 0.167]
2 [[0. 0.146 0.5 0.854 1. ]] [0.033 0.267 0.4 0.267 0.033]
Applying the rule using Smolyak sparse grid::
>>> distribution = chaospy.Iid(chaospy.Uniform(0, 1), 2)
>>> X, W = chaospy.generate_quadrature(
... 2, distribution, rule="clenshaw_curtis",
... growth=True, sparse=True)
>>> print(numpy.around(X, 2))
[[0. 0. 0. 0.15 0.5 0.5 0.5 0.5 0.5 0.85 1. 1. 1. ]
[0. 0.5 1. 0.5 0. 0.15 0.5 0.85 1. 0.5 0. 0.5 1. ]]
>>> print(numpy.around(W, 3))
[ 0.028 -0.022 0.028 0.267 -0.022 0.267 -0.089 0.267 -0.022 0.267
0.028 -0.022 0.028]
"""
from __future__ import division, print_function
import numpy
from .combine import combine_quadrature
def quad_clenshaw_curtis(order, domain, growth=False):
"""
Generate the quadrature nodes and weights in Clenshaw-Curtis quadrature.
Args:
order (int, numpy.ndarray):
Quadrature order.
domain (chaospy.distributions.baseclass.Dist, numpy.ndarray):
Either distribution or bounding of interval to integrate over.
growth (bool):
If True sets the growth rule for the quadrature rule to only
include orders that enhances nested samples.
Returns:
(numpy.ndarray, numpy.ndarray):
abscissas:
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights:
The quadrature weights with ``weights.shape == (N,)``.
Example:
>>> abscissas, weights = quad_clenshaw_curtis(3, (0, 1))
>>> print(numpy.around(abscissas, 4))
[[0. 0.25 0.75 1. ]]
>>> print(numpy.around(weights, 4))
[0.0556 0.4444 0.4444 0.0556]
"""
from ..distributions.baseclass import Dist
if isinstance(domain, Dist):
abscissas, weights = quad_clenshaw_curtis(
order, domain.range(), growth)
weights *= domain.pdf(abscissas).flatten()
weights /= numpy.sum(weights)
return abscissas, weights
order = numpy.asarray(order, dtype=int).flatten()
lower, upper = numpy.array(domain)
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()
dim = max(lower.size, upper.size, order.size)
order = numpy.ones(dim, dtype=int)*order
lower = numpy.ones(dim)*lower
upper = numpy.ones(dim)*upper
if growth:
order = numpy.where(order > 0, 2**order, 0)
abscissas, weights = zip(*[_clenshaw_curtis(order_) for order_ in order])
return combine_quadrature(abscissas, weights, (lower, upper))
def _clenshaw_curtis(order):
r"""
Backend method.
Examples:
>>> abscissas, weights = _clenshaw_curtis(0)
>>> print(abscissas)
[0.5]
>>> print(weights)
[1.]
>>> abscissas, weights = _clenshaw_curtis(1)
>>> print(abscissas)
[0. 1.]
>>> print(weights)
[0.5 0.5]
>>> abscissas, weights = _clenshaw_curtis(2)
>>> print(abscissas)
[0. 0.5 1. ]
>>> print(weights)
[0.16666667 0.66666667 0.16666667]
>>> abscissas, weights = _clenshaw_curtis(3)
>>> print(abscissas)
[0. 0.25 0.75 1. ]
>>> print(weights)
[0.05555556 0.44444444 0.44444444 0.05555556]
>>> abscissas, weights = _clenshaw_curtis(4)
>>> print(abscissas)
[0. 0.14644661 0.5 0.85355339 1. ]
>>> print(weights)
[0.03333333 0.26666667 0.4 0.26666667 0.03333333]
>>> abscissas, weights = _clenshaw_curtis(5)
>>> print(abscissas)
[0. 0.0954915 0.3454915 0.6545085 0.9045085 1. ]
>>> print(weights)
[0.02 0.18037152 0.29962848 0.29962848 0.18037152 0.02 ]
"""
if order == 0:
return numpy.array([.5]), numpy.array([1.])
theta = (order-numpy.arange(order+1))*numpy.pi/order
abscisas = 0.5*numpy.cos(theta) + 0.5
idx, idy = numpy.mgrid[:order+1, :order//2]
weights = 2*numpy.cos(2*(idy+1)*theta[idx])/(4*idy*(idy+2)+3)
if order % 2 == 0:
weights[:, -1] *= 0.5
weights = (1-numpy.sum(weights, -1)) / order
weights[0] /= 2
weights[-1] /= 2
return abscisas, weights
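# Quick sanity check (added sketch, not part of the module): an order-4 rule on
# the unit interval integrates low-degree polynomials essentially exactly.
#
#     abscissas, weights = quad_clenshaw_curtis(4, (0, 1))
#     numpy.sum(weights)                             # ~ 1.0
#     numpy.sum(weights * abscissas.flatten()**2)    # ~ 1/3, the integral of x**2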
|
[
"numpy.sum",
"numpy.asarray",
"numpy.ones",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.cos"
] |
[((3598, 3617), 'numpy.array', 'numpy.array', (['domain'], {}), '(domain)\n', (3609, 3617), False, 'import numpy\n'), ((3471, 3489), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (3480, 3489), False, 'import numpy\n'), ((3768, 3794), 'numpy.ones', 'numpy.ones', (['dim'], {'dtype': 'int'}), '(dim, dtype=int)\n', (3778, 3794), False, 'import numpy\n'), ((3813, 3828), 'numpy.ones', 'numpy.ones', (['dim'], {}), '(dim)\n', (3823, 3828), False, 'import numpy\n'), ((3847, 3862), 'numpy.ones', 'numpy.ones', (['dim'], {}), '(dim)\n', (3857, 3862), False, 'import numpy\n'), ((3901, 3938), 'numpy.where', 'numpy.where', (['(order > 0)', '(2 ** order)', '(0)'], {}), '(order > 0, 2 ** order, 0)\n', (3912, 3938), False, 'import numpy\n'), ((3537, 3568), 'numpy.asarray', 'numpy.asarray', (['order'], {'dtype': 'int'}), '(order, dtype=int)\n', (3550, 3568), False, 'import numpy\n'), ((3630, 3650), 'numpy.asarray', 'numpy.asarray', (['lower'], {}), '(lower)\n', (3643, 3650), False, 'import numpy\n'), ((3673, 3693), 'numpy.asarray', 'numpy.asarray', (['upper'], {}), '(upper)\n', (3686, 3693), False, 'import numpy\n'), ((5340, 5358), 'numpy.array', 'numpy.array', (['[0.5]'], {}), '([0.5])\n', (5351, 5358), False, 'import numpy\n'), ((5359, 5377), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (5370, 5377), False, 'import numpy\n'), ((5454, 5470), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (5463, 5470), False, 'import numpy\n'), ((5542, 5579), 'numpy.cos', 'numpy.cos', (['(2 * (idy + 1) * theta[idx])'], {}), '(2 * (idy + 1) * theta[idx])\n', (5551, 5579), False, 'import numpy\n'), ((5662, 5684), 'numpy.sum', 'numpy.sum', (['weights', '(-1)'], {}), '(weights, -1)\n', (5671, 5684), False, 'import numpy\n'), ((5397, 5420), 'numpy.arange', 'numpy.arange', (['(order + 1)'], {}), '(order + 1)\n', (5409, 5420), False, 'import numpy\n')]
|
#data preparation utils
import numpy as np
import tensorflow as tf
def partitionByClass(X,y_true):
maxc = np.max(y_true+1)
ids = [[] for i in range(maxc)]
for i in range(np.shape(y_true)[0]):
ids[y_true[i]].append(i)
return ids
def prepareBatch(X,y_true,ids_by_class_train,N_classes = 10, N_support = 10, N_query = 5, permute = True):
maxc = np.max(y_true) #max class number
classes = np.random.choice(range(maxc+1), size = (N_classes), replace = False) #choose subset of N_classes classes
ids_batch = np.array(
[np.random.choice(ids_by_class_train[c],size = (N_support + N_query), replace = False) for c in classes]
)
ids_batch_support = np.ndarray.flatten(ids_batch[:,:N_support])
ids_batch_query = np.ndarray.flatten(ids_batch[:,N_support:])
if permute:
ids_batch_support = np.random.permutation(ids_batch_support)
ids_batch_query = np.random.permutation(ids_batch_query)
return X[ids_batch_support,:,:], y_true[ids_batch_support], X[ids_batch_query,:,:], y_true[ids_batch_query], classes
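# Usage sketch (added note, not part of the original utils): builds one episodic
# batch for few-shot training; the X_train / y_train names below are
# hypothetical placeholders for the loaded arrays.
#
#     ids_by_class = partitionByClass(X_train, y_train)
#     Xs, ys, Xq, yq, classes = prepareBatch(X_train, y_train, ids_by_class,
#                                            N_classes=10, N_support=10, N_query=5)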
#preprocessing images (loaded background 1.0, character 0.0)
def invert_img(x):
_,H,W = np.shape(x)
return -2.0 * np.reshape(x,[-1,H,W]) + 1.0
def deinvert_img(x):
_,H,W = np.shape(x)
return 1.0 - 0.5 * x
def resize_img(x,Hold,Wold,Hnew,Wnew):
q = tf.Session().run(tf.image.resize_images(tf.reshape(x,[-1,Hold,Wold,1]),[Hnew,Wnew]))
return np.reshape(q,[-1,Hnew,Wnew])
def subtract_mean(X):
N,H,W = np.shape(X)
Xf = np.reshape(X,[N,H*W])
means = np.mean(Xf, axis = 1, keepdims = True)
Xf = Xf - np.mean(Xf, axis = 1, keepdims = True)
return np.reshape(Xf,np.shape(X)), means
def augment_by_rotations(X,y,ks = [0,1,2,3]):
Xs,ys = [],[]
class_step = np.max(y)+1
for i,k in enumerate(ks):
Xs.append(np.rot90(X, k = k, axes = (1,2)))
ys.append(np.array(y) + (i)*class_step)
Xa = np.concatenate(Xs,axis = 0)
ya = np.concatenate(ys,axis = 0)
return Xa,ya
|
[
"tensorflow.reshape",
"tensorflow.Session",
"numpy.shape",
"numpy.max",
"numpy.mean",
"numpy.rot90",
"numpy.reshape",
"numpy.array",
"numpy.random.choice",
"numpy.random.permutation",
"numpy.concatenate",
"numpy.ndarray.flatten"
] |
[((112, 130), 'numpy.max', 'np.max', (['(y_true + 1)'], {}), '(y_true + 1)\n', (118, 130), True, 'import numpy as np\n'), ((373, 387), 'numpy.max', 'np.max', (['y_true'], {}), '(y_true)\n', (379, 387), True, 'import numpy as np\n'), ((701, 745), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ids_batch[:, :N_support]'], {}), '(ids_batch[:, :N_support])\n', (719, 745), True, 'import numpy as np\n'), ((767, 811), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ids_batch[:, N_support:]'], {}), '(ids_batch[:, N_support:])\n', (785, 811), True, 'import numpy as np\n'), ((1177, 1188), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1185, 1188), True, 'import numpy as np\n'), ((1270, 1281), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1278, 1281), True, 'import numpy as np\n'), ((1451, 1482), 'numpy.reshape', 'np.reshape', (['q', '[-1, Hnew, Wnew]'], {}), '(q, [-1, Hnew, Wnew])\n', (1461, 1482), True, 'import numpy as np\n'), ((1515, 1526), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1523, 1526), True, 'import numpy as np\n'), ((1536, 1561), 'numpy.reshape', 'np.reshape', (['X', '[N, H * W]'], {}), '(X, [N, H * W])\n', (1546, 1561), True, 'import numpy as np\n'), ((1570, 1604), 'numpy.mean', 'np.mean', (['Xf'], {'axis': '(1)', 'keepdims': '(True)'}), '(Xf, axis=1, keepdims=True)\n', (1577, 1604), True, 'import numpy as np\n'), ((1940, 1966), 'numpy.concatenate', 'np.concatenate', (['Xs'], {'axis': '(0)'}), '(Xs, axis=0)\n', (1954, 1966), True, 'import numpy as np\n'), ((1977, 2003), 'numpy.concatenate', 'np.concatenate', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (1991, 2003), True, 'import numpy as np\n'), ((856, 896), 'numpy.random.permutation', 'np.random.permutation', (['ids_batch_support'], {}), '(ids_batch_support)\n', (877, 896), True, 'import numpy as np\n'), ((923, 961), 'numpy.random.permutation', 'np.random.permutation', (['ids_batch_query'], {}), '(ids_batch_query)\n', (944, 961), True, 'import numpy as np\n'), ((1623, 1657), 'numpy.mean', 'np.mean', (['Xf'], {'axis': '(1)', 'keepdims': '(True)'}), '(Xf, axis=1, keepdims=True)\n', (1630, 1657), True, 'import numpy as np\n'), ((1789, 1798), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1795, 1798), True, 'import numpy as np\n'), ((184, 200), 'numpy.shape', 'np.shape', (['y_true'], {}), '(y_true)\n', (192, 200), True, 'import numpy as np\n'), ((562, 647), 'numpy.random.choice', 'np.random.choice', (['ids_by_class_train[c]'], {'size': '(N_support + N_query)', 'replace': '(False)'}), '(ids_by_class_train[c], size=N_support + N_query, replace=False\n )\n', (578, 647), True, 'import numpy as np\n'), ((1207, 1232), 'numpy.reshape', 'np.reshape', (['x', '[-1, H, W]'], {}), '(x, [-1, H, W])\n', (1217, 1232), True, 'import numpy as np\n'), ((1355, 1367), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1365, 1367), True, 'import tensorflow as tf\n'), ((1395, 1429), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, Hold, Wold, 1]'], {}), '(x, [-1, Hold, Wold, 1])\n', (1405, 1429), True, 'import tensorflow as tf\n'), ((1687, 1698), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1695, 1698), True, 'import numpy as np\n'), ((1849, 1878), 'numpy.rot90', 'np.rot90', (['X'], {'k': 'k', 'axes': '(1, 2)'}), '(X, k=k, axes=(1, 2))\n', (1857, 1878), True, 'import numpy as np\n'), ((1901, 1912), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1909, 1912), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # 02__trans_motifs
#
# in this notebook, I find motifs that are associated with trans effects using linear models and our RNA-seq data
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import itertools
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import sys
from itertools import combinations
from scipy.stats import boxcox
from scipy.stats import linregress
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from statsmodels.stats.anova import anova_lm
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
# import utils
sys.path.append("../../../utils")
from plotting_utils import *
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
mpl.rcParams['figure.autolayout'] = False
# In[2]:
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
# In[3]:
np.random.seed(2019)
# In[4]:
QUANT_ALPHA = 0.05
# ## functions
# In[5]:
def calculate_gc(row, col):
cs = row[col].count("C")
gs = row[col].count("G")
gc = (cs+gs)/len(row[col])
return gc
# In[6]:
def calculate_cpg(row, col):
cpgs = row[col].count("CG")
cpg = cpgs/len(row[col])
return cpg
# In[7]:
def sig_status(row):
if row.padj_trans < 0.05:
return "sig"
else:
return "not sig"
# In[8]:
def neg_odds(row):
if row["sig_status"] == "sig hESC":
return -row["hESC_odds"]
elif row["sig_status"] == "sig mESC":
return row["mESC_odds"]
else:
return np.nan
# In[9]:
def direction_match(row):
if row.activ_or_repr == "activating":
if row.beta_trans < 0 and row.logFC < 0:
return "match"
elif row.beta_trans > 0 and row.logFC > 0:
return "match"
else:
return "no match"
elif row.activ_or_repr == "repressing":
if row.beta_trans < 0 and row.logFC > 0:
return "match"
elif row.beta_trans > 0 and row.logFC < 0:
return "match"
else:
return "no match"
else:
return "unclear"
# ## variables
# In[10]:
human_motifs_f = "../../../data/04__mapped_motifs/human_motifs_filtered.txt.gz"
mouse_motifs_f = "../../../data/04__mapped_motifs/mouse_motifs_filtered.txt.gz"
# In[11]:
motif_info_dir = "../../../misc/01__motif_info"
motif_map_f = "%s/00__lambert_et_al_files/00__metadata/curated_motif_map.txt" % motif_info_dir
motif_info_f = "%s/00__lambert_et_al_files/00__metadata/motif_info.txt" % motif_info_dir
# In[12]:
sig_motifs_f = "../../../data/04__mapped_motifs/sig_motifs.txt"
# In[13]:
tss_map_f = "../../../data/01__design/01__mpra_list/mpra_tss.with_ids.RECLASSIFIED_WITH_MAX.txt"
# In[14]:
index_f = "../../../data/01__design/02__index/TWIST_pool4_v8_final.with_element_id.txt.gz"
# In[15]:
data_f = "../../../data/02__mpra/03__results/all_processed_results.txt"
# In[16]:
expr_dir = "../../../data/03__rna_seq/04__TF_expr"
orth_expr_f = "%s/orth_TF_expression.txt" % expr_dir
human_expr_f = "%s/hESC_TF_expression.txt" % expr_dir
mouse_expr_f = "%s/mESC_TF_expression.txt" % expr_dir
# In[17]:
orth_f = "../../../misc/00__ensembl_orthologs/ensembl96_human_mouse_orths.txt.gz"
# ## 1. import data
# In[18]:
index = pd.read_table(index_f, sep="\t")
index_elem = index[["element", "tile_type", "element_id", "name", "tile_number", "chrom", "strand", "actual_start",
"actual_end", "dupe_info"]]
index_elem = index_elem.drop_duplicates()
# In[19]:
tss_map = pd.read_table(tss_map_f, sep="\t")
tss_map.head()
# In[20]:
# this file is already filtered to correct tile nums
human_motifs = pd.read_table(human_motifs_f, sep="\t")
human_motifs.head()
# In[21]:
# this file is already filtered to correct tile nums
mouse_motifs = pd.read_table(mouse_motifs_f, sep="\t")
mouse_motifs.head()
# In[22]:
motif_info = pd.read_table(motif_info_f, sep="\t")
motif_info.head()
# In[23]:
sig_motifs = pd.read_table(sig_motifs_f)
sig_motifs = sig_motifs[sig_motifs["padj"] < 0.05]
print(len(sig_motifs))
sig_motifs.head()
# In[24]:
data = pd.read_table(data_f)
data.head()
# In[25]:
orth_expr = pd.read_table(orth_expr_f, sep="\t")
orth_expr.head()
# In[26]:
human_expr = pd.read_table(human_expr_f, sep="\t")
human_expr.head()
# In[27]:
mouse_expr = pd.read_table(mouse_expr_f, sep="\t")
mouse_expr.head()
# In[28]:
orth = pd.read_table(orth_f, sep="\t")
orth.head()
# ## 2. merge data to build model
# In[29]:
index_elem = index_elem[index_elem["name"].str.contains("EVO")]
index_elem.head()
# In[30]:
index_elem["tss_id"] = index_elem["name"].str.split("__", expand=True)[1]
index_elem["tss_tile_num"] = index_elem["name"].str.split("__", expand=True)[2]
index_elem.sample(5)
# In[31]:
index_human = index_elem[index_elem["name"].str.contains("HUMAN")]
index_mouse = index_elem[index_elem["name"].str.contains("MOUSE")]
index_mouse.sample(5)
# In[32]:
print(len(data))
data_elem = data.merge(index_human[["element", "tss_id", "tss_tile_num"]], left_on=["hg19_id", "tss_tile_num"],
right_on=["tss_id", "tss_tile_num"])
data_elem = data_elem.merge(index_mouse[["element", "tss_id", "tss_tile_num"]], left_on=["mm9_id", "tss_tile_num"],
right_on=["tss_id", "tss_tile_num"], suffixes=("_human", "_mouse"))
data_elem.drop(["tss_id_human", "tss_id_mouse"], axis=1, inplace=True)
print(len(data))
data_elem.head()
# In[33]:
data_elem["gc_human"] = data_elem.apply(calculate_gc, col="element_human", axis=1)
data_elem["gc_mouse"] = data_elem.apply(calculate_gc, col="element_mouse", axis=1)
data_elem["cpg_human"] = data_elem.apply(calculate_cpg, col="element_human", axis=1)
data_elem["cpg_mouse"] = data_elem.apply(calculate_cpg, col="element_mouse", axis=1)
data_elem.sample(5)
# In[34]:
data_elem.columns
# In[35]:
data_human = data_elem[["hg19_id", "tss_tile_num", "logFC_trans_human", "gc_human", "cpg_human", "HUES64_padj_hg19", "trans_status_one"]]
data_mouse = data_elem[["mm9_id", "tss_tile_num", "logFC_trans_mouse", "gc_mouse", "cpg_mouse", "mESC_padj_mm9", "trans_status_one"]]
data_human.columns = ["tss_id", "tss_tile_num", "logFC_trans", "gc", "cpg", "padj", "trans_status"]
data_mouse.columns = ["tss_id", "tss_tile_num", "logFC_trans", "gc", "cpg", "padj", "trans_status"]
data_indiv = data_human.append(data_mouse).drop_duplicates()
print(len(data_indiv))
data_indiv.head()
# ## 3. build reduced model
# In[36]:
scaled_features = StandardScaler().fit_transform(data_indiv[["logFC_trans", "gc", "cpg"]])
data_norm = pd.DataFrame(scaled_features, index=data_indiv.index, columns=["logFC_trans", "gc", "cpg"])
data_norm["padj"] = data_indiv["padj"]
data_norm["tss_id"] = data_indiv["tss_id"]
data_norm["tss_tile_num"] = data_indiv["tss_tile_num"]
data_norm["trans_status"] = data_indiv["trans_status"]
data_norm.head()
# In[37]:
data_filt = data_norm[data_norm["padj"] < QUANT_ALPHA].drop_duplicates()
print(len(data_filt))
data_filt.head()
# In[38]:
mod = smf.ols(formula='logFC_trans ~ gc + cpg',
data=data_filt).fit()
# In[39]:
mod.summary()
# In[40]:
res = mod.resid
fig, ax = plt.subplots(figsize=(2.2, 2.2), ncols=1, nrows=1)
sm.qqplot(res, line='s', ax=ax)
ax.set_title("Normal QQ: trans effects model")
# fig.savefig("avg_activ_qq.pdf", dpi="figure", bbox_inches="tight")
# In[41]:
reduced_llf = mod.llf
reduced_llf
# In[42]:
reduced_rsq = mod.rsquared
reduced_rsq
# ## 4. add motifs to model
# In[43]:
data_filt["tss_index"] = data_filt["tss_id"] + "__" + data_filt["tss_tile_num"]
# In[44]:
human_motifs["hg19_index"] = human_motifs["hg19_id"] + "__" + human_motifs["tss_tile_num"]
mouse_motifs["mm9_index"] = mouse_motifs["mm9_id"] + "__" + mouse_motifs["tss_tile_num"]
# In[45]:
uniq_motifs = list(set(list(human_motifs["#pattern name"].unique()) + list(mouse_motifs["#pattern name"].unique())))
len(uniq_motifs)
# In[46]:
def tss_motif(row):
if row.human_motif:
return True
elif row.mouse_motif:
return True
else:
return False
# In[47]:
motif_results = {}
for i, motif_id in enumerate(uniq_motifs):
tmp = data_filt.copy()
# determine whether motif is in human or mouse sequence
human_motifs_sub = human_motifs[human_motifs["#pattern name"] == motif_id]["hg19_index"].unique()
mouse_motifs_sub = mouse_motifs[mouse_motifs["#pattern name"] == motif_id]["mm9_index"].unique()
tmp["human_motif"] = tmp["tss_index"].isin(human_motifs_sub)
tmp["mouse_motif"] = tmp["tss_index"].isin(mouse_motifs_sub)
tmp["tss_motif"] = tmp.apply(tss_motif, axis=1)
n_w_motif = tmp["tss_motif"].sum()
# make full model
full_mod = smf.ols(formula='logFC_trans ~ gc + cpg + tss_motif',
data=tmp).fit()
full_llf = full_mod.llf
full_rsq = full_mod.rsquared
# # perform likelihood ratio test
# lr, p = lrtest(reduced_llf, full_llf)
# calculate additional variance explained
rsq = full_rsq - reduced_rsq
# record beta
beta = list(full_mod.params)[1]
# beta p
beta_p = list(full_mod.pvalues)[1]
print("(#%s) %s: n w/ motif: %s ... p: %s, rsquared: %s" % (i+1, motif_id, len(tmp), beta_p, rsq))
motif_results[motif_id] = {"rsq": rsq, "beta": beta, "beta_p": beta_p, "n_w_motif": n_w_motif}
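# In[ ]:
# Added note (not an original cell): the commented-out likelihood ratio test
# above would compare the nested OLS fits via LR = 2 * (full_llf - reduced_llf)
# with p = scipy.stats.chi2.sf(LR, df=1) for the single added tss_motif term;
# here the per-motif beta p-value from the full fit is kept instead and is
# FDR-corrected in a later cell.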
# In[48]:
motif_results = pd.DataFrame.from_dict(motif_results, orient="index").reset_index()
motif_results = motif_results[motif_results["n_w_motif"] >= 10]
print(len(motif_results))
motif_results.head()
# In[49]:
motif_results["padj"] = multicomp.multipletests(motif_results["beta_p"], method="fdr_bh")[1]
len(motif_results[motif_results["padj"] < 0.05])
# In[50]:
motif_results.sort_values(by="padj").head(10)
# ## 5. join w/ TF info
# In[51]:
motif_results_mrg = motif_results.merge(sig_motifs, on="index", suffixes=("_trans", "_activ"))
motif_results_mrg.sort_values(by="padj_trans").head()
# In[52]:
sig_results = motif_results_mrg[(motif_results_mrg["padj_trans"] < 0.05)]
sig_results["abs_beta"] = np.abs(sig_results["beta_trans"])
sig_results = sig_results.sort_values(by="abs_beta", ascending=False)
sig_results.head()
# In[53]:
len(sig_results)
# In[54]:
len(sig_results["HGNC symbol"].unique())
# In[55]:
data_filt = data_elem[((data_elem["HUES64_padj_hg19"] < QUANT_ALPHA) | (data_elem["mESC_padj_mm9"] < QUANT_ALPHA))]
print(len(data_filt))
# In[56]:
data_filt_sp = data_filt.drop("orig_species", axis=1)
data_filt_sp.drop_duplicates(inplace=True)
len(data_filt_sp)
# In[57]:
data_filt_hu = data_filt_sp[["hg19_id", "logFC_trans_one", "trans_status_one"]]
data_filt_hu.columns = ["tss_id", "logFC_trans_one", "trans_status_one"]
data_filt_mo = data_filt_sp[["mm9_id", "logFC_trans_one", "trans_status_one"]]
data_filt_mo.columns = ["tss_id", "logFC_trans_one", "trans_status_one"]
data_filt_plot = data_filt_hu.append(data_filt_mo)
data_filt_plot["abs_logFC_trans"] = np.abs(data_filt_plot["logFC_trans_one"])
data_filt_plot.head()
# In[58]:
# example plots
# plot some examples
examps = ["NFE2", "BACH2", "ARNTL", "BHLHE41", "POU2F3"]
order = [False, True]
pal = {False: sns.color_palette("Set2")[7], True: sns.color_palette("Set2")[2]}
for symb in examps:
motif_id = sig_results[sig_results["HGNC symbol"] == symb]["index"].iloc[0]
tmp = data_filt_plot.copy()
# determine whether motif is in human or mouse sequence
human_motifs_sub = human_motifs[human_motifs["#pattern name"] == motif_id]["hg19_id"].unique()
mouse_motifs_sub = mouse_motifs[mouse_motifs["#pattern name"] == motif_id]["mm9_id"].unique()
tmp["hg19_motif"] = tmp["tss_id"].isin(human_motifs_sub)
tmp["mm9_motif"] = tmp["tss_id"].isin(mouse_motifs_sub)
tmp["has_motif"] = tmp[["hg19_motif", "mm9_motif"]].sum(axis=1).astype(bool)
fig, axarr = plt.subplots(figsize=(2.75, 1.5), nrows=1, ncols=2)
ax = axarr[0]
sns.boxplot(data=tmp, x="has_motif", y="abs_logFC_trans", order=order, palette=pal,
flierprops = dict(marker='o', markersize=5), ax=ax)
mimic_r_boxplot(ax)
ax.set_xticklabels(["no motif", "motif"], rotation=50,
ha="right", va="top")
ax.set_ylabel("trans effect size")
ax.set_title(symb)
ax.set_xlabel("")
for i, label in enumerate(order):
n = len(tmp[tmp["has_motif"] == bool(label)])
ax.annotate(str(n), xy=(i, -0.4), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=pal[label], size=fontsize)
ax.set_ylim((-0.5, 2.5))
ax = axarr[1]
sns.boxplot(data=tmp, x="has_motif", y="logFC_trans_one", order=order, palette=pal,
flierprops = dict(marker='o', markersize=5), ax=ax)
ax.set_xticklabels(["no motif", "motif"], rotation=50, ha="right", va="top")
mimic_r_boxplot(ax)
ax.set_ylabel("trans effect size")
ax.set_title(symb)
ax.set_xlabel("")
ax.axhline(y=0, linestyle="dashed", color="black", zorder=100)
for i, label in enumerate(order):
n = len(tmp[tmp["has_motif"] == bool(label)])
ax.annotate(str(n), xy=(i, -2.4), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=pal[label], size=fontsize)
## annotate pvals
sub1 = tmp[tmp["has_motif"] == True]
sub2 = tmp[tmp["has_motif"] == False]
vals1 = np.asarray(sub1["logFC_trans_one"])
vals2 = np.asarray(sub2["logFC_trans_one"])
vals1 = vals1[~np.isnan(vals1)]
vals2 = vals2[~np.isnan(vals2)]
u, pval = stats.mannwhitneyu(vals1, vals2, alternative="two-sided", use_continuity=False)
print(pval)
annotate_pval(ax, 0.2, 0.8, 1, 0, 1, pval, fontsize-1)
ax.set_ylim((-2.5, 2))
plt.subplots_adjust(wspace=0.5)
if symb == "BACH2":
fig.savefig("Fig5C_1.pdf", dpi="figure", bbox_inches="tight")
elif symb == "POU2F3":
fig.savefig("Fig5C_2.pdf", dpi="figure", bbox_inches="tight")
plt.show()
# In[59]:
pal = {"repressing": sns.color_palette("pastel")[3], "activating": sns.color_palette("pastel")[0]}
# In[60]:
full_pal = {}
for i, row in sig_results.iterrows():
full_pal[row["HGNC symbol"]] = pal[row["activ_or_repr"]]
# In[61]:
sig_results_sub = sig_results.head(50)
# In[62]:
fig = plt.figure(figsize=(4.5, 8))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
yvals = []
symbs = []
c = 0
for i, row in sig_results_sub.iterrows():
symb = row["HGNC symbol"]
if symb not in symbs:
yvals.append(c)
symbs.append(symb)
c += 1
else:
yvals.append(c)
sig_results_sub["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=sig_results_sub, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of motif disruption")
sns.barplot(y="HGNC symbol", x="rsq_activ", data=sig_results_sub, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("additional variance explained")
melt = pd.melt(sig_results_sub, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
"lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
# fig.savefig("trans_motif_enrichment.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# ## 6. join with expression information
# In[63]:
orth_expr.head()
# In[64]:
trans_orth = motif_results_mrg.merge(orth_expr, left_on="HGNC symbol", right_on="gene_name_human")
len(trans_orth)
# In[65]:
# fisher's exact to see if trans are enriched in DE TFs
trans_ids = trans_orth[trans_orth["padj_trans"] < 0.05]["index"].unique()
no_trans_ids = trans_orth[trans_orth["padj_trans"] >= 0.05]["index"].unique()
DE_ids = trans_orth[trans_orth["sig"] == "sig"]["index"].unique()
trans_w_DE = len([x for x in trans_ids if x in DE_ids])
trans_wo_DE = len([x for x in trans_ids if x not in DE_ids])
no_trans_w_DE = len([x for x in no_trans_ids if x in DE_ids])
no_trans_wo_DE = len([x for x in no_trans_ids if x not in DE_ids])
# fisher's exact test
arr = np.zeros((2, 2))
arr[0, 0] = trans_w_DE
arr[0, 1] = trans_wo_DE
arr[1, 0] = no_trans_w_DE
arr[1, 1] = no_trans_wo_DE
odds, p = stats.fisher_exact(arr)
print(odds)
print(p)
# In[66]:
trans_orth_sig = trans_orth[trans_orth["padj_trans"] < 0.05]
trans_orth_sig["abs_beta"] = np.abs(trans_orth_sig["beta_trans"])
trans_orth_sig = trans_orth_sig.sort_values(by="abs_beta", ascending=False)
len(trans_orth_sig)
# In[67]:
trans_orth_sub = trans_orth_sig[trans_orth_sig["sig"] == "sig"]
len(trans_orth_sub)
# In[68]:
fig = plt.figure(figsize=(4.5, 9))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
yvals = []
symbs = []
c = 0
for i, row in trans_orth_sub.iterrows():
symb = row["HGNC symbol"]
if symb not in symbs:
yvals.append(c)
symbs.append(symb)
c += 1
else:
yvals.append(c)
trans_orth_sub["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=trans_orth_sub, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of\nmotif presence")
sns.barplot(y="HGNC symbol", x="logFC", data=trans_orth_sub, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("log2(mESC/hESC)")
melt = pd.melt(trans_orth_sub, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
"lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
fig.savefig("FigS11.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# In[69]:
trans_orth.head()
# In[70]:
fig, ax = plt.subplots(figsize=(2.2, 2.2), nrows=1, ncols=1)
ax.scatter(trans_orth["beta_trans"],
trans_orth["logFC"],
color=sns.color_palette("Set2")[2], alpha=0.75, s=15,
linewidths=0.5, edgecolors="white")
#ax.plot([-0.75, 400000], [-0.75, 400000], "k", linestyle="dashed")
#ax.set_xlim((-0.75, 400000))
#ax.set_ylim((-0.75, 400000))
ax.set_xlabel("trans odds ratio")
ax.set_ylabel("RNA-seq logFC([mESC/hESC])")
# annotate corr
no_nan = trans_orth[(~pd.isnull(trans_orth["beta_trans"])) &
(~pd.isnull(trans_orth["logFC"]))]
r, p = spearmanr(no_nan["beta_trans"], no_nan["logFC"])
ax.text(0.05, 0.97, "r = {:.2f}".format(r), ha="left", va="top", fontsize=fontsize,
transform=ax.transAxes)
ax.text(0.05, 0.90, "n = %s" % (len(no_nan)), ha="left", va="top", fontsize=fontsize,
transform=ax.transAxes)
#fig.savefig("TF_human_v_mouse_scatter.w_sig_outline.pdf", dpi="figure", bbox_inches="tight")
# In[71]:
# filter to those where direction matches
trans_orth_sub["direction_match"] = trans_orth_sub.apply(direction_match, axis=1)
trans_orth_sub.direction_match.value_counts()
# In[72]:
trans_orth_match = trans_orth_sub[trans_orth_sub["direction_match"] == "match"]
# In[73]:
match_activ = trans_orth_match[trans_orth_match["activ_or_repr"] == "activating"]
match_repr = trans_orth_match[trans_orth_match["activ_or_repr"] == "repressing"]
# In[74]:
fig = plt.figure(figsize=(4, 4.4))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
yvals = []
symbs = []
c = 0
for i, row in match_activ.iterrows():
symb = row["HGNC symbol"]
if symb not in symbs:
yvals.append(c)
symbs.append(symb)
c += 1
else:
yvals.append(c)
match_activ["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=match_activ, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of\nmotif presence")
ax1.axvline(x=0, linestyle="dashed", color="black")
sns.barplot(y="HGNC symbol", x="logFC", data=match_activ, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("log2(mESC/hESC)")
ax2.axvline(x=0, linestyle="dashed", color="black")
melt = pd.melt(match_activ, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
"lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
fig.savefig("Fig5B.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# In[75]:
fig = plt.figure(figsize=(4, 0.5))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
yvals = []
symbs = []
c = 0
for i, row in match_repr.iterrows():
symb = row["HGNC symbol"]
if symb not in symbs:
yvals.append(c)
symbs.append(symb)
c += 1
else:
yvals.append(c)
match_repr["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=match_repr, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of\nmotif presence")
ax1.axvline(x=0, linestyle="dashed", color="black")
sns.barplot(y="HGNC symbol", x="logFC", data=match_repr, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("log2(mESC/hESC)")
ax2.axvline(x=0, linestyle="dashed", color="black")
melt = pd.melt(match_repr, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
"lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
# fig.savefig("trans_motif_enrichment.with_expr.match_only.repr.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# ## 7. join w/ % similarity information
# In[76]:
orth.columns
# In[77]:
orth_sub = orth[["Gene name", "Mouse gene name", "dN with Mouse", "dS with Mouse"]]
orth_sub.columns = ["human_gene_name", "mouse_gene_name", "dN", "dS"]
orth_sub["dNdS"] = orth_sub["dN"]/orth_sub["dS"]
# In[78]:
trans_orth = trans_orth.merge(orth_sub, left_on="HGNC symbol", right_on="human_gene_name").drop_duplicates()
print(len(trans_orth))
trans_orth.sample(5)
# In[79]:
trans_orth["abs_l2fc"] = np.abs(trans_orth["logFC"])
trans_orth["sig_status"] = trans_orth.apply(sig_status, axis=1)
trans_orth.head()
# In[80]:
trans_orth.sig_status.value_counts()
# In[81]:
order = ["not sig", "sig"]
palette = {"not sig": "gray", "sig": sns.color_palette("Set2")[2]}
# In[82]:
trans_orth_sig = trans_orth[trans_orth["sig_status"] == "sig"]
print(len(trans_orth_sig))
trans_orth_sig.head()
# In[83]:
fig = plt.figure(figsize=(1, 1.75))
ax = sns.boxplot(data=trans_orth_sig, x="sig", y="dNdS", palette=palette, order=order,
flierprops = dict(marker='o', markersize=5))
mimic_r_boxplot(ax)
ax.set_xticklabels(order, rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel("dN/dS")
for i, label in enumerate(order):
n = len(trans_orth_sig[trans_orth_sig["sig"] == label])
ax.annotate(str(n), xy=(i, -0.07), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=palette[label], size=fontsize)
ax.set_ylim((-0.09, 0.4))
# calc p-vals b/w dists
dist1 = np.asarray(trans_orth_sig[trans_orth_sig["sig"] == "sig"]["dNdS"])
dist2 = np.asarray(trans_orth_sig[trans_orth_sig["sig"] != "sig"]["dNdS"])
dist1 = dist1[~np.isnan(dist1)]
dist2 = dist2[~np.isnan(dist2)]
u, pval = stats.mannwhitneyu(dist1, dist2, alternative="two-sided", use_continuity=False)
print(pval)
annotate_pval(ax, 0.2, 0.8, 0.2, 0, 0.2, pval, fontsize-1)
plt.show()
# fig.savefig("DE_v_similarity_boxplot.pdf", dpi="figure", bbox_inches="tight")
plt.close()
|
[
"numpy.random.seed",
"numpy.abs",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.subplot2grid",
"numpy.isnan",
"matplotlib.pyplot.figure",
"statsmodels.api.qqplot",
"pandas.read_table",
"sys.path.append",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.max",
"statsmodels.formula.api.ols",
"matplotlib.pyplot.subplots",
"seaborn.set",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"numpy.asarray",
"seaborn.barplot",
"matplotlib.pyplot.subplots_adjust",
"pandas.melt",
"warnings.filterwarnings",
"scipy.stats.spearmanr",
"numpy.zeros",
"pandas.isnull",
"seaborn.color_palette"
] |
[((203, 236), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (226, 236), False, 'import warnings\n'), ((779, 812), 'sys.path.append', 'sys.path.append', (['"""../../../utils"""'], {}), "('../../../utils')\n", (794, 812), False, 'import sys\n'), ((1029, 1052), 'seaborn.set', 'sns.set', ([], {}), '(**PAPER_PRESET)\n', (1036, 1052), True, 'import seaborn as sns\n'), ((1092, 1112), 'numpy.random.seed', 'np.random.seed', (['(2019)'], {}), '(2019)\n', (1106, 1112), True, 'import numpy as np\n'), ((3491, 3523), 'pandas.read_table', 'pd.read_table', (['index_f'], {'sep': '"""\t"""'}), "(index_f, sep='\\t')\n", (3504, 3523), True, 'import pandas as pd\n'), ((3755, 3789), 'pandas.read_table', 'pd.read_table', (['tss_map_f'], {'sep': '"""\t"""'}), "(tss_map_f, sep='\\t')\n", (3768, 3789), True, 'import pandas as pd\n'), ((3887, 3926), 'pandas.read_table', 'pd.read_table', (['human_motifs_f'], {'sep': '"""\t"""'}), "(human_motifs_f, sep='\\t')\n", (3900, 3926), True, 'import pandas as pd\n'), ((4029, 4068), 'pandas.read_table', 'pd.read_table', (['mouse_motifs_f'], {'sep': '"""\t"""'}), "(mouse_motifs_f, sep='\\t')\n", (4042, 4068), True, 'import pandas as pd\n'), ((4116, 4153), 'pandas.read_table', 'pd.read_table', (['motif_info_f'], {'sep': '"""\t"""'}), "(motif_info_f, sep='\\t')\n", (4129, 4153), True, 'import pandas as pd\n'), ((4199, 4226), 'pandas.read_table', 'pd.read_table', (['sig_motifs_f'], {}), '(sig_motifs_f)\n', (4212, 4226), True, 'import pandas as pd\n'), ((4340, 4361), 'pandas.read_table', 'pd.read_table', (['data_f'], {}), '(data_f)\n', (4353, 4361), True, 'import pandas as pd\n'), ((4400, 4436), 'pandas.read_table', 'pd.read_table', (['orth_expr_f'], {'sep': '"""\t"""'}), "(orth_expr_f, sep='\\t')\n", (4413, 4436), True, 'import pandas as pd\n'), ((4481, 4518), 'pandas.read_table', 'pd.read_table', (['human_expr_f'], {'sep': '"""\t"""'}), "(human_expr_f, sep='\\t')\n", (4494, 4518), True, 'import pandas as pd\n'), ((4564, 4601), 'pandas.read_table', 'pd.read_table', (['mouse_expr_f'], {'sep': '"""\t"""'}), "(mouse_expr_f, sep='\\t')\n", (4577, 4601), True, 'import pandas as pd\n'), ((4641, 4672), 'pandas.read_table', 'pd.read_table', (['orth_f'], {'sep': '"""\t"""'}), "(orth_f, sep='\\t')\n", (4654, 4672), True, 'import pandas as pd\n'), ((6831, 6927), 'pandas.DataFrame', 'pd.DataFrame', (['scaled_features'], {'index': 'data_indiv.index', 'columns': "['logFC_trans', 'gc', 'cpg']"}), "(scaled_features, index=data_indiv.index, columns=[\n 'logFC_trans', 'gc', 'cpg'])\n", (6843, 6927), True, 'import pandas as pd\n'), ((7426, 7476), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.2, 2.2)', 'ncols': '(1)', 'nrows': '(1)'}), '(figsize=(2.2, 2.2), ncols=1, nrows=1)\n', (7438, 7476), True, 'import matplotlib.pyplot as plt\n'), ((7477, 7508), 'statsmodels.api.qqplot', 'sm.qqplot', (['res'], {'line': '"""s"""', 'ax': 'ax'}), "(res, line='s', ax=ax)\n", (7486, 7508), True, 'import statsmodels.api as sm\n'), ((10357, 10390), 'numpy.abs', 'np.abs', (["sig_results['beta_trans']"], {}), "(sig_results['beta_trans'])\n", (10363, 10390), True, 'import numpy as np\n'), ((11253, 11294), 'numpy.abs', 'np.abs', (["data_filt_plot['logFC_trans_one']"], {}), "(data_filt_plot['logFC_trans_one'])\n", (11259, 11294), True, 'import numpy as np\n'), ((14708, 14736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 8)'}), '(figsize=(4.5, 8))\n', (14718, 14736), True, 'import matplotlib.pyplot as plt\n'), ((14744, 14787), 
'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (14760, 14787), True, 'import matplotlib.pyplot as plt\n'), ((14794, 14837), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (14810, 14837), True, 'import matplotlib.pyplot as plt\n'), ((14844, 14887), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (14860, 14887), True, 'import matplotlib.pyplot as plt\n'), ((15148, 15245), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'sig_results_sub', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=sig_results_sub, palette=\n full_pal, ax=ax1)\n", (15159, 15245), True, 'import seaborn as sns\n'), ((15311, 15407), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""rsq_activ"""', 'data': 'sig_results_sub', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='rsq_activ', data=sig_results_sub, palette=\n full_pal, ax=ax2)\n", (15322, 15407), True, 'import seaborn as sns\n'), ((15523, 15651), 'pandas.melt', 'pd.melt', (['sig_results_sub'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(sig_results_sub, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (15530, 15651), True, 'import pandas as pd\n'), ((16086, 16096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16094, 16096), True, 'import matplotlib.pyplot as plt\n'), ((16176, 16187), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16185, 16187), True, 'import matplotlib.pyplot as plt\n'), ((16954, 16970), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (16962, 16970), True, 'import numpy as np\n'), ((17231, 17267), 'numpy.abs', 'np.abs', (["trans_orth_sig['beta_trans']"], {}), "(trans_orth_sig['beta_trans'])\n", (17237, 17267), True, 'import numpy as np\n'), ((17482, 17510), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 9)'}), '(figsize=(4.5, 9))\n', (17492, 17510), True, 'import matplotlib.pyplot as plt\n'), ((17518, 17561), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (17534, 17561), True, 'import matplotlib.pyplot as plt\n'), ((17568, 17611), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (17584, 17611), True, 'import matplotlib.pyplot as plt\n'), ((17618, 17661), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (17634, 17661), True, 'import matplotlib.pyplot as plt\n'), ((17920, 18016), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'trans_orth_sub', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=trans_orth_sub, palette=\n full_pal, ax=ax1)\n", (17931, 18016), True, 'import seaborn as sns\n'), ((18081, 18172), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""logFC"""', 'data': 'trans_orth_sub', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='logFC', data=trans_orth_sub, palette=\n full_pal, ax=ax2)\n", (18092, 18172), True, 'import seaborn as sns\n'), ((18274, 
18401), 'pandas.melt', 'pd.melt', (['trans_orth_sub'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(trans_orth_sub, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (18281, 18401), True, 'import pandas as pd\n'), ((18836, 18846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18844, 18846), True, 'import matplotlib.pyplot as plt\n'), ((18908, 18919), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18917, 18919), True, 'import matplotlib.pyplot as plt\n'), ((18976, 19026), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.2, 2.2)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(2.2, 2.2), nrows=1, ncols=1)\n', (18988, 19026), True, 'import matplotlib.pyplot as plt\n'), ((19560, 19608), 'scipy.stats.spearmanr', 'spearmanr', (["no_nan['beta_trans']", "no_nan['logFC']"], {}), "(no_nan['beta_trans'], no_nan['logFC'])\n", (19569, 19608), False, 'from scipy.stats import spearmanr\n'), ((20413, 20441), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4.4)'}), '(figsize=(4, 4.4))\n', (20423, 20441), True, 'import matplotlib.pyplot as plt\n'), ((20449, 20492), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (20465, 20492), True, 'import matplotlib.pyplot as plt\n'), ((20499, 20542), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (20515, 20542), True, 'import matplotlib.pyplot as plt\n'), ((20549, 20592), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (20565, 20592), True, 'import matplotlib.pyplot as plt\n'), ((20845, 20938), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'match_activ', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=match_activ, palette=\n full_pal, ax=ax1)\n", (20856, 20938), True, 'import seaborn as sns\n'), ((21055, 21142), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""logFC"""', 'data': 'match_activ', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='logFC', data=match_activ, palette=full_pal,\n ax=ax2)\n", (21066, 21142), True, 'import seaborn as sns\n'), ((21297, 21421), 'pandas.melt', 'pd.melt', (['match_activ'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(match_activ, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (21304, 21421), True, 'import pandas as pd\n'), ((21856, 21866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21864, 21866), True, 'import matplotlib.pyplot as plt\n'), ((21927, 21938), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21936, 21938), True, 'import matplotlib.pyplot as plt\n'), ((21959, 21987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 0.5)'}), '(figsize=(4, 0.5))\n', (21969, 21987), True, 'import matplotlib.pyplot as plt\n'), ((21995, 22038), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (22011, 22038), True, 'import matplotlib.pyplot as plt\n'), ((22045, 22088), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': 
'(3)'}), '((1, 7), (0, 3), colspan=3)\n', (22061, 22088), True, 'import matplotlib.pyplot as plt\n'), ((22095, 22138), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (22111, 22138), True, 'import matplotlib.pyplot as plt\n'), ((22389, 22481), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'match_repr', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=match_repr, palette=\n full_pal, ax=ax1)\n", (22400, 22481), True, 'import seaborn as sns\n'), ((22598, 22684), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""logFC"""', 'data': 'match_repr', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='logFC', data=match_repr, palette=full_pal,\n ax=ax2)\n", (22609, 22684), True, 'import seaborn as sns\n'), ((22839, 22962), 'pandas.melt', 'pd.melt', (['match_repr'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(match_repr, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (22846, 22962), True, 'import pandas as pd\n'), ((23397, 23407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23405, 23407), True, 'import matplotlib.pyplot as plt\n'), ((23513, 23524), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23522, 23524), True, 'import matplotlib.pyplot as plt\n'), ((24017, 24044), 'numpy.abs', 'np.abs', (["trans_orth['logFC']"], {}), "(trans_orth['logFC'])\n", (24023, 24044), True, 'import numpy as np\n'), ((24432, 24461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1, 1.75)'}), '(figsize=(1, 1.75))\n', (24442, 24461), True, 'import matplotlib.pyplot as plt\n'), ((25085, 25151), 'numpy.asarray', 'np.asarray', (["trans_orth_sig[trans_orth_sig['sig'] == 'sig']['dNdS']"], {}), "(trans_orth_sig[trans_orth_sig['sig'] == 'sig']['dNdS'])\n", (25095, 25151), True, 'import numpy as np\n'), ((25160, 25226), 'numpy.asarray', 'np.asarray', (["trans_orth_sig[trans_orth_sig['sig'] != 'sig']['dNdS']"], {}), "(trans_orth_sig[trans_orth_sig['sig'] != 'sig']['dNdS'])\n", (25170, 25226), True, 'import numpy as np\n'), ((25456, 25466), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25464, 25466), True, 'import matplotlib.pyplot as plt\n'), ((25547, 25558), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25556, 25558), True, 'import matplotlib.pyplot as plt\n'), ((12151, 12202), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.75, 1.5)', 'nrows': '(1)', 'ncols': '(2)'}), '(figsize=(2.75, 1.5), nrows=1, ncols=2)\n', (12163, 12202), True, 'import matplotlib.pyplot as plt\n'), ((13783, 13818), 'numpy.asarray', 'np.asarray', (["sub1['logFC_trans_one']"], {}), "(sub1['logFC_trans_one'])\n", (13793, 13818), True, 'import numpy as np\n'), ((13831, 13866), 'numpy.asarray', 'np.asarray', (["sub2['logFC_trans_one']"], {}), "(sub2['logFC_trans_one'])\n", (13841, 13866), True, 'import numpy as np\n'), ((14157, 14188), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)'}), '(wspace=0.5)\n', (14176, 14188), True, 'import matplotlib.pyplot as plt\n'), ((14384, 14394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14392, 14394), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6762), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6760, 6762), False, 'from 
sklearn.preprocessing import StandardScaler\n'), ((7278, 7335), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""logFC_trans ~ gc + cpg"""', 'data': 'data_filt'}), "(formula='logFC_trans ~ gc + cpg', data=data_filt)\n", (7285, 7335), True, 'import statsmodels.formula.api as smf\n'), ((9660, 9713), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['motif_results'], {'orient': '"""index"""'}), "(motif_results, orient='index')\n", (9682, 9713), True, 'import pandas as pd\n'), ((11461, 11486), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (11478, 11486), True, 'import seaborn as sns\n'), ((11497, 11522), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (11514, 11522), True, 'import seaborn as sns\n'), ((14430, 14457), 'seaborn.color_palette', 'sns.color_palette', (['"""pastel"""'], {}), "('pastel')\n", (14447, 14457), True, 'import seaborn as sns\n'), ((14476, 14503), 'seaborn.color_palette', 'sns.color_palette', (['"""pastel"""'], {}), "('pastel')\n", (14493, 14503), True, 'import seaborn as sns\n'), ((24256, 24281), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (24273, 24281), True, 'import seaborn as sns\n'), ((25243, 25258), 'numpy.isnan', 'np.isnan', (['dist1'], {}), '(dist1)\n', (25251, 25258), True, 'import numpy as np\n'), ((25275, 25290), 'numpy.isnan', 'np.isnan', (['dist2'], {}), '(dist2)\n', (25283, 25290), True, 'import numpy as np\n'), ((8981, 9044), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""logFC_trans ~ gc + cpg + tss_motif"""', 'data': 'tmp'}), "(formula='logFC_trans ~ gc + cpg + tss_motif', data=tmp)\n", (8988, 9044), True, 'import statsmodels.formula.api as smf\n'), ((13886, 13901), 'numpy.isnan', 'np.isnan', (['vals1'], {}), '(vals1)\n', (13894, 13901), True, 'import numpy as np\n'), ((13922, 13937), 'numpy.isnan', 'np.isnan', (['vals2'], {}), '(vals2)\n', (13930, 13937), True, 'import numpy as np\n'), ((15826, 15839), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (15832, 15839), True, 'import numpy as np\n'), ((18576, 18589), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (18582, 18589), True, 'import numpy as np\n'), ((19115, 19140), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (19132, 19140), True, 'import seaborn as sns\n'), ((19458, 19493), 'pandas.isnull', 'pd.isnull', (["trans_orth['beta_trans']"], {}), "(trans_orth['beta_trans'])\n", (19467, 19493), True, 'import pandas as pd\n'), ((19520, 19550), 'pandas.isnull', 'pd.isnull', (["trans_orth['logFC']"], {}), "(trans_orth['logFC'])\n", (19529, 19550), True, 'import pandas as pd\n'), ((21596, 21609), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (21602, 21609), True, 'import numpy as np\n'), ((23137, 23150), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (23143, 23150), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
import multipy
################################################################################
################################################################################
####
#### Class: Transform
####
################################################################################
################################################################################
class Transform(unittest.TestCase):
def test_Transform__diffusive_flux_mass_molar_to_mass_mass__allowed_calls(self):
X = np.random.rand(5,100)
Y = np.random.rand(5,100)
try:
transform = multipy.Transform()
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
(n_species_1,n_species_2,n_observations) = np.shape(B_ou)
self.assertTrue(n_species_1==4)
self.assertTrue(n_species_2==4)
self.assertTrue(n_observations==100)
except Exception:
self.assertTrue(False)
X = np.random.rand(2,100)
Y = np.random.rand(2,100)
try:
transform = multipy.Transform()
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
(n_species_1,n_species_2,n_observations) = np.shape(B_ou)
self.assertTrue(n_species_1==1)
self.assertTrue(n_species_2==1)
self.assertTrue(n_observations==100)
except Exception:
self.assertTrue(False)
X = np.random.rand(2,1)
Y = np.random.rand(2,1)
try:
transform = multipy.Transform()
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
(n_species_1,n_species_2,n_observations) = np.shape(B_ou)
self.assertTrue(n_species_1==1)
self.assertTrue(n_species_2==1)
self.assertTrue(n_observations==1)
except Exception:
self.assertTrue(False)
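    # Shape contract exercised by the calls above (a summary of these assertions, not a
    # statement from the multipy documentation): for inputs X, Y of shape
    # (n_species, n_observations), the returned B_ou has shape
    # (n_species - 1, n_species - 1, n_observations).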
################################################################################
################################################################################
def test_Transform__diffusive_flux_mass_molar_to_mass_mass__not_allowed_calls(self):
transform = multipy.Transform()
X = np.random.rand(1,100)
Y = np.random.rand(1,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
X = np.random.rand(5,100)
Y = np.random.rand(4,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
X = np.random.rand(5,100)
Y = np.random.rand(1,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
X = np.random.rand(1,100)
Y = np.random.rand(5,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
X = np.random.rand(100)
Y = np.random.rand(5,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
X = np.random.rand(5,100)
Y = np.random.rand(100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
X = np.random.rand(5,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, [1,2,3,4,5])
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, None)
Y = np.random.rand(5,100)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass([1,2,3,4,5], Y)
with self.assertRaises(ValueError):
B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(None, Y)
################################################################################
################################################################################
def test_Transform__diffusive_flux_mass_molar_to_mass_mass__computation(self):
pass
################################################################################
################################################################################
def test_Transform__diffusive_flux_mass_molar_to_mass_mass__inverses(self):
pass
################################################################################
################################################################################
|
[
"numpy.random.rand",
"multipy.Transform",
"numpy.shape"
] |
[((546, 568), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (560, 568), True, 'import numpy as np\n'), ((580, 602), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (594, 602), True, 'import numpy as np\n'), ((1015, 1037), 'numpy.random.rand', 'np.random.rand', (['(2)', '(100)'], {}), '(2, 100)\n', (1029, 1037), True, 'import numpy as np\n'), ((1049, 1071), 'numpy.random.rand', 'np.random.rand', (['(2)', '(100)'], {}), '(2, 100)\n', (1063, 1071), True, 'import numpy as np\n'), ((1484, 1504), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (1498, 1504), True, 'import numpy as np\n'), ((1516, 1536), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (1530, 1536), True, 'import numpy as np\n'), ((2208, 2227), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (2225, 2227), False, 'import multipy\n'), ((2241, 2263), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2255, 2263), True, 'import numpy as np\n'), ((2275, 2297), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2289, 2297), True, 'import numpy as np\n'), ((2429, 2451), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (2443, 2451), True, 'import numpy as np\n'), ((2463, 2485), 'numpy.random.rand', 'np.random.rand', (['(4)', '(100)'], {}), '(4, 100)\n', (2477, 2485), True, 'import numpy as np\n'), ((2617, 2639), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (2631, 2639), True, 'import numpy as np\n'), ((2651, 2673), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2665, 2673), True, 'import numpy as np\n'), ((2805, 2827), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2819, 2827), True, 'import numpy as np\n'), ((2839, 2861), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (2853, 2861), True, 'import numpy as np\n'), ((2993, 3012), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3007, 3012), True, 'import numpy as np\n'), ((3025, 3047), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3039, 3047), True, 'import numpy as np\n'), ((3179, 3201), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3193, 3201), True, 'import numpy as np\n'), ((3213, 3232), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3227, 3232), True, 'import numpy as np\n'), ((3365, 3387), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3379, 3387), True, 'import numpy as np\n'), ((3651, 3673), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3665, 3673), True, 'import numpy as np\n'), ((640, 659), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (657, 659), False, 'import multipy\n'), ((789, 803), 'numpy.shape', 'np.shape', (['B_ou'], {}), '(B_ou)\n', (797, 803), True, 'import numpy as np\n'), ((1109, 1128), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (1126, 1128), False, 'import multipy\n'), ((1258, 1272), 'numpy.shape', 'np.shape', (['B_ou'], {}), '(B_ou)\n', (1266, 1272), True, 'import numpy as np\n'), ((1574, 1593), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (1591, 1593), False, 'import multipy\n'), ((1723, 1737), 'numpy.shape', 'np.shape', (['B_ou'], {}), '(B_ou)\n', (1731, 1737), True, 'import numpy as np\n')]
|
# SPDX-FileCopyrightText: 2022 UChicago Argonne, LLC
# SPDX-License-Identifier: MIT
from datetime import datetime
from pathlib import Path
import shutil
from typing import List, Optional
import numpy as np
import pandas as pd
from .fileutils import PathLike, run as run_proc
from .parameters import Parameters
from .plugin import TemplatePlugin
from .results import Results
class ResultsMOOSE(Results):
"""MOOSE simulation results
Parameters
----------
params
Parameters used to generate inputs
name
Name of workflow producing results
time
Time at which workflow was run
inputs
List of input files
outputs
List of output files
Attributes
----------
stdout
Standard output from MOOSE run
csv_data
Dictionary with data from .csv files
"""
def __init__(self, params: Parameters, name: str, time: datetime,
inputs: List[PathLike], outputs: List[PathLike]):
super().__init__('MOOSE', params, name, time, inputs, outputs)
self.csv_data = self._save_MOOSE_csv()
@property
def stdout(self) -> str:
return (self.base_path / "MOOSE_log.txt").read_text()
def _save_MOOSE_csv(self) -> dict:
"""Read all MOOSE '.csv' files and return results in a dictionary
Returns
-------
Results from MOOSE .csv files
"""
input_file = self.inputs[0]
csv_file = input_file.with_name(f"{input_file.stem}_csv.csv")
# Save MOOSE's main output '.csv' files
csv_data = {}
if csv_file.exists():
csv_file_df = pd.read_csv(csv_file)
for column_name in csv_file_df.columns:
csv_data[column_name] = np.array(csv_file_df[column_name])
        # Read MOOSE's vector postprocessor '.csv' files and save the
        # parameters as individual arrays
for output in self.outputs:
if (output.name.startswith(f"{input_file.stem}_csv_") and
not output.name.endswith("_0000.csv")):
vector_csv_df = pd.read_csv(output)
csv_param = list(set(vector_csv_df.columns) - {"id", "x", "y", "z"})
csv_data[output.stem] = np.array(vector_csv_df[csv_param[0]], dtype=float)
for name in ("id", "x", "y", "z"):
new_name = output.name[:-8] + name
if new_name not in csv_data:
csv_data[new_name] = np.array(vector_csv_df[name], dtype=float)
return csv_data
class PluginMOOSE(TemplatePlugin):
"""Plugin for running MOOSE
Parameters
----------
template_file
Templated MOOSE input
n_cpu
Number of processors to be used to run MOOSE application
extra_inputs
List of extra (non-templated) input files that are needed
extra_template_inputs
Extra templated input files
show_stdout
Whether to display output from stdout when MOOSE is run
show_stderr
Whether to display output from stderr when MOOSE is run
Attributes
----------
moose_exec
Path to MOOSE executable
"""
def __init__(self, template_file: str, n_cpu: int = 1,
extra_inputs: Optional[List[str]] = None,
extra_template_inputs: Optional[List[PathLike]] = None,
show_stdout: bool = False, show_stderr: bool = False):
super().__init__(template_file, extra_inputs, extra_template_inputs,
show_stdout, show_stderr)
self._moose_exec = Path('moose-opt')
self.input_name = "MOOSE.i"
if n_cpu < 1:
raise RuntimeError("The CPU number used to run MOOSE app must be a natural number.")
self.n_cpu = n_cpu
@property
def moose_exec(self) -> Path:
return self._moose_exec
@moose_exec.setter
def moose_exec(self, exe: PathLike):
if shutil.which(exe) is None:
raise RuntimeError(f"MOOSE executable '{exe}' is missing.")
self._moose_exec = Path(exe)
def options(self, moose_exec):
"""Input MOOSE user-specified options
Parameters
----------
        moose_exec
Path to MOOSE executable
"""
self.moose_exec = moose_exec
def run(self):
"""Run MOOSE"""
run_proc(["mpiexec", "-n", str(self.n_cpu), self.moose_exec,
"-i", self.input_name])
def postrun(self, params: Parameters, name: str) -> ResultsMOOSE:
"""Read MOOSE results and create results object
Parameters
----------
params
Parameters used to create MOOSE model
name
Name of the workflow
Returns
-------
MOOSE results object
"""
time, inputs, outputs = self._get_result_input(self.input_name)
return ResultsMOOSE(params, name, time, inputs, outputs)
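# A minimal usage sketch (illustrative only; `params`, the template file name and the
# executable path are assumed placeholders, and the surrounding workflow machinery that
# renders the templated input is not shown):
#
#     plugin = PluginMOOSE("moose_template.i", n_cpu=4)
#     plugin.options("/path/to/moose-app-opt")   # set the MOOSE executable
#     plugin.run()                                # mpiexec -n 4 ... -i MOOSE.i
#     results = plugin.postrun(params, "my_workflow")
#     print(results.stdout)                       # contents of MOOSE_log.txt
#     print(list(results.csv_data))               # columns read from the *_csv.csv outputs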
|
[
"pandas.read_csv",
"pathlib.Path",
"numpy.array",
"shutil.which"
] |
[((3600, 3617), 'pathlib.Path', 'Path', (['"""moose-opt"""'], {}), "('moose-opt')\n", (3604, 3617), False, 'from pathlib import Path\n'), ((4083, 4092), 'pathlib.Path', 'Path', (['exe'], {}), '(exe)\n', (4087, 4092), False, 'from pathlib import Path\n'), ((1644, 1665), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (1655, 1665), True, 'import pandas as pd\n'), ((3957, 3974), 'shutil.which', 'shutil.which', (['exe'], {}), '(exe)\n', (3969, 3974), False, 'import shutil\n'), ((1758, 1792), 'numpy.array', 'np.array', (['csv_file_df[column_name]'], {}), '(csv_file_df[column_name])\n', (1766, 1792), True, 'import numpy as np\n'), ((2100, 2119), 'pandas.read_csv', 'pd.read_csv', (['output'], {}), '(output)\n', (2111, 2119), True, 'import pandas as pd\n'), ((2245, 2295), 'numpy.array', 'np.array', (['vector_csv_df[csv_param[0]]'], {'dtype': 'float'}), '(vector_csv_df[csv_param[0]], dtype=float)\n', (2253, 2295), True, 'import numpy as np\n'), ((2497, 2539), 'numpy.array', 'np.array', (['vector_csv_df[name]'], {'dtype': 'float'}), '(vector_csv_df[name], dtype=float)\n', (2505, 2539), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
import numpy as np
import copy
import coremltools
from coremltools import converters as converter
from coremltools.converters.mil import converter as _converter
from coremltools.converters.mil.mil import Program, Function
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
from coremltools._deps import _IS_MACOS
import PIL.Image
converter = converter
_converter = _converter
def assert_op_count_match(program, expect, op=None, verbose=False):
"""
    Assert that the number of ops matches the expected number. If op is not
    specified, count the total number of ops and match it against expect.
"""
if verbose:
print(program)
count = 0
for _, func in program.functions.items():
for o in func.operations:
if not op:
count += 1
elif o.op_type.lower() == op.lower():
count += 1
np.testing.assert_equal(count, expect)
def assert_model_is_valid(
program, inputs, backend="nn_proto", verbose=True, expected_output_shapes=None
):
"""
Assert Core ML model is valid.
Inputs:
    - inputs: dict of str -> shape tuple. All program input names must appear
      among the keys. Shape tuples may only contain positive integers.
"""
input_dict = dict()
for name, shape in inputs.items():
input_dict[name] = np.random.rand(*shape)
proto = _converter._convert(program, convert_from="mil", convert_to=backend)
if verbose:
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(proto, style="coding")
model = coremltools.models.MLModel(proto)
assert model is not None
if _IS_MACOS:
prediction = model.predict(input_dict, useCPUOnly=True)
assert prediction is not None
if expected_output_shapes is not None:
for out_name, out_shape in expected_output_shapes.items():
assert out_name in prediction
assert out_shape == prediction[out_name].shape
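# Example call (illustrative; the input/output names and shapes are hypothetical):
#
#     assert_model_is_valid(prog, {"x": (1, 3, 8, 8)},
#                           expected_output_shapes={"y": (1, 3, 8, 8)})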
def assert_same_output_names(prog1, prog2, func_name="main"):
prog1_outputs = [o.name for o in prog1[func_name].outputs]
prog2_outputs = [o.name for o in prog2[func_name].outputs]
assert prog1_outputs == prog2_outputs
def assert_same_output_shapes(prog1, prog2, func_name="main"):
prog1_output_shapes = [o.shape for o in prog1[func_name].outputs]
prog2_output_shapes = [o.shape for o in prog2[func_name].outputs]
assert prog1_output_shapes == prog2_output_shapes
def get_op_types_in_program(prog, func_name="main", skip_const_ops=True):
"""
Return the operation types in prog[func_name],
in the same order as they are stored (topological)
"""
op_types_in_program = []
for op in prog[func_name].operations:
if skip_const_ops:
if op.op_type == "const":
continue
op_types_in_program.append(op.op_type)
return op_types_in_program
def random_gen(
shape,
rand_min=0.0,
rand_max=1.0,
eps_from_int=0.0,
allow_duplicate=True,
dtype=np.float32,
):
"""
This helper function generates a random array of shape `shape`
The range of generated numbers will be between (rand_min, rand_max].
The value of generated numbers will be at least `eps_from_int` apart from integers.
    If allow_duplicate is set to False, the generated values are guaranteed to all be different.
Default data type is np.float32.
"""
elem = np.prod(shape).astype(np.int)
ret = []
for _ in range(elem):
while True:
r = dtype((rand_max - rand_min) * np.random.random() + rand_min)
if not allow_duplicate and r in ret:
continue
if np.issubdtype(dtype, np.integer) or np.fabs(np.round(r) - r) > eps_from_int:
ret.append(r)
break
ret = np.array(ret).reshape(shape)
return ret.astype(dtype)
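# Example (illustrative): draw a 2x3 float32 array of pairwise-distinct values between
# 1 and 6 that are at least 0.25 away from any integer:
#
#     x = random_gen((2, 3), rand_min=1.0, rand_max=6.0, eps_from_int=0.25,
#                    allow_duplicate=False)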
def ssa_fn(func):
"""
Deprecated: use @mb.program()
"""
def wrapper(*args, **kwargs):
prog = Program()
with Function({}) as ssa_func:
func(*args, **kwargs)
return wrapper
def to_tuple(v):
if not isinstance(v, (list, tuple)):
return tuple([v])
return tuple(v)
def is_close(expected, actual, atol=1e-04, rtol=1e-05):
"""
expected, actual: np.array or python primitive (scalar)
rtol: relative tolerance. See numpy.isclose.
"""
close = np.isclose(expected, actual, atol=atol, rtol=rtol)
if not np.all(close):
diff = expected - actual
num_not_close = np.sum(~close)
msg = "Values differ by L1 norm: {}. Num entries not close: {}/{}"
logging.error(msg.format(np.sum(np.abs(diff)), num_not_close, expected.size))
if num_not_close < 30:
logging.error("Differing entries:")
logging.error("Expected: {}".format(expected[~close]))
logging.error("Actual: {}".format(actual[~close]))
logging.error("Delta: {}".format(diff[~close]))
return False
return True
def run_core_ml_predict(proto, input_key_values, use_cpu_only=False):
model = coremltools.models.MLModel(proto, useCPUOnly=use_cpu_only)
for k, v in input_key_values.items():
if isinstance(v, PIL.Image.Image):
continue
elif not np.isscalar(v) and not v.shape == ():
input_key_values[k] = v.astype(np.float32)
else:
input_key_values[k] = np.array([v], dtype=np.float32)
return model.predict(input_key_values, useCPUOnly=use_cpu_only)
def compare_backend(
proto,
input_key_values,
expected_outputs,
use_cpu_only=False,
atol=1e-04,
rtol=1e-05,
also_compare_shapes=True,
):
"""
Inputs:
- proto: MLModel proto.
- input_key_values: str -> np.array. Keys must match those in
input_placeholders.
- expected_outputs: dict[str, np.array]. Required iff
frontend_only == False
- use_cpu_only: True/False.
"""
if _IS_MACOS:
pred = run_core_ml_predict(proto, input_key_values, use_cpu_only=use_cpu_only)
if also_compare_shapes:
compare_shapes(
proto,
input_key_values,
expected_outputs,
use_cpu_only=use_cpu_only,
pred=pred,
)
if not use_cpu_only:
atol = max(atol * 100.0, 5e-1)
rtol = max(rtol * 100.0, 5e-2)
for o, expected in expected_outputs.items():
msg = (
"Output {} differs. useCPUOnly={}.\nInput={}, "
+ "\nExpected={}, \nOutput={}\n"
)
assert is_close(expected, pred[o], atol, rtol), msg.format(
o, use_cpu_only, input_key_values, expected, pred[o]
)
def compare_shapes(
proto, input_key_values, expected_outputs, use_cpu_only=False, pred=None
):
"""
Inputs:
- proto: MLModel proto.
- input_key_values: str -> np.array or PIL.Image. Keys must match those in
input_placeholders.
- expected_outputs: dict[str, np.array].
- use_cpu_only: True/False.
- pred: Prediction to use, if it has already been computed.
"""
if _IS_MACOS:
if not pred:
pred = run_core_ml_predict(proto, input_key_values, use_cpu_only)
for o, expected in expected_outputs.items():
msg = "Output: {}. expected shape {} != actual shape {}".format(
o, expected.shape, pred[o].shape
)
# Core ML does not support scalar as output
# remove this special case when support is added
if expected.shape == () and pred[o].shape == (1,):
continue
assert pred[o].shape == expected.shape, msg
def get_core_ml_prediction(
build, input_placeholders, input_values, use_cpu_only=False, backend="nn_proto"
):
"""
Return predictions of the given model.
"""
program = Program()
with Function(input_placeholders) as ssa_func:
output_vars = build(**ssa_func.inputs)
if isinstance(output_vars, tuple):
output_vars = list(output_vars)
elif not isinstance(output_vars, list):
output_vars = [output_vars]
ssa_func.set_outputs(output_vars)
program.add_function("main", ssa_func)
proto = _converter._convert(program, convert_from="mil", convert_to=backend)
model = coremltools.models.MLModel(proto, use_cpu_only)
return model.predict(input_values, useCPUOnly=use_cpu_only)
def apply_pass_and_basic_check(prog, pass_name):
"""
    Apply the pass named pass_name to prog and check that output names and shapes are preserved.
"""
prev_prog = copy.deepcopy(prog)
PASS_REGISTRY[pass_name](prog)
block = prog.functions["main"]
prev_block = prev_prog.functions["main"]
assert_same_output_names(prev_prog, prog)
assert_same_output_shapes(prev_prog, prog)
return prev_prog, prev_block, block
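# Example (illustrative; "common::some_pass" is a placeholder for any name registered
# in PASS_REGISTRY):
#
#     prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::some_pass")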
|
[
"numpy.sum",
"numpy.abs",
"numpy.isclose",
"numpy.round",
"numpy.prod",
"logging.error",
"numpy.testing.assert_equal",
"coremltools.converters.mil.mil.Program",
"copy.deepcopy",
"coremltools.models.neural_network.printer.print_network_spec",
"numpy.issubdtype",
"numpy.all",
"coremltools.converters.mil.converter._convert",
"numpy.isscalar",
"coremltools.converters.mil.mil.Function",
"numpy.random.random",
"numpy.array",
"numpy.random.rand",
"coremltools.models.MLModel"
] |
[((1597, 1665), 'coremltools.converters.mil.converter._convert', '_converter._convert', (['program'], {'convert_from': '"""mil"""', 'convert_to': 'backend'}), "(program, convert_from='mil', convert_to=backend)\n", (1616, 1665), True, 'from coremltools.converters.mil import converter as _converter\n'), ((1827, 1860), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['proto'], {}), '(proto)\n', (1853, 1860), False, 'import coremltools\n'), ((4669, 4719), 'numpy.isclose', 'np.isclose', (['expected', 'actual'], {'atol': 'atol', 'rtol': 'rtol'}), '(expected, actual, atol=atol, rtol=rtol)\n', (4679, 4719), True, 'import numpy as np\n'), ((5369, 5427), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['proto'], {'useCPUOnly': 'use_cpu_only'}), '(proto, useCPUOnly=use_cpu_only)\n', (5395, 5427), False, 'import coremltools\n'), ((8257, 8266), 'coremltools.converters.mil.mil.Program', 'Program', ([], {}), '()\n', (8264, 8266), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((8642, 8710), 'coremltools.converters.mil.converter._convert', '_converter._convert', (['program'], {'convert_from': '"""mil"""', 'convert_to': 'backend'}), "(program, convert_from='mil', convert_to=backend)\n", (8661, 8710), True, 'from coremltools.converters.mil import converter as _converter\n'), ((8723, 8770), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['proto', 'use_cpu_only'], {}), '(proto, use_cpu_only)\n', (8749, 8770), False, 'import coremltools\n'), ((8948, 8967), 'copy.deepcopy', 'copy.deepcopy', (['prog'], {}), '(prog)\n', (8961, 8967), False, 'import copy\n'), ((1119, 1157), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['count', 'expect'], {}), '(count, expect)\n', (1142, 1157), True, 'import numpy as np\n'), ((1562, 1584), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1576, 1584), True, 'import numpy as np\n'), ((1772, 1813), 'coremltools.models.neural_network.printer.print_network_spec', 'print_network_spec', (['proto'], {'style': '"""coding"""'}), "(proto, style='coding')\n", (1790, 1813), False, 'from coremltools.models.neural_network.printer import print_network_spec\n'), ((4264, 4273), 'coremltools.converters.mil.mil.Program', 'Program', ([], {}), '()\n', (4271, 4273), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((4731, 4744), 'numpy.all', 'np.all', (['close'], {}), '(close)\n', (4737, 4744), True, 'import numpy as np\n'), ((4803, 4817), 'numpy.sum', 'np.sum', (['(~close)'], {}), '(~close)\n', (4809, 4817), True, 'import numpy as np\n'), ((8276, 8304), 'coremltools.converters.mil.mil.Function', 'Function', (['input_placeholders'], {}), '(input_placeholders)\n', (8284, 8304), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((3692, 3706), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3699, 3706), True, 'import numpy as np\n'), ((4086, 4099), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4094, 4099), True, 'import numpy as np\n'), ((4287, 4299), 'coremltools.converters.mil.mil.Function', 'Function', (['{}'], {}), '({})\n', (4295, 4299), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((5022, 5057), 'logging.error', 'logging.error', (['"""Differing entries:"""'], {}), "('Differing entries:')\n", (5035, 5057), False, 'import logging\n'), ((3947, 3979), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (3960, 3979), True, 'import numpy as np\n'), ((5692, 5723), 
'numpy.array', 'np.array', (['[v]'], {'dtype': 'np.float32'}), '([v], dtype=np.float32)\n', (5700, 5723), True, 'import numpy as np\n'), ((4933, 4945), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (4939, 4945), True, 'import numpy as np\n'), ((5551, 5565), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (5562, 5565), True, 'import numpy as np\n'), ((3827, 3845), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3843, 3845), True, 'import numpy as np\n'), ((3991, 4002), 'numpy.round', 'np.round', (['r'], {}), '(r)\n', (3999, 4002), True, 'import numpy as np\n')]
|
import os
import flopy
import pandas as pd
import numpy as np
def hdobj2data(hdsobj):
# convert usg hdsobj to array of shape (nper, nnodes)
hds = []
kstpkpers = hdsobj.get_kstpkper()
for kstpkper in kstpkpers:
data = hdsobj.get_data(kstpkper=kstpkper)
fdata = []
for lay in range(len(data)):
fdata += data[lay].tolist()
hds.append(fdata)
return np.array(hds)
def get_sim_hds(model_ws='.'):
node_df = pd.read_csv(os.path.join("Freyberg","misc","obs_nodes.dat"),delim_whitespace=True)
hdsobj = flopy.utils.HeadUFile(os.path.join(model_ws,"freyberg.usg.hds"))
hds = hdobj2data(hdsobj)
nper,nnodes = hds.shape
data = []
for i, dfrow in node_df.iterrows():
name, node = dfrow['name'], dfrow['node']
for sp in range(nper):
hd = hds[sp,node-1]
data.append([hd,name,node,sp])
obs_df = pd.DataFrame(data,columns=['head','name','node','sp'])
obs_df.to_csv(os.path.join('obs.csv'),index=False)
if __name__ == '__main__':
get_sim_hds(model_ws = 'template')
|
[
"pandas.DataFrame",
"numpy.array",
"os.path.join"
] |
[((412, 425), 'numpy.array', 'np.array', (['hds'], {}), '(hds)\n', (420, 425), True, 'import numpy as np\n'), ((951, 1009), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['head', 'name', 'node', 'sp']"}), "(data, columns=['head', 'name', 'node', 'sp'])\n", (963, 1009), True, 'import pandas as pd\n'), ((484, 533), 'os.path.join', 'os.path.join', (['"""Freyberg"""', '"""misc"""', '"""obs_nodes.dat"""'], {}), "('Freyberg', 'misc', 'obs_nodes.dat')\n", (496, 533), False, 'import os\n'), ((590, 632), 'os.path.join', 'os.path.join', (['model_ws', '"""freyberg.usg.hds"""'], {}), "(model_ws, 'freyberg.usg.hds')\n", (602, 632), False, 'import os\n'), ((1024, 1047), 'os.path.join', 'os.path.join', (['"""obs.csv"""'], {}), "('obs.csv')\n", (1036, 1047), False, 'import os\n')]
|
import argparse
import network_utils
import helper_utils
import torch
import json
import numpy as np
def get_args():
parser = argparse.ArgumentParser(description="Predict flower classification with DNN")
parser.add_argument('input', default='./flowers/test/17/image_03911.jpg', type=str, help="input flower image to predict")
parser.add_argument('checkpoint', type=str, help='pre-trained model path')
parser.add_argument('--top_k', default=3, type=int, help='default top_k results')
parser.add_argument('--category_names', default='./cat_to_name.json', type=str, help='default category file')
parser.add_argument('--gpu', default='False',type=str, help='If GPU should be enabled')
return parser.parse_args()
def predict(image, model, use_gpu, topk):
    # Predict the class (or classes) of an image using a trained deep learning model.
if helper_utils.str_to_bool(use_gpu) and torch.cuda.is_available():
image = torch.from_numpy(image).type(torch.cuda.FloatTensor) # Convert numpy to tensor
model.cuda()
print("GPU active")
else:
image = torch.from_numpy(image).type(torch.FloatTensor) # Convert numpy to tensor
model.cpu()
print("CPU active: Either cuda is not available or gpu option has been turn off")
model.eval() # set model to evaluation mode
    image = torch.unsqueeze(image, dim=0) # add a batch dimension
with torch.no_grad (): # Turn off gradient
output = model.forward(image)
        preds, classes = torch.exp(output).topk(topk) # Get the top-k probabilities and class indices
probs = preds.cpu().numpy().tolist()[0]
classes = classes.cpu().numpy().tolist()[0]
idx_to_class = {model.class_to_idx[k]: k for k in model.class_to_idx}
topk_classes = [idx_to_class[i] for i in classes]
return probs, topk_classes
def main():
args = get_args()
processed_image = helper_utils.process_image(args.input) # Process the image to numpy array
model = network_utils.loading_model(args.checkpoint)
probs, topk_classes = predict(processed_image, model, args.gpu, args.top_k)
cat_to_name = helper_utils.load_cat_to_name(args.category_names)
class_names = [cat_to_name [item] for item in topk_classes]
max_prob_idx = np.argmax(probs)
max_class_nb = topk_classes[max_prob_idx]
predicted_class = cat_to_name[max_class_nb]
#helper_utils.display_image(args.input,predicted_class)
helper_utils.display_result(class_names, probs)
if __name__ == '__main__':
main()
|
[
"helper_utils.str_to_bool",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.exp",
"network_utils.loading_model",
"helper_utils.load_cat_to_name",
"torch.cuda.is_available",
"helper_utils.display_result",
"torch.unsqueeze",
"torch.no_grad",
"helper_utils.process_image",
"torch.from_numpy"
] |
[((132, 209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict flower classification with DNN"""'}), "(description='Predict flower classification with DNN')\n", (155, 209), False, 'import argparse\n'), ((1382, 1411), 'torch.unsqueeze', 'torch.unsqueeze', (['image'], {'dim': '(0)'}), '(image, dim=0)\n', (1397, 1411), False, 'import torch\n'), ((1996, 2034), 'helper_utils.process_image', 'helper_utils.process_image', (['args.input'], {}), '(args.input)\n', (2022, 2034), False, 'import helper_utils\n'), ((2082, 2126), 'network_utils.loading_model', 'network_utils.loading_model', (['args.checkpoint'], {}), '(args.checkpoint)\n', (2109, 2126), False, 'import network_utils\n'), ((2226, 2276), 'helper_utils.load_cat_to_name', 'helper_utils.load_cat_to_name', (['args.category_names'], {}), '(args.category_names)\n', (2255, 2276), False, 'import helper_utils\n'), ((2360, 2376), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (2369, 2376), True, 'import numpy as np\n'), ((2536, 2583), 'helper_utils.display_result', 'helper_utils.display_result', (['class_names', 'probs'], {}), '(class_names, probs)\n', (2563, 2583), False, 'import helper_utils\n'), ((879, 912), 'helper_utils.str_to_bool', 'helper_utils.str_to_bool', (['use_gpu'], {}), '(use_gpu)\n', (903, 912), False, 'import helper_utils\n'), ((917, 942), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (940, 942), False, 'import torch\n'), ((1463, 1478), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1476, 1478), False, 'import torch\n'), ((1594, 1611), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (1603, 1611), False, 'import torch\n'), ((960, 983), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (976, 983), False, 'import torch\n'), ((1115, 1138), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1131, 1138), False, 'import torch\n')]
|
import numpy as np
from eb_gridmaker import dtb, config
from eb_gridmaker.utils import aux, multiproc
from elisa import SingleSystem, BinarySystem, Observer, settings
from elisa.base.error import LimbDarkeningError, AtmosphereError, MorphologyError
def spotty_single_system_random_sampling(db_name=None, number_of_samples=1e4):
"""
    Produce a sample of spotty single-system models generated randomly in the given parameter space.
:param db_name: str;
:param number_of_samples: int;
:return: None;
"""
if db_name is not None:
config.DATABASE_NAME = db_name
phases = np.linspace(0, 1.0, num=config.N_POINTS, endpoint=False)
# generating IDs of each possible combination
ids = np.arange(0, number_of_samples, dtype=np.int)
dtb.create_ceb_db(config.DATABASE_NAME, config.PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE)
brkpoint = dtb.search_for_breakpoint(config.DATABASE_NAME, ids)
print(f'Breakpoint found {100.0 * brkpoint / number_of_samples:.2f}%: {brkpoint}/{number_of_samples}')
ids = ids[brkpoint:]
args = (phases, number_of_samples, brkpoint, )
multiproc.multiprocess_eval(ids, eval_single_grid_node, args)
def eval_single_grid_node(iden, counter, phases, maxiter, start_index):
"""
Evaluating randomly generated spotty single system model.
:param iden: str; node ID
    :param counter: int; current number of already calculated nodes
:param phases: numpy.array; desired phases of observations
:param maxiter: int; total number of nodes in this batch
:param start_index: int; number of iterations already calculated before interruption
:return: None
"""
aug_counter = counter + start_index
print(f'Processing node: {aug_counter}/{maxiter}, {100.0 * aug_counter / maxiter:.2f}%')
while True:
params = aux.draw_single_star_params()
try:
s = SingleSystem.from_json(params)
except ValueError as e:
continue
o = Observer(passband=config.PASSBANDS, system=s)
try:
o.lc(phases=phases, normalize=True)
# o.plot.lc()
except (LimbDarkeningError, AtmosphereError) as e:
# print(f'Parameters: {params} produced system outside grid coverage.')
continue
dtb.insert_observation(
config.DATABASE_NAME, o, iden, config.PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE
)
break
def eval_eccentric_random_sample(iden, counter, phases, maxiter, start_index):
np.random.seed()
while True:
args = aux.draw_eccentric_system_params()
params = aux.assign_eccentric_system_params(*args)
try:
bs = BinarySystem.from_json(params)
except MorphologyError as e:
# print(e)
continue
try:
setattr(bs, 'inclination', np.radians(aux.draw_inclination(binary=bs)))
bs.init()
o = Observer(passband=config.PASSBANDS, system=bs)
except Exception as e:
raise ValueError(e)
try:
o.lc(phases=phases, normalize=True)
# o.plot.lc()
except (LimbDarkeningError, AtmosphereError) as e:
# print(f'Parameters: {params} produced system outside grid coverage.')
continue
dtb.insert_observation(
config.DATABASE_NAME, o, iden, config.PARAMETER_COLUMNS_ECCENTRIC, config.PARAMETER_TYPES_ECCENTRIC
)
aug_counter = counter + start_index + 1
print(f'Node processed: {aug_counter}/{maxiter}, {100.0 * aug_counter / maxiter:.2f}%')
break
def eccentric_system_random_sampling(db_name=None, number_of_samples=1e4):
if db_name is not None:
config.DATABASE_NAME = db_name
phases = np.linspace(0, 1.0, num=config.N_POINTS, endpoint=False)
# generating IDs of each possible combination
ids = np.arange(0, number_of_samples, dtype=np.int)
dtb.create_ceb_db(config.DATABASE_NAME, config.PARAMETER_COLUMNS_ECCENTRIC, config.PARAMETER_TYPES_ECCENTRIC)
brkpoint = dtb.search_for_breakpoint(config.DATABASE_NAME, ids)
print(f'Breakpoint found {100.0 * brkpoint / number_of_samples:.2f}%: {brkpoint}/{number_of_samples}')
ids = ids[brkpoint:]
args = (phases, number_of_samples, brkpoint,)
multiproc.multiprocess_eval(ids, eval_eccentric_random_sample, args)
def random_sampling(db_name=None, desired_morphology='all', number_of_samples=1e4):
"""
:param db_name: str; path to the database
:param desired_morphology: string; `all`, `detached` - detached binaries on circular orbit, `overcontact`,
`single_spotty`, `eccentric`
:param number_of_samples: int; number of samples for random sampling
:return:
"""
if desired_morphology in ['detached', 'overcontact', 'circular']:
raise NotImplementedError('Random sampling on circular binaries is not yet implemented. '
'Try grid sampling method.')
elif desired_morphology in ['single_spotty']:
spotty_single_system_random_sampling(db_name, number_of_samples=number_of_samples)
elif desired_morphology in ['eccentric']:
eccentric_system_random_sampling(db_name, number_of_samples=number_of_samples)
else:
raise ValueError(f'Unknown morphology: {desired_morphology}. '
f'List of available morphologies: `all`, `detached` - detached binaries on circular orbit, '
f'`overcontact`, `single_spotty`, `eccentric`')
if __name__ == "__main__":
settings.LOG_CONFIG = 'fit'
config.NUMBER_OF_PROCESSES = 1
# random_sampling('../../random.db', desired_morphology='single_spotty', number_of_samples=10)
random_sampling('../../random.db', desired_morphology='eccentric', number_of_samples=10)
|
[
"eb_gridmaker.dtb.insert_observation",
"eb_gridmaker.dtb.search_for_breakpoint",
"numpy.random.seed",
"eb_gridmaker.utils.aux.draw_single_star_params",
"eb_gridmaker.utils.aux.assign_eccentric_system_params",
"eb_gridmaker.utils.aux.draw_inclination",
"elisa.SingleSystem.from_json",
"eb_gridmaker.utils.multiproc.multiprocess_eval",
"elisa.Observer",
"numpy.arange",
"numpy.linspace",
"eb_gridmaker.dtb.create_ceb_db",
"elisa.BinarySystem.from_json",
"eb_gridmaker.utils.aux.draw_eccentric_system_params"
] |
[((604, 660), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)'], {'num': 'config.N_POINTS', 'endpoint': '(False)'}), '(0, 1.0, num=config.N_POINTS, endpoint=False)\n', (615, 660), True, 'import numpy as np\n'), ((722, 767), 'numpy.arange', 'np.arange', (['(0)', 'number_of_samples'], {'dtype': 'np.int'}), '(0, number_of_samples, dtype=np.int)\n', (731, 767), True, 'import numpy as np\n'), ((773, 880), 'eb_gridmaker.dtb.create_ceb_db', 'dtb.create_ceb_db', (['config.DATABASE_NAME', 'config.PARAMETER_COLUMNS_SINGLE', 'config.PARAMETER_TYPES_SINGLE'], {}), '(config.DATABASE_NAME, config.PARAMETER_COLUMNS_SINGLE,\n config.PARAMETER_TYPES_SINGLE)\n', (790, 880), False, 'from eb_gridmaker import dtb, config\n'), ((892, 944), 'eb_gridmaker.dtb.search_for_breakpoint', 'dtb.search_for_breakpoint', (['config.DATABASE_NAME', 'ids'], {}), '(config.DATABASE_NAME, ids)\n', (917, 944), False, 'from eb_gridmaker import dtb, config\n'), ((1133, 1194), 'eb_gridmaker.utils.multiproc.multiprocess_eval', 'multiproc.multiprocess_eval', (['ids', 'eval_single_grid_node', 'args'], {}), '(ids, eval_single_grid_node, args)\n', (1160, 1194), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((2546, 2562), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (2560, 2562), True, 'import numpy as np\n'), ((3801, 3857), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)'], {'num': 'config.N_POINTS', 'endpoint': '(False)'}), '(0, 1.0, num=config.N_POINTS, endpoint=False)\n', (3812, 3857), True, 'import numpy as np\n'), ((3919, 3964), 'numpy.arange', 'np.arange', (['(0)', 'number_of_samples'], {'dtype': 'np.int'}), '(0, number_of_samples, dtype=np.int)\n', (3928, 3964), True, 'import numpy as np\n'), ((3970, 4083), 'eb_gridmaker.dtb.create_ceb_db', 'dtb.create_ceb_db', (['config.DATABASE_NAME', 'config.PARAMETER_COLUMNS_ECCENTRIC', 'config.PARAMETER_TYPES_ECCENTRIC'], {}), '(config.DATABASE_NAME, config.PARAMETER_COLUMNS_ECCENTRIC,\n config.PARAMETER_TYPES_ECCENTRIC)\n', (3987, 4083), False, 'from eb_gridmaker import dtb, config\n'), ((4095, 4147), 'eb_gridmaker.dtb.search_for_breakpoint', 'dtb.search_for_breakpoint', (['config.DATABASE_NAME', 'ids'], {}), '(config.DATABASE_NAME, ids)\n', (4120, 4147), False, 'from eb_gridmaker import dtb, config\n'), ((4335, 4403), 'eb_gridmaker.utils.multiproc.multiprocess_eval', 'multiproc.multiprocess_eval', (['ids', 'eval_eccentric_random_sample', 'args'], {}), '(ids, eval_eccentric_random_sample, args)\n', (4362, 4403), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((1843, 1872), 'eb_gridmaker.utils.aux.draw_single_star_params', 'aux.draw_single_star_params', ([], {}), '()\n', (1870, 1872), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((2000, 2045), 'elisa.Observer', 'Observer', ([], {'passband': 'config.PASSBANDS', 'system': 's'}), '(passband=config.PASSBANDS, system=s)\n', (2008, 2045), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2307, 2429), 'eb_gridmaker.dtb.insert_observation', 'dtb.insert_observation', (['config.DATABASE_NAME', 'o', 'iden', 'config.PARAMETER_COLUMNS_SINGLE', 'config.PARAMETER_TYPES_SINGLE'], {}), '(config.DATABASE_NAME, o, iden, config.\n PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE)\n', (2329, 2429), False, 'from eb_gridmaker import dtb, config\n'), ((2594, 2628), 'eb_gridmaker.utils.aux.draw_eccentric_system_params', 'aux.draw_eccentric_system_params', ([], {}), '()\n', (2626, 2628), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((2646, 2687), 
'eb_gridmaker.utils.aux.assign_eccentric_system_params', 'aux.assign_eccentric_system_params', (['*args'], {}), '(*args)\n', (2680, 2687), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((3339, 3467), 'eb_gridmaker.dtb.insert_observation', 'dtb.insert_observation', (['config.DATABASE_NAME', 'o', 'iden', 'config.PARAMETER_COLUMNS_ECCENTRIC', 'config.PARAMETER_TYPES_ECCENTRIC'], {}), '(config.DATABASE_NAME, o, iden, config.\n PARAMETER_COLUMNS_ECCENTRIC, config.PARAMETER_TYPES_ECCENTRIC)\n', (3361, 3467), False, 'from eb_gridmaker import dtb, config\n'), ((1903, 1933), 'elisa.SingleSystem.from_json', 'SingleSystem.from_json', (['params'], {}), '(params)\n', (1925, 1933), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2719, 2749), 'elisa.BinarySystem.from_json', 'BinarySystem.from_json', (['params'], {}), '(params)\n', (2741, 2749), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2968, 3014), 'elisa.Observer', 'Observer', ([], {'passband': 'config.PASSBANDS', 'system': 'bs'}), '(passband=config.PASSBANDS, system=bs)\n', (2976, 3014), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2895, 2926), 'eb_gridmaker.utils.aux.draw_inclination', 'aux.draw_inclination', ([], {'binary': 'bs'}), '(binary=bs)\n', (2915, 2926), False, 'from eb_gridmaker.utils import aux, multiproc\n')]
|
#!/usr/bin/env python3
"""
This module should introduce you to basic plotting routines
using matplotlib.
We will be plotting quadratic equations since we already
have a module to calculate them.
"""
# Matplotlib is a module with routines to
# plot data and display them on the screen or save them to files.
# The primary plotting module is pyplot, which is conventionally imported
# as plt
import matplotlib.pyplot as plt
import numpy as np
# This is our quadratic_equation class, rename as QE for shortness
from quad_class import quadratic_Equation as QE
def simplePlot(quadEqn, xlim):
"""
This function will plot a quadratic equation, using
the quadratic_Equation module.
inputs:
quadEqn -- instance of QE --
the quadratic equation to plot
xlim -- list of [float, float] or None --
This will define the limits for which x is plotted.
If xlim is None, Matplotlib will auto-scale the axis.
"""
# Enforce that quadEqn MUST be a QE object.
# The isinstance function checks if the variable is of type QE,
# and returns true if and only if it is.
if not isinstance(quadEqn, QE):
# RuntimeError is a built in exception class
        raise RuntimeError('provided quadEqn is NOT of type QE')
# Define the x values we are going to plot
    # np.arange is a function similar to range, except
# can use floats as well as integers.
x_vals = np.arange(xlim[0], xlim[1], .01) #go from xlim[0] to xlim[1] in step sizes of .01
# Define the y values to plot. This should be the value of our quadratic equation at each
# value of x.
# NOTE: x_vals and y_vals MUST have the same length to plot
y_vals = [quadEqn(x) for x in x_vals]
# Create a simple plot
plt.plot(x_vals, y_vals)
# Display the plot on the screen
plt.show()
# We are introducing a new python object called None here.
# In python, None represents that nothing is there.
# So in the following definition we are saying that
# by default ylim will be a nothing.
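# A tiny illustration of this pattern (placeholder names only, not used by the
# functions below):
#
#     ylim = None                 # "nothing was provided"
#     if ylim is not None:
#         plt.ylim(ylim)          # only runs when the caller supplied limits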
def plotQE(quadEqn, xlim, ylim=None):
"""
This function will plot a quadratic equation, using
the quadratic_Equation module.
inputs:
quadEqn -- instance of QE --
the quadratic equation to plot
xlim -- list of [float, float] or None --
This will define the limits for which x is plotted.
If xlim is None, Matplotlib will auto-scale the axis.
optional inputs:
ylim=None -- list of [float, float] or None --
This will define the limits for which y is plotted.
If ylim is None, Matplotlib will auto-scale the axis.
"""
# Ensure quadEqn is of type QE
if not isinstance(quadEqn, QE):
        raise RuntimeError('provided quadEqn is NOT of type QE')
# Define the x values to plot
x_vals = np.arange(xlim[0], xlim[1], .01) #go from xlim[0] to xlim[1] in step sizes of .01
# Define the y values to plot.
y_vals = [quadEqn(x) for x in x_vals]
# Plot the function, but make it red, and only plot the actual data points without a line
plt.plot(x_vals, y_vals, 'ro')
# Set the plot so it only shows the defined x range
plt.xlim(xlim)
# If ylim was provided, set the y limits of the plot
if ylim is not None:
plt.ylim(ylim)
# Label the axes
plt.xlabel('x')
plt.ylabel('y')
# Display the plot on the screen
plt.show()
def plotRoots(quadEqn, xlim=None, ylim=None):
"""
This function will plot a quadratic equation,
along with vertical bars at its roots.
inputs:
quadEqn -- instance of QE --
the quadratic equation to plot
optional inputs:
xlim=None -- list of [float, float] or None --
This will define the limits for which x is plotted.
If xlim is None, will only plot just beyond the roots of the function.
If the roots are not real, an Exception will be raised.
ylim=None -- list of [float, float] or None --
This will define the limits for which y is plotted.
If ylim is None, the limits will be chosen to fit the plot tightly.
"""
# Ensure quadEqn is of type QE
if not isinstance(quadEqn, QE):
        raise RuntimeError('provided quadEqn is NOT of type QE')
# find the roots
neg_root = quadEqn.root(False)
pos_root = quadEqn.root(True)
# if xlim not provided, set just further than the roots as the limits
if xlim is None:
# define padding of a tenth of the distance between the roots
pad = pos_root - neg_root
pad *= .1
xlim = [min(neg_root, pos_root) - pad, max(neg_root, pos_root) + pad]
# Define the x values to plot
x_vals = np.arange(xlim[0], xlim[1], .01) #go from xlim[0] to xlim[1] in step sizes of .01
# Define the y values to plot.
y_vals = [quadEqn(x) for x in x_vals]
# Create a plot of the equation, with a solid red line. Give it a label
plt.plot(x_vals, y_vals, linestyle='-', color='red', label='Quad. Eqn.')
# Set the plot so it only shows the defined x range
plt.xlim(xlim)
if ylim is not None:
# If ylim was provided, set the y limits of the plot
plt.ylim(ylim)
else:
# squeeze the y limits to just cover y-values
plt.ylim([min(y_vals), max(y_vals)])
    # Plot a blue vertical bar at the negative root, with a label
plt.axvline(neg_root, color='blue', label='neg. root')
    # Plot a purple vertical bar at the positive root, with a label
plt.axvline(pos_root, color='purple', label='pos. root')
# add a legend to the plot
plt.legend()
# add a title to the plot
plt.title('root plot')
# display the plot
plt.show()
def test_plots():
"""
A simple method to demonstrate the three plotting routines.
"""
myeqn = QE(.8, 3, -2)
simplePlot(myeqn, [-5,3])
plotQE(myeqn, [-5,3])
plotRoots(myeqn)
if __name__ == '__main__':
test_plots()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.arange",
"quad_class.quadratic_Equation",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1449, 1482), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', '(0.01)'], {}), '(xlim[0], xlim[1], 0.01)\n', (1458, 1482), True, 'import numpy as np\n'), ((1785, 1809), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (1793, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1862), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1860, 1862), True, 'import matplotlib.pyplot as plt\n'), ((2876, 2909), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', '(0.01)'], {}), '(xlim[0], xlim[1], 0.01)\n', (2885, 2909), True, 'import numpy as np\n'), ((3138, 3168), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals', '"""ro"""'], {}), "(x_vals, y_vals, 'ro')\n", (3146, 3168), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3244), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (3238, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3392), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3387, 3392), True, 'import matplotlib.pyplot as plt\n'), ((3397, 3412), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3407, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3455, 3465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3463, 3465), True, 'import matplotlib.pyplot as plt\n'), ((4781, 4814), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', '(0.01)'], {}), '(xlim[0], xlim[1], 0.01)\n', (4790, 4814), True, 'import numpy as np\n'), ((5025, 5097), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals'], {'linestyle': '"""-"""', 'color': '"""red"""', 'label': '"""Quad. Eqn."""'}), "(x_vals, y_vals, linestyle='-', color='red', label='Quad. Eqn.')\n", (5033, 5097), True, 'import matplotlib.pyplot as plt\n'), ((5159, 5173), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (5167, 5173), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5522), 'matplotlib.pyplot.axvline', 'plt.axvline', (['neg_root'], {'color': '"""blue"""', 'label': '"""neg. root"""'}), "(neg_root, color='blue', label='neg. root')\n", (5479, 5522), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5655), 'matplotlib.pyplot.axvline', 'plt.axvline', (['pos_root'], {'color': '"""purple"""', 'label': '"""pos. root"""'}), "(pos_root, color='purple', label='pos. root')\n", (5610, 5655), True, 'import matplotlib.pyplot as plt\n'), ((5692, 5704), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5702, 5704), True, 'import matplotlib.pyplot as plt\n'), ((5740, 5762), 'matplotlib.pyplot.title', 'plt.title', (['"""root plot"""'], {}), "('root plot')\n", (5749, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5791, 5801), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5799, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5913, 5927), 'quad_class.quadratic_Equation', 'QE', (['(0.8)', '(3)', '(-2)'], {}), '(0.8, 3, -2)\n', (5915, 5927), True, 'from quad_class import quadratic_Equation as QE\n'), ((3336, 3350), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (3344, 3350), True, 'import matplotlib.pyplot as plt\n'), ((5269, 5283), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (5277, 5283), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
gnpy.core.request
=================
This module contains path request functionality.
This functionality allows the user to provide a JSON request
file in accordance with a Yang model for requesting path
computations and returns path results in terms of path
and feasibility
See: draft-ietf-teas-yang-path-computation-01.txt
"""
from collections import namedtuple
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
from networkx import (dijkstra_path, NetworkXNoPath)
from numpy import mean
from gnpy.core.service_sheet import convert_service_sheet, Request_element, Element
from gnpy.core.elements import Transceiver, Roadm, Edfa, Fused
from gnpy.core.network import set_roadm_loss
from gnpy.core.utils import db2lin, lin2db
from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power
from copy import copy, deepcopy
from csv import writer
logger = getLogger(__name__)
RequestParams = namedtuple('RequestParams','request_id source destination trx_type'+
' trx_mode nodes_list loose_list spacing power nb_channel frequency format baud_rate OSNR bit_rate roll_off')
class Path_request:
def __init__(self, *args, **params):
params = RequestParams(**params)
self.request_id = params.request_id
self.source = params.source
self.destination = params.destination
self.tsp = params.trx_type
self.tsp_mode = params.trx_mode
self.baud_rate = params.baud_rate
self.nodes_list = params.nodes_list
self.loose_list = params.loose_list
self.spacing = params.spacing
self.power = params.power
self.nb_channel = params.nb_channel
self.frequency = params.frequency
self.format = params.format
self.OSNR = params.OSNR
self.bit_rate = params.bit_rate
self.roll_off = params.roll_off
def __str__(self):
return '\n\t'.join([ f'{type(self).__name__} {self.request_id}',
f'source: {self.source}',
f'destination: {self.destination}'])
def __repr__(self):
return '\n\t'.join([ f'{type(self).__name__} {self.request_id}',
f'source: \t{self.source}',
f'destination:\t{self.destination}',
f'trx type:\t{self.tsp}',
f'trx mode:\t{self.tsp_mode}',
f'baud_rate:\t{self.baud_rate * 1e-9} Gbaud',
f'bit_rate:\t{self.bit_rate * 1e-9} Gb/s',
f'spacing:\t{self.spacing * 1e-9} GHz',
f'power: \t{round(lin2db(self.power)+30,2)} dBm'
'\n'])
class Result_element(Element):
def __init__(self,path_request,computed_path):
self.path_id = path_request.request_id
self.path_request = path_request
self.computed_path = computed_path
hop_type = []
for e in computed_path :
if isinstance(e, Transceiver) :
hop_type.append(' - '.join([path_request.tsp,path_request.tsp_mode]))
else:
hop_type.append('not recorded')
self.hop_type = hop_type
uid = property(lambda self: repr(self))
@property
def pathresult(self):
if not self.computed_path:
return {
'path-id': self.path_id,
'path-properties':{
'path-metric': [
{
'metric-type': 'SNR@bandwidth',
'accumulative-value': 'None'
},
{
                            'metric-type': 'SNR@0.1nm',
'accumulative-value': 'None'
},
{
'metric-type': 'OSNR@bandwidth',
'accumulative-value': 'None'
},
{
                            'metric-type': 'OSNR@0.1nm',
'accumulative-value': 'None'
},
{
'metric-type': 'reference_power',
'accumulative-value': self.path_request.power
}
],
'path-srlgs': {
'usage': 'not used yet',
'values': 'not used yet'
},
'path-route-objects': [
{
'path-route-object': {
'index': 0,
'unnumbered-hop': {
'node-id': self.path_request.source,
'link-tp-id': self.path_request.source,
'hop-type': ' - '.join([self.path_request.tsp, self.path_request.tsp_mode]),
'direction': 'not used'
},
'label-hop': {
'te-label': {
'generic': 'not used yet',
'direction': 'not used yet'
}
}
}
},
{
'path-route-object': {
'index': 1,
'unnumbered-hop': {
'node-id': self.path_request.destination,
'link-tp-id': self.path_request.destination,
'hop-type': ' - '.join([self.path_request.tsp, self.path_request.tsp_mode]),
'direction': 'not used'
},
'label-hop': {
'te-label': {
'generic': 'not used yet',
'direction': 'not used yet'
}
}
}
}
]
}
}
else:
return {
'path-id': self.path_id,
'path-properties':{
'path-metric': [
{
'metric-type': 'SNR@bandwidth',
'accumulative-value': round(mean(self.computed_path[-1].snr),2)
},
{
                            'metric-type': 'SNR@0.1nm',
'accumulative-value': round(mean(self.computed_path[-1].snr+lin2db(self.path_request.baud_rate/12.5e9)),2)
},
{
'metric-type': 'OSNR@bandwidth',
'accumulative-value': round(mean(self.computed_path[-1].osnr_ase),2)
},
{
                            'metric-type': 'OSNR@0.1nm',
'accumulative-value': round(mean(self.computed_path[-1].osnr_ase_01nm),2)
},
{
'metric-type': 'reference_power',
'accumulative-value': self.path_request.power
}
],
'path-srlgs': {
'usage': 'not used yet',
'values': 'not used yet'
},
'path-route-objects': [
{
'path-route-object': {
'index': self.computed_path.index(n),
'unnumbered-hop': {
'node-id': n.uid,
'link-tp-id': n.uid,
'hop-type': self.hop_type[self.computed_path.index(n)],
'direction': 'not used'
},
'label-hop': {
'te-label': {
'generic': 'not used yet',
'direction': 'not used yet'
}
}
}
} for n in self.computed_path
]
}
}
@property
def json(self):
return self.pathresult
def compute_constrained_path(network, req):
trx = [n for n in network.nodes() if isinstance(n, Transceiver)]
roadm = [n for n in network.nodes() if isinstance(n, Roadm)]
edfa = [n for n in network.nodes() if isinstance(n, Edfa)]
source = next(el for el in trx if el.uid == req.source)
# start the path with its source
    # TODO : avoid loops due to constraints, guess name based on string,
    # avoid crashing if one req is not correct
total_path = [source]
for n in req.nodes_list:
# print(n)
try :
node = next(el for el in trx if el.uid == n)
except StopIteration:
try:
node = next(el for el in roadm if el.uid == f'roadm {n}')
except StopIteration:
try:
node = next(el for el in edfa
if el.uid.startswith(f'egress edfa in {n}'))
except StopIteration:
msg = f'could not find node : {n} in network topology: \
not a trx, roadm, edfa or fused element'
logger.critical(msg)
raise ValueError(msg)
# extend path list without repeating source -> skip first element in the list
try:
total_path.extend(dijkstra_path(network, source, node)[1:])
source = node
except NetworkXNoPath:
# for debug
# print(req.loose_list)
# print(req.nodes_list.index(n))
if req.loose_list[req.nodes_list.index(n)] == 'loose':
print(f'could not find a path from {source.uid} to loose node : {n} in network topology')
print(f'node {n} is skipped')
else:
msg = f'could not find a path from {source.uid} to node : {n} in network topology'
logger.critical(msg)
#raise ValueError(msg)
print(msg)
total_path = []
    # preparing disjunction feature
# for p in all_simple_paths(network,\
# source=next(el for el in trx if el.uid == req.source),\
# target=next(el for el in trx if el.uid == req.destination)):
# print([e.uid for e in p if isinstance(e,Roadm)])
return total_path
def propagate(path, req, equipment, show=False):
#update roadm loss in case of power sweep (power mode only)
set_roadm_loss(path, equipment, lin2db(req.power*1e3))
si = create_input_spectral_information(
req.frequency['min'], req.roll_off,
req.baud_rate, req.power, req.spacing, req.nb_channel)
for el in path:
si = el(si)
if show :
print(el)
return path
def jsontocsv(json_data,equipment,fileout):
# read json path result file in accordance with:
# Yang model for requesting Path Computation
# draft-ietf-teas-yang-path-computation-01.txt.
    # and write results in a CSV file
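    # Illustrative call (the file name and variable names are placeholders only):
    #     with open('path_result.csv', 'w') as fileout:
    #         jsontocsv(json_data, equipment, fileout)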
mywriter = writer(fileout)
mywriter.writerow(('path-id','source','destination','transponder-type',\
'transponder-mode','baud rate (Gbaud)', 'input power (dBm)','path',\
        'OSNR@bandwidth','OSNR@0.1nm','SNR@bandwidth','SNR@0.1nm','Pass?'))
tspjsondata = equipment['Transceiver']
#print(tspjsondata)
for p in json_data['path']:
path_id = p['path-id']
source = p['path-properties']['path-route-objects'][0]\
['path-route-object']['unnumbered-hop']['node-id']
destination = p['path-properties']['path-route-objects'][-1]\
['path-route-object']['unnumbered-hop']['node-id']
pth = ' | '.join([ e['path-route-object']['unnumbered-hop']['node-id']
for e in p['path-properties']['path-route-objects']])
[tsp,mode] = p['path-properties']['path-route-objects'][0]\
['path-route-object']['unnumbered-hop']['hop-type'].split(' - ')
        # find the min acceptable OSNR, baud rate from the eqpt library based on tsp (type) and mode (format)
try:
[minosnr, baud_rate] = next([m['OSNR'] , m['baud_rate']]
for m in equipment['Transceiver'][tsp].mode if m['format']==mode)
# for debug
# print(f'coucou {baud_rate}')
        except StopIteration:
            msg = f'could not find tsp : {tsp} with mode: {mode} in eqpt library'
            raise ValueError(msg)
output_snr = next(e['accumulative-value']
            for e in p['path-properties']['path-metric'] if e['metric-type'] == 'SNR@0.1nm')
output_snrbandwidth = next(e['accumulative-value']
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'SNR@bandwidth')
output_osnr = next(e['accumulative-value']
            for e in p['path-properties']['path-metric'] if e['metric-type'] == 'OSNR@0.1nm')
output_osnrbandwidth = next(e['accumulative-value']
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'OSNR@bandwidth')
power = next(e['accumulative-value']
for e in p['path-properties']['path-metric'] if e['metric-type'] == 'reference_power')
if isinstance(output_snr, str):
isok = ''
else:
isok = output_snr >= minosnr
mywriter.writerow((path_id,
source,
destination,
tsp,
mode,
baud_rate*1e-9,
round(lin2db(power)+30,2),
pth,
output_osnrbandwidth,
output_osnr,
output_snrbandwidth,
output_snr,
isok
))
|
[
"csv.writer",
"gnpy.core.info.create_input_spectral_information",
"networkx.dijkstra_path",
"numpy.mean",
"collections.namedtuple",
"gnpy.core.utils.lin2db",
"logging.getLogger"
] |
[((959, 978), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (968, 978), False, 'from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO\n'), ((997, 1186), 'collections.namedtuple', 'namedtuple', (['"""RequestParams"""', "('request_id source destination trx_type' +\n ' trx_mode nodes_list loose_list spacing power nb_channel frequency format baud_rate OSNR bit_rate roll_off'\n )"], {}), "('RequestParams', 'request_id source destination trx_type' +\n ' trx_mode nodes_list loose_list spacing power nb_channel frequency format baud_rate OSNR bit_rate roll_off'\n )\n", (1007, 1186), False, 'from collections import namedtuple\n'), ((11596, 11725), 'gnpy.core.info.create_input_spectral_information', 'create_input_spectral_information', (["req.frequency['min']", 'req.roll_off', 'req.baud_rate', 'req.power', 'req.spacing', 'req.nb_channel'], {}), "(req.frequency['min'], req.roll_off, req.\n baud_rate, req.power, req.spacing, req.nb_channel)\n", (11629, 11725), False, 'from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power\n'), ((12089, 12104), 'csv.writer', 'writer', (['fileout'], {}), '(fileout)\n', (12095, 12104), False, 'from csv import writer\n'), ((11564, 11590), 'gnpy.core.utils.lin2db', 'lin2db', (['(req.power * 1000.0)'], {}), '(req.power * 1000.0)\n', (11570, 11590), False, 'from gnpy.core.utils import db2lin, lin2db\n'), ((10444, 10480), 'networkx.dijkstra_path', 'dijkstra_path', (['network', 'source', 'node'], {}), '(network, source, node)\n', (10457, 10480), False, 'from networkx import dijkstra_path, NetworkXNoPath\n'), ((14545, 14558), 'gnpy.core.utils.lin2db', 'lin2db', (['power'], {}), '(power)\n', (14551, 14558), False, 'from gnpy.core.utils import db2lin, lin2db\n'), ((2769, 2787), 'gnpy.core.utils.lin2db', 'lin2db', (['self.power'], {}), '(self.power)\n', (2775, 2787), False, 'from gnpy.core.utils import db2lin, lin2db\n'), ((6928, 6960), 'numpy.mean', 'mean', (['self.computed_path[-1].snr'], {}), '(self.computed_path[-1].snr)\n', (6932, 6960), False, 'from numpy import mean\n'), ((7386, 7423), 'numpy.mean', 'mean', (['self.computed_path[-1].osnr_ase'], {}), '(self.computed_path[-1].osnr_ase)\n', (7390, 7423), False, 'from numpy import mean\n'), ((7597, 7639), 'numpy.mean', 'mean', (['self.computed_path[-1].osnr_ase_01nm'], {}), '(self.computed_path[-1].osnr_ase_01nm)\n', (7601, 7639), False, 'from numpy import mean\n'), ((7165, 7216), 'gnpy.core.utils.lin2db', 'lin2db', (['(self.path_request.baud_rate / 12500000000.0)'], {}), '(self.path_request.baud_rate / 12500000000.0)\n', (7171, 7216), False, 'from gnpy.core.utils import db2lin, lin2db\n')]
|
"""
----------------------------------------------------------------------
--- jumeg.jumeg_noise_reducer --------------------------------
----------------------------------------------------------------------
author : <NAME>
email : <EMAIL>
last update: 02.05.2019
version : 1.14
----------------------------------------------------------------------
Based on following publications:
----------------------------------------------------------------------
<NAME>., 'Environmental Noise Cancellation for
Biomagnetic Measurements', Advances in Biomagnetism,
Plenum Press, New York, 1989
----------------------------------------------------------------------
s'_i(t) = s_i(t) - sum(w_ij*r_j(t), j=1,nref)
where
s_i are the signal traces, i=1,nsig
r_j are the reference traces, j=1,nref after DC removal
w_ij are weights determined by minimizing
<(s'_i(t)-<s'_i>)^2> with <x> temporal mean
Typically s_i are magnetic signal channels and
r_j (selected) magnetic reference channels, but
other refs are possible.
----------------------------------------------------------------------
How to use the jumeg_noise_reducer?
----------------------------------------------------------------------
from jumeg import jumeg_noise_reducer
jumeg_noise_reducer.noise_reducer(fname_raw)
--> for further comments we refer directly to the functions
----------------------------------------------------------------------
"""
# Author: EE
# 150203/EE/
# 150619/EE/ fix for tmin/tmax-arg
# 170131/EE/ modified handling of refnotch-arg (no auto-harmonics)
# 180629/EE/ explicit spec. for reference-filter ('firwin','hann')
# 190103/EE/ fixed infosig-arg for _is_good()
# 190208/EE/ prep. f. transition to Python3
# 190502/EE/ Python3-version
#
# License: BSD (3-clause)
# cf. https://www.johndcook.com/blog/2019/01/09/projecting-unicode-to-ascii/
# for a ruggedized version of channel_indices_by_type()?
from builtins import str
from builtins import range
import sys # for sys.stdout.flush()
import os
import numpy as np
import time
import copy
import warnings
from math import floor, ceil
import mne
from mne.utils import logger
from mne.epochs import _is_good
from mne.io.pick import channel_indices_by_type
from jumeg.jumeg_utils import get_files_from_list
TINY = 1.e-38
SVD_RELCUTOFF = 1.e-08
##################################################
#
# generate plot of power spectrum before and
# after noise reduction
#
##################################################
def plot_denoising(fname_raw, fmin=0, fmax=300, tmin=0.0, tmax=60.0,
proj=False, n_fft=4096, color='blue',
stim_name=None, event_id=1,
tmin_stim=-0.2, tmax_stim=0.5,
area_mode='range', area_alpha=0.33, n_jobs=1,
title1='before denoising', title2='after denoising',
info=None, show=True, fnout=None):
"""Plot the power spectral density across channels to show denoising.
Parameters
----------
fname_raw : list or str
List of raw files, without denoising and with for comparison.
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted.
area_alpha : float
Alpha for the area.
info : bool
Display information in the figure.
show : bool
Show figure.
fnout : str
Name of the saved output figure. If none, no figure will be saved.
title1, title2 : str
Title for two psd plots.
n_jobs : int
Number of jobs to use for parallel computation.
stim_name : str
Name of the stim channel. If stim_name is set, the plot of epochs
average is also shown alongside the PSD plots.
event_id : int
ID of the stim event. (only when stim_name is set)
Example Usage
-------------
    plot_denoising(['orig-raw.fif', 'orig,nr-raw.fif'], fnout='example')
"""
from matplotlib import gridspec as grd
import matplotlib.pyplot as plt
from mne.time_frequency import psd_welch
fnraw = get_files_from_list(fname_raw)
# ---------------------------------
# estimate power spectrum
# ---------------------------------
psds_all = []
freqs_all = []
# loop across all filenames
for fname in fnraw:
# read in data
raw = mne.io.Raw(fname, preload=True)
picks = mne.pick_types(raw.info, meg='mag', eeg=False,
stim=False, eog=False, exclude='bads')
if area_mode not in [None, 'std', 'range']:
raise ValueError('"area_mode" must be "std", "range", or None')
psds, freqs = psd_welch(raw, picks=picks, fmin=fmin, fmax=fmax,
tmin=tmin, tmax=tmax, n_fft=n_fft,
n_jobs=n_jobs, proj=proj)
psds_all.append(psds)
freqs_all.append(freqs)
if stim_name:
n_xplots = 2
# get some infos
events = mne.find_events(raw, stim_channel=stim_name, consecutive=True)
else:
n_xplots = 1
fig = plt.figure('denoising', figsize=(16, 6 * n_xplots))
gs = grd.GridSpec(n_xplots, int(len(psds_all)))
# loop across all filenames
for idx in range(int(len(psds_all))):
# ---------------------------------
# plot power spectrum
# ---------------------------------
p1 = plt.subplot(gs[0, idx])
# Convert PSDs to dB
psds = 10 * np.log10(psds_all[idx])
psd_mean = np.mean(psds, axis=0)
if area_mode == 'std':
psd_std = np.std(psds, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
else: # area_mode is None
hyp_limits = None
p1.plot(freqs_all[idx], psd_mean, color=color)
if hyp_limits is not None:
p1.fill_between(freqs_all[idx], hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
if idx == 0:
p1.set_title(title1)
ylim = [np.min(psd_mean) - 10, np.max(psd_mean) + 10]
else:
p1.set_title(title2)
p1.set_xlabel('Freq (Hz)')
p1.set_ylabel('Power Spectral Density (dB/Hz)')
p1.set_xlim(freqs_all[idx][0], freqs_all[idx][-1])
p1.set_ylim(ylim[0], ylim[1])
# ---------------------------------
# plot signal around stimulus
# onset
# ---------------------------------
if stim_name:
raw = mne.io.Raw(fnraw[idx], preload=True)
epochs = mne.Epochs(raw, events, event_id, proj=False,
tmin=tmin_stim, tmax=tmax_stim, picks=picks,
preload=True, baseline=(None, None))
evoked = epochs.average()
if idx == 0:
ymin = np.min(evoked.data)
ymax = np.max(evoked.data)
times = evoked.times * 1e3
p2 = plt.subplot(gs[1, idx])
p2.plot(times, evoked.data.T, 'blue', linewidth=0.5)
p2.set_xlim(times[0], times[len(times) - 1])
p2.set_ylim(1.1 * ymin, 1.1 * ymax)
if (idx == 1) and info:
plt.text(times[0], 0.9 * ymax, ' ICs: ' + str(info))
# save image
if fnout:
fig.savefig(fnout + '.png', format='png')
# show image if requested
if show:
plt.show()
plt.close('denoising')
plt.ion()
##################################################
#
# routine to detrend the data
#
##################################################
def perform_detrending(fname_raw, save=True):
from mne.io import Raw
from numpy import poly1d, polyfit
fnraw = get_files_from_list(fname_raw)
# loop across all filenames
for fname in fnraw:
# read data in
raw = Raw(fname, preload=True)
# get channels
picks = mne.pick_types(raw.info, meg='mag', ref_meg=True,
eeg=False, stim=False,
eog=False, exclude='bads')
xval = np.arange(raw._data.shape[1])
# loop over all channels
for ipick in picks:
coeff = polyfit(xval, raw._data[ipick, :], deg=1)
trend = poly1d(coeff)
raw._data[ipick, :] -= trend(xval)
# save detrended data
if save:
            fnout = fname[:fname.rfind('-raw.fif')] + ',dt-raw.fif'
raw.save(fnout, overwrite=True)
return raw
##################################################
#
# Get indices of matching channel names from list
#
##################################################
def channel_indices_from_list(fulllist, findlist, excllist=None):
"""Get indices of matching channel names from list
Parameters
----------
fulllist: list of channel names
findlist: list of (regexp) names to find
regexp are resolved using mne.pick_channels_regexp()
excllist: list of channel names to exclude,
e.g., raw.info.get('bads')
Returns
-------
chnpick: array with indices
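    Example
    -------
    (channel names are illustrative; cf. test_noise_reducer() below)
    channel_indices_from_list(raw.info['ch_names'][:], ['MEG ..1', 'RFM 001'],
                              excllist=raw.info.get('bads'))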
"""
chnpick = []
for ir in range(len(findlist)):
if findlist[ir].replace(' ', '').isalnum():
try:
chnpicktmp = ([fulllist.index(findlist[ir])])
chnpick = np.array(np.concatenate((chnpick, chnpicktmp)), dtype=int)
except:
print(">>>>> Channel '%s' not found." % findlist[ir])
else:
chnpicktmp = (mne.pick_channels_regexp(fulllist, findlist[ir]))
if len(chnpicktmp) == 0:
print(">>>>> '%s' does not match any channel name." % findlist[ir])
else:
chnpick = np.array(np.concatenate((chnpick, chnpicktmp)), dtype=int)
if len(chnpick) > 1:
# Remove duplicates:
chnpick = np.sort(np.array(list(set(np.sort(chnpick)))))
if excllist is not None and len(excllist) > 0:
exclinds = [fulllist.index(excllist[ie]) for ie in range(len(excllist))]
chnpick = list(np.setdiff1d(chnpick, exclinds))
return chnpick
##################################################
#
# Apply noise reduction to signal channels
# using reference channels.
#
##################################################
def noise_reducer(fname_raw, raw=None, signals=[], noiseref=[], detrending=None,
tmin=None, tmax=None, reflp=None, refhp=None, refnotch=None,
exclude_artifacts=True, checkresults=True, return_raw=False,
complementary_signal=False, fnout=None, verbose=False):
"""
Apply noise reduction to signal channels using reference channels.
Parameters
----------
fname_raw : (list of) rawfile name(s)
raw : mne Raw objects
Allows passing of (preloaded) raw object in addition to fname_raw
or solely (use fname_raw=None in this case).
signals : list of string
List of channels to compensate using noiseref.
If empty use the meg signal channels.
noiseref : list of string | str
List of channels to use as noise reference.
If empty use the magnetic reference channsls (default).
signals and noiseref may contain regexp, which are resolved
using mne.pick_channels_regexp(). All other channels are copied.
tmin : lower latency bound for weight-calc [start of trace]
tmax : upper latency bound for weight-calc [ end of trace]
Weights are calc'd for (tmin,tmax), but applied to entire data set
refhp : high-pass frequency for reference signal filter [None]
reflp : low-pass frequency for reference signal filter [None]
reflp < refhp: band-stop filter
reflp > refhp: band-pass filter
reflp is not None, refhp is None: low-pass filter
reflp is None, refhp is not None: high-pass filter
refnotch : (list of) notch frequencies for reference signal filter [None]
use raw(ref)-notched(ref) as reference signal
exclude_artifacts: filter signal-channels thru _is_good() [True]
(parameters are at present hard-coded!)
return_raw : bool
If return_raw is true, the raw object is returned and raw file
is not written to disk unless fnout is explicitly specified.
It is suggested that this option be used in cases where the
noise_reducer is applied multiple times. [False]
fnout : explicit specification for an output file name [None]
Automatic filenames replace '-raw.fif' by ',nr-raw.fif'.
complementary_signal : replaced signal by traces that would be
subtracted [False]
(can be useful for debugging)
detrending: boolean to ctrl subtraction of linear trend from all
magn. chans [False]
checkresults : boolean to control internal checks and overall success
[True]
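    Example
    -------
    A minimal call (illustrative file name), band-passing the reference
    channels around the power-line frequency as in test_noise_reducer():
    noise_reducer('mydata-raw.fif', reflp=52., refhp=48., verbose=True)
    writes 'mydata,nr-raw.fif' with the compensated signal channels.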
Outputfile
----------
<wawa>,nr-raw.fif for input <wawa>-raw.fif
Returns
-------
If return_raw is True, then mne.io.Raw instance is returned.
Bugs
----
- artifact checking is incomplete (and with arb. window of tstep=0.2s)
- no accounting of channels used as signal/reference
- non existing input file handled ungracefully
"""
if type(complementary_signal) != bool:
raise ValueError("Argument complementary_signal must be of type bool")
# handle error if Raw object passed with file list
if raw and isinstance(fname_raw, list):
raise ValueError('List of file names cannot be combined with'
'one Raw object')
# handle error if return_raw is requested with file list
if return_raw and isinstance(fname_raw, list):
raise ValueError('List of file names cannot be combined return_raw.'
'Please pass one file at a time.')
# handle error if Raw object is passed with detrending option
# TODO include perform_detrending for Raw objects
if raw and detrending:
raise ValueError('Please perform detrending on the raw file directly.'
'Cannot perform detrending on the raw object')
# Handle combinations of fname_raw and raw object:
if fname_raw is not None:
fnraw = get_files_from_list(fname_raw)
have_input_file = True
elif raw is not None:
if 'filename' in raw.info:
fnraw = [os.path.basename(raw.filenames[0])]
else:
            fnraw = [raw._filenames[0]]
warnings.warn('Setting file name from Raw object')
have_input_file = False
if fnout is None and not return_raw:
raise ValueError('Refusing to waste resources without result')
else:
raise ValueError('Refusing Creatio ex nihilo')
# loop across all filenames
for fname in fnraw:
if verbose:
print("########## Read raw data:")
tc0 = time.clock()
tw0 = time.time()
if raw is None:
if detrending:
raw = perform_detrending(fname, save=False)
else:
raw = mne.io.Raw(fname, preload=True)
else:
# perform sanity check to make sure Raw object and file are same
if 'filename' in raw.info:
fnintern = [os.path.basename(raw.filenames[0])]
else:
fnintern = raw._filenames[0]
if os.path.basename(fname) != os.path.basename(fnintern):
warnings.warn('The file name within the Raw object and provided\n '
'fname are not the same. Please check again.')
tc1 = time.clock()
tw1 = time.time()
if verbose:
print(">>> loading raw data took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tc0)), (tw1 - tw0)))
# Time window selection
# weights are calc'd based on [tmin,tmax], but applied to the entire data set.
# tstep is used in artifact detection
# tmin,tmax variables must not be changed here!
if tmin is None:
itmin = 0
else:
itmin = int(floor(tmin * raw.info['sfreq']))
if tmax is None:
itmax = raw.last_samp - raw.first_samp
else:
itmax = int(ceil(tmax * raw.info['sfreq']))
if itmax - itmin < 2:
raise ValueError("Time-window for noise compensation empty or too short")
if verbose:
print(">>> Set time-range to [%7.3f,%7.3f]" % \
(raw.times[itmin], raw.times[itmax]))
if signals is None or len(signals) == 0:
sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,
eog=False, exclude='bads')
else:
sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,
raw.info.get('bads'))
nsig = len(sigpick)
if nsig == 0:
raise ValueError("No channel selected for noise compensation")
if noiseref is None or len(noiseref) == 0:
# References are not limited to 4D ref-chans, but can be anything,
# incl. ECG or powerline monitor.
if verbose:
print(">>> Using all refchans.")
refexclude = "bads"
refpick = mne.pick_types(raw.info, ref_meg=True, meg=False,
eeg=False, stim=False,
eog=False, exclude='bads')
else:
refpick = channel_indices_from_list(raw.info['ch_names'][:],
noiseref, raw.info.get('bads'))
nref = len(refpick)
if nref == 0:
raise ValueError("No channel selected as noise reference")
if verbose:
print(">>> sigpick: %3d chans, refpick: %3d chans" % (nsig, nref))
badpick = np.intersect1d(sigpick, refpick, assume_unique=False)
if len(badpick) > 0:
raise Warning("Intersection of signal and reference channels not empty")
if reflp is None and refhp is None and refnotch is None:
use_reffilter = False
use_refantinotch = False
else:
use_reffilter = True
if verbose:
print("########## Filter reference channels:")
use_refantinotch = False
if refnotch is not None:
                if reflp is not None or refhp is not None:
raise ValueError("Cannot specify notch- and high-/low-pass"
"reference filter together")
nyquist = (0.5 * raw.info['sfreq'])
if isinstance(refnotch, list):
notchfrqs = refnotch
else:
notchfrqs = [refnotch]
notchfrqscln = []
for nfrq in notchfrqs:
if not isinstance(nfrq, float) and not isinstance(nfrq, int):
raise ValueError("Illegal entry for notch-frequency (", nfrq, ")")
if nfrq >= nyquist:
warnings.warn('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)
else:
notchfrqscln.append(nfrq)
if len(notchfrqscln) == 0:
raise ValueError("Notch frequency list is (now) empty")
use_refantinotch = True
if verbose:
print(">>> notches at freq ", end=' ')
print(notchfrqscln)
else:
if verbose:
if reflp is not None:
print(">>> low-pass with cutoff-freq %.1f" % reflp)
if refhp is not None:
print(">>> high-pass with cutoff-freq %.1f" % refhp)
            # Adapt following drop-chans cmd to use 'all-but-refpick'
droplist = [raw.info['ch_names'][k] for k in range(raw.info['nchan']) if not k in refpick]
tct = time.clock()
twt = time.time()
fltref = raw.copy().drop_channels(droplist)
if use_refantinotch:
rawref = raw.copy().drop_channels(droplist)
fltref.notch_filter(notchfrqscln, fir_design='firwin',
fir_window='hann', phase='zero',
picks=np.array(list(range(nref))),
method='fir')
fltref._data = (rawref._data - fltref._data)
else:
fltref.filter(refhp, reflp, fir_design='firwin',
fir_window='hann', phase='zero',
picks=np.array(list(range(nref))),
method='fir')
tc1 = time.clock()
tw1 = time.time()
if verbose:
print(">>> filtering ref-chans took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)),
(tw1 - twt)))
if verbose:
print("########## Calculating sig-ref/ref-ref-channel covariances:")
# Calculate sig-ref/ref-ref-channel covariance:
# (there is no need to calc inter-signal-chan cov,
        # but there seems to be no appropriate fct available)
# Here we copy the idea from compute_raw_data_covariance()
# and truncate it as appropriate.
tct = time.clock()
twt = time.time()
# The following reject and infosig entries are only
# used in _is_good-calls.
# _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
# ignore ref-channels (not covered by dict) and checks individual
# data segments - artifacts across a buffer boundary are not found.
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6) # uV (EOG channels)
infosig = copy.copy(raw.info)
infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
# the below fields are *NOT* (190103) updated automatically when 'chs' is updated
infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
infosig['nchan'] = len(sigpick)
idx_by_typesig = channel_indices_by_type(infosig)
# Read data in chunks:
tstep = 0.2
itstep = int(ceil(tstep * raw.info['sfreq']))
sigmean = 0
refmean = 0
sscovdata = 0
srcovdata = 0
rrcovdata = 0
n_samples = 0
for first in range(itmin, itmax, itstep):
last = first + itstep
if last >= itmax:
last = itmax
raw_segmentsig, times = raw[sigpick, first:last]
if use_reffilter:
raw_segmentref, times = fltref[:, first:last]
else:
raw_segmentref, times = raw[refpick, first:last]
if not exclude_artifacts or \
_is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
ignore_chs=raw.info['bads']):
sigmean += raw_segmentsig.sum(axis=1)
refmean += raw_segmentref.sum(axis=1)
sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
n_samples += raw_segmentsig.shape[1]
else:
logger.info("Artefact detected in [%d, %d]" % (first, last))
if n_samples <= 1:
raise ValueError('Too few samples to calculate weights')
sigmean /= n_samples
refmean /= n_samples
sscovdata -= n_samples * sigmean[:] * sigmean[:]
sscovdata /= (n_samples - 1)
srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
srcovdata /= (n_samples - 1)
rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
rrcovdata /= (n_samples - 1)
sscovinit = np.copy(sscovdata)
if verbose:
print(">>> Normalize srcov...")
rrslope = copy.copy(rrcovdata)
for iref in range(nref):
dtmp = rrcovdata[iref, iref]
if dtmp > TINY:
srcovdata[:, iref] /= dtmp
rrslope[:, iref] /= dtmp
else:
srcovdata[:, iref] = 0.
rrslope[:, iref] = 0.
if verbose:
print(">>> Number of samples used : %d" % n_samples)
tc1 = time.clock()
tw1 = time.time()
print(">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
if checkresults:
if verbose:
print("########## Calculated initial signal channel covariance:")
# Calculate initial signal channel covariance:
# (only used as quality measure)
print(">>> initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
for i in range(min(5, nsig)):
print(">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
for i in range(max(0, nsig - 5), nsig):
print(">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
print(">>>")
U, s, V = np.linalg.svd(rrslope, full_matrices=True)
if verbose:
print(">>> singular values:")
print(s)
print(">>> Applying cutoff for smallest SVs:")
dtmp = s.max() * SVD_RELCUTOFF
s *= (abs(s) >= dtmp)
sinv = [1. / s[k] if s[k] != 0. else 0. for k in range(nref)]
if verbose:
print(">>> singular values (after cutoff):")
print(s)
stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
if verbose:
print(">>> Testing svd-result: %s" % stat)
if not stat:
print(" (Maybe due to SV-cutoff?)")
# Solve for inverse coefficients:
# Set RRinv.tr=U diag(sinv) V
RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))
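        # (note: up to the SV-cutoff applied above, RRinv is the Moore-Penrose
        #  pseudo-inverse of rrslope, i.e. equivalent to np.linalg.pinv(rrslope))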
if checkresults:
stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))
if stat:
if verbose:
print(">>> Testing RRinv-result (should be unit-matrix): ok")
else:
print(">>> Testing RRinv-result (should be unit-matrix): failed")
print(np.transpose(np.dot(RRinv, rrslope)))
print(">>>")
if verbose:
print("########## Calc weight matrix...")
# weights-matrix will be somewhat larger than necessary,
# (to simplify indexing in compensation loop):
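        # Each row weights[sigpick[i], :] holds the regression coefficients
        # C_sig,ref * C_ref,ref^-1 for signal channel i (cf. the sketch in the
        # module header).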
weights = np.zeros((raw._data.shape[0], nref))
for isig in range(nsig):
for iref in range(nref):
weights[sigpick[isig], iref] = np.dot(srcovdata[isig, :], RRinv[:, iref])
if verbose:
print("########## Compensating signal channels:")
if complementary_signal:
print(">>> Caveat: REPLACING signal by compensation signal")
tct = time.clock()
twt = time.time()
# Work on entire data stream:
for isl in range(raw._data.shape[1]):
slice = np.take(raw._data, [isl], axis=1)
if use_reffilter:
refslice = np.take(fltref._data, [isl], axis=1)
refarr = refslice[:].flatten() - refmean
# refarr = fltres[:,isl]-refmean
else:
refarr = slice[refpick].flatten() - refmean
subrefarr = np.dot(weights[:], refarr)
if not complementary_signal:
raw._data[:, isl] -= subrefarr
else:
raw._data[:, isl] = subrefarr
if (isl % 10000 == 0 or isl + 1 == raw._data.shape[1]) and verbose:
print("\rProcessed slice %6d" % isl, end=" ")
sys.stdout.flush()
if verbose:
print("\nDone.")
tc1 = time.clock()
tw1 = time.time()
print(">>> compensation loop took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)), (tw1 - twt)))
if checkresults:
if verbose:
print("########## Calculating final signal channel covariance:")
# Calculate final signal channel covariance:
# (only used as quality measure)
tct = time.clock()
twt = time.time()
sigmean = 0
sscovdata = 0
n_samples = 0
for first in range(itmin, itmax, itstep):
last = first + itstep
if last >= itmax:
last = itmax
raw_segmentsig, times = raw[sigpick, first:last]
# Artifacts found here will probably differ from pre-noisered artifacts!
if not exclude_artifacts or \
_is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
flat=None, ignore_chs=raw.info['bads']):
sigmean += raw_segmentsig.sum(axis=1)
sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
n_samples += raw_segmentsig.shape[1]
if n_samples <= 1:
raise ValueError('Too few samples to calculate final signal channel covariance')
sigmean /= n_samples
sscovdata -= n_samples * sigmean[:] * sigmean[:]
sscovdata /= (n_samples - 1)
if verbose:
print(">>> no channel got worse: %s" % str(np.all(np.less_equal(sscovdata, sscovinit))))
print(">>> final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
for i in range(min(5, nsig)):
print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
# for i in range(min(5,nsig),max(0,nsig-5)):
# print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
for i in range(max(0, nsig - 5), nsig):
print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
tc1 = time.clock()
tw1 = time.time()
print(">>> signal covar-calc took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)),
(tw1 - twt)))
print(">>>")
if fnout is not None:
fnoutloc = fnout
elif return_raw:
fnoutloc = None
elif have_input_file:
fnoutloc = fname[:fname.rfind('-raw.fif')] + ',nr-raw.fif'
else:
fnoutloc = None
if fnoutloc is not None:
if verbose:
print(">>> Saving '%s'..." % fnoutloc)
raw.save(fnoutloc, overwrite=True)
tc1 = time.clock()
tw1 = time.time()
if verbose:
print(">>> Total run took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tc0)), (tw1 - tw0)))
if return_raw:
if verbose:
print(">>> Returning raw object...")
return raw
##################################################
#
# routine to test if the noise reducer is
# working properly
#
##################################################
def test_noise_reducer():
data_path = os.environ['SUBJECTS_DIR']
subject = os.environ['SUBJECT']
dname = data_path + '/' + 'empty_room_files' + '/109925_empty_room_file-raw.fif'
subjects_dir = data_path + '/subjects'
#
checkresults = True
exclart = False
use_reffilter = True
refflt_lpfreq = 52.
refflt_hpfreq = 48.
print("########## before of noisereducer call ##########")
sigchanlist = ['MEG ..1', 'MEG ..3', 'MEG ..5', 'MEG ..7', 'MEG ..9']
# sigchanlist = None
refchanlist = ['RFM 001', 'RFM 003', 'RFM 005', 'RFG ...']
tmin = 15.
inraw = mne.io.Raw(dname, preload=True)
dname1 = dname[:dname.rfind('-raw.fif')] + ',test-raw.fif'
dname1nr = dname[:dname.rfind('-raw.fif')] + ',testnr-raw.fif'
noise_reducer(dname, raw=None, signals=sigchanlist, noiseref=refchanlist, tmin=tmin,
reflp=refflt_lpfreq, refhp=refflt_hpfreq, fnout=None,
exclude_artifacts=exclart, verbose=True, return_raw=False)
print("########## behind of noisereducer call ##########")
print("########## Read raw data:")
tc0 = time.clock()
tw0 = time.time()
raw = mne.io.Raw(dname, preload=True)
tc1 = time.clock()
tw1 = time.time()
print("loading raw data took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0)))
# Time window selection
# weights are calc'd based on [tmin,tmax], but applied to the entire data set.
# tstep is used in artifact detection
tmax = raw.times[raw.last_samp]
tstep = 0.2
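    # Convert the [tmin, tmax] window and the artifact-detection step from seconds
    # to sample indices at the recording's sampling rate.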
itmin = int(floor(tmin * raw.info['sfreq']))
itmax = int(ceil(tmax * raw.info['sfreq']))
itstep = int(ceil(tstep * raw.info['sfreq']))
print(">>> Set time-range to [%7.3f,%7.3f]" % (tmin, tmax))
if sigchanlist is None:
sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=False, exclude='bads')
else:
sigpick = channel_indices_from_list(raw.info['ch_names'][:], sigchanlist)
nsig = len(sigpick)
print("sigpick: %3d chans" % nsig)
if nsig == 0:
raise ValueError("No channel selected for noise compensation")
if refchanlist is None:
# References are not limited to 4D ref-chans, but can be anything,
# incl. ECG or powerline monitor.
print(">>> Using all refchans.")
refexclude = "bads"
refpick = mne.pick_types(raw.info, ref_meg=True, meg=False, eeg=False,
stim=False, eog=False, exclude=refexclude)
else:
refpick = channel_indices_from_list(raw.info['ch_names'][:], refchanlist)
print("refpick = '%s'" % refpick)
nref = len(refpick)
print("refpick: %3d chans" % nref)
if nref == 0:
raise ValueError("No channel selected as noise reference")
print("########## Refchan geo data:")
# This is just for info to locate special 4D-refs.
for iref in refpick:
print(raw.info['chs'][iref]['ch_name'], raw.info['chs'][iref]['loc'][0:3])
print("")
if use_reffilter:
print("########## Filter reference channels:")
if refflt_lpfreq is not None:
print(" low-pass with cutoff-freq %.1f" % refflt_lpfreq)
if refflt_hpfreq is not None:
print("high-pass with cutoff-freq %.1f" % refflt_hpfreq)
    # Adapt the following drop-channels command to use 'all-but-refpick'
droplist = [raw.info['ch_names'][k] for k in range(raw.info['nchan']) if not k in refpick]
fltref = raw.copy().drop_channels(droplist)
tct = time.clock()
twt = time.time()
fltref.filter(refflt_hpfreq, refflt_lpfreq, picks=np.array(list(range(nref))), method='fft')
tc1 = time.clock()
tw1 = time.time()
print("filtering ref-chans took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
print("########## Calculating sig-ref/ref-ref-channel covariances:")
# Calculate sig-ref/ref-ref-channel covariance:
# (there is no need to calc inter-signal-chan cov,
    #  but there seems to be no appropriate function available)
# Here we copy the idea from compute_raw_data_covariance()
# and truncate it as appropriate.
tct = time.clock()
twt = time.time()
# The following reject and info{sig,ref} entries are only
# used in _is_good-calls.
# _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
# ignore ref-channels (not covered by dict) and checks individual
# data segments - artifacts across a buffer boundary are not found.
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6) # uV (EOG channels)
infosig = copy.copy(raw.info)
infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
# the below fields are *NOT* (190103) updated automatically when 'chs' is updated
infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
infosig['nchan'] = len(sigpick)
idx_by_typesig = channel_indices_by_type(infosig)
    # inforef is not meaningful after filtering, and is unused anyway
inforef = copy.copy(raw.info)
inforef['chs'] = [raw.info['chs'][k] for k in refpick]
# 'ch_names' and 'nchan' updated automatically when 'chs' is updated
idx_by_typeref = channel_indices_by_type(inforef)
# Read data in chunks:
sigmean = 0
refmean = 0
sscovdata = 0
srcovdata = 0
rrcovdata = 0
n_samples = 0
for first in range(itmin, itmax, itstep):
last = first + itstep
if last >= itmax:
last = itmax
raw_segmentsig, times = raw[sigpick, first:last]
if use_reffilter:
raw_segmentref, times = fltref[:, first:last]
else:
raw_segmentref, times = raw[refpick, first:last]
# if True:
# if _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
# ignore_chs=raw.info['bads']) and _is_good(raw_segmentref,
# inforef['ch_names'], idx_by_typeref, reject, flat=None,
# ignore_chs=raw.info['bads']):
if not exclart or \
_is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
flat=None, ignore_chs=raw.info['bads']):
sigmean += raw_segmentsig.sum(axis=1)
refmean += raw_segmentref.sum(axis=1)
sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
n_samples += raw_segmentsig.shape[1]
else:
logger.info("Artefact detected in [%d, %d]" % (first, last))
# _check_n_samples(n_samples, len(picks))
if n_samples <= 1:
raise ValueError('Too few samples to calculate covariances')
sigmean /= n_samples
refmean /= n_samples
sscovdata -= n_samples * sigmean[:] * sigmean[:]
sscovdata /= (n_samples - 1)
srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
srcovdata /= (n_samples - 1)
rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
rrcovdata /= (n_samples - 1)
sscovinit = sscovdata
print("Normalize srcov...")
rrslopedata = copy.copy(rrcovdata)
for iref in range(nref):
dtmp = rrcovdata[iref][iref]
if dtmp > TINY:
for isig in range(nsig):
srcovdata[isig][iref] /= dtmp
for jref in range(nref):
rrslopedata[jref][iref] /= dtmp
else:
for isig in range(nsig):
srcovdata[isig][iref] = 0.
for jref in range(nref):
rrslopedata[jref][iref] = 0.
logger.info("Number of samples used : %d" % n_samples)
tc1 = time.clock()
tw1 = time.time()
print("sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
print("########## Calculating sig-ref/ref-ref-channel covariances (robust):")
# Calculate sig-ref/ref-ref-channel covariance:
    # (using B. P. Welford, "Note on a method for calculating corrected sums
    #  of squares and products", Technometrics 4 (1962) 419-420)
# (there is no need to calc inter-signal-chan cov,
    #  but there seems to be no appropriate function available)
# Here we copy the idea from compute_raw_data_covariance()
# and truncate it as appropriate.
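    # Welford-style running updates used in the loop below (x = signal sample,
    # r = reference sample, n = running sample count):
    #   mean_n  = mean_{n-1} + (x_n - mean_{n-1}) / n
    #   sscov  += (x_n - smean_{n-1}) * (x_n - smean_n)
    #   srcov  += ((n-1)/n) * (x_n - smean_{n-1}) (r_n - rmean_{n-1})^T   (rrcov analogously)
    # This avoids the cancellation-prone "sum(x*x) - n*mean**2" form of the first pass.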
tct = time.clock()
twt = time.time()
# The following reject and info{sig,ref} entries are only
# used in _is_good-calls.
# _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
# ignore ref-channels (not covered by dict) and checks individual
# data segments - artifacts across a buffer boundary are not found.
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6) # uV (EOG channels)
infosig = copy.copy(raw.info)
infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
# the below fields are *NOT* (190103) updated automatically when 'chs' is updated
infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
infosig['nchan'] = len(sigpick)
idx_by_typesig = channel_indices_by_type(infosig)
    # inforef is not meaningful after filtering, and is unused anyway
inforef = copy.copy(raw.info)
inforef['chs'] = [raw.info['chs'][k] for k in refpick]
# 'ch_names' and 'nchan' updated automatically when 'chs' is updated
idx_by_typeref = channel_indices_by_type(inforef)
# Read data in chunks:
smean = np.zeros(nsig)
smold = np.zeros(nsig)
rmean = np.zeros(nref)
rmold = np.zeros(nref)
sscov = 0
srcov = 0
rrcov = np.zeros((nref, nref))
srcov = np.zeros((nsig, nref))
n_samples = 0
for first in range(itmin, itmax, itstep):
last = first + itstep
if last >= itmax:
last = itmax
raw_segmentsig, times = raw[sigpick, first:last]
if use_reffilter:
raw_segmentref, times = fltref[:, first:last]
else:
raw_segmentref, times = raw[refpick, first:last]
# if True:
# if _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
# ignore_chs=raw.info['bads']) and _is_good(raw_segmentref,
# inforef['ch_names'], idx_by_typeref, reject, flat=None,
# ignore_chs=raw.info['bads']):
if not exclart or \
_is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
flat=None, ignore_chs=raw.info['bads']):
for isl in range(raw_segmentsig.shape[1]):
nsl = isl + n_samples + 1
cnslm1dnsl = float((nsl - 1)) / float(nsl)
sslsubmean = (raw_segmentsig[:, isl] - smold)
rslsubmean = (raw_segmentref[:, isl] - rmold)
smean = smold + sslsubmean / float(nsl)
rmean = rmold + rslsubmean / float(nsl)
sscov += sslsubmean * (raw_segmentsig[:, isl] - smean)
srcov += cnslm1dnsl * np.dot(sslsubmean.reshape((nsig, 1)), rslsubmean.reshape((1, nref)))
rrcov += cnslm1dnsl * np.dot(rslsubmean.reshape((nref, 1)), rslsubmean.reshape((1, nref)))
smold = smean
rmold = rmean
n_samples += raw_segmentsig.shape[1]
else:
logger.info("Artefact detected in [%d, %d]" % (first, last))
# _check_n_samples(n_samples, len(picks))
if n_samples <= 1:
raise ValueError('Too few samples to calculate covariances')
sscov /= (n_samples - 1)
srcov /= (n_samples - 1)
rrcov /= (n_samples - 1)
print("Normalize srcov...")
rrslope = copy.copy(rrcov)
for iref in range(nref):
dtmp = rrcov[iref][iref]
if dtmp > TINY:
srcov[:, iref] /= dtmp
rrslope[:, iref] /= dtmp
else:
srcov[:, iref] = 0.
rrslope[:, iref] = 0.
logger.info("Number of samples used : %d" % n_samples)
print("Compare results with 'standard' values:")
print("cmp(sigmean,smean):", np.allclose(smean, sigmean, atol=0.))
print("cmp(refmean,rmean):", np.allclose(rmean, refmean, atol=0.))
print("cmp(sscovdata,sscov):", np.allclose(sscov, sscovdata, atol=0.))
print("cmp(srcovdata,srcov):", np.allclose(srcov, srcovdata, atol=0.))
print("cmp(rrcovdata,rrcov):", np.allclose(rrcov, rrcovdata, atol=0.))
tc1 = time.clock()
tw1 = time.time()
print("sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
if checkresults:
print("########## Calculated initial signal channel covariance:")
# Calculate initial signal channel covariance:
# (only used as quality measure)
print("initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscov)))
for i in range(min(5, nsig)):
print("initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscov.flatten()[i])))
print(" ")
if nref < 6:
print("rrslope-entries:")
for i in range(nref):
print(rrslope[i][:])
U, s, V = np.linalg.svd(rrslope, full_matrices=True)
print(s)
print("Applying cutoff for smallest SVs:")
dtmp = s.max() * SVD_RELCUTOFF
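    # Relative cutoff: singular values below SVD_RELCUTOFF times the largest singular
    # value are zeroed (and their inverses set to zero) to keep the inversion stable.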
sinv = np.zeros(nref)
for i in range(nref):
if abs(s[i]) >= dtmp:
sinv[i] = 1. / s[i]
else:
s[i] = 0.
# s *= (abs(s)>=dtmp)
# sinv = ???
print(s)
stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
print(">>> Testing svd-result: %s" % stat)
if not stat:
print(" (Maybe due to SV-cutoff?)")
# Solve for inverse coefficients:
print(">>> Setting RRinvtr=U diag(sinv) V")
RRinvtr = np.zeros((nref, nref))
RRinvtr = np.dot(U, np.dot(np.diag(sinv), V))
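    # RRinvtr = U diag(1/s) V is the (truncated-SVD) inverse of rrslope transposed,
    # so rrslope.T @ RRinvtr should be the identity up to any truncated singular values;
    # the checkresults block below verifies exactly that.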
if checkresults:
# print(">>> RRinvtr-result:")
# print(RRinvtr)
stat = np.allclose(np.identity(nref), np.dot(rrslope.transpose(), RRinvtr))
if stat:
print(">>> Testing RRinvtr-result (shld be unit-matrix): ok")
else:
print(">>> Testing RRinvtr-result (shld be unit-matrix): failed")
print(np.dot(rrslope.transpose(), RRinvtr))
# np.less_equal(np.abs(np.dot(rrslope.transpose(),RRinvtr)-np.identity(nref)),0.01*np.ones((nref,nref)))
print("")
print("########## Calc weight matrix...")
# weights-matrix will be somewhat larger than necessary,
# (to simplify indexing in compensation loop):
weights = np.zeros((raw._data.shape[0], nref))
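    # Each weight is dot(srcov[isig, :], RRinvtr[iref, :]), i.e. the least-squares
    # regression coefficients of the signal channels on the reference channels
    # (srcov @ rrcov^{-1}, computed via the normalized "slope" matrices and the
    # truncated SVD inverse); rows of channels not in sigpick remain zero.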
for isig in range(nsig):
for iref in range(nref):
weights[sigpick[isig]][iref] = np.dot(srcov[isig][:], RRinvtr[iref][:])
if np.allclose(np.zeros(weights.shape), np.abs(weights), atol=1.e-8):
print(">>> all weights are small (<=1.e-8).")
else:
print(">>> largest weight %12.5e" % np.max(np.abs(weights)))
wlrg = np.where(np.abs(weights) >= 0.99 * np.max(np.abs(weights)))
for iwlrg in range(len(wlrg[0])):
print(">>> weights[%3d,%2d] = %12.5e" % (wlrg[0][iwlrg], wlrg[1][iwlrg],
weights[wlrg[0][iwlrg], wlrg[1][iwlrg]]))
if nref < 5:
print("weights-entries for first sigchans:")
for i in range(min(5, nsig)):
print('weights[sp(%2d)][r]=[' % i + ' '.join([' %+10.7f' % val for val in weights[sigpick[i]][:]]) + ']')
print("########## Compensating signal channels:")
tct = time.clock()
twt = time.time()
# data,times = raw[:,raw.time_as_index(tmin)[0]:raw.time_as_index(tmax)[0]:]
# Work on entire data stream:
for isl in range(raw._data.shape[1]):
slice = np.take(raw._data, [isl], axis=1)
if use_reffilter:
refslice = np.take(fltref._data, [isl], axis=1)
refarr = refslice[:].flatten() - rmean
# refarr = fltres[:,isl]-rmean
else:
refarr = slice[refpick].flatten() - rmean
subrefarr = np.dot(weights[:], refarr)
# data[:,isl] -= subrefarr will not modify raw._data?
raw._data[:, isl] -= subrefarr
if isl % 10000 == 0:
print("\rProcessed slice %6d" % isl)
print("\nDone.")
tc1 = time.clock()
tw1 = time.time()
print("compensation loop took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
if checkresults:
print("########## Calculating final signal channel covariance:")
# Calculate final signal channel covariance:
# (only used as quality measure)
tct = time.clock()
twt = time.time()
sigmean = 0
sscovdata = 0
n_samples = 0
for first in range(itmin, itmax, itstep):
last = first + itstep
if last >= itmax:
last = itmax
raw_segmentsig, times = raw[sigpick, first:last]
# Artifacts found here will probably differ from pre-noisered artifacts!
if not exclart or \
_is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
flat=None, ignore_chs=raw.info['bads']):
sigmean += raw_segmentsig.sum(axis=1)
sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
n_samples += raw_segmentsig.shape[1]
if n_samples <= 1:
raise ValueError('Too few samples to calculate final signal channel covariances')
sigmean /= n_samples
sscovdata -= n_samples * sigmean[:] * sigmean[:]
sscovdata /= (n_samples - 1)
print(">>> no channel got worse: ", np.all(np.less_equal(sscovdata, sscovinit)))
print("final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
for i in range(min(5, nsig)):
print("final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
tc1 = time.clock()
tw1 = time.time()
print("signal covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
print(" ")
nrname = dname[:dname.rfind('-raw.fif')] + ',nold-raw.fif'
print("Saving '%s'..." % nrname)
raw.save(nrname, overwrite=True)
tc1 = time.clock()
tw1 = time.time()
print("Total run took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0)))
|
[
"numpy.abs",
"numpy.polyfit",
"mne.pick_types",
"mne.io.Raw",
"numpy.allclose",
"mne.epochs._is_good",
"mne.find_events",
"matplotlib.pyplot.figure",
"numpy.linalg.svd",
"numpy.mean",
"numpy.arange",
"sys.stdout.flush",
"numpy.diag",
"builtins.range",
"mne.utils.logger.info",
"numpy.copy",
"numpy.std",
"matplotlib.pyplot.close",
"numpy.identity",
"time.clock",
"numpy.max",
"numpy.intersect1d",
"numpy.log10",
"numpy.less_equal",
"matplotlib.pyplot.show",
"math.ceil",
"os.path.basename",
"jumeg.jumeg_utils.get_files_from_list",
"matplotlib.pyplot.ion",
"numpy.min",
"numpy.sort",
"numpy.dot",
"builtins.str",
"mne.io.pick.channel_indices_by_type",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"numpy.poly1d",
"mne.time_frequency.psd_welch",
"copy.copy",
"numpy.zeros",
"math.floor",
"time.time",
"numpy.setdiff1d",
"mne.Epochs",
"numpy.take",
"mne.pick_channels_regexp",
"warnings.warn"
] |
[((4687, 4717), 'jumeg.jumeg_utils.get_files_from_list', 'get_files_from_list', (['fname_raw'], {}), '(fname_raw)\n', (4706, 4717), False, 'from jumeg.jumeg_utils import get_files_from_list\n'), ((5704, 5755), 'matplotlib.pyplot.figure', 'plt.figure', (['"""denoising"""'], {'figsize': '(16, 6 * n_xplots)'}), "('denoising', figsize=(16, 6 * n_xplots))\n", (5714, 5755), True, 'import matplotlib.pyplot as plt\n'), ((8138, 8160), 'matplotlib.pyplot.close', 'plt.close', (['"""denoising"""'], {}), "('denoising')\n", (8147, 8160), True, 'import matplotlib.pyplot as plt\n'), ((8165, 8174), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (8172, 8174), True, 'import matplotlib.pyplot as plt\n'), ((8438, 8468), 'jumeg.jumeg_utils.get_files_from_list', 'get_files_from_list', (['fname_raw'], {}), '(fname_raw)\n', (8457, 8468), False, 'from jumeg.jumeg_utils import get_files_from_list\n'), ((33164, 33195), 'mne.io.Raw', 'mne.io.Raw', (['dname'], {'preload': '(True)'}), '(dname, preload=True)\n', (33174, 33195), False, 'import mne\n'), ((33677, 33689), 'time.clock', 'time.clock', ([], {}), '()\n', (33687, 33689), False, 'import time\n'), ((33700, 33711), 'time.time', 'time.time', ([], {}), '()\n', (33709, 33711), False, 'import time\n'), ((33722, 33753), 'mne.io.Raw', 'mne.io.Raw', (['dname'], {'preload': '(True)'}), '(dname, preload=True)\n', (33732, 33753), False, 'import mne\n'), ((33764, 33776), 'time.clock', 'time.clock', ([], {}), '()\n', (33774, 33776), False, 'import time\n'), ((33787, 33798), 'time.time', 'time.time', ([], {}), '()\n', (33796, 33798), False, 'import time\n'), ((36734, 36746), 'time.clock', 'time.clock', ([], {}), '()\n', (36744, 36746), False, 'import time\n'), ((36757, 36768), 'time.time', 'time.time', ([], {}), '()\n', (36766, 36768), False, 'import time\n'), ((37293, 37312), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (37302, 37312), False, 'import copy\n'), ((37584, 37616), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['infosig'], {}), '(infosig)\n', (37607, 37616), False, 'from mne.io.pick import channel_indices_by_type\n'), ((37688, 37707), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (37697, 37707), False, 'import copy\n'), ((37861, 37893), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['inforef'], {}), '(inforef)\n', (37884, 37893), False, 'from mne.io.pick import channel_indices_by_type\n'), ((38043, 38070), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (38048, 38070), False, 'from builtins import range\n'), ((39861, 39881), 'copy.copy', 'copy.copy', (['rrcovdata'], {}), '(rrcovdata)\n', (39870, 39881), False, 'import copy\n'), ((39898, 39909), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (39903, 39909), False, 'from builtins import range\n'), ((40320, 40374), 'mne.utils.logger.info', 'logger.info', (["('Number of samples used : %d' % n_samples)"], {}), "('Number of samples used : %d' % n_samples)\n", (40331, 40374), False, 'from mne.utils import logger\n'), ((40385, 40397), 'time.clock', 'time.clock', ([], {}), '()\n', (40395, 40397), False, 'import time\n'), ((40408, 40419), 'time.time', 'time.time', ([], {}), '()\n', (40417, 40419), False, 'import time\n'), ((41035, 41047), 'time.clock', 'time.clock', ([], {}), '()\n', (41045, 41047), False, 'import time\n'), ((41058, 41069), 'time.time', 'time.time', ([], {}), '()\n', (41067, 41069), False, 'import time\n'), ((41594, 41613), 'copy.copy', 'copy.copy', (['raw.info'], {}), 
'(raw.info)\n', (41603, 41613), False, 'import copy\n'), ((41885, 41917), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['infosig'], {}), '(infosig)\n', (41908, 41917), False, 'from mne.io.pick import channel_indices_by_type\n'), ((41989, 42008), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (41998, 42008), False, 'import copy\n'), ((42162, 42194), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['inforef'], {}), '(inforef)\n', (42185, 42194), False, 'from mne.io.pick import channel_indices_by_type\n'), ((42235, 42249), 'numpy.zeros', 'np.zeros', (['nsig'], {}), '(nsig)\n', (42243, 42249), True, 'import numpy as np\n'), ((42262, 42276), 'numpy.zeros', 'np.zeros', (['nsig'], {}), '(nsig)\n', (42270, 42276), True, 'import numpy as np\n'), ((42289, 42303), 'numpy.zeros', 'np.zeros', (['nref'], {}), '(nref)\n', (42297, 42303), True, 'import numpy as np\n'), ((42316, 42330), 'numpy.zeros', 'np.zeros', (['nref'], {}), '(nref)\n', (42324, 42330), True, 'import numpy as np\n'), ((42371, 42393), 'numpy.zeros', 'np.zeros', (['(nref, nref)'], {}), '((nref, nref))\n', (42379, 42393), True, 'import numpy as np\n'), ((42406, 42428), 'numpy.zeros', 'np.zeros', (['(nsig, nref)'], {}), '((nsig, nref))\n', (42414, 42428), True, 'import numpy as np\n'), ((42464, 42491), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (42469, 42491), False, 'from builtins import range\n'), ((44441, 44457), 'copy.copy', 'copy.copy', (['rrcov'], {}), '(rrcov)\n', (44450, 44457), False, 'import copy\n'), ((44474, 44485), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (44479, 44485), False, 'from builtins import range\n'), ((44700, 44754), 'mne.utils.logger.info', 'logger.info', (["('Number of samples used : %d' % n_samples)"], {}), "('Number of samples used : %d' % n_samples)\n", (44711, 44754), False, 'from mne.utils import logger\n'), ((45185, 45197), 'time.clock', 'time.clock', ([], {}), '()\n', (45195, 45197), False, 'import time\n'), ((45208, 45219), 'time.time', 'time.time', ([], {}), '()\n', (45217, 45219), False, 'import time\n'), ((45861, 45903), 'numpy.linalg.svd', 'np.linalg.svd', (['rrslope'], {'full_matrices': '(True)'}), '(rrslope, full_matrices=True)\n', (45874, 45903), True, 'import numpy as np\n'), ((46011, 46025), 'numpy.zeros', 'np.zeros', (['nref'], {}), '(nref)\n', (46019, 46025), True, 'import numpy as np\n'), ((46039, 46050), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (46044, 46050), False, 'from builtins import range\n'), ((46484, 46506), 'numpy.zeros', 'np.zeros', (['(nref, nref)'], {}), '((nref, nref))\n', (46492, 46506), True, 'import numpy as np\n'), ((47273, 47309), 'numpy.zeros', 'np.zeros', (['(raw._data.shape[0], nref)'], {}), '((raw._data.shape[0], nref))\n', (47281, 47309), True, 'import numpy as np\n'), ((47326, 47337), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (47331, 47337), False, 'from builtins import range\n'), ((48253, 48265), 'time.clock', 'time.clock', ([], {}), '()\n', (48263, 48265), False, 'import time\n'), ((48276, 48287), 'time.time', 'time.time', ([], {}), '()\n', (48285, 48287), False, 'import time\n'), ((48418, 48443), 'builtins.range', 'range', (['raw._data.shape[1]'], {}), '(raw._data.shape[1])\n', (48423, 48443), False, 'from builtins import range\n'), ((49002, 49014), 'time.clock', 'time.clock', ([], {}), '()\n', (49012, 49014), False, 'import time\n'), ((49025, 49036), 'time.time', 'time.time', ([], {}), '()\n', (49034, 49036), False, 'import 
time\n'), ((50968, 50980), 'time.clock', 'time.clock', ([], {}), '()\n', (50978, 50980), False, 'import time\n'), ((50991, 51002), 'time.time', 'time.time', ([], {}), '()\n', (51000, 51002), False, 'import time\n'), ((4961, 4992), 'mne.io.Raw', 'mne.io.Raw', (['fname'], {'preload': '(True)'}), '(fname, preload=True)\n', (4971, 4992), False, 'import mne\n'), ((5009, 5098), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n", (5023, 5098), False, 'import mne\n'), ((5278, 5392), 'mne.time_frequency.psd_welch', 'psd_welch', (['raw'], {'picks': 'picks', 'fmin': 'fmin', 'fmax': 'fmax', 'tmin': 'tmin', 'tmax': 'tmax', 'n_fft': 'n_fft', 'n_jobs': 'n_jobs', 'proj': 'proj'}), '(raw, picks=picks, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax,\n n_fft=n_fft, n_jobs=n_jobs, proj=proj)\n', (5287, 5392), False, 'from mne.time_frequency import psd_welch\n'), ((5598, 5660), 'mne.find_events', 'mne.find_events', (['raw'], {'stim_channel': 'stim_name', 'consecutive': '(True)'}), '(raw, stim_channel=stim_name, consecutive=True)\n', (5613, 5660), False, 'import mne\n'), ((6015, 6038), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, idx]'], {}), '(gs[0, idx])\n', (6026, 6038), True, 'import matplotlib.pyplot as plt\n'), ((6132, 6153), 'numpy.mean', 'np.mean', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6139, 6153), True, 'import numpy as np\n'), ((8122, 8132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8130, 8132), True, 'import matplotlib.pyplot as plt\n'), ((8564, 8588), 'mne.io.Raw', 'Raw', (['fname'], {'preload': '(True)'}), '(fname, preload=True)\n', (8567, 8588), False, 'from mne.io import Raw\n'), ((8629, 8732), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'ref_meg': '(True)', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', ref_meg=True, eeg=False, stim=False,\n eog=False, exclude='bads')\n", (8643, 8732), False, 'import mne\n'), ((8806, 8835), 'numpy.arange', 'np.arange', (['raw._data.shape[1]'], {}), '(raw._data.shape[1])\n', (8815, 8835), True, 'import numpy as np\n'), ((15040, 15070), 'jumeg.jumeg_utils.get_files_from_list', 'get_files_from_list', (['fname_raw'], {}), '(fname_raw)\n', (15059, 15070), False, 'from jumeg.jumeg_utils import get_files_from_list\n'), ((15688, 15700), 'time.clock', 'time.clock', ([], {}), '()\n', (15698, 15700), False, 'import time\n'), ((15715, 15726), 'time.time', 'time.time', ([], {}), '()\n', (15724, 15726), False, 'import time\n'), ((16416, 16428), 'time.clock', 'time.clock', ([], {}), '()\n', (16426, 16428), False, 'import time\n'), ((16443, 16454), 'time.time', 'time.time', ([], {}), '()\n', (16452, 16454), False, 'import time\n'), ((18706, 18759), 'numpy.intersect1d', 'np.intersect1d', (['sigpick', 'refpick'], {'assume_unique': '(False)'}), '(sigpick, refpick, assume_unique=False)\n', (18720, 18759), True, 'import numpy as np\n'), ((22328, 22340), 'time.clock', 'time.clock', ([], {}), '()\n', (22338, 22340), False, 'import time\n'), ((22355, 22366), 'time.time', 'time.time', ([], {}), '()\n', (22364, 22366), False, 'import time\n'), ((22925, 22944), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (22934, 22944), False, 'import copy\n'), ((23236, 23268), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['infosig'], {}), '(infosig)\n', (23259, 23268), 
False, 'from mne.io.pick import channel_indices_by_type\n'), ((23525, 23552), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (23530, 23552), False, 'from builtins import range\n'), ((25037, 25055), 'numpy.copy', 'np.copy', (['sscovdata'], {}), '(sscovdata)\n', (25044, 25055), True, 'import numpy as np\n'), ((25139, 25159), 'copy.copy', 'copy.copy', (['rrcovdata'], {}), '(rrcovdata)\n', (25148, 25159), False, 'import copy\n'), ((25180, 25191), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (25185, 25191), False, 'from builtins import range\n'), ((26393, 26435), 'numpy.linalg.svd', 'np.linalg.svd', (['rrslope'], {'full_matrices': '(True)'}), '(rrslope, full_matrices=True)\n', (26406, 26435), True, 'import numpy as np\n'), ((27822, 27858), 'numpy.zeros', 'np.zeros', (['(raw._data.shape[0], nref)'], {}), '((raw._data.shape[0], nref))\n', (27830, 27858), True, 'import numpy as np\n'), ((27879, 27890), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (27884, 27890), False, 'from builtins import range\n'), ((28231, 28243), 'time.clock', 'time.clock', ([], {}), '()\n', (28241, 28243), False, 'import time\n'), ((28258, 28269), 'time.time', 'time.time', ([], {}), '()\n', (28267, 28269), False, 'import time\n'), ((28328, 28353), 'builtins.range', 'range', (['raw._data.shape[1]'], {}), '(raw._data.shape[1])\n', (28333, 28353), False, 'from builtins import range\n'), ((32089, 32101), 'time.clock', 'time.clock', ([], {}), '()\n', (32099, 32101), False, 'import time\n'), ((32116, 32127), 'time.time', 'time.time', ([], {}), '()\n', (32125, 32127), False, 'import time\n'), ((34120, 34151), 'math.floor', 'floor', (["(tmin * raw.info['sfreq'])"], {}), "(tmin * raw.info['sfreq'])\n", (34125, 34151), False, 'from math import floor, ceil\n'), ((34169, 34199), 'math.ceil', 'ceil', (["(tmax * raw.info['sfreq'])"], {}), "(tmax * raw.info['sfreq'])\n", (34173, 34199), False, 'from math import floor, ceil\n'), ((34218, 34249), 'math.ceil', 'ceil', (["(tstep * raw.info['sfreq'])"], {}), "(tstep * raw.info['sfreq'])\n", (34222, 34249), False, 'from math import floor, ceil\n'), ((34362, 34451), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n", (34376, 34451), False, 'import mne\n'), ((34925, 35032), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'ref_meg': '(True)', 'meg': '(False)', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': 'refexclude'}), '(raw.info, ref_meg=True, meg=False, eeg=False, stim=False,\n eog=False, exclude=refexclude)\n', (34939, 35032), False, 'import mne\n'), ((36085, 36097), 'time.clock', 'time.clock', ([], {}), '()\n', (36095, 36097), False, 'import time\n'), ((36112, 36123), 'time.time', 'time.time', ([], {}), '()\n', (36121, 36123), False, 'import time\n'), ((36239, 36251), 'time.clock', 'time.clock', ([], {}), '()\n', (36249, 36251), False, 'import time\n'), ((36266, 36277), 'time.time', 'time.time', ([], {}), '()\n', (36275, 36277), False, 'import time\n'), ((44841, 44878), 'numpy.allclose', 'np.allclose', (['smean', 'sigmean'], {'atol': '(0.0)'}), '(smean, sigmean, atol=0.0)\n', (44852, 44878), True, 'import numpy as np\n'), ((44912, 44949), 'numpy.allclose', 'np.allclose', (['rmean', 'refmean'], {'atol': '(0.0)'}), '(rmean, refmean, atol=0.0)\n', (44923, 44949), True, 'import numpy as np\n'), ((44985, 45024), 'numpy.allclose', 
'np.allclose', (['sscov', 'sscovdata'], {'atol': '(0.0)'}), '(sscov, sscovdata, atol=0.0)\n', (44996, 45024), True, 'import numpy as np\n'), ((45060, 45099), 'numpy.allclose', 'np.allclose', (['srcov', 'srcovdata'], {'atol': '(0.0)'}), '(srcov, srcovdata, atol=0.0)\n', (45071, 45099), True, 'import numpy as np\n'), ((45135, 45174), 'numpy.allclose', 'np.allclose', (['rrcov', 'rrcovdata'], {'atol': '(0.0)'}), '(rrcov, rrcovdata, atol=0.0)\n', (45146, 45174), True, 'import numpy as np\n'), ((45800, 45811), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (45805, 45811), False, 'from builtins import range\n'), ((47359, 47370), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (47364, 47370), False, 'from builtins import range\n'), ((47476, 47499), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (47484, 47499), True, 'import numpy as np\n'), ((47501, 47516), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47507, 47516), True, 'import numpy as np\n'), ((48461, 48494), 'numpy.take', 'np.take', (['raw._data', '[isl]'], {'axis': '(1)'}), '(raw._data, [isl], axis=1)\n', (48468, 48494), True, 'import numpy as np\n'), ((48763, 48789), 'numpy.dot', 'np.dot', (['weights[:]', 'refarr'], {}), '(weights[:], refarr)\n', (48769, 48789), True, 'import numpy as np\n'), ((49339, 49351), 'time.clock', 'time.clock', ([], {}), '()\n', (49349, 49351), False, 'import time\n'), ((49366, 49377), 'time.time', 'time.time', ([], {}), '()\n', (49375, 49377), False, 'import time\n'), ((49463, 49490), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (49468, 49490), False, 'from builtins import range\n'), ((50659, 50671), 'time.clock', 'time.clock', ([], {}), '()\n', (50669, 50671), False, 'import time\n'), ((50686, 50697), 'time.time', 'time.time', ([], {}), '()\n', (50695, 50697), False, 'import time\n'), ((6089, 6112), 'numpy.log10', 'np.log10', (['psds_all[idx]'], {}), '(psds_all[idx])\n', (6097, 6112), True, 'import numpy as np\n'), ((6207, 6227), 'numpy.std', 'np.std', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6213, 6227), True, 'import numpy as np\n'), ((7231, 7267), 'mne.io.Raw', 'mne.io.Raw', (['fnraw[idx]'], {'preload': '(True)'}), '(fnraw[idx], preload=True)\n', (7241, 7267), False, 'import mne\n'), ((7289, 7421), 'mne.Epochs', 'mne.Epochs', (['raw', 'events', 'event_id'], {'proj': '(False)', 'tmin': 'tmin_stim', 'tmax': 'tmax_stim', 'picks': 'picks', 'preload': '(True)', 'baseline': '(None, None)'}), '(raw, events, event_id, proj=False, tmin=tmin_stim, tmax=\n tmax_stim, picks=picks, preload=True, baseline=(None, None))\n', (7299, 7421), False, 'import mne\n'), ((7687, 7710), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, idx]'], {}), '(gs[1, idx])\n', (7698, 7710), True, 'import matplotlib.pyplot as plt\n'), ((8918, 8959), 'numpy.polyfit', 'polyfit', (['xval', 'raw._data[ipick, :]'], {'deg': '(1)'}), '(xval, raw._data[ipick, :], deg=1)\n', (8925, 8959), False, 'from numpy import poly1d, polyfit\n'), ((8980, 8993), 'numpy.poly1d', 'poly1d', (['coeff'], {}), '(coeff)\n', (8986, 8993), False, 'from numpy import poly1d, polyfit\n'), ((10221, 10269), 'mne.pick_channels_regexp', 'mne.pick_channels_regexp', (['fulllist', 'findlist[ir]'], {}), '(fulllist, findlist[ir])\n', (10245, 10269), False, 'import mne\n'), ((10770, 10801), 'numpy.setdiff1d', 'np.setdiff1d', (['chnpick', 'exclinds'], {}), '(chnpick, exclinds)\n', (10782, 10801), True, 'import numpy as np\n'), ((15280, 15330), 'warnings.warn', 'warnings.warn', 
(['"""Setting file name from Raw object"""'], {}), "('Setting file name from Raw object')\n", (15293, 15330), False, 'import warnings\n'), ((17408, 17497), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n", (17422, 17497), False, 'import mne\n'), ((18126, 18229), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'ref_meg': '(True)', 'meg': '(False)', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, ref_meg=True, meg=False, eeg=False, stim=False,\n eog=False, exclude='bads')\n", (18140, 18229), False, 'import mne\n'), ((20853, 20865), 'time.clock', 'time.clock', ([], {}), '()\n', (20863, 20865), False, 'import time\n'), ((20884, 20895), 'time.time', 'time.time', ([], {}), '()\n', (20893, 20895), False, 'import time\n'), ((21640, 21652), 'time.clock', 'time.clock', ([], {}), '()\n', (21650, 21652), False, 'import time\n'), ((21671, 21682), 'time.time', 'time.time', ([], {}), '()\n', (21680, 21682), False, 'import time\n'), ((23342, 23373), 'math.ceil', 'ceil', (["(tstep * raw.info['sfreq'])"], {}), "(tstep * raw.info['sfreq'])\n", (23346, 23373), False, 'from math import floor, ceil\n'), ((25546, 25558), 'time.clock', 'time.clock', ([], {}), '()\n', (25556, 25558), False, 'import time\n'), ((25577, 25588), 'time.time', 'time.time', ([], {}), '()\n', (25586, 25588), False, 'import time\n'), ((27916, 27927), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (27921, 27927), False, 'from builtins import range\n'), ((28375, 28408), 'numpy.take', 'np.take', (['raw._data', '[isl]'], {'axis': '(1)'}), '(raw._data, [isl], axis=1)\n', (28382, 28408), True, 'import numpy as np\n'), ((28711, 28737), 'numpy.dot', 'np.dot', (['weights[:]', 'refarr'], {}), '(weights[:], refarr)\n', (28717, 28737), True, 'import numpy as np\n'), ((29137, 29149), 'time.clock', 'time.clock', ([], {}), '()\n', (29147, 29149), False, 'import time\n'), ((29168, 29179), 'time.time', 'time.time', ([], {}), '()\n', (29177, 29179), False, 'import time\n'), ((29552, 29564), 'time.clock', 'time.clock', ([], {}), '()\n', (29562, 29564), False, 'import time\n'), ((29583, 29594), 'time.time', 'time.time', ([], {}), '()\n', (29592, 29594), False, 'import time\n'), ((29696, 29723), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (29701, 29723), False, 'from builtins import range\n'), ((38739, 38853), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (38747, 38853), False, 'from mne.epochs import _is_good\n'), ((39071, 39111), 'numpy.dot', 'np.dot', (['raw_segmentsig', 'raw_segmentref.T'], {}), '(raw_segmentsig, raw_segmentref.T)\n', (39077, 39111), True, 'import numpy as np\n'), ((39137, 39177), 'numpy.dot', 'np.dot', (['raw_segmentref', 'raw_segmentref.T'], {}), '(raw_segmentref, raw_segmentref.T)\n', (39143, 39177), True, 'import numpy as np\n'), ((39253, 39313), 'mne.utils.logger.info', 'logger.info', (["('Artefact detected in [%d, %d]' % (first, last))"], {}), "('Artefact detected in [%d, %d]' % (first, last))\n", (39264, 39313), False, 'from mne.utils import logger\n'), ((39996, 40007), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', 
(40001, 40007), False, 'from builtins import range\n'), ((40079, 40090), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (40084, 40090), False, 'from builtins import range\n'), ((40178, 40189), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (40183, 40189), False, 'from builtins import range\n'), ((40258, 40269), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (40263, 40269), False, 'from builtins import range\n'), ((43160, 43274), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (43168, 43274), False, 'from mne.epochs import _is_good\n'), ((43319, 43349), 'builtins.range', 'range', (['raw_segmentsig.shape[1]'], {}), '(raw_segmentsig.shape[1])\n', (43324, 43349), False, 'from builtins import range\n'), ((44108, 44168), 'mne.utils.logger.info', 'logger.info', (["('Artefact detected in [%d, %d]' % (first, last))"], {}), "('Artefact detected in [%d, %d]' % (first, last))\n", (44119, 44168), False, 'from mne.utils import logger\n'), ((46538, 46551), 'numpy.diag', 'np.diag', (['sinv'], {}), '(sinv)\n', (46545, 46551), True, 'import numpy as np\n'), ((46669, 46686), 'numpy.identity', 'np.identity', (['nref'], {}), '(nref)\n', (46680, 46686), True, 'import numpy as np\n'), ((47415, 47455), 'numpy.dot', 'np.dot', (['srcov[isig][:]', 'RRinvtr[iref][:]'], {}), '(srcov[isig][:], RRinvtr[iref][:])\n', (47421, 47455), True, 'import numpy as np\n'), ((48544, 48580), 'numpy.take', 'np.take', (['fltref._data', '[isl]'], {'axis': '(1)'}), '(fltref._data, [isl], axis=1)\n', (48551, 48580), True, 'import numpy as np\n'), ((7567, 7586), 'numpy.min', 'np.min', (['evoked.data'], {}), '(evoked.data)\n', (7573, 7586), True, 'import numpy as np\n'), ((7610, 7629), 'numpy.max', 'np.max', (['evoked.data'], {}), '(evoked.data)\n', (7616, 7629), True, 'import numpy as np\n'), ((15879, 15910), 'mne.io.Raw', 'mne.io.Raw', (['fname'], {'preload': '(True)'}), '(fname, preload=True)\n', (15889, 15910), False, 'import mne\n'), ((16183, 16206), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (16199, 16206), False, 'import os\n'), ((16210, 16236), 'os.path.basename', 'os.path.basename', (['fnintern'], {}), '(fnintern)\n', (16226, 16236), False, 'import os\n'), ((16254, 16380), 'warnings.warn', 'warnings.warn', (['"""The file name within the Raw object and provided\n fname are not the same. Please check again."""'], {}), '(\n """The file name within the Raw object and provided\n fname are not the same. 
Please check again."""\n )\n', (16267, 16380), False, 'import warnings\n'), ((16903, 16934), 'math.floor', 'floor', (["(tmin * raw.info['sfreq'])"], {}), "(tmin * raw.info['sfreq'])\n", (16908, 16934), False, 'from math import floor, ceil\n'), ((17050, 17080), 'math.ceil', 'ceil', (["(tmax * raw.info['sfreq'])"], {}), "(tmax * raw.info['sfreq'])\n", (17054, 17080), False, 'from math import floor, ceil\n'), ((23946, 24060), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (23954, 24060), False, 'from mne.epochs import _is_good\n'), ((24298, 24338), 'numpy.dot', 'np.dot', (['raw_segmentsig', 'raw_segmentref.T'], {}), '(raw_segmentsig, raw_segmentref.T)\n', (24304, 24338), True, 'import numpy as np\n'), ((24368, 24408), 'numpy.dot', 'np.dot', (['raw_segmentref', 'raw_segmentref.T'], {}), '(raw_segmentref, raw_segmentref.T)\n', (24374, 24408), True, 'import numpy as np\n'), ((24496, 24556), 'mne.utils.logger.info', 'logger.info', (["('Artefact detected in [%d, %d]' % (first, last))"], {}), "('Artefact detected in [%d, %d]' % (first, last))\n", (24507, 24556), False, 'from mne.utils import logger\n'), ((26705, 26716), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (26710, 26716), False, 'from builtins import range\n'), ((27245, 27262), 'numpy.identity', 'np.identity', (['nref'], {}), '(nref)\n', (27256, 27262), True, 'import numpy as np\n'), ((27264, 27286), 'numpy.dot', 'np.dot', (['RRinv', 'rrslope'], {}), '(RRinv, rrslope)\n', (27270, 27286), True, 'import numpy as np\n'), ((27976, 28018), 'numpy.dot', 'np.dot', (['srcovdata[isig, :]', 'RRinv[:, iref]'], {}), '(srcovdata[isig, :], RRinv[:, iref])\n', (27982, 28018), True, 'import numpy as np\n'), ((28466, 28502), 'numpy.take', 'np.take', (['fltref._data', '[isl]'], {'axis': '(1)'}), '(fltref._data, [isl], axis=1)\n', (28473, 28502), True, 'import numpy as np\n'), ((29050, 29068), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (29066, 29068), False, 'import sys\n'), ((31369, 31381), 'time.clock', 'time.clock', ([], {}), '()\n', (31379, 31381), False, 'import time\n'), ((31404, 31415), 'time.time', 'time.time', ([], {}), '()\n', (31413, 31415), False, 'import time\n'), ((35973, 35997), 'builtins.range', 'range', (["raw.info['nchan']"], {}), "(raw.info['nchan'])\n", (35978, 35997), False, 'from builtins import range\n'), ((46255, 46265), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (46262, 46265), True, 'import numpy as np\n'), ((47688, 47703), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47694, 47703), True, 'import numpy as np\n'), ((49783, 49897), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (49791, 49897), False, 'from mne.epochs import _is_good\n'), ((50400, 50435), 'numpy.less_equal', 'np.less_equal', (['sscovdata', 'sscovinit'], {}), '(sscovdata, sscovinit)\n', (50413, 50435), True, 'import numpy as np\n'), ((6355, 6375), 'numpy.min', 'np.min', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6361, 6375), True, 'import numpy as np\n'), ((6377, 6397), 'numpy.max', 'np.max', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6383, 6397), True, 'import numpy as np\n'), 
((6766, 6782), 'numpy.min', 'np.min', (['psd_mean'], {}), '(psd_mean)\n', (6772, 6782), True, 'import numpy as np\n'), ((6789, 6805), 'numpy.max', 'np.max', (['psd_mean'], {}), '(psd_mean)\n', (6795, 6805), True, 'import numpy as np\n'), ((10041, 10078), 'numpy.concatenate', 'np.concatenate', (['(chnpick, chnpicktmp)'], {}), '((chnpick, chnpicktmp))\n', (10055, 10078), True, 'import numpy as np\n'), ((10445, 10482), 'numpy.concatenate', 'np.concatenate', (['(chnpick, chnpicktmp)'], {}), '((chnpick, chnpicktmp))\n', (10459, 10482), True, 'import numpy as np\n'), ((15184, 15218), 'os.path.basename', 'os.path.basename', (['raw.filenames[0]'], {}), '(raw.filenames[0])\n', (15200, 15218), False, 'import os\n'), ((16069, 16103), 'os.path.basename', 'os.path.basename', (['raw.filenames[0]'], {}), '(raw.filenames[0])\n', (16085, 16103), False, 'import os\n'), ((20789, 20813), 'builtins.range', 'range', (["raw.info['nchan']"], {}), "(raw.info['nchan'])\n", (20794, 20813), False, 'from builtins import range\n'), ((26870, 26880), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (26877, 26880), True, 'import numpy as np\n'), ((27169, 27182), 'numpy.diag', 'np.diag', (['sinv'], {}), '(sinv)\n', (27176, 27182), True, 'import numpy as np\n'), ((30054, 30168), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (30062, 30168), False, 'from mne.epochs import _is_good\n'), ((45571, 45585), 'numpy.mean', 'np.mean', (['sscov'], {}), '(sscov)\n', (45578, 45585), True, 'import numpy as np\n'), ((47646, 47661), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47652, 47661), True, 'import numpy as np\n'), ((50495, 50513), 'numpy.mean', 'np.mean', (['sscovdata'], {}), '(sscovdata)\n', (50502, 50513), True, 'import numpy as np\n'), ((7977, 7986), 'builtins.str', 'str', (['info'], {}), '(info)\n', (7980, 7986), False, 'from builtins import str\n'), ((10593, 10609), 'numpy.sort', 'np.sort', (['chnpick'], {}), '(chnpick)\n', (10600, 10609), True, 'import numpy as np\n'), ((19940, 20016), 'warnings.warn', 'warnings.warn', (["('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)"], {}), "('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)\n", (19953, 20016), False, 'import warnings\n'), ((27554, 27576), 'numpy.dot', 'np.dot', (['RRinv', 'rrslope'], {}), '(RRinv, rrslope)\n', (27560, 27576), True, 'import numpy as np\n'), ((36196, 36207), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (36201, 36207), False, 'from builtins import range\n'), ((47721, 47736), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47727, 47736), True, 'import numpy as np\n'), ((26016, 26034), 'numpy.mean', 'np.mean', (['sscovdata'], {}), '(sscovdata)\n', (26023, 26034), True, 'import numpy as np\n'), ((30853, 30871), 'numpy.mean', 'np.mean', (['sscovdata'], {}), '(sscovdata)\n', (30860, 30871), True, 'import numpy as np\n'), ((21241, 21252), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (21246, 21252), False, 'from builtins import range\n'), ((21563, 21574), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (21568, 21574), False, 'from builtins import range\n'), ((30745, 30780), 'numpy.less_equal', 'np.less_equal', (['sscovdata', 'sscovinit'], {}), '(sscovdata, sscovinit)\n', (30758, 30780), True, 'import numpy as np\n')]
|
import argparse
import os
import logging
import string
import sys
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from sqlalchemy import create_engine
from .alphabet import ALPHABET_DNA
from .model import (
conv1d_densenet_regression_model,
compile_regression_model,
DenormalizedMAE,
)
from .load_sequences import (
TrainingSequence,
TestingSequence,
load_growth_temperatures,
assign_weight_to_batch_values,
compute_inverse_probability_weights,
)
from .utilities import (
SaveModelCallback,
generate_random_run_id,
)
from .validation import validate_model_on_test_set
DB_PATH = 'data/db/seq.db'
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
parser = argparse.ArgumentParser()
parser.add_argument('--run_id', type=str, default=None)
parser.add_argument('--resume', action='store_true')
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--n_epochs', type=int, default=10)
parser.add_argument('--db_path', type=str, default=None)
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--max_queue_size', type=int, default=50)
parser.add_argument('--max_sequence_length', type=int, default=5001)
parser.add_argument('--dtype', type=str, default='float32')
args = parser.parse_args()
run_id = args.run_id
resume = args.resume
learning_rate = args.learning_rate
batch_size = args.batch_size
n_epochs = args.n_epochs
db_path = args.db_path
verbose = args.verbose
max_queue_size = args.max_queue_size
max_sequence_length = args.max_sequence_length
dtype = args.dtype
if run_id is None and resume:
logger.error('Specify --run_id to resume run')
sys.exit(1)
elif run_id is None and not resume:
run_id = generate_random_run_id()
if db_path is None:
db_path = os.path.join(os.getcwd(), DB_PATH)
engine = create_engine(f'sqlite+pysqlite:///{db_path}')
logger.info(f'Run {run_id}')
output_folder = os.path.join(os.getcwd(), f'saved_models/{run_id}/')
model_path = os.path.join(output_folder, 'model.h5')
metadata_path = os.path.join(output_folder, 'metadata.json')
validation_output_path = os.path.join(output_folder, 'validation.csv')
log_dir = os.path.join(os.getcwd(), f'summary_log/{run_id}')
try:
os.makedirs(output_folder)
except FileExistsError:
pass
if resume:
with open(metadata_path, 'r') as f:
metadata = json.load(f)
else:
initial_epoch = 0
dropout_rate = 0.5
seed = np.random.randint(0, 9999)
encoding_size = 20
decoder_n_hidden = 100
growth_rate = 15
kernel_sizes = [3] + [5] * 9
strides = None
dilation_rates = None
n_layers = len(kernel_sizes)
l2_reg = 1e-5
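        # The hyperparameters below are collected into `metadata`, which is written to
        # metadata.json (presumably by SaveModelCallback) so that --resume can pick up
        # the exact same model configuration and epoch count later.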
metadata = {
'run_id': run_id,
'alphabet': ALPHABET_DNA,
'learning_rate': learning_rate,
'batch_size': batch_size,
'encoding_size': encoding_size,
'decoder_n_hidden': decoder_n_hidden,
'growth_rate': growth_rate,
'n_layers': n_layers,
'kernel_sizes': kernel_sizes,
'strides': strides,
'dilation_rates': dilation_rates,
'l2_reg': l2_reg,
'dropout': dropout_rate,
'n_epochs': initial_epoch,
'max_sequence_length': max_sequence_length,
'seed': seed,
}
logger.info('Loading data')
tmps, mean, std = load_growth_temperatures(engine)
max_sequence_length = metadata['max_sequence_length']
training_sequence = TrainingSequence(
engine,
batch_size=batch_size,
temperatures=tmps,
mean=mean,
std=std,
dtype=dtype,
alphabet=metadata['alphabet'],
max_sequence_length=max_sequence_length,
random_seed=metadata['seed'],
)
testing_sequence = TestingSequence(
engine,
batch_size=batch_size,
temperatures=tmps,
mean=mean,
std=std,
dtype=dtype,
alphabet=metadata['alphabet'],
max_sequence_length=max_sequence_length,
random_seed=metadata['seed'],
)
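    # Training and testing sequences share the same temperature statistics (mean/std),
    # alphabet, sequence-length cap and seed, so their normalized targets and losses
    # are directly comparable.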
model = conv1d_densenet_regression_model(
alphabet_size=len(metadata['alphabet']),
growth_rate=metadata['growth_rate'],
n_layers=metadata['n_layers'],
kernel_sizes=metadata['kernel_sizes'],
strides=metadata.get('strides'),
dilation_rates=metadata.get('dilation_rates'),
l2_reg=metadata['l2_reg'],
dropout=metadata['dropout'],
masking=True,
)
compile_regression_model(model, learning_rate)
if resume:
logger.info(f'Resuming from {model_path}')
model.load_weights(model_path)
initial_epoch = 0
epochs = n_epochs
if resume:
initial_epoch = metadata['n_epochs']
epochs += initial_epoch
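    # Keras interprets `epochs` as the epoch index to stop at rather than a count, so a
    # resumed run starts at metadata['n_epochs'] and trains for n_epochs more epochs.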
logger.info(f'Training run {run_id}')
model.fit(
training_sequence,
validation_data=testing_sequence,
max_queue_size=max_queue_size,
epochs=epochs,
initial_epoch=initial_epoch,
verbose=verbose,
callbacks=[
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
update_freq=1000,
embeddings_freq=0,
profile_batch=(2, 100),
),
SaveModelCallback(
model_path=model_path,
metadata_path=metadata_path,
metadata=metadata,
),
],
)
logger.info('Training completed')
logger.info('Validating on test set')
validation_df = validate_model_on_test_set(
engine,
model,
batch_size=batch_size,
max_queue_size=max_queue_size,
max_sequence_length=max_sequence_length,
)
validation_df.to_csv(validation_output_path)
logger.info('DONE')
if __name__ == '__main__':
main()
|
[
"json.load",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"os.getcwd",
"numpy.random.randint",
"sqlalchemy.create_engine",
"sys.exit",
"tensorflow.keras.callbacks.TensorBoard",
"os.path.join",
"logging.getLogger"
] |
[((677, 704), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (694, 704), False, 'import logging\n'), ((723, 817), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s (%(levelname)s) %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s (%(levelname)s) %(message)s')\n", (742, 817), False, 'import logging\n'), ((831, 856), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (854, 856), False, 'import argparse\n'), ((2123, 2169), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite+pysqlite:///{db_path}"""'], {}), "(f'sqlite+pysqlite:///{db_path}')\n", (2136, 2169), False, 'from sqlalchemy import create_engine\n'), ((2295, 2334), 'os.path.join', 'os.path.join', (['output_folder', '"""model.h5"""'], {}), "(output_folder, 'model.h5')\n", (2307, 2334), False, 'import os\n'), ((2355, 2399), 'os.path.join', 'os.path.join', (['output_folder', '"""metadata.json"""'], {}), "(output_folder, 'metadata.json')\n", (2367, 2399), False, 'import os\n'), ((2429, 2474), 'os.path.join', 'os.path.join', (['output_folder', '"""validation.csv"""'], {}), "(output_folder, 'validation.csv')\n", (2441, 2474), False, 'import os\n'), ((1937, 1948), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1945, 1948), False, 'import sys\n'), ((2238, 2249), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2247, 2249), False, 'import os\n'), ((2502, 2513), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2511, 2513), False, 'import os\n'), ((2558, 2584), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (2569, 2584), False, 'import os\n'), ((2800, 2826), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9999)'], {}), '(0, 9999)\n', (2817, 2826), True, 'import numpy as np\n'), ((2087, 2098), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2096, 2098), False, 'import os\n'), ((2709, 2721), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2718, 2721), False, 'import json\n'), ((5479, 5633), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'write_graph': '(False)', 'update_freq': '(1000)', 'embeddings_freq': '(0)', 'profile_batch': '(2, 100)'}), '(log_dir=log_dir, histogram_freq=0,\n write_graph=False, update_freq=1000, embeddings_freq=0, profile_batch=(\n 2, 100))\n', (5509, 5633), True, 'import tensorflow as tf\n')]
|
import numpy
import xraylib
def transfocator_guess_configuration(focal_f_target, deltas=[0.999998], radii=[500e-4],
initial_focal_distance=None, verbose=0):
nn = len(radii)
ncombinations = 2**nn
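    # Brute force: enumerate all 2**nn on/off combinations of the nn lens groups,
    # encoding each combination as an nn-bit binary string.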
Farray = numpy.zeros(ncombinations)
# Rarray = numpy.zeros(ncombinations)
for i in range(ncombinations):
str1 = numpy.binary_repr(i, width=nn)
if initial_focal_distance is None:
invF = 0
# invR = 0
else:
invF = 1.0 / initial_focal_distance
# invR = invF / (2 * deltas[0])
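        # Thin-lens model of a parabolic refractive lens: a lens group j that is switched
        # in (bit j of str1) contributes 1/f_j = 2*delta_j / radii[j], and the inverse
        # focal lengths of the stacked groups add up.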
for j in range(nn):
# if float(str1[j]) != 0:
invF += 2 * deltas[j] / radii[j] * float(str1[j])
# invR += 1 / radii[j] * float(str1[j])
# try:
# print(">>>", i, nn, j, str1, float(str1[j]), 1/invF, 1e6/invR, ">", 1e6*radii[j])
# except:
# print(">>>>>", i, nn, j, str1, float(str1[j]))
if invF != 0:
Farray[i] = 1.0 / invF
else:
Farray[i] = 1e5
# if invR != 0:
# Rarray[i] = 1.0 / invR
# else:
# Rarray[i] = 1e15
# print(Farray)
iarg = numpy.argmin( numpy.abs(focal_f_target - Farray))
if verbose:
# print(">>>> optimum: ", iarg )
print(">>>> optimum for f=%g (idx %d): " % (focal_f_target, iarg), numpy.binary_repr(iarg, width=nn), Farray[iarg] )
print(" cumulated radius: wanted R=%g found R=%g: " % (
1e6*_transfocator_calculate_radius(delta=deltas[0], focal_distance=focal_f_target),
1e6*_transfocator_calculate_radius(delta=deltas[0], focal_distance=Farray[iarg]),
# 1e6*Rarray[iarg]
))
print(" Initial focal distance: ", initial_focal_distance)
return Farray[iarg]
def _transfocator_calculate_focal_distance(deltas=[0.999998],nlenses=[1],radii=[500e-4]):
inverse_focal_distance = 0.0
for i,nlensesi in enumerate(nlenses):
if nlensesi > 0:
focal_distance_i = radii[i] / (2.*nlensesi*deltas[i])
inverse_focal_distance += 1.0/focal_distance_i
if inverse_focal_distance == 0:
return 99999999999999999999999999.
else:
return 1.0/inverse_focal_distance
def _transfocator_calculate_radius(delta=0.999998,focal_distance=10):
radius = focal_distance * (2.*delta)
return radius
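# The two helpers above implement f = R / (2 * N * delta) and R = 2 * delta * f for
# parabolic refractive lenses; results come out in the same length unit as the radii
# (delta is dimensionless). A minimal round-trip sketch (values illustrative only):
#   f = _transfocator_calculate_focal_distance(deltas=[delta], nlenses=[1], radii=[500e-6])
#   r = _transfocator_calculate_radius(delta=delta, focal_distance=f)   # gives back 500e-6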
if __name__ == "__main__":
symbol = "Be"
density = 1.845
photon_energy_ev = 7000.0
delta = 1.0 - xraylib.Refractive_Index_Re(symbol,photon_energy_ev*1e-3,density)
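    # delta is the refractive-index decrement of Be at this photon energy
    # (n = 1 - delta + i*beta); xraylib.Refractive_Index_Re returns Re(n) = 1 - delta.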
print("delta: %g" % delta)
# f1 in 15-85
# focal_f_target = 30.0
fwanted = numpy.linspace(2,85,50)
# fwanted = numpy.array([2])
# fpaper_tf1v = numpy.array([15.0, 42.2, 85.2, 42.2])
# fpaper_tf1h = numpy.array([46.1, 25.1, 46.1, 25.1])
#
# fpaper_tf2v = numpy.array([22.2, 55.6, 27.8, 55.7])
# fpaper_tf2h = numpy.array([26.5, 21.3, 31.8, 20.7])
fpaper_tf1v = numpy.array([ 42.2 ])
fpaper_tf1h = numpy.array([ 25.1 ])
fpaper_tf2v = numpy.array([ 55.7 ])
fpaper_tf2h = numpy.array([ 20.7 ])
# ## TRANSFOCATOR @ 65
# Transfocator 2D with 7 axis and 11 lenses
# - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
# - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
# - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
# - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
# - 1×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# - 2×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# - 4×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# ## TRANSFOCATOR @ 65
# Transfocator 1DH with 6 axis and 7 lenses
# - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
# - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
# - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
# - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
# - 1×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# - 2×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
radii_tf1v = [5000e-6, 2000e-6, 1000e-6, 500e-6, 200e-6, 200e-6/2, 200e-6/4]
radii_tf1h = [5000e-6, 2000e-6, 1000e-6, 500e-6, 200e-6, 200e-6/2]
# ## TRANSFOCATOR 2D @ 170
# Transfocator 2D with 9 axis and 20 lenses
# - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
# - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
# - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
# - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
# - 1×Be lenses, r=300.0 μm, D=1.0 mm (2R_0=1079 μm)
# - 1×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# - 2×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# - 4×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
# - 8×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
#
# ## TRANSFOCATOR 1DH @ 170
# Transfocator with 4 axis and 4 lenses
# - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
# - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
# - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
# - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
radii_tf2v = [5000e-6, 2000e-6, 1000e-6, 500e-6, 300e-6, 200e-6, 200e-6/2, 200e-6/4, 200e-6/8]
radii_tf2h = [5000e-6, 2000e-6, 1000e-6, 500e-6]
if False:
ffound = numpy.zeros_like(fwanted)
for ii,focal_f_target in enumerate(fwanted):
a = transfocator_guess_configuration(focal_f_target, deltas=[delta]*len(radii_tf1v), radii=radii_tf1v)
ffound[ii] = a
# print(ffound)
ffound2 = numpy.zeros_like(fwanted)
for ii,focal_f_target in enumerate(fwanted):
a = transfocator_guess_configuration(focal_f_target,
deltas=[delta]*len(radii_tf2v), radii=radii_tf2v)
ffound2[ii] = a
# print(ffound2)
#
# plot
#
from srxraylib.plot.gol import plot, set_qt
set_qt()
plot(fwanted, fwanted,
fwanted, ffound,
fwanted,ffound2,
fpaper_tf1v, fpaper_tf1v,
fpaper_tf2v, fpaper_tf2v,
fpaper_tf1h, fpaper_tf1h,
fpaper_tf2h, fpaper_tf2h,
xtitle="f wanted [m]", ytitle="f found [m]",
legend=["ideal","TF1","TF2","f wanted TF1 V","f wanted TF2 V","f wanted TF1 H","f wanted TF2 H"],
linestyle=[":",None,None,"","","",""],
marker=[None,None,None,'+','+','x','x'],
title="2D focusing")
#
# TF1
#
fwanted_2d = numpy.zeros_like(fpaper_tf1h)
ffound_2d = numpy.zeros_like(fpaper_tf1h)
for i in range(fwanted_2d.size):
fwanted_2d[i] = numpy.max( (fpaper_tf1h[i], fpaper_tf1v[i]))
tmp = transfocator_guess_configuration(fwanted_2d[i], deltas=[delta]*len(radii_tf1v),
radii=radii_tf1v, verbose=1)
ffound_2d[i] = tmp
fwanted_1d = numpy.zeros_like(fpaper_tf1h)
ffound_1d = numpy.zeros_like(fpaper_tf1h)
for i in range(fwanted_1d.size):
fwanted_1d[i] = fpaper_tf1h[i]
tmp = transfocator_guess_configuration(fwanted_1d[i], deltas=[delta]*len(radii_tf1h),
radii=radii_tf1h, verbose=1, initial_focal_distance=ffound_2d[i])
ffound_1d[i] = tmp
print("TF1 V 2D f wanted, f found: ", fpaper_tf1v,ffound_2d)
print("TF1 H 1D f wanted, f found: ", fpaper_tf1h,ffound_1d)
#
# TF2
#
fwanted_2d = numpy.zeros_like(fpaper_tf2h)
ffound_2d = numpy.zeros_like( fpaper_tf2h)
for i in range(fwanted_2d.size):
fwanted_2d[i] = numpy.max( (fpaper_tf2h[i], fpaper_tf2v[i]))
tmp = transfocator_guess_configuration(fwanted_2d[i], deltas=[delta]*len(radii_tf2v),
radii=radii_tf2v, verbose=1)
ffound_2d[i] = tmp
fwanted_1d = numpy.zeros_like(fpaper_tf2h)
ffound_1d = numpy.zeros_like(fpaper_tf2h)
for i in range(fwanted_1d.size):
fwanted_1d[i] = fpaper_tf2h[i]
tmp = transfocator_guess_configuration(fwanted_1d[i], deltas=[delta]*len(radii_tf2h),
radii=radii_tf2h, verbose=1, initial_focal_distance=ffound_2d[i])
ffound_1d[i] = tmp
print("TF2 V 2D f wanted, f found: ", fpaper_tf2v,ffound_2d)
print("TF2 H 1D f wanted, f found: ", fpaper_tf2h,ffound_1d)
print(1.0 / (1/5000 + 1/1000 + 1/5000 + 1/500))
|
[
"numpy.binary_repr",
"numpy.zeros_like",
"numpy.abs",
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.linspace",
"srxraylib.plot.gol.set_qt",
"srxraylib.plot.gol.plot",
"xraylib.Refractive_Index_Re"
] |
[((258, 284), 'numpy.zeros', 'numpy.zeros', (['ncombinations'], {}), '(ncombinations)\n', (269, 284), False, 'import numpy\n'), ((2747, 2772), 'numpy.linspace', 'numpy.linspace', (['(2)', '(85)', '(50)'], {}), '(2, 85, 50)\n', (2761, 2772), False, 'import numpy\n'), ((3063, 3082), 'numpy.array', 'numpy.array', (['[42.2]'], {}), '([42.2])\n', (3074, 3082), False, 'import numpy\n'), ((3103, 3122), 'numpy.array', 'numpy.array', (['[25.1]'], {}), '([25.1])\n', (3114, 3122), False, 'import numpy\n'), ((3143, 3162), 'numpy.array', 'numpy.array', (['[55.7]'], {}), '([55.7])\n', (3154, 3162), False, 'import numpy\n'), ((3183, 3202), 'numpy.array', 'numpy.array', (['[20.7]'], {}), '([20.7])\n', (3194, 3202), False, 'import numpy\n'), ((6493, 6522), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6509, 6522), False, 'import numpy\n'), ((6539, 6568), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6555, 6568), False, 'import numpy\n'), ((6891, 6920), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6907, 6920), False, 'import numpy\n'), ((6937, 6966), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6953, 6966), False, 'import numpy\n'), ((7449, 7478), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7465, 7478), False, 'import numpy\n'), ((7495, 7524), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7511, 7524), False, 'import numpy\n'), ((7848, 7877), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7864, 7877), False, 'import numpy\n'), ((7895, 7924), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7911, 7924), False, 'import numpy\n'), ((378, 408), 'numpy.binary_repr', 'numpy.binary_repr', (['i'], {'width': 'nn'}), '(i, width=nn)\n', (395, 408), False, 'import numpy\n'), ((1243, 1277), 'numpy.abs', 'numpy.abs', (['(focal_f_target - Farray)'], {}), '(focal_f_target - Farray)\n', (1252, 1277), False, 'import numpy\n'), ((2588, 2658), 'xraylib.Refractive_Index_Re', 'xraylib.Refractive_Index_Re', (['symbol', '(photon_energy_ev * 0.001)', 'density'], {}), '(symbol, photon_energy_ev * 0.001, density)\n', (2615, 2658), False, 'import xraylib\n'), ((5229, 5254), 'numpy.zeros_like', 'numpy.zeros_like', (['fwanted'], {}), '(fwanted)\n', (5245, 5254), False, 'import numpy\n'), ((5493, 5518), 'numpy.zeros_like', 'numpy.zeros_like', (['fwanted'], {}), '(fwanted)\n', (5509, 5518), False, 'import numpy\n'), ((5887, 5895), 'srxraylib.plot.gol.set_qt', 'set_qt', ([], {}), '()\n', (5893, 5895), False, 'from srxraylib.plot.gol import plot, set_qt\n'), ((5904, 6352), 'srxraylib.plot.gol.plot', 'plot', (['fwanted', 'fwanted', 'fwanted', 'ffound', 'fwanted', 'ffound2', 'fpaper_tf1v', 'fpaper_tf1v', 'fpaper_tf2v', 'fpaper_tf2v', 'fpaper_tf1h', 'fpaper_tf1h', 'fpaper_tf2h', 'fpaper_tf2h'], {'xtitle': '"""f wanted [m]"""', 'ytitle': '"""f found [m]"""', 'legend': "['ideal', 'TF1', 'TF2', 'f wanted TF1 V', 'f wanted TF2 V',\n 'f wanted TF1 H', 'f wanted TF2 H']", 'linestyle': "[':', None, None, '', '', '', '']", 'marker': "[None, None, None, '+', '+', 'x', 'x']", 'title': '"""2D focusing"""'}), "(fwanted, fwanted, fwanted, ffound, fwanted, ffound2, fpaper_tf1v,\n fpaper_tf1v, fpaper_tf2v, fpaper_tf2v, fpaper_tf1h, fpaper_tf1h,\n fpaper_tf2h, fpaper_tf2h, xtitle='f wanted [m]', ytitle='f found [m]',\n legend=['ideal', 'TF1', 'TF2', 'f wanted TF1 V', 'f 
wanted TF2 V',\n 'f wanted TF1 H', 'f wanted TF2 H'], linestyle=[':', None, None, '', '',\n '', ''], marker=[None, None, None, '+', '+', 'x', 'x'], title='2D focusing'\n )\n", (5908, 6352), False, 'from srxraylib.plot.gol import plot, set_qt\n'), ((6630, 6673), 'numpy.max', 'numpy.max', (['(fpaper_tf1h[i], fpaper_tf1v[i])'], {}), '((fpaper_tf1h[i], fpaper_tf1v[i]))\n', (6639, 6673), False, 'import numpy\n'), ((7587, 7630), 'numpy.max', 'numpy.max', (['(fpaper_tf2h[i], fpaper_tf2v[i])'], {}), '((fpaper_tf2h[i], fpaper_tf2v[i]))\n', (7596, 7630), False, 'import numpy\n'), ((1412, 1445), 'numpy.binary_repr', 'numpy.binary_repr', (['iarg'], {'width': 'nn'}), '(iarg, width=nn)\n', (1429, 1445), False, 'import numpy\n')]
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
from sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart import \
InstantiatedParetoFrontOptimalChart
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.execution_engine.data_manager import DataManager
import numpy as np
import pandas as pd
from climateeconomics.sos_processes.iam.witness.witness_optim_sub_process.usecase_witness_optim_sub import OPTIM_NAME, COUPLING_NAME, EXTRA_NAME
def post_processing_filters(execution_engine, namespace):
filters = []
chart_list = ['Temperature vs Welfare',
'CO2 Emissions vs Welfare', 'CO2 Emissions vs min(Utility)',
'CO2 tax per scenario', 'Temperature per scenario', 'Welfare per scenario',
'Utility per scenario', 'CO2 emissions per scenario', 'ppm(mean) vs Welfare',
'Total production per scenario', 'ppm per scenario', 'invest per scenario']
scatter_scenario = 'optimization scenarios'
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
scenario_key = execution_engine.dm.get_data_id(
f'{namespace_w}.scenario_list')
scenario_list = execution_engine.dm.data_dict[scenario_key][DataManager.VALUE]
filters.append(ChartFilter('Charts', chart_list, chart_list, 'Charts'))
filters.append(ChartFilter('Scenarios', scenario_list,
scenario_list, 'Scenarios'))
return filters
def post_processings(execution_engine, namespace, filters):
instanciated_charts = []
scatter_scenario = 'optimization scenarios'
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
scenario_key = execution_engine.dm.get_data_id(
f'{namespace_w}.scenario_list')
scenario_list = execution_engine.dm.data_dict[scenario_key][DataManager.VALUE]
# Overload default value with chart filter
if filters is not None:
for chart_filter in filters:
if chart_filter.filter_key == 'Charts':
graphs_list = chart_filter.selected_values
if chart_filter.filter_key == 'Scenarios':
selected_scenarios = chart_filter.selected_values
else:
graphs_list = ['Temperature vs Welfare',
                       'CO2 Emissions vs Welfare', 'CO2 Emissions vs min(Utility)',
                       'CO2 tax per scenario', 'Temperature per scenario', 'Welfare per scenario',
'Utility per scenario', 'CO2 emissions per scenario', 'ppm(mean) vs Welfare',
'Total production per scenario', 'ppm per scenario', 'invest per scenario']
selected_scenarios = scenario_list
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.year_start',
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.year_end', ]
year_start_dict, year_end_dict = get_df_per_scenario_dict(
execution_engine, df_paths, scenario_list)
year_start, year_end = year_start_dict[scenario_list[0]
], year_end_dict[scenario_list[0]]
years = np.arange(year_start, year_end).tolist()
"""
-------------
-------------
PARETO OPTIMAL CHART
-------------
-------------
"""
if 'Temperature vs Welfare' in graphs_list:
chart_name = f'Temperature in {year_end} vs Welfare'
x_axis_name = f'Temperature increase since industrial revolution in degree Celsius'
y_axis_name = 'Welfare'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Temperature_change.temperature_detail_df',
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df'
]
(temperature_df_dict, utility_df_dict) = get_df_per_scenario_dict(
execution_engine, df_paths, scenario_list)
last_temperature_dict, welfare_dict = {}, {}
for scenario in scenario_list:
last_temperature_dict[scenario] = temperature_df_dict[scenario]['temp_atmo'][year_end]
welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
new_pareto_chart = get_chart_pareto_front(last_temperature_dict, welfare_dict, scenario_list,
namespace_w, chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name)
instanciated_charts.append(new_pareto_chart)
if 'CO2 Emissions vs Welfare' in graphs_list:
chart_name = f'Sum of CO2 emissions vs Welfare'
x_axis_name = f'Summed CO2 emissions'
y_axis_name = f'Welfare in {year_end}'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carbon_emissions.CO2_emissions_detail_df',
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
]
(co2_emissions_df_dict, utility_df_dict) = get_df_per_scenario_dict(
execution_engine, df_paths)
summed_co2_emissions_dict, welfare_dict = {}, {}
for scenario in scenario_list:
summed_co2_emissions_dict[scenario] = co2_emissions_df_dict[scenario]['total_emissions'].sum(
)
welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
new_pareto_chart = get_chart_pareto_front(summed_co2_emissions_dict, welfare_dict, scenario_list,
namespace_w, chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name)
instanciated_charts.append(new_pareto_chart)
if 'CO2 Emissions vs min(Utility)' in graphs_list:
chart_name = f'CO2 Emissions vs minimum of Utility'
x_axis_name = f'Summed CO2 emissions'
y_axis_name = 'min( Utility )'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carbon_emissions.CO2_emissions_detail_df',
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
]
(co2_emissions_df_dict, utility_df_dict) = get_df_per_scenario_dict(
execution_engine, df_paths)
summed_co2_emissions_dict, min_utility_dict = {}, {}
for scenario in scenario_list:
summed_co2_emissions_dict[scenario] = co2_emissions_df_dict[scenario]['total_emissions'].sum(
)
min_utility_dict[scenario] = min(
utility_df_dict[scenario]['discounted_utility'])
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
new_pareto_chart = get_chart_pareto_front(summed_co2_emissions_dict, min_utility_dict, scenario_list,
namespace_w, chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name)
instanciated_charts.append(new_pareto_chart)
if 'ppm(mean) vs Welfare' in graphs_list:
chart_name = f'mean ppm vs Welfare'
x_axis_name = f'Mean ppm'
y_axis_name = f'Welfare in {year_end}'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carboncycle.carboncycle_detail_df',
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
]
(carboncycle_detail_df_dict, utility_df_dict) = get_df_per_scenario_dict(
execution_engine, df_paths)
mean_co2_ppm_dict, welfare_dict = {}, {}
for scenario in scenario_list:
mean_co2_ppm_dict[scenario] = carboncycle_detail_df_dict[scenario]['ppm'].mean(
)
welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
new_pareto_chart = get_chart_pareto_front(mean_co2_ppm_dict, welfare_dict, scenario_list,
namespace_w, chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name)
instanciated_charts.append(new_pareto_chart)
"""
-------------
-------------
SCENARIO COMPARISON CHART
-------------
-------------
"""
if 'CO2 tax per scenario' in graphs_list:
chart_name = 'CO2 tax per scenario'
x_axis_name = 'Years'
y_axis_name = 'Price ($/tCO2)'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.CO2_taxes', ]
(co2_taxes_df_dict,) = get_df_per_scenario_dict(
execution_engine, df_paths)
co2_tax_dict = {}
for scenario in scenario_list:
co2_tax_dict[scenario] = co2_taxes_df_dict[scenario]['CO2_tax'].values.tolist(
)
new_chart = get_scenario_comparison_chart(years, co2_tax_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
instanciated_charts.append(new_chart)
if 'Temperature per scenario' in graphs_list:
chart_name = 'Atmosphere temperature evolution per scenario'
x_axis_name = 'Years'
y_axis_name = 'Temperature (degrees Celsius above preindustrial)'
df_paths = [
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Temperature_change.temperature_detail_df', ]
(temperature_detail_df_dict,) = get_df_per_scenario_dict(
execution_engine, df_paths)
temperature_dict = {}
for scenario in scenario_list:
temperature_dict[scenario] = temperature_detail_df_dict[scenario]['temp_atmo'].values.tolist(
)
new_chart = get_scenario_comparison_chart(years, temperature_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
instanciated_charts.append(new_chart)
if 'Welfare per scenario' in graphs_list:
chart_name = 'Welfare per scenario'
y_axis_name = f'Welfare in {year_end}'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
]
(utility_df_dict,) = get_df_per_scenario_dict(execution_engine, df_paths)
welfare_dict = {}
for scenario in scenario_list:
welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
min_y = min(list(welfare_dict.values()))
max_y = max(list(welfare_dict.values()))
new_chart = TwoAxesInstanciatedChart('', y_axis_name,
[], [
min_y * 0.95, max_y * 1.05],
chart_name)
for scenario, welfare in welfare_dict.items():
if scenario in selected_scenarios:
serie = InstanciatedSeries(
[''],
[welfare], scenario, 'bar')
new_chart.series.append(serie)
instanciated_charts.append(new_chart)
if 'Utility per scenario' in graphs_list:
chart_name = 'Utility per scenario'
x_axis_name = 'Years'
y_axis_name = 'Discounted Utility (trill $)'
df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df', ]
(utility_df_dict,) = get_df_per_scenario_dict(execution_engine, df_paths)
utility_dict = {}
for scenario in scenario_list:
utility_dict[scenario] = utility_df_dict[scenario]['discounted_utility'].values.tolist(
)
new_chart = get_scenario_comparison_chart(years, utility_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
instanciated_charts.append(new_chart)
if 'CO2 emissions per scenario' in graphs_list:
chart_name = 'CO2 emissions per scenario'
x_axis_name = 'Years'
y_axis_name = 'Carbon emissions (Gtc)'
df_paths = [
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carbon_emissions.CO2_emissions_detail_df']
(co2_emissions_df_dict,) = get_df_per_scenario_dict(
execution_engine, df_paths)
co2_emissions_dict = {}
for scenario in scenario_list:
co2_emissions_dict[scenario] = co2_emissions_df_dict[scenario]['total_emissions'].values.tolist(
)
new_chart = get_scenario_comparison_chart(years, co2_emissions_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
instanciated_charts.append(new_chart)
if 'ppm per scenario' in graphs_list:
chart_name = 'Atmospheric concentrations parts per million per scenario'
x_axis_name = 'Years'
y_axis_name = 'Atmospheric concentrations parts per million'
df_paths = [
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carboncycle.carboncycle_detail_df']
(carboncycle_detail_df_dict,) = get_df_per_scenario_dict(
execution_engine, df_paths)
co2_ppm_dict, welfare_dict = {}, {}
for scenario in scenario_list:
co2_ppm_dict[scenario] = carboncycle_detail_df_dict[scenario]['ppm'].values.tolist(
)
new_chart = get_scenario_comparison_chart(years, co2_ppm_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
# Rockstrom Limit
ordonate_data = [450] * int(len(years) / 5)
abscisse_data = np.linspace(
year_start, year_end, int(len(years) / 5))
new_series = InstanciatedSeries(
abscisse_data.tolist(), ordonate_data, 'Rockstrom limit', 'scatter')
        note = {'Rockstrom limit': 'Scientific limit of the Earth'}
new_chart.annotation_upper_left = note
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'Total production per scenario' in graphs_list:
chart_name = 'Total production per scenario'
x_axis_name = 'Years'
y_axis_name = 'Total production'
df_paths = [
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.EnergyMix.energy_production_detailed']
(energy_production_detailed_df_dict,) = get_df_per_scenario_dict(
execution_engine, df_paths)
energy_production_detailed_dict = {}
for scenario in scenario_list:
energy_production_detailed_dict[scenario] = energy_production_detailed_df_dict[
scenario]['Total production (uncut)'].values.tolist()
new_chart = get_scenario_comparison_chart(years, energy_production_detailed_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
instanciated_charts.append(new_chart)
if 'invest per scenario' in graphs_list:
chart_name = f'investments per scenario'
x_axis_name = 'Years'
y_axis_name = f'total energy investment'
# Get the total energy investment
df_paths = [
f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.energy_investment']
(energy_investment_df_dict,) = get_df_per_scenario_dict(
execution_engine, df_paths)
energy_investment_dict = {}
for scenario in scenario_list:
energy_investment_dict[scenario] = energy_investment_df_dict[
scenario]['energy_investment'].values.tolist()
new_chart = get_scenario_comparison_chart(years, energy_investment_dict,
chart_name=chart_name,
x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
instanciated_charts.append(new_chart)
return instanciated_charts
def get_scenario_comparison_chart(x_list, y_dict, chart_name, x_axis_name, y_axis_name, selected_scenarios):
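    """Build a chart with one line series per selected scenario.

    x_list is the common abscissa (years); y_dict maps scenario name to the
    list of ordinate values; only scenarios in selected_scenarios are drawn.
    """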
min_x = min(x_list)
max_x = max(x_list)
min_y = min([min(list(y)) for y in y_dict.values()])
max_y = max([max(list(y)) for y in y_dict.values()])
new_chart = TwoAxesInstanciatedChart(x_axis_name, y_axis_name,
[min_x - 5, max_x + 5], [
min_y - max_y * 0.05, max_y * 1.05],
chart_name)
for scenario, y_values in y_dict.items():
if scenario in selected_scenarios:
new_series = InstanciatedSeries(
x_list, y_values, scenario, 'lines', True)
new_chart.series.append(new_series)
return new_chart
def get_chart_pareto_front(x_dict, y_dict, scenario_list, namespace_w, chart_name='Pareto Front',
x_axis_name='x', y_axis_name='y'):
'''
Function that, given two dictionaries and a scenario_list, returns a pareto front
:params: x_dict, dict containing the data for the x axis of the pareto front per scenario
:type: dict
:params: y_dict, dict containing the data for the y axis of the pareto front per scenario
:type: dict
:params: scenario_list, list containing the name of the scenarios
:type: list
:params: namespace_w, namespace of scatter scenario
:type: string
:params: chart_name, name of the chart used as title
:type: string
:returns: new_pareto_chart, the chart object to be displayed
:type: InstantiatedParetoFrontOptimalChart
'''
min_x = min(list(x_dict.values()))
max_x = max(list(x_dict.values()))
max_y = max(list(y_dict.values()))
min_y = min(list(y_dict.values()))
new_pareto_chart = InstantiatedParetoFrontOptimalChart(
abscissa_axis_name=f'{x_axis_name}',
primary_ordinate_axis_name=f'{y_axis_name}',
abscissa_axis_range=[min_x - max_x * 0.05, max_x * 1.05],
primary_ordinate_axis_range=[
min_y - max_y * 0.03, max_y * 1.03],
chart_name=chart_name)
for scenario in scenario_list:
new_serie = InstanciatedSeries([x_dict[scenario]],
[y_dict[scenario]],
scenario, 'scatter',
custom_data=f'{namespace_w}.{scenario}')
new_pareto_chart.add_serie(new_serie)
# Calculating and adding pareto front
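    # The points are sorted by abscissa; a point is kept on the front only if
    # its ordinate is at least as large as the last point already retained,
    # i.e. the front is the non-decreasing upper envelope of the scenarios.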
sorted_x = sorted(x_dict.values())
sorted_scenarios = []
for val in sorted_x:
for scen, x_val in x_dict.items():
if x_val == val:
sorted_scenarios.append(scen)
sorted_list = sorted([[x_dict[scenario], y_dict[scenario]]
for scenario in sorted_scenarios])
pareto_front = [sorted_list[0]]
for pair in sorted_list[1:]:
if pair[1] >= pareto_front[-1][1]:
pareto_front.append(pair)
pareto_front_serie = InstanciatedSeries(
[pair[0] for pair in pareto_front], [pair[1] for pair in pareto_front], 'Pareto front', 'lines')
new_pareto_chart.add_pareto_front_optimal(pareto_front_serie)
return new_pareto_chart
def get_df_per_scenario_dict(execution_engine, df_paths, scenario_list=None):
'''! Function to retrieve dataframes from all the scenarios given a specified path
@param execution_engine: Execution_engine, object from which the data is gathered
@param df_paths: list of string, containing the paths to access the df
@return df_per_scenario_dict: list of dict, with {key = scenario_name: value= requested_dataframe}
'''
df_per_scenario_dicts = [{} for _ in df_paths]
scatter_scenario = 'optimization scenarios'
namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
if not scenario_list:
scenario_key = execution_engine.dm.get_data_id(
f'{namespace_w}.scenario_list')
scenario_list = execution_engine.dm.data_dict[scenario_key][DataManager.VALUE]
for scenario in scenario_list:
for i, df_path in enumerate(df_paths):
df_per_scenario_dicts[i][scenario] = execution_engine.dm.get_value(
f'{namespace_w}.{scenario}.{df_path}')
return df_per_scenario_dicts
|
[
"sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart.InstantiatedParetoFrontOptimalChart",
"sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter",
"numpy.arange",
"sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart",
"sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries"
] |
[((17569, 17697), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['x_axis_name', 'y_axis_name', '[min_x - 5, max_x + 5]', '[min_y - max_y * 0.05, max_y * 1.05]', 'chart_name'], {}), '(x_axis_name, y_axis_name, [min_x - 5, max_x + 5],\n [min_y - max_y * 0.05, max_y * 1.05], chart_name)\n', (17593, 17697), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((19108, 19384), 'sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart.InstantiatedParetoFrontOptimalChart', 'InstantiatedParetoFrontOptimalChart', ([], {'abscissa_axis_name': 'f"""{x_axis_name}"""', 'primary_ordinate_axis_name': 'f"""{y_axis_name}"""', 'abscissa_axis_range': '[min_x - max_x * 0.05, max_x * 1.05]', 'primary_ordinate_axis_range': '[min_y - max_y * 0.03, max_y * 1.03]', 'chart_name': 'chart_name'}), "(abscissa_axis_name=f'{x_axis_name}',\n primary_ordinate_axis_name=f'{y_axis_name}', abscissa_axis_range=[min_x -\n max_x * 0.05, max_x * 1.05], primary_ordinate_axis_range=[min_y - max_y *\n 0.03, max_y * 1.03], chart_name=chart_name)\n", (19143, 19384), False, 'from sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart import InstantiatedParetoFrontOptimalChart\n'), ((20319, 20438), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (['[pair[0] for pair in pareto_front]', '[pair[1] for pair in pareto_front]', '"""Pareto front"""', '"""lines"""'], {}), "([pair[0] for pair in pareto_front], [pair[1] for pair in\n pareto_front], 'Pareto front', 'lines')\n", (20337, 20438), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((1986, 2041), 'sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter', 'ChartFilter', (['"""Charts"""', 'chart_list', 'chart_list', '"""Charts"""'], {}), "('Charts', chart_list, chart_list, 'Charts')\n", (1997, 2041), False, 'from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\n'), ((2062, 2129), 'sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter', 'ChartFilter', (['"""Scenarios"""', 'scenario_list', 'scenario_list', '"""Scenarios"""'], {}), "('Scenarios', scenario_list, scenario_list, 'Scenarios')\n", (2073, 2129), False, 'from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\n'), ((11511, 11602), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['""""""', 'y_axis_name', '[]', '[min_y * 0.95, max_y * 1.05]', 'chart_name'], {}), "('', y_axis_name, [], [min_y * 0.95, max_y * 1.05],\n chart_name)\n", (11535, 11602), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((19483, 19607), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (['[x_dict[scenario]]', '[y_dict[scenario]]', 'scenario', '"""scatter"""'], {'custom_data': 'f"""{namespace_w}.{scenario}"""'}), "([x_dict[scenario]], [y_dict[scenario]], scenario,\n 'scatter', custom_data=f'{namespace_w}.{scenario}')\n", (19501, 19607), False, 'from 
sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((3809, 3840), 'numpy.arange', 'np.arange', (['year_start', 'year_end'], {}), '(year_start, year_end)\n', (3818, 3840), True, 'import numpy as np\n'), ((17937, 17998), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (['x_list', 'y_values', 'scenario', '"""lines"""', '(True)'], {}), "(x_list, y_values, scenario, 'lines', True)\n", (17955, 17998), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((11866, 11918), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (["['']", '[welfare]', 'scenario', '"""bar"""'], {}), "([''], [welfare], scenario, 'bar')\n", (11884, 11918), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n')]
|
"""
======================
CSA Histogram Approach
======================
Static Chemical Shift Powder Pattern using a Histogram Approach
The equation for the static powder pattern for a simple chemical shift anisotropy interaction is given by the following equation
.. math::
    H = \\delta_{iso} + \\frac {1}{2} \\delta \\left ( 3 \\cos^2 \\theta - 1 - \\eta \\sin^2 \\theta \\cos 2 \\phi \\right )
There are a number of conventions for the assignment of :math:`\eta` and :math:`\delta`; we have used Haeberlen's convention.
If :math:`\sigma_{xx}`, :math:`\sigma_{yy}` and :math:`\sigma_{zz}` are the principal components of the chemical shielding tensor, then they must be ordered as follows.
.. math::
\\left | \\sigma_{zz} - \\sigma_{iso} \\right | \\ge \\left | \\sigma_{xx} - \\sigma_{iso} \\right | \\ge \\left | \\sigma_{yy} - \\sigma_{iso} \\right |
where
.. math::
\\sigma_{iso} = \\frac {1}{3} \\left ( \\sigma_{xx} + \\sigma_{yy} + \\sigma_{zz} \\right )
and then :math:`\delta` and :math:`\eta` are defined as follows
.. math::
    \\delta = \\sigma_{zz} - \\sigma_{iso}
and
.. math::
\\eta = \\frac {\\sigma_{xx}-\\sigma_{yy}}{\\sigma_{zz}-\\sigma_{iso}}
References
~~~~~~~~~~
- <NAME>, In Advances in Magnetic Resonance; Suppl. 1; <NAME>, Ed.; Academic Press, New York, 1976.
"""
import numpy as np
from matplotlib import pyplot as plt
import sys
def return_Hammersley_points_array( l, n, p):
"""
    l is the highest power of p used in the expansion (choose l so that p**l >= n)
    n is the maximum number of points
    p selects the coordinate: p=1 gives the regular spacing m/n, otherwise p is
    the (prime) base of the radical-inverse used for that Hammersley coordinate
returns
--------
np.array of double
"""
vvv = np.zeros(n)
for m in range(n):
m1=1*m
if p == 1:
vvv[m] = m1/n
else:
v = 0.0
for j in range(l,-1,-1):
num = m1//p**j
if num > 0:
m1 -= num*p**j
v += num / ( p ** (j+1) )
vvv[m]=v
return(vvv)
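# Example (illustrative): with p=2 the points reproduce the base-2 van der Corput
# sequence, e.g. return_Hammersley_points_array(3, 8, 2) gives the points
#   0, 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875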
def omega_cs( theta, phi, iso_cs=0.0, asymm_cs=100, eta_cs=1.0):
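    """Static CSA resonance position in Haeberlen's convention,

        omega(theta, phi) = iso_cs + (asymm_cs/2) * (3*cos(theta)**2 - 1
                                                     - eta_cs * sin(theta)**2 * cos(2*phi)),

    returned together with sin(theta), the solid-angle weight used when
    histogramming the powder pattern.
    """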
return (iso_cs +0.5* asymm_cs*(3.0 * (np.cos(theta)**2) -1.0 - eta_cs*(np.sin(theta)**2)*np.cos( 2.0 * phi ))), np.sin(theta)
if __name__ == "__main__":
# Define CSA powder pattern
# Principal components of the chemical shift shielding tensor
s_zz = -120.0
s_yy = -50.0
s_xx = 100.0
    # Check for Haeberlen's convention
iso_cs =(s_zz+s_yy+s_xx)/3.
if abs(s_zz-iso_cs) >= abs(s_xx-iso_cs) and abs(s_xx-iso_cs) >= abs(s_yy-iso_cs):
h_zz = s_zz
h_yy = s_yy
h_xx = s_xx
elif abs(s_zz-iso_cs) < abs(s_xx-iso_cs) and abs(s_xx-iso_cs) >= abs(s_yy-iso_cs):
h_zz = s_xx
h_yy = s_yy
h_xx = s_zz
else:
print("problem with assignment of cs tensors")
sys.exit()
asymm_cs = h_zz-iso_cs
eta_cs = (h_xx-h_yy)/(h_zz-iso_cs)
# Calculate Hammersley Points and Powder pattern
N_particles = 2**17
theta = return_Hammersley_points_array(22, N_particles, 2)
phi = return_Hammersley_points_array(22, N_particles, 3)
omega, solid_angle = omega_cs(theta*np.pi,2*np.pi*phi, eta_cs=eta_cs, iso_cs=iso_cs, asymm_cs=asymm_cs)
# Plot Powder pattern and use sin(theta) solid angle weighting
plt.hist(omega, bins = 200, weights=solid_angle, density=True);
plt.xlim(250.0, -250.0)
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.yticks([])
plt.xlabel('Hz', fontsize=14)
ax.annotate('$\sigma_{xx}$',
xy=(s_xx+5, 0.0030), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize=14)
ax.annotate('$\sigma_{yy}$',
xy=(s_yy+5, 0.012), xycoords='data',
xytext=(-50, 00), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize=14)
ax.annotate('$\sigma_{zz}$',
xy=(s_zz-5, 0.0044), xycoords='data',
xytext=(50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize=14)
plt.title(f"{N_particles} Hammersley Pts CSA Calculated Histogram", fontsize=14);
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"sys.exit"
] |
[((1718, 1729), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1726, 1729), True, 'import numpy as np\n'), ((3505, 3565), 'matplotlib.pyplot.hist', 'plt.hist', (['omega'], {'bins': '(200)', 'weights': 'solid_angle', 'density': '(True)'}), '(omega, bins=200, weights=solid_angle, density=True)\n', (3513, 3565), True, 'from matplotlib import pyplot as plt\n'), ((3573, 3596), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(250.0)', '(-250.0)'], {}), '(250.0, -250.0)\n', (3581, 3596), True, 'from matplotlib import pyplot as plt\n'), ((3606, 3615), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3613, 3615), True, 'from matplotlib import pyplot as plt\n'), ((3743, 3757), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3753, 3757), True, 'from matplotlib import pyplot as plt\n'), ((3762, 3791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hz"""'], {'fontsize': '(14)'}), "('Hz', fontsize=14)\n", (3772, 3791), True, 'from matplotlib import pyplot as plt\n'), ((4411, 4496), 'matplotlib.pyplot.title', 'plt.title', (['f"""{N_particles} Hammersley Pts CSA Calculated Histogram"""'], {'fontsize': '(14)'}), "(f'{N_particles} Hammersley Pts CSA Calculated Histogram', fontsize=14\n )\n", (4420, 4496), True, 'from matplotlib import pyplot as plt\n'), ((4502, 4512), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4510, 4512), True, 'from matplotlib import pyplot as plt\n'), ((2335, 2348), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2341, 2348), True, 'import numpy as np\n'), ((3011, 3021), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3019, 3021), False, 'import sys\n'), ((2312, 2329), 'numpy.cos', 'np.cos', (['(2.0 * phi)'], {}), '(2.0 * phi)\n', (2318, 2329), True, 'import numpy as np\n'), ((2261, 2274), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2267, 2274), True, 'import numpy as np\n'), ((2294, 2307), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2300, 2307), True, 'import numpy as np\n')]
|
import typing
import gettext
import numpy
from nion.data import Core
from nion.data import DataAndMetadata
from nion.swift.model import Symbolic
from nion.typeshed import API_1_0
_ = gettext.gettext
class AlignMultiDimensionalSequence:
label = _("Align multi-dimensional sequence")
inputs = {"si_sequence_data_item": {"label": _("Multi-dimensional sequence data item")},
"haadf_sequence_data_item": {"label": _("HAADF sequence data item")},
"align_index": {"label": _("Align to this slice")},
"align_region": {"label": _("Alignment bounds")},
}
outputs = {"aligned_haadf": {"label": _("Aligned HAADF sequence")},
"aligned_si": {"label": _("Aligned multi-dimensional sequence")}}
def __init__(self, computation, **kwargs):
self.computation = computation
def execute(self, si_sequence_data_item: API_1_0.DataItem, haadf_sequence_data_item: API_1_0.DataItem,
align_index: int, align_region: API_1_0.Graphic):
haadf_xdata = haadf_sequence_data_item.xdata
si_xdata = si_sequence_data_item.xdata
bounds = align_region.bounds
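        # Measure the shift of every HAADF frame relative to the frame at
        # align_index (restricted to the alignment bounds), then apply the
        # same shift to each HAADF frame and to the matching datum axes of the
        # multi-dimensional (SI) data below.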
translations = Core.function_sequence_measure_relative_translation(haadf_xdata,
haadf_xdata[align_index],
10, True, bounds=bounds)
sequence_shape = haadf_sequence_data_item.xdata.sequence_dimension_shape
c = int(numpy.product(sequence_shape))
haadf_result_data = numpy.empty_like(haadf_xdata.data)
si_result_data = numpy.empty_like(si_xdata.data)
align_data_shape = haadf_sequence_data_item.xdata.datum_dimension_shape
align_axes_start_index = None
for i in range(len(si_xdata.data_shape) - 1):
if align_data_shape == si_xdata.data_shape[i:i+2]:
align_axes_start_index = i
break
else:
raise RuntimeError('Could not find axes that match the HAADF shape in SI data item.')
si_translation = [0.0] * (len(si_xdata.data_shape) - len(sequence_shape))
align_axes_start_index -= len(sequence_shape)
assert align_axes_start_index >= 0
for i in range(c):
ii = numpy.unravel_index(i, sequence_shape)
current_xdata = DataAndMetadata.new_data_and_metadata(haadf_xdata.data[ii])
translation = translations.data[ii]
haadf_result_data[ii] = Core.function_shift(current_xdata, tuple(translation)).data
current_xdata = DataAndMetadata.new_data_and_metadata(si_xdata.data[ii])
si_translation[align_axes_start_index] = translation[0]
si_translation[align_axes_start_index+1] = translation[1]
si_result_data[ii] = Core.function_shift(current_xdata, tuple(si_translation)).data
self.__aligned_haadf_sequence = DataAndMetadata.new_data_and_metadata(haadf_result_data,
intensity_calibration=haadf_xdata.intensity_calibration,
dimensional_calibrations=haadf_xdata.dimensional_calibrations,
metadata=haadf_xdata.metadata,
data_descriptor=haadf_xdata.data_descriptor)
self.__aligned_si_sequence = DataAndMetadata.new_data_and_metadata(si_result_data,
intensity_calibration=si_xdata.intensity_calibration,
dimensional_calibrations=si_xdata.dimensional_calibrations,
metadata=si_xdata.metadata,
data_descriptor=si_xdata.data_descriptor)
def commit(self):
self.computation.set_referenced_xdata("aligned_haadf", self.__aligned_haadf_sequence)
self.computation.set_referenced_xdata("aligned_si", self.__aligned_si_sequence)
def align_multi_si(api: API_1_0.API, window: API_1_0.DocumentWindow):
selected_display_items = window._document_controller._get_two_data_sources()
error_msg = "Select a sequence of spectrum images and a sequence of scanned images in order to use this computation."
assert selected_display_items[0][0] is not None, error_msg
assert selected_display_items[1][0] is not None, error_msg
assert selected_display_items[0][0].data_item is not None, error_msg
assert selected_display_items[1][0].data_item is not None, error_msg
assert selected_display_items[0][0].data_item.is_sequence, error_msg
assert selected_display_items[1][0].data_item.is_sequence, error_msg
di_1 = selected_display_items[0][0].data_item
di_2 = selected_display_items[1][0].data_item
haadf_footprint = (2, True, 0, True)
di_1_footprint = (di_1.datum_dimension_count, di_1.is_sequence, di_1.collection_dimension_count,
                      di_1.metadata.get("hardware_source", {}).get("hardware_source_id", "") == "superscan")
di_2_footprint = (di_2.datum_dimension_count, di_2.is_sequence, di_2.collection_dimension_count,
                      di_2.metadata.get("hardware_source", {}).get("hardware_source_id", "") == "superscan")
di_1_points = 0
di_2_points = 0
print(di_1_footprint, di_2_footprint)
for i in range(len(haadf_footprint)):
di_1_points -= abs(haadf_footprint[i] - di_1_footprint[i])
di_2_points -= abs(haadf_footprint[i] - di_2_footprint[i])
print(di_1_points, di_2_points)
if di_1_points > di_2_points:
assert di_1_footprint[:-1] == haadf_footprint[:-1], error_msg
haadf_sequence_data_item = api._new_api_object(di_1)
si_sequence_data_item = api._new_api_object(di_2)
elif di_2_points > di_1_points:
assert di_2_footprint[:-1] == haadf_footprint[:-1], error_msg
haadf_sequence_data_item = api._new_api_object(di_2)
si_sequence_data_item = api._new_api_object(di_1)
else:
raise ValueError(error_msg)
print('here')
align_region = None
for graphic in haadf_sequence_data_item.graphics:
if graphic.graphic_type == 'rect-graphic':
align_region = graphic
break
if align_region is None:
align_region = haadf_sequence_data_item.add_rectangle_region(0.5, 0.5, 0.75, 0.75)
align_region.label = 'Alignment bounds'
print('here2')
align_index = haadf_sequence_data_item.display._display.display_data_channel.sequence_index
aligned_haadf = api.library.create_data_item_from_data(numpy.zeros((1,1,1)), title="Aligned {}".format(haadf_sequence_data_item.title))
aligned_si = api.library.create_data_item_from_data(numpy.zeros((1,1,1)), title="Aligned {}".format(si_sequence_data_item.title))
inputs = {"si_sequence_data_item": si_sequence_data_item,
"haadf_sequence_data_item": haadf_sequence_data_item,
"align_index": align_index,
"align_region": align_region}
computation = api.library.create_computation("nion.align_multi_d_sequence",
inputs=inputs,
outputs={"aligned_haadf": aligned_haadf,
"aligned_si": aligned_si})
computation._computation.source = aligned_si._data_item
window.display_data_item(aligned_haadf)
window.display_data_item(aligned_si)
print('here3')
Symbolic.register_computation_type("nion.align_multi_d_sequence", AlignMultiDimensionalSequence)
class AlignSequenceMenuItemDelegate:
def __init__(self, api):
self.__api = api
self.menu_id = "processing_menu" # required, specify menu_id where this item will go
self.menu_name = _("Processing") # optional, specify default name if not a standard menu
self.menu_before_id = "window_menu" # optional, specify before menu_id if not a standard menu
self.menu_item_name = _("Align sequence of multi-dimensional data") # menu item name
def menu_item_execute(self, window):
align_multi_si(self.__api, window)
class AlignSequenceExtension:
# required for Swift to recognize this as an extension class.
extension_id = "nion.experimental.align_multi_d_sequence"
def __init__(self, api_broker):
# grab the api object.
api = api_broker.get_api(version="~1.0")
self.__align_sequence_menu_item_ref = api.create_menu_item(AlignSequenceMenuItemDelegate(api))
def close(self):
# close will be called when the extension is unloaded. in turn, close any references so they get closed. this
# is not strictly necessary since the references will be deleted naturally when this object is deleted.
self.__align_sequence_menu_item_ref.close()
self.__align_sequence_menu_item_ref = None
|
[
"nion.swift.model.Symbolic.register_computation_type",
"nion.data.Core.function_sequence_measure_relative_translation",
"nion.data.DataAndMetadata.new_data_and_metadata",
"numpy.zeros",
"numpy.empty_like",
"numpy.unravel_index",
"numpy.product"
] |
[((7815, 7915), 'nion.swift.model.Symbolic.register_computation_type', 'Symbolic.register_computation_type', (['"""nion.align_multi_d_sequence"""', 'AlignMultiDimensionalSequence'], {}), "('nion.align_multi_d_sequence',\n AlignMultiDimensionalSequence)\n", (7849, 7915), False, 'from nion.swift.model import Symbolic\n'), ((1187, 1306), 'nion.data.Core.function_sequence_measure_relative_translation', 'Core.function_sequence_measure_relative_translation', (['haadf_xdata', 'haadf_xdata[align_index]', '(10)', '(True)'], {'bounds': 'bounds'}), '(haadf_xdata,\n haadf_xdata[align_index], 10, True, bounds=bounds)\n', (1238, 1306), False, 'from nion.data import Core\n'), ((1609, 1643), 'numpy.empty_like', 'numpy.empty_like', (['haadf_xdata.data'], {}), '(haadf_xdata.data)\n', (1625, 1643), False, 'import numpy\n'), ((1669, 1700), 'numpy.empty_like', 'numpy.empty_like', (['si_xdata.data'], {}), '(si_xdata.data)\n', (1685, 1700), False, 'import numpy\n'), ((2970, 3235), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['haadf_result_data'], {'intensity_calibration': 'haadf_xdata.intensity_calibration', 'dimensional_calibrations': 'haadf_xdata.dimensional_calibrations', 'metadata': 'haadf_xdata.metadata', 'data_descriptor': 'haadf_xdata.data_descriptor'}), '(haadf_result_data,\n intensity_calibration=haadf_xdata.intensity_calibration,\n dimensional_calibrations=haadf_xdata.dimensional_calibrations, metadata\n =haadf_xdata.metadata, data_descriptor=haadf_xdata.data_descriptor)\n', (3007, 3235), False, 'from nion.data import DataAndMetadata\n'), ((3572, 3824), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['si_result_data'], {'intensity_calibration': 'si_xdata.intensity_calibration', 'dimensional_calibrations': 'si_xdata.dimensional_calibrations', 'metadata': 'si_xdata.metadata', 'data_descriptor': 'si_xdata.data_descriptor'}), '(si_result_data, intensity_calibration\n =si_xdata.intensity_calibration, dimensional_calibrations=si_xdata.\n dimensional_calibrations, metadata=si_xdata.metadata, data_descriptor=\n si_xdata.data_descriptor)\n', (3609, 3824), False, 'from nion.data import DataAndMetadata\n'), ((6899, 6921), 'numpy.zeros', 'numpy.zeros', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (6910, 6921), False, 'import numpy\n'), ((7036, 7058), 'numpy.zeros', 'numpy.zeros', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (7047, 7058), False, 'import numpy\n'), ((1550, 1579), 'numpy.product', 'numpy.product', (['sequence_shape'], {}), '(sequence_shape)\n', (1563, 1579), False, 'import numpy\n'), ((2339, 2377), 'numpy.unravel_index', 'numpy.unravel_index', (['i', 'sequence_shape'], {}), '(i, sequence_shape)\n', (2358, 2377), False, 'import numpy\n'), ((2406, 2465), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['haadf_xdata.data[ii]'], {}), '(haadf_xdata.data[ii])\n', (2443, 2465), False, 'from nion.data import DataAndMetadata\n'), ((2638, 2694), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['si_xdata.data[ii]'], {}), '(si_xdata.data[ii])\n', (2675, 2694), False, 'from nion.data import DataAndMetadata\n')]
|
# Copyright (c) 2018-2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
ILRMA
=====
Blind Source Separation using Independent Low-Rank Matrix Analysis (ILRMA).
"""
import numpy as np
from .common import projection_back
def ilrma(
X,
n_src=None,
n_iter=20,
proj_back=False,
W0=None,
n_components=2,
return_filters=0,
callback=None,
):
"""
Implementation of ILRMA algorithm without partitioning function for BSS presented in
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, *Determined blind
source separation unifying independent vector analysis and nonnegative matrix
factorization,* IEEE/ACM Trans. ASLP, vol. 24, no. 9, pp. 1626-1641, Sept. 2016
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME> *Determined
Blind Source Separation with Independent Low-Rank Matrix Analysis,* in
Audio Source Separation, <NAME>, 2018, pp. 125-156.
Parameters
----------
X: ndarray (nframes, nfrequencies, nchannels)
        STFT representation of the observed signal
    n_src: int, optional
        The number of sources or independent components
n_iter: int, optional
The number of iterations (default 20)
proj_back: bool, optional
        Scaling on first mic by back projection (default False)
W0: ndarray (nfrequencies, nchannels, nchannels), optional
Initial value for demixing matrix
n_components: int
Number of components in the non-negative spectrum
return_filters: bool
If true, the function will return the demixing matrix too
callback: func
A callback function called every 10 iterations, allows to monitor convergence
Returns
-------
Returns an (nframes, nfrequencies, nsources) array. Also returns
the demixing matrix W (nfrequencies, nchannels, nsources)
    if ``return_filters`` keyword is True.
"""
n_frames, n_freq, n_chan = X.shape
# default to determined case
if n_src is None:
n_src = X.shape[2]
# Only supports determined case
assert n_chan == n_src, "There should be as many microphones as sources"
# initialize the demixing matrices
# The demixing matrix has the following dimensions (nfrequencies, nchannels, nsources),
if W0 is None:
W = np.array([np.eye(n_chan, n_src) for f in range(n_freq)], dtype=X.dtype)
else:
W = W0.copy()
# initialize the nonnegative matrixes with random values
T = np.array(0.1 + 0.9 * np.random.rand(n_src, n_freq, n_components))
V = np.array(0.1 + 0.9 * np.random.rand(n_src, n_frames, n_components))
R = np.zeros((n_src, n_freq, n_frames))
I = np.eye(n_src, n_src)
U = np.zeros((n_freq, n_src, n_chan, n_chan), dtype=X.dtype)
product = np.zeros((n_freq, n_chan, n_chan), dtype=X.dtype)
lambda_aux = np.zeros(n_src)
eps = 1e-15
eyes = np.tile(np.eye(n_chan, n_chan), (n_freq, 1, 1))
# Things are more efficient when the frequencies are over the first axis
Y = np.zeros((n_freq, n_src, n_frames), dtype=X.dtype)
X_original = X
X = X.transpose([1, 2, 0]).copy()
np.matmul(T, V.swapaxes(1, 2), out=R)
# Compute the demixed output
def demix(Y, X, W):
Y[:, :, :] = np.matmul(W, X)
demix(Y, X, W)
# P.shape == R.shape == (n_src, n_freq, n_frames)
P = np.power(abs(Y.transpose([1, 0, 2])), 2.0)
iR = 1 / R
for epoch in range(n_iter):
if callback is not None and epoch % 10 == 0:
Y_t = Y.transpose([2, 0, 1])
if proj_back:
z = projection_back(Y_t, X_original[:, :, 0])
callback(Y_t * np.conj(z[None, :, :]))
else:
callback(Y_t)
# simple loop as a start
for s in range(n_src):
## NMF
######
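            # Multiplicative updates of the NMF factors: T holds the spectral
            # basis and V the activations; R = T V^T is the low-rank model of
            # the source power spectrogram P.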
T[s, :, :] *= np.sqrt(
np.dot(P[s, :, :] * iR[s, :, :] ** 2, V[s, :, :])
/ np.dot(iR[s, :, :], V[s, :, :])
)
T[T < eps] = eps
R[s, :, :] = np.dot(T[s, :, :], V[s, :, :].T)
iR[s, :, :] = 1 / R[s, :, :]
V[s, :, :] *= np.sqrt(
np.dot(P[s, :, :].T * iR[s, :, :].T ** 2, T[s, :, :])
/ np.dot(iR[s, :, :].T, T[s, :, :])
)
V[V < eps] = eps
R[s, :, :] = np.dot(T[s, :, :], V[s, :, :].T)
iR[s, :, :] = 1 / R[s, :, :]
## IVA
######
# Compute Auxiliary Variable
# shape: (n_freq, n_chan, n_chan)
C = np.matmul((X * iR[s, :, None, :]), np.conj(X.swapaxes(1, 2)) / n_frames)
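            # Iterative-projection update of the demixing filter:
            # w_s <- (W C)^{-1} e_s, then normalize so that w_s^H C w_s = 1.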
WV = np.matmul(W, C)
W[:, s, :] = np.conj(np.linalg.solve(WV, eyes[:, :, s]))
# normalize
denom = np.matmul(
np.matmul(W[:, None, s, :], C[:, :, :]), np.conj(W[:, s, :, None])
)
W[:, s, :] /= np.sqrt(denom[:, :, 0])
demix(Y, X, W)
np.power(abs(Y.transpose([1, 0, 2])), 2.0, out=P)
for s in range(n_src):
lambda_aux[s] = 1 / np.sqrt(np.mean(P[s, :, :]))
W[:, :, s] *= lambda_aux[s]
P[s, :, :] *= lambda_aux[s] ** 2
R[s, :, :] *= lambda_aux[s] ** 2
T[s, :, :] *= lambda_aux[s] ** 2
Y = Y.transpose([2, 0, 1]).copy()
if proj_back:
z = projection_back(Y, X_original[:, :, 0])
Y *= np.conj(z[None, :, :])
if return_filters:
return Y, W
else:
return Y
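# Minimal usage sketch (illustrative only; stft/istft stand for any STFT
# analysis/synthesis helpers and are not part of this module):
#
#     X = stft(mixture)                  # shape (n_frames, n_freq, n_channels)
#     Y = ilrma(X, n_iter=50, n_components=4, proj_back=True)
#     estimates = istft(Y)               # one time-domain signal per source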
|
[
"numpy.conj",
"numpy.random.rand",
"numpy.zeros",
"numpy.mean",
"numpy.matmul",
"numpy.dot",
"numpy.eye",
"numpy.linalg.solve",
"numpy.sqrt"
] |
[((3600, 3635), 'numpy.zeros', 'np.zeros', (['(n_src, n_freq, n_frames)'], {}), '((n_src, n_freq, n_frames))\n', (3608, 3635), True, 'import numpy as np\n'), ((3644, 3664), 'numpy.eye', 'np.eye', (['n_src', 'n_src'], {}), '(n_src, n_src)\n', (3650, 3664), True, 'import numpy as np\n'), ((3673, 3729), 'numpy.zeros', 'np.zeros', (['(n_freq, n_src, n_chan, n_chan)'], {'dtype': 'X.dtype'}), '((n_freq, n_src, n_chan, n_chan), dtype=X.dtype)\n', (3681, 3729), True, 'import numpy as np\n'), ((3744, 3793), 'numpy.zeros', 'np.zeros', (['(n_freq, n_chan, n_chan)'], {'dtype': 'X.dtype'}), '((n_freq, n_chan, n_chan), dtype=X.dtype)\n', (3752, 3793), True, 'import numpy as np\n'), ((3811, 3826), 'numpy.zeros', 'np.zeros', (['n_src'], {}), '(n_src)\n', (3819, 3826), True, 'import numpy as np\n'), ((3988, 4038), 'numpy.zeros', 'np.zeros', (['(n_freq, n_src, n_frames)'], {'dtype': 'X.dtype'}), '((n_freq, n_src, n_frames), dtype=X.dtype)\n', (3996, 4038), True, 'import numpy as np\n'), ((3862, 3884), 'numpy.eye', 'np.eye', (['n_chan', 'n_chan'], {}), '(n_chan, n_chan)\n', (3868, 3884), True, 'import numpy as np\n'), ((4218, 4233), 'numpy.matmul', 'np.matmul', (['W', 'X'], {}), '(W, X)\n', (4227, 4233), True, 'import numpy as np\n'), ((6388, 6410), 'numpy.conj', 'np.conj', (['z[None, :, :]'], {}), '(z[None, :, :])\n', (6395, 6410), True, 'import numpy as np\n'), ((5017, 5049), 'numpy.dot', 'np.dot', (['T[s, :, :]', 'V[s, :, :].T'], {}), '(T[s, :, :], V[s, :, :].T)\n', (5023, 5049), True, 'import numpy as np\n'), ((5318, 5350), 'numpy.dot', 'np.dot', (['T[s, :, :]', 'V[s, :, :].T'], {}), '(T[s, :, :], V[s, :, :].T)\n', (5324, 5350), True, 'import numpy as np\n'), ((5626, 5641), 'numpy.matmul', 'np.matmul', (['W', 'C'], {}), '(W, C)\n', (5635, 5641), True, 'import numpy as np\n'), ((5890, 5913), 'numpy.sqrt', 'np.sqrt', (['denom[:, :, 0]'], {}), '(denom[:, :, 0])\n', (5897, 5913), True, 'import numpy as np\n'), ((3286, 3307), 'numpy.eye', 'np.eye', (['n_chan', 'n_src'], {}), '(n_chan, n_src)\n', (3292, 3307), True, 'import numpy as np\n'), ((3471, 3514), 'numpy.random.rand', 'np.random.rand', (['n_src', 'n_freq', 'n_components'], {}), '(n_src, n_freq, n_components)\n', (3485, 3514), True, 'import numpy as np\n'), ((3545, 3590), 'numpy.random.rand', 'np.random.rand', (['n_src', 'n_frames', 'n_components'], {}), '(n_src, n_frames, n_components)\n', (3559, 3590), True, 'import numpy as np\n'), ((5675, 5709), 'numpy.linalg.solve', 'np.linalg.solve', (['WV', 'eyes[:, :, s]'], {}), '(WV, eyes[:, :, s])\n', (5690, 5709), True, 'import numpy as np\n'), ((5783, 5822), 'numpy.matmul', 'np.matmul', (['W[:, None, s, :]', 'C[:, :, :]'], {}), '(W[:, None, s, :], C[:, :, :])\n', (5792, 5822), True, 'import numpy as np\n'), ((5824, 5849), 'numpy.conj', 'np.conj', (['W[:, s, :, None]'], {}), '(W[:, s, :, None])\n', (5831, 5849), True, 'import numpy as np\n'), ((4848, 4897), 'numpy.dot', 'np.dot', (['(P[s, :, :] * iR[s, :, :] ** 2)', 'V[s, :, :]'], {}), '(P[s, :, :] * iR[s, :, :] ** 2, V[s, :, :])\n', (4854, 4897), True, 'import numpy as np\n'), ((4916, 4947), 'numpy.dot', 'np.dot', (['iR[s, :, :]', 'V[s, :, :]'], {}), '(iR[s, :, :], V[s, :, :])\n', (4922, 4947), True, 'import numpy as np\n'), ((5143, 5196), 'numpy.dot', 'np.dot', (['(P[s, :, :].T * iR[s, :, :].T ** 2)', 'T[s, :, :]'], {}), '(P[s, :, :].T * iR[s, :, :].T ** 2, T[s, :, :])\n', (5149, 5196), True, 'import numpy as np\n'), ((5215, 5248), 'numpy.dot', 'np.dot', (['iR[s, :, :].T', 'T[s, :, :]'], {}), '(iR[s, :, :].T, T[s, :, :])\n', (5221, 5248), True, 'import 
numpy as np\n'), ((6068, 6087), 'numpy.mean', 'np.mean', (['P[s, :, :]'], {}), '(P[s, :, :])\n', (6075, 6087), True, 'import numpy as np\n'), ((4621, 4643), 'numpy.conj', 'np.conj', (['z[None, :, :]'], {}), '(z[None, :, :])\n', (4628, 4643), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Slit devices in AMOR"""
from numpy import arctan, radians, tan
from nicos.core import Attach, HasPrecision, Override, Param, Readable, \
dictwith, oneof, status
from nicos.core.utils import multiStatus
from nicos.devices.generic.slit import Slit, SlitAxis
from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, \
InterfaceLogicalMotorHandler
class SlitOpening(HasPrecision, SlitAxis):
"""Device to control the slit opening/height.
    Motor dXt moves the slit's top slab, in turn changing the slit
    opening. Motor dXb moves the whole slit up or down
    (X is the slit number).
    This device reads the current opening using the motor dXt and
    changes the opening using a combination of the motors dXt and dXb
    such that the center remains aligned.
"""
parameter_overrides = {
'unit': Override(mandatory=False, default='mm'),
'fmtstr': Override(userparam=False),
'maxage': Override(userparam=False),
'pollinterval': Override(userparam=False),
'warnlimits': Override(userparam=False),
'precision': Override(userparam=False, default=0.01),
'target': Override(volatile=True)
}
status_to_msg = {
status.ERROR: 'Error in %s',
status.BUSY: 'Moving: %s ...',
status.WARN: 'Warning in %s',
status.NOTREACHED: '%s did not reach target!',
status.UNKNOWN: 'Unknown status in %s!',
status.OK: 'Ready.'
}
def doReadTarget(self):
# Do not allow None as target
target = self._getFromCache('target', self.doRead)
return target if target is not None else self.doRead(0)
def _convertRead(self, positions):
return positions[3]
def _convertStart(self, target, current):
current_opening = current[3]
current_bottom = current[2]
new_bottom = current_bottom + 0.5 * (current_opening - target)
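        # Worked example (illustrative numbers only): with a current opening of
        # 2.0 mm and the bottom at 10.0 mm, a requested opening of 1.0 mm gives
        # new_bottom = 10.0 + 0.5 * (2.0 - 1.0) = 10.5 mm, so the quantity
        # bottom + opening / 2 (the slit center) stays at 11.0 mm.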
return current[0], current[1], new_bottom, target
def doStatus(self, maxage=0):
# Check for error and warning in the dependent devices
st_devs = multiStatus(self._adevs, maxage)
devs = [dname for dname, d in self._adevs.items()
if d.status()[0] == st_devs[0]]
if st_devs[0] in self.status_to_msg:
msg = self.status_to_msg[st_devs[0]]
if '%' in msg:
msg = msg % ', '.join(devs)
return st_devs[0], msg
return st_devs
def read_divergence(xs, slit):
left, _, bottom, top = slit
s = arctan(top/xs)
d = arctan(bottom/xs)
return s+d, 2*arctan(left/xs), (s-d)/2
def read_beam_shaping(slit):
left, right, bottom, top = slit
return top+bottom, right+left, (top-bottom)/2
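# Note on the two helpers above (comment added for clarity; the numbers are made up):
# read_divergence() turns slit-blade positions (left, _, bottom, top) at distance xs
# into the angular values that AmorSlitHandler.doRead() reports as 'div', 'dih' and
# 'did', e.g. top = bottom = 0.5 at xs = 2000.0 gives div ~= 2 * arctan(0.5 / 2000.0)
# ~= 5e-4 rad. read_beam_shaping() returns plain openings and an offset
# (top + bottom, right + left, (top - bottom) / 2) used for the 'd2*'/'d3*' values.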
class AmorSlitHandler(InterfaceLogicalMotorHandler):
attached_devices = {
'xs': Attach('Sample x position', Readable, missingok=True,
optional=True),
'mu': Attach('Sample omega', Readable, missingok=True,
optional=True),
'nu': Attach('Sample omega', Readable, missingok=True,
optional=True),
'ltz': Attach('Sample x position', Readable, missingok=True,
optional=True),
'xd2': Attach('Sample x position', Readable, missingok=True,
optional=True),
'xl': Attach('Deflector x position', Readable, missingok=True,
optional=True),
'mu_offset': Attach('Sample x position', Readable, missingok=True,
optional=True),
'kappa': Attach('Inclination of the beam after the Selene guide',
Readable, missingok=True, optional=True),
'soz_ideal': Attach('Ideal sample omega', Readable, missingok=True,
optional=True),
'xd3': Attach('', Readable, missingok=True, optional=True),
'slit1': Attach('slit 1', Slit, missingok=True, optional=True),
'slit2': Attach('slit 2', Slit, missingok=True, optional=True),
'slit2z': Attach('Z motor for slit 2', Readable, missingok=True,
optional=True),
'slit3': Attach('slit 3', Slit, missingok=True, optional=True),
'slit3z': Attach('Z motor for slit 3', Readable, missingok=True,
optional=True),
}
def doPreinit(self, mode):
self._status_devs = ['slit1', 'slit2', 'slit2z', 'slit3', 'slit3z']
InterfaceLogicalMotorHandler.doPreinit(self, mode)
self.valuetype = dictwith(div=float, did=float, dih=float)
def doRead(self, maxage=0):
result = {}
if self._is_active('diaphragm1'):
v, h, d = read_divergence(self._read_dev('xs'),
self._read_dev('slit1'))
result.update({'div': v, 'dih': h, 'did': d})
if self._is_active('diaphragm2'):
v, h, d = read_beam_shaping(self._read_dev('slit2'))
result.update({'d2v': v, 'd2h': h, 'd2d': d})
if self._is_active('diaphragm3'):
v, h, d = read_beam_shaping(self._read_dev('slit3'))
result.update({'d3v': v, 'd3h': h, 'd3d': d})
return result
def _get_move_list(self, targets):
positions = []
if self._is_active('diaphragm1'):
xs = self._read_dev('xs')
div = targets.get('div') or self._read_dev('div')
did = targets.get('did') or self._read_dev('did')
dih = targets.get('dih') or self._read_dev('dih')
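            # Convert the requested angles back into blade positions at distance xs:
            # top/bottom follow the vertical divergence +/- the 'did' offset, and the
            # horizontal blades take half the horizontal divergence.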
top = xs * tan(radians(div / 2 + did))
bottom = xs * tan(radians(div / 2 - did))
horizontal = xs * tan(radians(dih / 2))
positions.extend([(self._get_dev('slit1'),
(top, bottom, horizontal, horizontal))
])
if self._is_active('diaphragm2'):
v = targets.get('d2v')
d = targets.get('d2d')
h = targets.get('d2h')
ltz = self._read_dev('ltz')
xd2 = self._read_dev('xd2')
xl = self._read_dev('xl')
mu_offset = self._read_dev('mu_offset')
kappa = self._read_dev('kappa')
if self._is_active('deflector'):
z = ltz - (xd2 - xl) * tan(radians(self._read_dev('mu') +
mu_offset))
else:
z = xd2 * tan(radians(kappa))
top = 0.5 * (v + d)
bottom = 0.5 * (v - d)
horizontal = 0.5 * h
positions.extend([(self._get_dev('slit2z'), z),
(self._get_dev('slit2'),
(top, bottom, horizontal, horizontal))
])
if self._is_active('diaphragm3'):
soz_ideal = self._read_dev('soz_ideal')
xd3 = self._read_dev('xd3')
nu = self._read_dev('nu')
xs = self._read_dev('xs')
kappa = self._read_dev('kappa')
v = targets.get('d3v')
d = targets.get('d3d')
h = targets.get('d3h')
z = soz_ideal + (xd3 - xs) * tan(radians(nu + kappa))
top = 0.5 * (v + d)
bottom = 0.5 * (v - d)
horizontal = 0.5 * h
            positions.extend([(self._get_dev('slit3z'), z),
                              (self._get_dev('slit3'),
(top, bottom, horizontal, horizontal))
])
return positions
motortypes = ['div', 'dih', 'did', 'd2v', 'd2h', 'd2d', 'd3v', 'd3h', 'd3d']
class AmorSlitLogicalMotor(AmorLogicalMotor):
""" Class to represent the logical slit motors in AMOR.
"""
parameters = {
'motortype': Param('Type of motor %s' % ','.join(motortypes),
type=oneof(*motortypes), mandatory=True),
}
parameter_overrides = {
'unit': Override(mandatory=False, default='degree'),
'target': Override(volatile=True),
'abslimits': Override(mandatory=False, default=(-3.0, 3.0)),
'userlimits': Override(mandatory=False, default=(-3.0, 3.0))
}
attached_devices = {
'controller': Attach('Controller for the logical motors',
AmorSlitHandler)
}
def doRead(self, maxage=0):
return self._attached_controller.doRead(maxage)
|
[
"nicos.core.Attach",
"numpy.radians",
"nicos_sinq.amor.devices.logical_motor.InterfaceLogicalMotorHandler.doPreinit",
"nicos.core.dictwith",
"nicos.core.Override",
"numpy.arctan",
"nicos.core.utils.multiStatus",
"nicos.core.oneof"
] |
[((3633, 3649), 'numpy.arctan', 'arctan', (['(top / xs)'], {}), '(top / xs)\n', (3639, 3649), False, 'from numpy import arctan, radians, tan\n'), ((3656, 3675), 'numpy.arctan', 'arctan', (['(bottom / xs)'], {}), '(bottom / xs)\n', (3662, 3675), False, 'from numpy import arctan, radians, tan\n'), ((1957, 1996), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""mm"""'}), "(mandatory=False, default='mm')\n", (1965, 1996), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2016, 2041), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2024, 2041), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2061, 2086), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2069, 2086), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2112, 2137), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2120, 2137), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2161, 2186), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2169, 2186), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2209, 2248), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)', 'default': '(0.01)'}), '(userparam=False, default=0.01)\n', (2217, 2248), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2268, 2291), 'nicos.core.Override', 'Override', ([], {'volatile': '(True)'}), '(volatile=True)\n', (2276, 2291), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((3196, 3228), 'nicos.core.utils.multiStatus', 'multiStatus', (['self._adevs', 'maxage'], {}), '(self._adevs, maxage)\n', (3207, 3228), False, 'from nicos.core.utils import multiStatus\n'), ((3930, 3998), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (3936, 3998), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4036, 4099), 'nicos.core.Attach', 'Attach', (['"""Sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample omega', Readable, missingok=True, optional=True)\n", (4042, 4099), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4136, 4199), 'nicos.core.Attach', 'Attach', (['"""Sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample omega', Readable, missingok=True, optional=True)\n", (4142, 4199), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4237, 4305), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4243, 4305), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4344, 4412), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 
'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4350, 4412), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4450, 4521), 'nicos.core.Attach', 'Attach', (['"""Deflector x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Deflector x position', Readable, missingok=True, optional=True)\n", (4456, 4521), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4565, 4633), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4571, 4633), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4679, 4780), 'nicos.core.Attach', 'Attach', (['"""Inclination of the beam after the Selene guide"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Inclination of the beam after the Selene guide', Readable,\n missingok=True, optional=True)\n", (4685, 4780), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4823, 4892), 'nicos.core.Attach', 'Attach', (['"""Ideal sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Ideal sample omega', Readable, missingok=True, optional=True)\n", (4829, 4892), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4931, 4982), 'nicos.core.Attach', 'Attach', (['""""""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('', Readable, missingok=True, optional=True)\n", (4937, 4982), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5001, 5054), 'nicos.core.Attach', 'Attach', (['"""slit 1"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 1', Slit, missingok=True, optional=True)\n", (5007, 5054), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5073, 5126), 'nicos.core.Attach', 'Attach', (['"""slit 2"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 2', Slit, missingok=True, optional=True)\n", (5079, 5126), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5146, 5215), 'nicos.core.Attach', 'Attach', (['"""Z motor for slit 2"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Z motor for slit 2', Readable, missingok=True, optional=True)\n", (5152, 5215), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5259, 5312), 'nicos.core.Attach', 'Attach', (['"""slit 3"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 3', Slit, missingok=True, optional=True)\n", (5265, 5312), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5332, 5401), 'nicos.core.Attach', 'Attach', (['"""Z motor for slit 3"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Z motor for slit 3', Readable, missingok=True, optional=True)\n", (5338, 5401), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5554, 5604), 'nicos_sinq.amor.devices.logical_motor.InterfaceLogicalMotorHandler.doPreinit', 
'InterfaceLogicalMotorHandler.doPreinit', (['self', 'mode'], {}), '(self, mode)\n', (5592, 5604), False, 'from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, InterfaceLogicalMotorHandler\n'), ((5630, 5671), 'nicos.core.dictwith', 'dictwith', ([], {'div': 'float', 'did': 'float', 'dih': 'float'}), '(div=float, did=float, dih=float)\n', (5638, 5671), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9044, 9087), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""degree"""'}), "(mandatory=False, default='degree')\n", (9052, 9087), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9107, 9130), 'nicos.core.Override', 'Override', ([], {'volatile': '(True)'}), '(volatile=True)\n', (9115, 9130), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9153, 9199), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '(-3.0, 3.0)'}), '(mandatory=False, default=(-3.0, 3.0))\n', (9161, 9199), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9223, 9269), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '(-3.0, 3.0)'}), '(mandatory=False, default=(-3.0, 3.0))\n', (9231, 9269), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9324, 9384), 'nicos.core.Attach', 'Attach', (['"""Controller for the logical motors"""', 'AmorSlitHandler'], {}), "('Controller for the logical motors', AmorSlitHandler)\n", (9330, 9384), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((3692, 3709), 'numpy.arctan', 'arctan', (['(left / xs)'], {}), '(left / xs)\n', (3698, 3709), False, 'from numpy import arctan, radians, tan\n'), ((8956, 8974), 'nicos.core.oneof', 'oneof', (['*motortypes'], {}), '(*motortypes)\n', (8961, 8974), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((6656, 6678), 'numpy.radians', 'radians', (['(div / 2 + did)'], {}), '(div / 2 + did)\n', (6663, 6678), False, 'from numpy import arctan, radians, tan\n'), ((6710, 6732), 'numpy.radians', 'radians', (['(div / 2 - did)'], {}), '(div / 2 - did)\n', (6717, 6732), False, 'from numpy import arctan, radians, tan\n'), ((6768, 6784), 'numpy.radians', 'radians', (['(dih / 2)'], {}), '(dih / 2)\n', (6775, 6784), False, 'from numpy import arctan, radians, tan\n'), ((7536, 7550), 'numpy.radians', 'radians', (['kappa'], {}), '(kappa)\n', (7543, 7550), False, 'from numpy import arctan, radians, tan\n'), ((8275, 8294), 'numpy.radians', 'radians', (['(nu + kappa)'], {}), '(nu + kappa)\n', (8282, 8294), False, 'from numpy import arctan, radians, tan\n')]
|
import os
import numpy as np
import codecs
import pandas as pd
import json
from glob import glob
import cv2
import shutil
from sklearn.model_selection import train_test_split
#1. Path of the annotation (label) CSV
csv_file = "annotations.csv"
saved_path = "./VOCdevkit/VOC2007/"  # output path
image_save_path = "./JPEGImages/"
image_raw_parh = "./images/"
#2. Create the required folders
if not os.path.exists(saved_path + "Annotations"):
os.makedirs(saved_path + "Annotations")
if not os.path.exists(saved_path + "JPEGImages/"):
os.makedirs(saved_path + "JPEGImages/")
if not os.path.exists(saved_path + "ImageSets/Main/"):
os.makedirs(saved_path + "ImageSets/Main/")
#3. Gather the annotation entries to process
total_csv_annotations = {}
annotations = pd.read_csv(csv_file,header=None).values
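# Assumed CSV layout (inferred from the indexing below, not documented here):
# each row is image_path, xmin, ymin, xmax, ymax, class_label; a header row whose
# first field is 'image' is skipped in the loop further down.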
for annotation in annotations:
key = annotation[0].split(os.sep)[-1]
value = np.array([annotation[1:]])
if key in total_csv_annotations.keys():
total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)
else:
total_csv_annotations[key] = value
#4. Read the annotation info and write it to XML
for filename,label in total_csv_annotations.items():
#embed()
print(filename)
if filename == 'image':
continue
filename = filename.split(".png")[0]
print(filename)
height, width, channels = cv2.imread(image_raw_parh + filename + '.jpg').shape
#embed()
with codecs.open(saved_path + "Annotations/"+filename +".xml","w","utf-8") as xml:
xml.write('<annotation>\n')
xml.write('\t<folder>' + 'BCDD' + '</folder>\n')
xml.write('\t<filename>' + filename +'.jpg' + '</filename>\n')
xml.write('\t<source>\n')
xml.write('\t\t<database>BLOOD CELL DETECTION DATASET</database>\n')
xml.write('\t\t<annotation>UAV AutoLanding</annotation>\n')
xml.write('\t\t<image>' + 'BCDD_' + filename + '</image>\n')
xml.write('\t\t<flickrid>NULL</flickrid>\n')
xml.write('\t</source>\n')
xml.write('\t<owner>\n')
xml.write('\t\t<flickrid>NULL</flickrid>\n')
xml.write('\t\t<name>Alex</name>\n')
xml.write('\t</owner>\n')
xml.write('\t<size>\n')
xml.write('\t\t<width>'+ str(width) + '</width>\n')
xml.write('\t\t<height>'+ str(height) + '</height>\n')
xml.write('\t\t<depth>' + str(channels) + '</depth>\n')
xml.write('\t</size>\n')
xml.write('\t\t<segmented>0</segmented>\n')
if isinstance(label,float):
            ## empty annotation (no boxes for this image)
xml.write('</annotation>')
continue
for label_detail in label:
labels = label_detail
#embed()
xmin = int(float(labels[0]))
ymin = int(float(labels[1]))
xmax = int(float(labels[2]))
ymax = int(float(labels[3]))
label_ = labels[-1]
if xmax <= xmin:
pass
elif ymax <= ymin:
pass
else:
xml.write('\t<object>\n')
xml.write('\t\t<name>'+label_+'</name>\n')
xml.write('\t\t<pose>Unspecified</pose>\n')
xml.write('\t\t<truncated>0</truncated>\n')
xml.write('\t\t<difficult>0</difficult>\n')
xml.write('\t\t<bndbox>\n')
xml.write('\t\t\t<xmin>' + str(xmin) + '</xmin>\n')
xml.write('\t\t\t<ymin>' + str(ymin) + '</ymin>\n')
xml.write('\t\t\t<xmax>' + str(xmax) + '</xmax>\n')
xml.write('\t\t\t<ymax>' + str(ymax) + '</ymax>\n')
xml.write('\t\t</bndbox>\n')
xml.write('\t</object>\n')
print(filename,xmin,ymin,xmax,ymax,labels)
xml.write('</annotation>')
#6.split files for txt
txtsavepath = saved_path + "ImageSets/Main/"
ftrainval = open(txtsavepath+'/trainval.txt', 'w')
ftest = open(txtsavepath+'/test.txt', 'w')
ftrain = open(txtsavepath+'/train.txt', 'w')
fval = open(txtsavepath+'/val.txt', 'w')
total_files = glob(saved_path+"./Annotations/*.xml")
total_files = [i.split("\\")[-1].split(".xml")[0] for i in total_files]
#test_filepath = ""
for file in total_files:
ftrainval.write(file + "\n")
# move images to voc JPEGImages folder
for image in glob(image_raw_parh+"/*.jpg"):
shutil.copy(image,saved_path+image_save_path)
train_files,val_files = train_test_split(total_files,test_size=0.2,random_state=42)
for file in train_files:
ftrain.write(file + "\n")
#val
for file in val_files:
fval.write(file + "\n")
ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
|
[
"os.makedirs",
"numpy.concatenate",
"codecs.open",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"os.path.exists",
"cv2.imread",
"numpy.array",
"glob.glob",
"shutil.copy"
] |
[((3978, 4018), 'glob.glob', 'glob', (["(saved_path + './Annotations/*.xml')"], {}), "(saved_path + './Annotations/*.xml')\n", (3982, 4018), False, 'from glob import glob\n'), ((4220, 4251), 'glob.glob', 'glob', (["(image_raw_parh + '/*.jpg')"], {}), "(image_raw_parh + '/*.jpg')\n", (4224, 4251), False, 'from glob import glob\n'), ((4326, 4387), 'sklearn.model_selection.train_test_split', 'train_test_split', (['total_files'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(total_files, test_size=0.2, random_state=42)\n', (4342, 4387), False, 'from sklearn.model_selection import train_test_split\n'), ((352, 394), 'os.path.exists', 'os.path.exists', (["(saved_path + 'Annotations')"], {}), "(saved_path + 'Annotations')\n", (366, 394), False, 'import os\n'), ((400, 439), 'os.makedirs', 'os.makedirs', (["(saved_path + 'Annotations')"], {}), "(saved_path + 'Annotations')\n", (411, 439), False, 'import os\n'), ((447, 489), 'os.path.exists', 'os.path.exists', (["(saved_path + 'JPEGImages/')"], {}), "(saved_path + 'JPEGImages/')\n", (461, 489), False, 'import os\n'), ((495, 534), 'os.makedirs', 'os.makedirs', (["(saved_path + 'JPEGImages/')"], {}), "(saved_path + 'JPEGImages/')\n", (506, 534), False, 'import os\n'), ((542, 588), 'os.path.exists', 'os.path.exists', (["(saved_path + 'ImageSets/Main/')"], {}), "(saved_path + 'ImageSets/Main/')\n", (556, 588), False, 'import os\n'), ((594, 637), 'os.makedirs', 'os.makedirs', (["(saved_path + 'ImageSets/Main/')"], {}), "(saved_path + 'ImageSets/Main/')\n", (605, 637), False, 'import os\n'), ((695, 729), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': 'None'}), '(csv_file, header=None)\n', (706, 729), True, 'import pandas as pd\n'), ((821, 847), 'numpy.array', 'np.array', (['[annotation[1:]]'], {}), '([annotation[1:]])\n', (829, 847), True, 'import numpy as np\n'), ((4255, 4303), 'shutil.copy', 'shutil.copy', (['image', '(saved_path + image_save_path)'], {}), '(image, saved_path + image_save_path)\n', (4266, 4303), False, 'import shutil\n'), ((929, 988), 'numpy.concatenate', 'np.concatenate', (['(total_csv_annotations[key], value)'], {'axis': '(0)'}), '((total_csv_annotations[key], value), axis=0)\n', (943, 988), True, 'import numpy as np\n'), ((1285, 1331), 'cv2.imread', 'cv2.imread', (["(image_raw_parh + filename + '.jpg')"], {}), "(image_raw_parh + filename + '.jpg')\n", (1295, 1331), False, 'import cv2\n'), ((1360, 1434), 'codecs.open', 'codecs.open', (["(saved_path + 'Annotations/' + filename + '.xml')", '"""w"""', '"""utf-8"""'], {}), "(saved_path + 'Annotations/' + filename + '.xml', 'w', 'utf-8')\n", (1371, 1434), False, 'import codecs\n')]
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import argparse
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
def RF_Classifier(X, y, indep=None, fold=5, n_trees=100, out='RF_output'):
"""
Parameters:
----------
:param X: 2-D ndarray
:param y: 1-D ndarray
:param indep: 2-D ndarray, the first column is labels and the rest are feature values
:param fold: int, default 5
    :param n_trees: int, number of trees, default: 100
    :param out: str, output name prefix (not used inside this function)
:return:
info: str, the model parameters
cross-validation result: list with element is ndarray
independent result: ndarray, the first column is labels and the rest are prediction scores.
"""
    if indep is None:
        # Guard for the documented default: treat None as an empty independent set
        indep = np.zeros((0, 1))
    classes = sorted(list(set(y)))
prediction_result_cv = []
prediction_result_ind = np.array([])
if indep.shape[0] != 0:
prediction_result_ind = np.zeros((len(indep), len(classes) + 1))
prediction_result_ind[:, 0] = indep[:, 0]
folds = StratifiedKFold(fold).split(X, y)
for i, (trained, valided) in enumerate(folds):
train_y, train_X = y[trained], X[trained]
valid_y, valid_X = y[valided], X[valided]
model = RandomForestClassifier(n_estimators=n_trees, bootstrap=False)
rfc = model.fit(train_X, train_y)
scores = rfc.predict_proba(valid_X)
tmp_result = np.zeros((len(valid_y), len(classes) + 1))
tmp_result[:, 0], tmp_result[:, 1:] = valid_y, scores
prediction_result_cv.append(tmp_result)
# independent
if indep.shape[0] != 0:
prediction_result_ind[:, 1:] += rfc.predict_proba(indep[:, 1:])
if indep.shape[0] != 0:
prediction_result_ind[:, 1:] /= fold
header = 'n_trees: %d' % n_trees
return header, prediction_result_cv, prediction_result_ind
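if __name__ == '__main__':
    # Minimal usage sketch with synthetic data (shapes follow the docstring above;
    # the independent set carries its labels in the first column). Purely
    # illustrative, not part of the original pipeline.
    X = np.random.rand(100, 20)
    y = np.array([0] * 50 + [1] * 50)
    indep = np.hstack([y.reshape(-1, 1), np.random.rand(100, 20)])
    info, cv_results, ind_results = RF_Classifier(X, y, indep=indep, fold=5, n_trees=100)
    print(info, len(cv_results), ind_results.shape)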
|
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"sklearn.model_selection.StratifiedKFold"
] |
[((1023, 1035), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1031, 1035), True, 'import numpy as np\n'), ((1410, 1471), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_trees', 'bootstrap': '(False)'}), '(n_estimators=n_trees, bootstrap=False)\n', (1432, 1471), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1205, 1226), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['fold'], {}), '(fold)\n', (1220, 1226), False, 'from sklearn.model_selection import StratifiedKFold\n')]
|
import pytest
from gpmap import GenotypePhenotypeMap
import numpy as np
@pytest.fixture(scope="module")
def gpvolve_gpm():
wildtype = "AAA"
genotypes = [
"AAA",
"AAB",
"ABA",
"BAA",
"ABB",
"BAB",
"BBA",
"BBB"
]
mutations = {
0: ["A", "B"],
1: ["A", "B"],
2: ["A", "B"],
}
phenotypes = np.random.rand(len(genotypes))
gpm = GenotypePhenotypeMap(wildtype=wildtype,
genotype=genotypes,
phenotype=phenotypes)
return gpm
@pytest.fixture(scope="module")
def number_data():
return {"max_float":np.finfo(float).max,
"tiny_float":np.finfo(float).tiny,
"max_int":np.iinfo(int).max}
@pytest.fixture(scope="module")
def pop_gen_scenarios():
scenarios = []
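    # Build every combination of (fitness1, fitness2, population size) over a log
    # grid: fitnesses span 1e-10..1e10 in decade steps, populations 1..1e9.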
for f1 in 10**np.arange(-10,11,1,dtype=float):
for f2 in 10**np.arange(-10,11,1,dtype=float):
for pop in 10**np.arange(0,10,dtype=int):
scenarios.append((f1,f2,pop))
return scenarios
|
[
"gpmap.GenotypePhenotypeMap",
"pytest.fixture",
"numpy.iinfo",
"numpy.finfo",
"numpy.arange"
] |
[((75, 105), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (89, 105), False, 'import pytest\n'), ((605, 635), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (619, 635), False, 'import pytest\n'), ((791, 821), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (805, 821), False, 'import pytest\n'), ((443, 529), 'gpmap.GenotypePhenotypeMap', 'GenotypePhenotypeMap', ([], {'wildtype': 'wildtype', 'genotype': 'genotypes', 'phenotype': 'phenotypes'}), '(wildtype=wildtype, genotype=genotypes, phenotype=\n phenotypes)\n', (463, 529), False, 'from gpmap import GenotypePhenotypeMap\n'), ((885, 919), 'numpy.arange', 'np.arange', (['(-10)', '(11)', '(1)'], {'dtype': 'float'}), '(-10, 11, 1, dtype=float)\n', (894, 919), True, 'import numpy as np\n'), ((680, 695), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (688, 695), True, 'import numpy as np\n'), ((726, 741), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (734, 741), True, 'import numpy as np\n'), ((770, 783), 'numpy.iinfo', 'np.iinfo', (['int'], {}), '(int)\n', (778, 783), True, 'import numpy as np\n'), ((940, 974), 'numpy.arange', 'np.arange', (['(-10)', '(11)', '(1)'], {'dtype': 'float'}), '(-10, 11, 1, dtype=float)\n', (949, 974), True, 'import numpy as np\n'), ((1000, 1027), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {'dtype': 'int'}), '(0, 10, dtype=int)\n', (1009, 1027), True, 'import numpy as np\n')]
|
import os
import cv2
import torch
import time
import ujson as json
import numpy as np
from tqdm import tqdm
from torch.cuda.amp import autocast, GradScaler
from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, \
deeplabv1_resnet50, deeplabv1_resnet101, enet_
from utils.datasets import StandardLaneDetectionDataset
from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose
from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix
def erfnet_tusimple(num_classes, scnn=False, pretrained_weights='erfnet_encoder_pretrained.pth.tar'):
# Define ERFNet for TuSimple (With only ImageNet pretraining)
return erfnet_resnet(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.3, dropout_2=0.3, flattened_size=4400, scnn=scnn)
def erfnet_culane(num_classes, scnn=False, pretrained_weights='erfnet_encoder_pretrained.pth.tar'):
# Define ERFNet for CULane (With only ImageNet pretraining)
return erfnet_resnet(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.1, dropout_2=0.1, flattened_size=4500, scnn=scnn)
def vgg16_tusimple(num_classes, scnn=False, pretrained_weights='pytorch-pretrained'):
# Define Vgg16 for Tusimple (With only ImageNet pretraining)
return deeplabv1_vgg16(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.1, flattened_size=6160, scnn=scnn)
def vgg16_culane(num_classes, scnn=False, pretrained_weights='pytorch-pretrained'):
# Define Vgg16 for CULane (With only ImageNet pretraining)
return deeplabv1_vgg16(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.1, flattened_size=4500, scnn=scnn)
def resnet_tusimple(num_classes, backbone_name='resnet18', scnn=False):
# Define ResNets for Tusimple (With only ImageNet pretraining)
model_map = {
'resnet18': deeplabv1_resnet18,
'resnet34': deeplabv1_resnet34,
'resnet50': deeplabv1_resnet50,
'resnet101': deeplabv1_resnet101,
}
return model_map[backbone_name](pretrained=False, num_classes=num_classes, num_lanes=num_classes - 1,
channel_reduce=128, flattened_size=6160, scnn=scnn)
def resnet_culane(num_classes, backbone_name='resnet18', scnn=False):
# Define ResNets for CULane (With only ImageNet pretraining)
model_map = {
'resnet18': deeplabv1_resnet18,
'resnet34': deeplabv1_resnet34,
'resnet50': deeplabv1_resnet50,
'resnet101': deeplabv1_resnet101,
}
return model_map[backbone_name](pretrained=False, num_classes=num_classes, num_lanes=num_classes - 1,
channel_reduce=128, flattened_size=4500, scnn=scnn)
def enet_tusimple(num_classes, encoder_only, continue_from):
return enet_(num_classes=num_classes, num_lanes=num_classes - 1, dropout_1=0.01, dropout_2=0.1, flattened_size=4400,
encoder_only=encoder_only, pretrained_weights=continue_from if not encoder_only else None)
def enet_culane(num_classes, encoder_only, continue_from):
return enet_(num_classes=num_classes, num_lanes=num_classes - 1, dropout_1=0.01, dropout_2=0.1, flattened_size=4500,
encoder_only=encoder_only, pretrained_weights=continue_from if not encoder_only else None)
def init(batch_size, state, input_sizes, dataset, mean, std, base, workers=10):
# Return data_loaders
# depending on whether the state is
# 0: training
# 1: fast validation by mean IoU (validation set)
# 2: just testing (test set)
# 3: just testing (validation set)
# Transformations
# ! Can't use torchvision.Transforms.Compose
transforms_test = Compose(
[Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
ToTensor(),
Normalize(mean=mean, std=std)])
transforms_train = Compose(
[Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
RandomRotation(degrees=3),
ToTensor(),
Normalize(mean=mean, std=std)])
if state == 0:
data_set = StandardLaneDetectionDataset(root=base, image_set='train', transforms=transforms_train,
data_set=dataset)
data_loader = torch.utils.data.DataLoader(dataset=data_set, batch_size=batch_size,
num_workers=workers, shuffle=True)
validation_set = StandardLaneDetectionDataset(root=base, image_set='val',
transforms=transforms_test, data_set=dataset)
validation_loader = torch.utils.data.DataLoader(dataset=validation_set, batch_size=batch_size * 4,
num_workers=workers, shuffle=False)
return data_loader, validation_loader
elif state == 1 or state == 2 or state == 3:
image_sets = ['valfast', 'test', 'val']
data_set = StandardLaneDetectionDataset(root=base, image_set=image_sets[state - 1],
transforms=transforms_test, data_set=dataset)
data_loader = torch.utils.data.DataLoader(dataset=data_set, batch_size=batch_size,
num_workers=workers, shuffle=False)
return data_loader
else:
raise ValueError
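# Example call for the training state (state=0); the paths and sizes below are
# placeholders, not project defaults:
# train_loader, val_loader = init(batch_size=8, state=0,
#                                 input_sizes=((288, 800), (590, 1640)),
#                                 dataset='culane', mean=[0.485, 0.456, 0.406],
#                                 std=[0.229, 0.224, 0.225], base='/path/to/CULane')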
def train_schedule(writer, loader, validation_loader, val_num_steps, device, criterion, net, optimizer, lr_scheduler,
num_epochs, is_mixed_precision, input_sizes, exp_name, num_classes):
# Should be the same as segmentation, given customized loss classes
net.train()
epoch = 0
running_loss = 0.0
loss_num_steps = int(len(loader) / 10) if len(loader) > 10 else 1
if is_mixed_precision:
scaler = GradScaler()
# Training
best_validation = 0
while epoch < num_epochs:
net.train()
time_now = time.time()
for i, data in enumerate(loader, 0):
inputs, labels, lane_existence = data
inputs, labels, lane_existence = inputs.to(device), labels.to(device), lane_existence.to(device)
optimizer.zero_grad()
with autocast(is_mixed_precision):
# To support intermediate losses for SAD
loss = criterion(inputs, labels, lane_existence, net, input_sizes[0])
if is_mixed_precision:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
running_loss += loss.item()
current_step_num = int(epoch * len(loader) + i + 1)
# Record losses
if current_step_num % loss_num_steps == (loss_num_steps - 1):
print('[%d, %d] loss: %.4f' % (epoch + 1, i + 1, running_loss / loss_num_steps))
writer.add_scalar('training loss',
running_loss / loss_num_steps,
current_step_num)
running_loss = 0.0
# Record checkpoints
if validation_loader is not None:
if current_step_num % val_num_steps == (val_num_steps - 1) or \
current_step_num == num_epochs * len(loader):
# save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,
# filename=exp_name + '_' + str(current_step_num) + '.pt')
test_pixel_accuracy, test_mIoU = fast_evaluate(loader=validation_loader, device=device, net=net,
num_classes=num_classes, output_size=input_sizes[0],
is_mixed_precision=is_mixed_precision)
writer.add_scalar('test pixel accuracy',
test_pixel_accuracy,
current_step_num)
writer.add_scalar('test mIoU',
test_mIoU,
current_step_num)
net.train()
# Record best model (straight to disk)
if test_mIoU > best_validation:
best_validation = test_mIoU
save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,
filename=exp_name + '.pt')
epoch += 1
print('Epoch time: %.2fs' % (time.time() - time_now))
# For no-evaluation mode
if validation_loader is None:
save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler, filename=exp_name + '.pt')
def fast_evaluate(net, device, loader, is_mixed_precision, output_size, num_classes):
# Fast evaluation (e.g. on the validation set) by pixel-wise mean IoU
net.eval()
conf_mat = ConfusionMatrix(num_classes)
with torch.no_grad():
for image, target in tqdm(loader):
image, target = image.to(device), target.to(device)
with autocast(is_mixed_precision):
output = net(image)['out']
output = torch.nn.functional.interpolate(output, size=output_size, mode='bilinear', align_corners=True)
conf_mat.update(target.flatten(), output.argmax(1).flatten())
acc_global, acc, iu = conf_mat.compute()
print((
'global correct: {:.2f}\n'
'average row correct: {}\n'
'IoU: {}\n'
'mean IoU: {:.2f}'
).format(
acc_global.item() * 100,
['{:.2f}'.format(i) for i in (acc * 100).tolist()],
['{:.2f}'.format(i) for i in (iu * 100).tolist()],
iu.mean().item() * 100))
return acc_global.item() * 100, iu.mean().item() * 100
# Adapted from harryhan618/SCNN_Pytorch
def test_one_set(net, device, loader, is_mixed_precision, input_sizes, gap, ppl, thresh, dataset):
# Predict on 1 data_loader and save predictions for the official script
all_lanes = []
net.eval()
with torch.no_grad():
for images, filenames in tqdm(loader):
images = images.to(device)
with autocast(is_mixed_precision):
outputs = net(images)
prob_map = torch.nn.functional.interpolate(outputs['out'], size=input_sizes[0], mode='bilinear',
align_corners=True).softmax(dim=1)
existence = (outputs['lane'].sigmoid() > 0.5)
if dataset == 'tusimple': # At most 5 lanes
indices = (existence.sum(dim=1, keepdim=True) > 5).expand_as(existence) * \
(existence == existence.min(dim=1, keepdim=True).values)
existence[indices] = 0
# To CPU
prob_map = prob_map.cpu().numpy()
existence = existence.cpu().numpy()
# Get coordinates for lanes
for j in range(existence.shape[0]):
lane_coordinates = prob_to_lines(prob_map[j], existence[j], resize_shape=input_sizes[1],
gap=gap, ppl=ppl, thresh=thresh, dataset=dataset)
if dataset == 'culane':
# Save each lane to disk
dir_name = filenames[j][:filenames[j].rfind('/')]
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(filenames[j], "w") as f:
for lane in lane_coordinates:
if lane: # No printing for []
for (x, y) in lane:
print("{} {}".format(x, y), end=" ", file=f)
print(file=f)
elif dataset == 'tusimple':
# Save lanes to a single file
formatted = {
"h_samples": [160 + y * 10 for y in range(ppl)],
"lanes": lane_coordinates,
"run_time": 0,
"raw_file": filenames[j]
}
all_lanes.append(json.dumps(formatted))
else:
raise ValueError
if dataset == 'tusimple':
with open('./output/tusimple_pred.json', 'w') as f:
for lane in all_lanes:
print(lane, end="\n", file=f)
# Adapted from harryhan618/SCNN_Pytorch
# Note that in tensors we have indices start from 0 and in annotations coordinates start at 1
def get_lane(prob_map, gap, ppl, thresh, resize_shape=None, dataset='culane'):
"""
Arguments:
----------
prob_map: prob map for single lane, np array size (h, w)
resize_shape: reshape size target, (H, W)
Return:
----------
coords: x coords bottom up every gap px, 0 for non-exist, in resized shape
"""
if resize_shape is None:
resize_shape = prob_map.shape
h, w = prob_map.shape
H, W = resize_shape
coords = np.zeros(ppl)
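    # For each sampled row (bottom-up every `gap` pixels in target coordinates),
    # keep the column with the highest probability only if it exceeds `thresh`,
    # then rescale that column index from map width w to target width W.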
for i in range(ppl):
        if dataset == 'tusimple':  # Annotations start 10 pixels away from the bottom
y = int(h - (ppl - i) * gap / H * h)
        elif dataset == 'culane':  # Annotations start at the bottom
y = int(h - i * gap / H * h - 1) # Same as original SCNN code
else:
raise ValueError
if y < 0:
break
line = prob_map[y, :]
id = np.argmax(line)
if line[id] > thresh:
coords[i] = int(id / w * W)
if (coords > 0).sum() < 2:
coords = np.zeros(ppl)
return coords
# Adapted from harryhan618/SCNN_Pytorch
def prob_to_lines(seg_pred, exist, resize_shape=None, smooth=True, gap=20, ppl=None, thresh=0.3, dataset='culane'):
"""
Arguments:
----------
seg_pred: np.array size (num_classes, h, w)
resize_shape: reshape size target, (H, W)
exist: list of existence, e.g. [0, 1, 1, 0]
smooth: whether to smooth the probability or not
gap: y pixel gap for sampling
ppl: how many points for one lane
thresh: probability threshold
Return:
----------
coordinates: [x, y] list of lanes, e.g.: [ [[9, 569], [50, 549]] ,[[630, 569], [647, 549]] ]
"""
if resize_shape is None:
resize_shape = seg_pred.shape[1:] # seg_pred (num_classes, h, w)
_, h, w = seg_pred.shape
H, W = resize_shape
coordinates = []
if ppl is None:
ppl = round(H / 2 / gap)
for i in range(1, seg_pred.shape[0]):
prob_map = seg_pred[i, :, :]
if exist[i - 1]:
if smooth:
prob_map = cv2.blur(prob_map, (9, 9), borderType=cv2.BORDER_REPLICATE)
coords = get_lane(prob_map, gap, ppl, thresh, resize_shape, dataset=dataset)
if coords.sum() == 0:
continue
            if dataset == 'tusimple':  # Invalid sample points are encoded as a negative value, e.g. -2
coordinates.append([coords[j] if coords[j] > 0 else -2 for j in range(ppl)])
elif dataset == 'culane':
coordinates.append([[coords[j], H - j * gap - 1] for j in range(ppl) if coords[j] > 0])
else:
raise ValueError
return coordinates
def build_lane_detection_model(args, num_classes):
scnn = True if args.method == 'scnn' else False
if args.dataset == 'tusimple' and args.backbone == 'erfnet':
net = erfnet_tusimple(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'culane' and args.backbone == 'erfnet':
net = erfnet_culane(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'culane' and args.backbone == 'vgg16':
net = vgg16_culane(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'tusimple' and args.backbone == 'vgg16':
net = vgg16_tusimple(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'tusimple' and 'resnet' in args.backbone:
net = resnet_tusimple(num_classes=num_classes, scnn=scnn, backbone_name=args.backbone)
elif args.dataset == 'culane' and 'resnet' in args.backbone:
net = resnet_culane(num_classes=num_classes, scnn=scnn, backbone_name=args.backbone)
elif args.dataset == 'tusimple' and args.backbone == 'enet':
net = enet_tusimple(num_classes=num_classes, encoder_only=args.encoder_only,
continue_from=args.continue_from)
elif args.dataset == 'culane' and args.backbone == 'enet':
net = enet_culane(num_classes=num_classes, encoder_only=args.encoder_only,
continue_from=args.continue_from)
elif args.method == 'lstr':
pass
else:
raise ValueError
return net
|
[
"numpy.argmax",
"torchvision_models.segmentation.deeplabv1_vgg16",
"utils.all_utils_semseg.save_checkpoint",
"torch.no_grad",
"torchvision_models.segmentation.erfnet_resnet",
"torch.cuda.amp.autocast",
"torch.utils.data.DataLoader",
"os.path.exists",
"utils.all_utils_semseg.ConfusionMatrix",
"ujson.dumps",
"tqdm.tqdm",
"transforms.Resize",
"torchvision_models.segmentation.enet_",
"torch.cuda.amp.GradScaler",
"transforms.RandomRotation",
"utils.datasets.StandardLaneDetectionDataset",
"os.makedirs",
"numpy.zeros",
"cv2.blur",
"time.time",
"torch.nn.functional.interpolate",
"transforms.ToTensor",
"transforms.Normalize"
] |
[((706, 881), 'torchvision_models.segmentation.erfnet_resnet', 'erfnet_resnet', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.3)', 'dropout_2': '(0.3)', 'flattened_size': '(4400)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.3, dropout_2=0.3,\n flattened_size=4400, scnn=scnn)\n', (719, 881), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((1075, 1250), 'torchvision_models.segmentation.erfnet_resnet', 'erfnet_resnet', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.1)', 'dropout_2': '(0.1)', 'flattened_size': '(4500)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.1, dropout_2=0.1,\n flattened_size=4500, scnn=scnn)\n', (1088, 1250), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((1431, 1594), 'torchvision_models.segmentation.deeplabv1_vgg16', 'deeplabv1_vgg16', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.1)', 'flattened_size': '(6160)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.1, flattened_size=\n 6160, scnn=scnn)\n', (1446, 1594), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((1772, 1935), 'torchvision_models.segmentation.deeplabv1_vgg16', 'deeplabv1_vgg16', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.1)', 'flattened_size': '(4500)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.1, flattened_size=\n 4500, scnn=scnn)\n', (1787, 1935), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((3066, 3274), 'torchvision_models.segmentation.enet_', 'enet_', ([], {'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.01)', 'dropout_2': '(0.1)', 'flattened_size': '(4400)', 'encoder_only': 'encoder_only', 'pretrained_weights': '(continue_from if not encoder_only else None)'}), '(num_classes=num_classes, num_lanes=num_classes - 1, dropout_1=0.01,\n dropout_2=0.1, flattened_size=4400, encoder_only=encoder_only,\n pretrained_weights=continue_from if not encoder_only else None)\n', (3071, 3274), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((3357, 3565), 'torchvision_models.segmentation.enet_', 'enet_', ([], {'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.01)', 'dropout_2': '(0.1)', 'flattened_size': '(4500)', 'encoder_only': 'encoder_only', 'pretrained_weights': '(continue_from if not encoder_only else None)'}), '(num_classes=num_classes, 
num_lanes=num_classes - 1, dropout_1=0.01,\n dropout_2=0.1, flattened_size=4500, encoder_only=encoder_only,\n pretrained_weights=continue_from if not encoder_only else None)\n', (3362, 3565), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((9311, 9339), 'utils.all_utils_semseg.ConfusionMatrix', 'ConfusionMatrix', (['num_classes'], {}), '(num_classes)\n', (9326, 9339), False, 'from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix\n'), ((13473, 13486), 'numpy.zeros', 'np.zeros', (['ppl'], {}), '(ppl)\n', (13481, 13486), True, 'import numpy as np\n'), ((4343, 4453), 'utils.datasets.StandardLaneDetectionDataset', 'StandardLaneDetectionDataset', ([], {'root': 'base', 'image_set': '"""train"""', 'transforms': 'transforms_train', 'data_set': 'dataset'}), "(root=base, image_set='train', transforms=\n transforms_train, data_set=dataset)\n", (4371, 4453), False, 'from utils.datasets import StandardLaneDetectionDataset\n'), ((4519, 4626), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'data_set', 'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(True)'}), '(dataset=data_set, batch_size=batch_size,\n num_workers=workers, shuffle=True)\n', (4546, 4626), False, 'import torch\n'), ((4698, 4805), 'utils.datasets.StandardLaneDetectionDataset', 'StandardLaneDetectionDataset', ([], {'root': 'base', 'image_set': '"""val"""', 'transforms': 'transforms_test', 'data_set': 'dataset'}), "(root=base, image_set='val', transforms=\n transforms_test, data_set=dataset)\n", (4726, 4805), False, 'from utils.datasets import StandardLaneDetectionDataset\n'), ((4883, 5001), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'validation_set', 'batch_size': '(batch_size * 4)', 'num_workers': 'workers', 'shuffle': '(False)'}), '(dataset=validation_set, batch_size=batch_size *\n 4, num_workers=workers, shuffle=False)\n', (4910, 5001), False, 'import torch\n'), ((6070, 6082), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (6080, 6082), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((6192, 6203), 'time.time', 'time.time', ([], {}), '()\n', (6201, 6203), False, 'import time\n'), ((9019, 9122), 'utils.all_utils_semseg.save_checkpoint', 'save_checkpoint', ([], {'net': 'net', 'optimizer': 'optimizer', 'lr_scheduler': 'lr_scheduler', 'filename': "(exp_name + '.pt')"}), "(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,\n filename=exp_name + '.pt')\n", (9034, 9122), False, 'from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix\n'), ((9349, 9364), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9362, 9364), False, 'import torch\n'), ((9395, 9407), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (9399, 9407), False, 'from tqdm import tqdm\n'), ((10457, 10472), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10470, 10472), False, 'import torch\n'), ((10507, 10519), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (10511, 10519), False, 'from tqdm import tqdm\n'), ((13905, 13920), 'numpy.argmax', 'np.argmax', (['line'], {}), '(line)\n', (13914, 13920), True, 'import numpy as np\n'), ((14039, 14052), 'numpy.zeros', 'np.zeros', (['ppl'], {}), '(ppl)\n', (14047, 14052), True, 'import numpy as np\n'), ((3979, 4039), 'transforms.Resize', 'Resize', ([], {'size_image': 'input_sizes[0]', 'size_label': 'input_sizes[0]'}), '(size_image=input_sizes[0], 
size_label=input_sizes[0])\n', (3985, 4039), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4050, 4060), 'transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4058, 4060), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4071, 4100), 'transforms.Normalize', 'Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (4080, 4100), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4144, 4204), 'transforms.Resize', 'Resize', ([], {'size_image': 'input_sizes[0]', 'size_label': 'input_sizes[0]'}), '(size_image=input_sizes[0], size_label=input_sizes[0])\n', (4150, 4204), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4215, 4240), 'transforms.RandomRotation', 'RandomRotation', ([], {'degrees': '(3)'}), '(degrees=3)\n', (4229, 4240), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4251, 4261), 'transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4259, 4261), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4272, 4301), 'transforms.Normalize', 'Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (4281, 4301), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((5217, 5339), 'utils.datasets.StandardLaneDetectionDataset', 'StandardLaneDetectionDataset', ([], {'root': 'base', 'image_set': 'image_sets[state - 1]', 'transforms': 'transforms_test', 'data_set': 'dataset'}), '(root=base, image_set=image_sets[state - 1],\n transforms=transforms_test, data_set=dataset)\n', (5245, 5339), False, 'from utils.datasets import StandardLaneDetectionDataset\n'), ((5406, 5514), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'data_set', 'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(False)'}), '(dataset=data_set, batch_size=batch_size,\n num_workers=workers, shuffle=False)\n', (5433, 5514), False, 'import torch\n'), ((6460, 6488), 'torch.cuda.amp.autocast', 'autocast', (['is_mixed_precision'], {}), '(is_mixed_precision)\n', (6468, 6488), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((9490, 9518), 'torch.cuda.amp.autocast', 'autocast', (['is_mixed_precision'], {}), '(is_mixed_precision)\n', (9498, 9518), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((9588, 9686), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['output'], {'size': 'output_size', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(output, size=output_size, mode='bilinear',\n align_corners=True)\n", (9619, 9686), False, 'import torch\n'), ((10578, 10606), 'torch.cuda.amp.autocast', 'autocast', (['is_mixed_precision'], {}), '(is_mixed_precision)\n', (10586, 10606), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((15178, 15237), 'cv2.blur', 'cv2.blur', (['prob_map', '(9, 9)'], {'borderType': 'cv2.BORDER_REPLICATE'}), '(prob_map, (9, 9), borderType=cv2.BORDER_REPLICATE)\n', (15186, 15237), False, 'import cv2\n'), ((8922, 8933), 'time.time', 'time.time', ([], {}), '()\n', (8931, 8933), False, 'import time\n'), ((8725, 8828), 'utils.all_utils_semseg.save_checkpoint', 'save_checkpoint', ([], {'net': 'net', 'optimizer': 'optimizer', 'lr_scheduler': 'lr_scheduler', 'filename': "(exp_name + '.pt')"}), "(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,\n filename=exp_name + '.pt')\n", (8740, 8828), 
False, 'from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix\n'), ((10673, 10783), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (["outputs['out']"], {'size': 'input_sizes[0]', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(outputs['out'], size=input_sizes[0], mode=\n 'bilinear', align_corners=True)\n", (10704, 10783), False, 'import torch\n'), ((11794, 11818), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (11808, 11818), False, 'import os\n'), ((11844, 11865), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (11855, 11865), False, 'import os\n'), ((12612, 12633), 'ujson.dumps', 'json.dumps', (['formatted'], {}), '(formatted)\n', (12622, 12633), True, 'import ujson as json\n')]
|
import os.path as osp
from cytokit.ops.op import CytokitOp, get_tf_config
from cytokit.miq import prediction
from cytokit import data as cytokit_data
from cytokit import io as cytokit_io
import tensorflow as tf
import numpy as np
import logging
DEFAULT_PATCH_SIZE = 84
DEFAULT_N_CLASSES = 11
logger = logging.getLogger(__name__)
class CytokitFocalPlaneSelector(CytokitOp):
"""Best focal plan selection operation
Args:
config: Cytokit configuration
patch_size: size of patches within image to estimate quality for; defaults to 84, same as default
in originating classifier project
n_classes: number of different quality strata to predict logits for; defaults to 11, same as default
in originating classifier project
save_tile: Indicates whether or not best-focus tiles (with single z-plane) should be saved; default false
Note:
See https://github.com/google/microscopeimagequality for more details on the classifier used by this operation
"""
def __init__(self, config, patch_size=DEFAULT_PATCH_SIZE, n_classes=DEFAULT_N_CLASSES, save_tile=False):
super().__init__(config)
self.mqiest = None
self.graph = None
params = config.best_focus_params
self.patch_size = params.get('patch_size', patch_size)
self.n_classes = params.get('n_classes', n_classes)
self.focus_cycle, self.focus_channel = config.get_channel_coordinates(params['channel'])
self.save_tile = params.get('save_tile', save_tile)
def initialize(self):
model_path = cytokit_data.initialize_best_focus_model()
self.graph = tf.Graph()
self.mqiest = prediction.ImageQualityClassifier(
model_path, self.patch_size, self.n_classes,
graph=self.graph, session_config=get_tf_config(self)
)
return self
def shutdown(self):
self.mqiest._sess.close()
return self
def _run(self, tile, **kwargs):
# Subset to 3D stack based on reference cycle and channel
# * tile should have shape (cycles, z, channel, height, width)
img = tile[self.focus_cycle, :, self.focus_channel, :, :]
nz = img.shape[0]
scores = []
classes = []
for iz in range(nz):
pred = self.mqiest.predict(img[iz])
# Append n_classes length array of class probabilities ordered from 0 to n_classes
# where 0 is the best possible quality and n_classes the worst
scores.append(pred.probabilities)
classes.append(np.argmax(pred.probabilities))
# Calculate scores as probability weighted sum of (reversed) class indexes, giving one score per z-plane
scores = np.dot(np.array(scores), np.arange(self.n_classes)[::-1])
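        # (Added note) e.g. with n_classes = 3 and probabilities [0.7, 0.2, 0.1] the
        # score is 0.7*2 + 0.2*1 + 0.1*0 = 1.6; higher scores put more probability
        # mass on the better (lower-index) quality classes.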
assert len(scores) == nz, \
'Expecting {} scores but only {} were found (scores = {})'.format(nz, len(scores), scores)
# Reverse class designations
classes = self.n_classes - np.array(classes) - 1
# Determine best z plane as index with highest score
best_z = np.argmax(scores)
# Record and log classification information
self.record({'scores': scores, 'classes': classes, 'best_z': best_z})
logger.debug('Best focal plane: z = {} (score: {})'.format(best_z, scores.max()))
# Subset tile to best focal plane
best_focus_tile = tile[:, [best_z], :, :, :]
# Return best focus tile and other context
return best_focus_tile, best_z, scores
def save(self, tile_indices, output_dir, data):
region_index, tile_index, tx, ty = tile_indices
best_focus_tile, best_z, scores = data
path = cytokit_io.get_best_focus_img_path(region_index, tx, ty, best_z)
if self.save_tile:
cytokit_io.save_tile(osp.join(output_dir, path), best_focus_tile, config=self.config)
return [path]
|
[
"numpy.argmax",
"cytokit.data.initialize_best_focus_model",
"cytokit.io.get_best_focus_img_path",
"numpy.array",
"numpy.arange",
"tensorflow.Graph",
"cytokit.ops.op.get_tf_config",
"os.path.join",
"logging.getLogger"
] |
[((302, 329), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (319, 329), False, 'import logging\n'), ((1594, 1636), 'cytokit.data.initialize_best_focus_model', 'cytokit_data.initialize_best_focus_model', ([], {}), '()\n', (1634, 1636), True, 'from cytokit import data as cytokit_data\n'), ((1658, 1668), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1666, 1668), True, 'import tensorflow as tf\n'), ((3118, 3135), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3127, 3135), True, 'import numpy as np\n'), ((3723, 3787), 'cytokit.io.get_best_focus_img_path', 'cytokit_io.get_best_focus_img_path', (['region_index', 'tx', 'ty', 'best_z'], {}), '(region_index, tx, ty, best_z)\n', (3757, 3787), True, 'from cytokit import io as cytokit_io\n'), ((2754, 2770), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2762, 2770), True, 'import numpy as np\n'), ((1828, 1847), 'cytokit.ops.op.get_tf_config', 'get_tf_config', (['self'], {}), '(self)\n', (1841, 1847), False, 'from cytokit.ops.op import CytokitOp, get_tf_config\n'), ((2585, 2614), 'numpy.argmax', 'np.argmax', (['pred.probabilities'], {}), '(pred.probabilities)\n', (2594, 2614), True, 'import numpy as np\n'), ((2772, 2797), 'numpy.arange', 'np.arange', (['self.n_classes'], {}), '(self.n_classes)\n', (2781, 2797), True, 'import numpy as np\n'), ((3017, 3034), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (3025, 3034), True, 'import numpy as np\n'), ((3848, 3874), 'os.path.join', 'osp.join', (['output_dir', 'path'], {}), '(output_dir, path)\n', (3856, 3874), True, 'import os.path as osp\n')]
|
import torch
import numpy as np
class Uniform_Buffer:
def __init__(self, buffer_size, state_shape, action_shape, device, mix=1):
self._n = 0
self._p = 0
self.mix = mix
self.buffer_size = buffer_size
self.total_size = mix * buffer_size
self.states = torch.empty(
(self.total_size, *state_shape), dtype=torch.float, device=device)
self.actions = torch.empty(
(self.total_size, *action_shape), dtype=torch.float, device=device)
self.rewards = torch.empty(
(self.total_size, 1), dtype=torch.float, device=device)
self.dones = torch.empty(
(self.total_size, 1), dtype=torch.float, device=device)
self.log_pis = torch.empty(
(self.total_size, 1), dtype=torch.float, device=device)
self.next_states = torch.empty(
(self.total_size, *state_shape), dtype=torch.float, device=device)
def append(self, state, action, reward, done, log_pi, next_state):
self.states[self._p].copy_(torch.from_numpy(state))
self.actions[self._p].copy_(torch.from_numpy(action))
self.rewards[self._p] = float(reward)
self.dones[self._p] = float(done)
self.log_pis[self._p] = float(log_pi)
self.next_states[self._p].copy_(torch.from_numpy(next_state))
self._p = (self._p + 1) % self.total_size
self._n = min(self._n + 1, self.total_size)
def get(self):
assert self._p % self.buffer_size == 0
start = (self._p - self.buffer_size) % self.total_size
idxes = slice(start, start + self.buffer_size)
return (
self.states[idxes],
self.actions[idxes],
self.rewards[idxes],
self.dones[idxes],
self.log_pis[idxes],
self.next_states[idxes]
)
def sample(self, batch_size):
assert self._p % self.buffer_size == 0
idxes = np.random.randint(low=0, high=self._n, size=batch_size)
return (
self.states[idxes],
self.actions[idxes],
self.rewards[idxes],
self.dones[idxes],
self.log_pis[idxes],
self.next_states[idxes]
)
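# Minimal usage sketch (added for illustration; buffer size, shapes and device
# below are hypothetical and not part of the original module).
if __name__ == "__main__":
    buf = Uniform_Buffer(buffer_size=4, state_shape=(3,), action_shape=(1,),
                         device=torch.device("cpu"))
    for _ in range(4):
        buf.append(np.zeros(3, dtype=np.float32),
                   np.zeros(1, dtype=np.float32),
                   reward=0.0, done=False, log_pi=0.0,
                   next_state=np.zeros(3, dtype=np.float32))
    states, actions, rewards, dones, log_pis, next_states = buf.get()
    print(states.shape, actions.shape)  # torch.Size([4, 3]) torch.Size([4, 1])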
|
[
"numpy.random.randint",
"torch.empty",
"torch.from_numpy"
] |
[((305, 383), 'torch.empty', 'torch.empty', (['(self.total_size, *state_shape)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, *state_shape), dtype=torch.float, device=device)\n', (316, 383), False, 'import torch\n'), ((420, 499), 'torch.empty', 'torch.empty', (['(self.total_size, *action_shape)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, *action_shape), dtype=torch.float, device=device)\n', (431, 499), False, 'import torch\n'), ((536, 603), 'torch.empty', 'torch.empty', (['(self.total_size, 1)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, 1), dtype=torch.float, device=device)\n', (547, 603), False, 'import torch\n'), ((638, 705), 'torch.empty', 'torch.empty', (['(self.total_size, 1)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, 1), dtype=torch.float, device=device)\n', (649, 705), False, 'import torch\n'), ((742, 809), 'torch.empty', 'torch.empty', (['(self.total_size, 1)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, 1), dtype=torch.float, device=device)\n', (753, 809), False, 'import torch\n'), ((850, 928), 'torch.empty', 'torch.empty', (['(self.total_size, *state_shape)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, *state_shape), dtype=torch.float, device=device)\n', (861, 928), False, 'import torch\n'), ((1951, 2006), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self._n', 'size': 'batch_size'}), '(low=0, high=self._n, size=batch_size)\n', (1968, 2006), True, 'import numpy as np\n'), ((1049, 1072), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (1065, 1072), False, 'import torch\n'), ((1110, 1134), 'torch.from_numpy', 'torch.from_numpy', (['action'], {}), '(action)\n', (1126, 1134), False, 'import torch\n'), ((1310, 1338), 'torch.from_numpy', 'torch.from_numpy', (['next_state'], {}), '(next_state)\n', (1326, 1338), False, 'import torch\n')]
|
"""
Linear State Space Assembler
"""
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.linear.utils.ss_interface as ss_interface
import sharpy.utils.settings as settings
import sharpy.utils.h5utils as h5
import warnings
@solver
class LinearAssembler(BaseSolver):
r"""
Warnings:
Under development - please advise of new features and bugs!
Creates a workspace containing the different linear elements of the state-space.
The user specifies which elements to build sequentially via the ``linear_system`` setting.
The most common uses will be:
* Aerodynamic: :class:`sharpy.linear.assembler.LinearUVLM` solver
* Structural: :class:`sharpy.linear.assembler.LinearBeam` solver
* Aeroelastic: :class:`sharpy.linear.assembler.LinearAeroelastic` solver
    The solver can also load a user-specified assembly of a state-space by means of the ``LinearCustom`` block.
    See :class:`sharpy.linear.assembler.LinearAssembler` for a detailed description of each of the state-space assemblies.
Upon assembly of the linear system, the data structure ``data.linear`` will be created. The :class:`.Linear`
contains the state-space as an attribute. This state space will be the one employed by postprocessors.
Important: running the linear routines requires information on the tangent mass, stiffness and gyroscopic
    structural matrices; therefore, the solver :class:`solvers.modal.Modal` must have been run prior to linearisation.
In addition, if the problem includes rigid body velocities, at least one
timestep of :class:`solvers.DynamicCoupled` must have run such that the rigid body velocity is included.
Example:
The typical ``flow`` setting used prior to using this solver for an aeroelastic simulation with rigid body dynamics
will be similar to:
>>> flow = ['BeamLoader',
>>> 'AerogridLoader',
>>> 'StaticTrim',
>>> 'DynamicCoupled', # a single time step will suffice
>>> 'Modal',
>>> 'LinearAssembler']
"""
solver_id = 'LinearAssembler'
solver_classification = 'Linear'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['linear_system'] = 'str'
settings_default['linear_system'] = None
settings_description['linear_system'] = 'Name of chosen state space assembly type'
settings_types['linear_system_settings'] = 'dict'
settings_default['linear_system_settings'] = dict()
settings_description['linear_system_settings'] = 'Settings for the desired state space assembler'
settings_types['linearisation_tstep'] = 'int'
settings_default['linearisation_tstep'] = -1
settings_description['linearisation_tstep'] = 'Chosen linearisation time step number from available time steps'
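    # Example (illustrative only, not from the original source): a minimal settings
    # dict for this solver could look like
    #   {'linear_system': 'LinearAeroelastic',
    #    'linear_system_settings': {...},
    #    'linearisation_tstep': -1}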
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.settings = dict()
self.data = None
def initialise(self, data, custom_settings=None):
self.data = data
if custom_settings:
self.data.settings[self.solver_id] = custom_settings
self.settings = self.data.settings[self.solver_id]
else:
self.settings = data.settings[self.solver_id]
settings.to_custom_types(self.settings, self.settings_types, self.settings_default)
# Get consistent linearisation timestep
ii_step = self.settings['linearisation_tstep']
if type(ii_step) != int:
ii_step = self.settings['linearisation_tstep'].value
tsstruct0 = data.structure.timestep_info[ii_step]
tsaero0 = data.aero.timestep_info[ii_step]
# Create data.linear
self.data.linear = Linear(tsaero0, tsstruct0)
# Load available systems
import sharpy.linear.assembler
# Load roms
import sharpy.rom
lsys = ss_interface.initialise_system(self.settings['linear_system'])
lsys.initialise(data)
self.data.linear.linear_system = lsys
def run(self):
self.data.linear.ss = self.data.linear.linear_system.assemble()
return self.data
class Linear(object):
"""
This is the class responsible for the transfer of information and can be accessed as ``data.linear``. It stores
as class attributes the following classes that describe the linearised problem.
Attributes:
ss (sharpy.linear.src.libss.ss): State-space system
linear_system (sharpy.linear.utils.ss_interface.BaseElement): Assemble system properties
tsaero0 (sharpy.utils.datastructures.AeroTimeStepInfo): Linearisation aerodynamic timestep
tsstruct0 (sharpy.utils.datastructures.StructTimeStepInfo): Linearisation structural timestep
timestep_info (list): Linear time steps
"""
def __init__(self, tsaero0, tsstruct0):
self.linear_system = None
self.ss = None
self.tsaero0 = tsaero0
self.tsstruct0 = tsstruct0
self.timestep_info = []
self.uvlm = None
self.beam = None
if __name__ == "__main__":
print('Testing the assembly of the pendulum system')
test = 'aeroelastic'
if test == 'beam':
data = h5.readh5('/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/flexible_beam_static.data.h5').data
beam_settings = {'modal_projection': False,
'inout_coords': 'nodes',
'discrete_time': True,
'newmark_damp': 0.15*1,
'discr_method': 'newmark',
'dt': 0.001,
'proj_modes': 'undamped',
'use_euler': True,
'num_modes': 13,
'remove_dofs': ['V'],
'gravity': 'on'}
custom_settings = {'linearisation_tstep': -1,
'flow': ['LinearBeam'],
'LinearBeam': beam_settings}
linear_space = LinearAssembler()
linear_space.initialise(data, custom_settings)
data = linear_space.run()
# import sharpy.solvers.lindynamicsim as lindynsim
# linear_sim = lindynsim.LinearDynamicSimulation()
# linear_sim.initialise(data)
import numpy as np
eigs = np.linalg.eig(data.linear.ss.A)
eigs_ct = np.log(eigs[0]) / data.linear.ss.dt
order = np.argsort(eigs_ct.real)[::-1]
eigs_ct = eigs_ct[order]
print('End')
elif test == 'uvlm':
data = h5.readh5('/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/sears_uinf0050_AR100_M8N12Ms10_KR15_sp0.data.h5').data
uvlm_settings = {'dt': 0.001,
'integr_order': 2,
'density': 1.225,
'remove_predictor': False,
'use_sparse': False,
'ScalingDict': {'length': 1.,
'speed': 1.,
'density': 1.},
'remove_inputs': ['u_gust']}
custom_settings = {'linearisation_tstep': -1,
'flow': ['LinearUVLM'],
'LinearUVLM': uvlm_settings}
linear_space = LinearAssembler()
linear_space.initialise(data, custom_settings)
data = linear_space.run()
elif test=='aeroelastic':
data = h5.readh5('/home/ng213/sharpy_cases/ToSORT_FlyingWings/01_RichardsBFF/cases/horten/horten.data.h5').data
custom_settings = {'flow': ['LinearAeroelastic'],
'LinearAeroelastic': {
'beam_settings': {'modal_projection': False,
'inout_coords': 'nodes',
'discrete_time': True,
'newmark_damp': 0.5,
'discr_method': 'newmark',
'dt': 0.001,
'proj_modes': 'undamped',
'use_euler': 'off',
'num_modes': 40,
'print_info': 'on',
'gravity': 'on',
'remove_dofs': []},
'aero_settings': {'dt': 0.001,
'integr_order': 2,
'density': 1.225*0.0000000001,
'remove_predictor': False,
'use_sparse': True,
'rigid_body_motion': True,
'use_euler': False,
'remove_inputs': ['u_gust']},
'rigid_body_motion': True}}
linear_space = LinearAssembler()
linear_space.initialise(data, custom_settings)
data = linear_space.run()
print('End')
|
[
"numpy.log",
"sharpy.utils.settings.SettingsTable",
"sharpy.utils.settings.to_custom_types",
"numpy.linalg.eig",
"sharpy.linear.utils.ss_interface.initialise_system",
"numpy.argsort",
"sharpy.utils.h5utils.readh5"
] |
[((2890, 2914), 'sharpy.utils.settings.SettingsTable', 'settings.SettingsTable', ([], {}), '()\n', (2912, 2914), True, 'import sharpy.utils.settings as settings\n'), ((3441, 3529), 'sharpy.utils.settings.to_custom_types', 'settings.to_custom_types', (['self.settings', 'self.settings_types', 'self.settings_default'], {}), '(self.settings, self.settings_types, self.\n settings_default)\n', (3465, 3529), True, 'import sharpy.utils.settings as settings\n'), ((4057, 4119), 'sharpy.linear.utils.ss_interface.initialise_system', 'ss_interface.initialise_system', (["self.settings['linear_system']"], {}), "(self.settings['linear_system'])\n", (4087, 4119), True, 'import sharpy.linear.utils.ss_interface as ss_interface\n'), ((6488, 6519), 'numpy.linalg.eig', 'np.linalg.eig', (['data.linear.ss.A'], {}), '(data.linear.ss.A)\n', (6501, 6519), True, 'import numpy as np\n'), ((5377, 5483), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['"""/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/flexible_beam_static.data.h5"""'], {}), "(\n '/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/flexible_beam_static.data.h5'\n )\n", (5386, 5483), True, 'import sharpy.utils.h5utils as h5\n'), ((6538, 6553), 'numpy.log', 'np.log', (['eigs[0]'], {}), '(eigs[0])\n', (6544, 6553), True, 'import numpy as np\n'), ((6590, 6614), 'numpy.argsort', 'np.argsort', (['eigs_ct.real'], {}), '(eigs_ct.real)\n', (6600, 6614), True, 'import numpy as np\n'), ((6716, 6841), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['"""/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/sears_uinf0050_AR100_M8N12Ms10_KR15_sp0.data.h5"""'], {}), "(\n '/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/sears_uinf0050_AR100_M8N12Ms10_KR15_sp0.data.h5'\n )\n", (6725, 6841), True, 'import sharpy.utils.h5utils as h5\n'), ((7632, 7741), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['"""/home/ng213/sharpy_cases/ToSORT_FlyingWings/01_RichardsBFF/cases/horten/horten.data.h5"""'], {}), "(\n '/home/ng213/sharpy_cases/ToSORT_FlyingWings/01_RichardsBFF/cases/horten/horten.data.h5'\n )\n", (7641, 7741), True, 'import sharpy.utils.h5utils as h5\n')]
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import simpy, numpy
from mds_sim import *
from rvs import *
from patch import *
# ######################## Models ######################## #
def ar_ub_fj(n, X):
return float(1/moment_i_n_k(1, n, n, X) )
def E_T_fj(ar, n, X):
# def max_cdf(x):
# return X.cdf(x)**n
# def max_moment(i):
# return mpmath.quad(lambda x: i*x**(i-1) * (1 - max_cdf(x) ), [0, mpmath.inf] )
# return PK(max_moment(1), max_moment(2), ar)
return PK(moment_i_n_k(1, n, n, X), moment_i_n_k(2, n, n, X), ar)
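# Note (added): PK(.) is presumably the Pollaczek-Khinchine M/G/1 mean response
# time evaluated from the first two moments of the service time. For the fork-join
# model the effective service time of a job is the maximum of the n task service
# times, whose i-th moment moment_i_n_k(i, n, n, X) supplies; ar_ub_fj is then the
# arrival rate 1 / E[max] at which the queue saturates.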
# ######################## Sim ######################## #
def test_fj(num_f_run, ar, n, serv, serv_dist_m):
E_T_f_sum = 0
for f in range(num_f_run):
log(WARNING, "ar= {}, n= {}, serv= {}, serv_dist_m= {}".format(ar, n, serv, serv_dist_m) )
env = simpy.Environment()
pg = PG(env, "pg", ar)
q = MDSQ("mdsq", env, n, range(n), serv, serv_dist_m)
pg.out = q
pg.init()
env.run(until=10*10*50000)
l = q.jsink.st_l
if len(l): E_T_f_sum += float(sum(l) )/len(l)
total_n_wins = sum([n for i, n in q.jsink.qid__num_win_map.items() ] )
print("pg.n_sent= {}, total_n_wins= {}".format(pg.n_sent, total_n_wins) )
qid__win_freq_map = {i:float(n)/total_n_wins for i, n in q.jsink.qid__num_win_map.items() }
print("qid__win_freq_map= {}".format(pprint.pformat(qid__win_freq_map) ) )
E_T = E_T_f_sum/num_f_run
print(">> E_T= {}".format(E_T) )
return E_T
def plot_fj():
n = 2
serv = "Pareto" # "TPareto"
l, u, a = 1, 10**6, 2
if serv == "TPareto":
X = TPareto(l, u, a)
serv_dist_m = {'l':l, 'u':u, 'a':a}
elif serv == "Pareto":
X = Pareto(l, a)
serv_dist_m = {'loc':l, 'a':a}
ar_ub = ar_ub_fj(n, X)
log(WARNING, "n= {}, serv= {}, serv_dist_m= {}, ar_ub= {}".format(n, serv, serv_dist_m, ar_ub) )
E_T_l, E_T_sim_l = [], []
num_f_run = 1
sim = False
if serv == "TPareto":
if n == 22:
pass
else:
sim = True
elif serv == "Pareto":
if n == 22:
E_T_sim_l= [
3.7875159802925884,
3.6594505295950768,
4.223943206950012,
4.589334674521958,
6.524796278389641,
5.64633614293259,
7.252958280015537,
8.035109860019876,
8.463351261567757,
39.12300569764332,
11.573032446153153,
13.929789522860153,
14.965936063862987,
20.40743954754556,
27.105625093446594]
else:
sim = True
ar_l = []
for ar in numpy.linspace(0.05, ar_ub, 15):
ar_l.append(ar)
if sim:
E_T_sim_l.append(test_fj(num_f_run, ar, n, serv, serv_dist_m) )
E_T_l.append(E_T_fj(ar, n, X) )
log(WARNING, "E_T_sim_l= {}".format(pprint.pformat(E_T_sim_l) ) )
plot.plot(ar_l, E_T_sim_l, label=r'sim, n={}'.format(n), color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.plot(ar_l, E_T_l, label=r'n={}'.format(n), color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.legend(prop={'size':12})
plot.xlabel(r'Arrival rate $\lambda$ (Request/s)', fontsize=12)
plot.ylabel(r'Average download time (s)', fontsize=12)
if serv == "TPareto":
serv_in_latex = r'TPareto(l={}, u={}, a={})'.format(l, u, a)
elif serv == "Pareto":
serv_in_latex = r'Pareto(l={}, a={})'.format(l, a)
plot.title(r'$X \sim {}$, $n= {}$'.format(serv_in_latex, n) )
fig = plot.gcf()
def_size = fig.get_size_inches()
fig.set_size_inches(def_size[0]/1.2, def_size[1]/1.2)
fig.tight_layout()
plot.savefig("plot_fj_n_{}.pdf".format(n) )
fig.clear()
log(WARNING, "done; n= {}".format(n) )
if __name__ == "__main__":
plot_fj()
|
[
"matplotlib.pyplot.legend",
"matplotlib.use",
"numpy.linspace",
"simpy.Environment",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((2525, 2556), 'numpy.linspace', 'numpy.linspace', (['(0.05)', 'ar_ub', '(15)'], {}), '(0.05, ar_ub, 15)\n', (2539, 2556), False, 'import simpy, numpy\n'), ((3030, 3060), 'matplotlib.pyplot.legend', 'plot.legend', ([], {'prop': "{'size': 12}"}), "(prop={'size': 12})\n", (3041, 3060), True, 'import matplotlib.pyplot as plot\n'), ((3062, 3125), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Arrival rate $\\\\lambda$ (Request/s)"""'], {'fontsize': '(12)'}), "('Arrival rate $\\\\lambda$ (Request/s)', fontsize=12)\n", (3073, 3125), True, 'import matplotlib.pyplot as plot\n'), ((3128, 3181), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Average download time (s)"""'], {'fontsize': '(12)'}), "('Average download time (s)', fontsize=12)\n", (3139, 3181), True, 'import matplotlib.pyplot as plot\n'), ((3424, 3434), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (3432, 3434), True, 'import matplotlib.pyplot as plot\n'), ((840, 859), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (857, 859), False, 'import simpy, numpy\n')]
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs to train an on-device recommendation model."""
import collections
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api import mm_export
from tensorflow_examples.lite.model_maker.core.data_util import data_util
from tensorflow_examples.lite.model_maker.core.data_util import recommendation_config
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import custom_model
from tensorflow_examples.lite.model_maker.core.task import model_util
from tensorflow_examples.lite.model_maker.core.task.model_spec import recommendation_spec
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import input_pipeline
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import metrics as _metrics
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher
@mm_export('recommendation.Recommendation')
class Recommendation(custom_model.CustomModel):
"""Recommendation task class."""
DEFAULT_EXPORT_FORMAT = (ExportFormat.TFLITE,)
ALLOWED_EXPORT_FORMAT = (ExportFormat.LABEL, ExportFormat.TFLITE,
ExportFormat.SAVED_MODEL)
  # ID = 0 is reserved as a placeholder for OOV and is used for padding.
OOV_ID = 0
def __init__(self,
model_spec,
model_dir,
shuffle=True,
learning_rate=0.1,
gradient_clip_norm=1.0):
"""Init recommendation model.
Args:
model_spec: recommendation model spec.
model_dir: str, path to export model checkpoints and summaries.
shuffle: boolean, whether the training data should be shuffled.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
"""
if not isinstance(model_spec, recommendation_spec.RecommendationSpec):
raise ValueError(
'Expect RecommendationSpec but got model_spec: {}'.format(model_spec))
self._model_dir = model_dir
self._learning_rate = learning_rate
self._gradient_clip_norm = gradient_clip_norm
super(Recommendation, self).__init__(model_spec, shuffle=shuffle)
@property
def input_spec(self) -> recommendation_config.InputSpec:
return self.model_spec.input_spec
@property
def model_hparams(self) -> recommendation_config.ModelHParams:
return self.model_spec.model_hparams
def create_model(self, do_train=True):
"""Creates a model.
Args:
do_train: boolean. Whether to train the model.
Returns:
Keras model.
"""
self.model = self.model_spec.create_model()
if do_train:
_launcher.compile_model(self.model, self.model_hparams.eval_top_k,
self._learning_rate, self._gradient_clip_norm)
def train(self,
train_data,
validation_data=None,
batch_size=16,
steps_per_epoch=100,
epochs=1):
"""Feeds the training data for training.
Args:
train_data: Training dataset.
validation_data: Validation data. If None, skips validation process.
batch_size: int, the batch size.
      steps_per_epoch: int, the number of steps per epoch.
epochs: int, number of epochs.
Returns:
History from model.fit().
"""
batch_size = batch_size if batch_size else self.model_spec.batch_size
train_ds = train_data.gen_dataset(
batch_size, is_training=True, shuffle=self.shuffle)
if validation_data:
validation_ds = validation_data.gen_dataset(batch_size, is_training=False)
else:
validation_ds = None
self.create_model(do_train=True)
history = self.model.fit(
x=train_ds,
validation_data=validation_ds,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=self._keras_callbacks(self._model_dir))
tf.get_logger().info(history)
return history
def evaluate(self, data, batch_size=10):
"""Evaluate the model.
Args:
data: Evaluation data.
batch_size: int, batch size for evaluation.
Returns:
History from model.evaluate().
"""
batch_size = batch_size if batch_size else self.model_spec.batch_size
eval_ds = data.gen_dataset(batch_size, is_training=False)
history = self.model.evaluate(eval_ds)
tf.get_logger().info(history)
return history
def _keras_callbacks(self, model_dir):
"""Returns a list of default keras callbacks for `model.fit`."""
return _launcher.get_callbacks(self.model, model_dir)
def _get_serve_fn(self, keras_model):
"""Gets serve fn for exporting model."""
input_specs = input_pipeline.get_serving_input_specs(self.input_spec)
return keras_model.serve.get_concrete_function(**input_specs)
def _export_tflite(self, tflite_filepath):
"""Exports tflite model."""
serve_fn = self._get_serve_fn(self.model)
# Providing trackable objects is now recommended since it will make the
# concrete function conversion API be based on the new SavedModel importer,
# which will enable new TensorFlow Lite features including variable support,
# resources and variant tensor, and signature concept.
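    # (Added note) e.g. tf.__version__ == '2.8.0' reduces to 2.8 in the check below,
    # so TF >= 2.7 takes the converter path that also passes trackable objects.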
if float('.'.join(tf.__version__.split('.')[:2])) >= 2.7:
converter = tf.lite.TFLiteConverter.from_concrete_functions([serve_fn],
self.model)
else:
converter = tf.lite.TFLiteConverter.from_concrete_functions([serve_fn])
tflite_model = converter.convert()
with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
f.write(tflite_model)
def _export_saved_model(self, filepath):
serve_fn = self._get_serve_fn(self.model)
signatures = {tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: serve_fn}
tf.saved_model.save(self.model, export_dir=filepath, signatures=signatures)
def evaluate_tflite(self, tflite_filepath, data):
"""Evaluates the tflite model.
The data is padded to required length, and multiple metrics are evaluated.
Args:
tflite_filepath: File path to the TFLite model.
data: Data to be evaluated.
Returns:
Dict of (metric, value), evaluation result of TFLite model.
"""
label_name = self.input_spec.label_feature.feature_name
lite_runner = model_util.get_lite_runner(tflite_filepath, self.model_spec)
ds = data.gen_dataset(batch_size=1, is_training=False)
max_output_size = data.max_vocab_id + 1 # +1 because 0 is reserved for OOV.
eval_top_k = self.model_hparams.eval_top_k
metrics = [
_metrics.GlobalRecall(top_k=k, name=f'Global_Recall/Recall_{k}')
for k in eval_top_k
]
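    # (Added note) Each GlobalRecall metric presumably tracks recall@k: the fraction
    # of examples whose true next-item id appears among the k highest-scoring ids.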
for feature, y_true in data_util.generate_elements(ds):
feature.pop(label_name)
x = feature
ids, scores = lite_runner.run(x)
# y_true: shape [1, 1]
# y_pred: shape [1, max_output_size]; fill only scores with top-k ids.
y_pred = np.zeros([1, max_output_size])
for i, score in zip(ids, scores):
if i in data.vocab: # Only set if id is in vocab.
y_pred[0, i] = score
# Update metrics.
for m in metrics:
m.update_state(y_true, y_pred)
result = collections.OrderedDict([(m.name, m.result()) for m in metrics])
return result
@classmethod
def create(cls,
train_data,
model_spec: recommendation_spec.RecommendationSpec,
model_dir: str = None,
validation_data=None,
batch_size: int = 16,
steps_per_epoch: int = 10000,
epochs: int = 1,
learning_rate: float = 0.1,
gradient_clip_norm: float = 1.0,
shuffle: bool = True,
do_train: bool = True):
"""Loads data and train the model for recommendation.
Args:
train_data: Training data.
model_spec: ModelSpec, Specification for the model.
model_dir: str, path to export model checkpoints and summaries.
validation_data: Validation data.
batch_size: Batch size for training.
      steps_per_epoch: int, number of steps per epoch.
epochs: int, Number of epochs for training.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
shuffle: boolean, whether the training data should be shuffled.
do_train: boolean, whether to run training.
Returns:
An instance based on Recommendation.
"""
# Use model_dir or a temp folder to store intermediate checkpoints, etc.
if model_dir is None:
model_dir = tempfile.mkdtemp()
recommendation = cls(
model_spec,
model_dir=model_dir,
shuffle=shuffle,
learning_rate=learning_rate,
gradient_clip_norm=gradient_clip_norm)
if do_train:
tf.compat.v1.logging.info('Training recommendation model...')
recommendation.train(
train_data,
validation_data,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
epochs=epochs)
else:
recommendation.create_model(do_train=False)
return recommendation
# Shortcut function.
create = Recommendation.create
mm_export('recommendation.create').export_constant(__name__, 'create')
|
[
"tensorflow.io.gfile.GFile",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.get_callbacks",
"numpy.zeros",
"tensorflow.__version__.split",
"tensorflow.compat.v1.logging.info",
"tensorflow_examples.lite.model_maker.core.api.mm_export",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.input_pipeline.get_serving_input_specs",
"tensorflow.saved_model.save",
"tensorflow_examples.lite.model_maker.core.task.model_util.get_lite_runner",
"tensorflow_examples.lite.model_maker.core.data_util.data_util.generate_elements",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.compile_model",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.metrics.GlobalRecall",
"tempfile.mkdtemp",
"tensorflow.get_logger",
"tensorflow.lite.TFLiteConverter.from_concrete_functions"
] |
[((1620, 1662), 'tensorflow_examples.lite.model_maker.core.api.mm_export', 'mm_export', (['"""recommendation.Recommendation"""'], {}), "('recommendation.Recommendation')\n", (1629, 1662), False, 'from tensorflow_examples.lite.model_maker.core.api import mm_export\n'), ((5207, 5253), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.get_callbacks', '_launcher.get_callbacks', (['self.model', 'model_dir'], {}), '(self.model, model_dir)\n', (5230, 5253), True, 'from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher\n'), ((5358, 5413), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.input_pipeline.get_serving_input_specs', 'input_pipeline.get_serving_input_specs', (['self.input_spec'], {}), '(self.input_spec)\n', (5396, 5413), False, 'from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import input_pipeline\n'), ((6501, 6576), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['self.model'], {'export_dir': 'filepath', 'signatures': 'signatures'}), '(self.model, export_dir=filepath, signatures=signatures)\n', (6520, 6576), True, 'import tensorflow as tf\n'), ((7010, 7070), 'tensorflow_examples.lite.model_maker.core.task.model_util.get_lite_runner', 'model_util.get_lite_runner', (['tflite_filepath', 'self.model_spec'], {}), '(tflite_filepath, self.model_spec)\n', (7036, 7070), False, 'from tensorflow_examples.lite.model_maker.core.task import model_util\n'), ((7409, 7440), 'tensorflow_examples.lite.model_maker.core.data_util.data_util.generate_elements', 'data_util.generate_elements', (['ds'], {}), '(ds)\n', (7436, 7440), False, 'from tensorflow_examples.lite.model_maker.core.data_util import data_util\n'), ((9904, 9938), 'tensorflow_examples.lite.model_maker.core.api.mm_export', 'mm_export', (['"""recommendation.create"""'], {}), "('recommendation.create')\n", (9913, 9938), False, 'from tensorflow_examples.lite.model_maker.core.api import mm_export\n'), ((3362, 3480), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.compile_model', '_launcher.compile_model', (['self.model', 'self.model_hparams.eval_top_k', 'self._learning_rate', 'self._gradient_clip_norm'], {}), '(self.model, self.model_hparams.eval_top_k, self.\n _learning_rate, self._gradient_clip_norm)\n', (3385, 3480), True, 'from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher\n'), ((5980, 6051), 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[serve_fn]', 'self.model'], {}), '([serve_fn], self.model)\n', (6027, 6051), True, 'import tensorflow as tf\n'), ((6146, 6205), 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[serve_fn]'], {}), '([serve_fn])\n', (6193, 6205), True, 'import tensorflow as tf\n'), ((6254, 6294), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['tflite_filepath', '"""wb"""'], {}), "(tflite_filepath, 'wb')\n", (6271, 6294), True, 'import tensorflow as tf\n'), ((7283, 7347), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.metrics.GlobalRecall', '_metrics.GlobalRecall', ([], {'top_k': 'k', 'name': 'f"""Global_Recall/Recall_{k}"""'}), "(top_k=k, name=f'Global_Recall/Recall_{k}')\n", (7304, 7347), True, 'from 
tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import metrics as _metrics\n'), ((7651, 7681), 'numpy.zeros', 'np.zeros', (['[1, max_output_size]'], {}), '([1, max_output_size])\n', (7659, 7681), True, 'import numpy as np\n'), ((9296, 9314), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9312, 9314), False, 'import tempfile\n'), ((9524, 9585), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Training recommendation model..."""'], {}), "('Training recommendation model...')\n", (9549, 9585), True, 'import tensorflow as tf\n'), ((4584, 4599), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (4597, 4599), True, 'import tensorflow as tf\n'), ((5036, 5051), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (5049, 5051), True, 'import tensorflow as tf\n'), ((5922, 5947), 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (5942, 5947), True, 'import tensorflow as tf\n')]
|
import time
import datetime
import itertools
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import dataset
import utils
import sys
import networks.pwcnet as pwcnet
def Train_single(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_MSE = torch.nn.MSELoss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
discriminator = utils.create_discriminator(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
discriminator = nn.DataParallel(discriminator)
discriminator = discriminator.cuda()
else:
discriminator = discriminator.cuda()
generatorNet = generatorNet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr = opt.lr_d, betas = (opt.b1, opt.b2))
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
#Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
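    # Illustration (hypothetical values, not from the original code): with
    # lr_g = 1e-4, lr_decrease_factor = 0.5 and lr_decrease_epoch = 10, the learning
    # rate is halved every 10 epochs: 1e-4, then 5e-5, then 2.5e-5, and so on.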
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('ILSVRC2012_train_sal_name.txt')[:1272480]
# Define the dataset
trainset = dataset.ColorizationDataset(opt, imglist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
# ----------------------------------------
# Training
# ----------------------------------------
# Tensor type
Tensor = torch.cuda.FloatTensor
# Count start time
prev_time = time.time()
    # For loop training
for epoch in range(opt.epochs):
for iteration, (x_t, y_t) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
optimizer_D.zero_grad()
lstm_state = None
x_t = x_t.cuda()
y_t = y_t.cuda()
valid = Tensor(np.ones((x_t.shape[0], 1, 30, 30)))
fake = Tensor(np.zeros((x_t.shape[0], 1, 30, 30)))
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
# Train Discriminator
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
# Fake samples
fake_scalar = discriminator(x_t, p_t.detach())
loss_fake = criterion_MSE(fake_scalar, fake)
# True samples
true_scalar = discriminator(x_t, y_t)
loss_true = criterion_MSE(true_scalar, valid)
# Overall Loss and optimize
loss_D = 0.5 * (loss_fake + loss_true)
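            # (Added note) p_t was detached when computing loss_fake above, so the
            # discriminator loss does not backpropagate into the generator; both
            # losses are backpropagated and both optimizers stepped further below.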
# Train Generator
# GAN Loss
fake_scalar = discriminator(x_t, p_t)
loss_G = criterion_MSE(fake_scalar, valid)
# Pixel-level loss
loss_L1 = criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1 + opt.lambda_gan * loss_G
loss.backward()
loss_D.backward()
optimizer_G.step()
optimizer_D.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [G Loss: %.4f] [D Loss: %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), loss_G.item(), loss_D.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_D)
def Pre_train_single(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
print("Pre_train_single")
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_MSE = torch.nn.MSELoss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
else:
generatorNet = generatorNet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
#Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('ILSVRC2012_train_sal_name.txt')[:1272480]
# Define the dataset
trainset = dataset.ColorizationDataset(opt, imglist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
# ----------------------------------------
# Training
# ----------------------------------------
# Tensor type
Tensor = torch.cuda.FloatTensor
# Count start time
prev_time = time.time()
    # For loop training
for epoch in range(opt.epochs):
for iteration, (x_t, y_t) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
lstm_state = None
x_t = x_t.cuda()
y_t = y_t.cuda()
valid = Tensor(np.ones((x_t.shape[0], 1, 30, 30)))
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
# Pixel-level loss
loss_L1 = criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1
loss.backward()
optimizer_G.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
def Train_GAN(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_MSE = torch.nn.MSELoss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
discriminator = utils.create_discriminator(opt)
flownet = utils.create_pwcnet(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
discriminator = nn.DataParallel(discriminator)
discriminator = discriminator.cuda()
flownet = nn.DataParallel(flownet)
flownet = flownet.cuda()
else:
discriminator = discriminator.cuda()
generatorNet = generatorNet.cuda()
flownet = flownet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr = opt.lr_d, betas = (opt.b1, opt.b2))
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
#Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d_Gan%d_os%d_ol%d.pth' % (opt.task, epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d_GAN%d_os%d_ol%d.pth' % (opt.task, epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('videocolor_linux.txt')
classlist = utils.get_dirs(opt.baseroot)
'''
imgnumber = len(imglist) - (len(imglist) % opt.batch_size)
imglist = imglist[:imgnumber]
'''
# Define the dataset
trainset = dataset.MultiFramesDataset(opt, imglist, classlist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = utils.create_dataloader(trainset, opt)
# ----------------------------------------
# Training
# ----------------------------------------
# Tensor type
Tensor = torch.cuda.FloatTensor
# Count start time
prev_time = time.time()
# For loop training
for epoch in range(opt.epochs):
for iteration, (in_part, out_part) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
optimizer_D.zero_grad()
lstm_state = None
loss_flow = 0
loss_flow_long = 0
loss_L1 = 0
loss_D = 0
loss_G = 0
x_0 = in_part[0].cuda()
p_t_0 = in_part[0].cuda()
# Adversarial ground truth
valid = Tensor(np.ones((in_part[0].shape[0], 1, 30, 30)))
fake = Tensor(np.zeros((in_part[0].shape[0], 1, 30, 30)))
for iter_frame in range(opt.iter_frames):
# Read data
x_t = in_part[iter_frame].cuda()
y_t = out_part[iter_frame].cuda()
# Initialize the second input and compute flow loss
if iter_frame == 0:
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
elif iter_frame == 1:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_0 = p_t.detach()
p_t_last.requires_grad = False
p_t_0.requires_grad = False
# o_t_last_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
# y_t_warp range is [0, 1]
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
else:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_last.requires_grad = False
# o_t_last_2_t o_t_first_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
o_t_first_2_t = pwcnet.PWCEstimate(flownet,x_t, x_0)
# y_t_warp, y_t_warp_long range is [0, 1]
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
x_t_warp_long = pwcnet.PWCNetBackward((x_0 + 1) / 2, o_t_first_2_t)
p_t_warp_long = pwcnet.PWCNetBackward((p_t_0 + 1) / 2, o_t_first_2_t)
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
lstm_state = utils.repackage_hidden(lstm_state)
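                # (Added note) The branches below impose short- and long-term temporal
                # consistency: previous outputs are warped to the current frame with the
                # PWC-Net flow, and a soft occlusion mask exp(-mask_para * ||warp error||^2)
                # down-weights pixels where the warped previous input disagrees with the
                # current frame.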
if iter_frame == 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
elif iter_frame > 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
mask_flow_long = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp_long, dim=1).pow(2) ).unsqueeze(1)
loss_flow_long += criterion_L1(mask_flow_long * (p_t + 1) / 2, mask_flow_long * p_t_warp_long)
# Fake samples
fake_scalar = discriminator(x_t, p_t.detach())
loss_fake = criterion_MSE(fake_scalar, fake)
# True samples
true_scalar = discriminator(x_t, y_t)
loss_true = criterion_MSE(true_scalar, valid)
# Train Discriminator
loss_D += 0.5 * (loss_fake + loss_true)
# Train Generator
# GAN Loss
fake_scalar = discriminator(x_t, p_t)
loss_G += criterion_MSE(fake_scalar, valid)
# Pixel-level loss
loss_L1 += criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1 + opt.lambda_flow * loss_flow + opt.lambda_flow_long * loss_flow_long + opt.lambda_gan * loss_G
loss.backward()
loss_D.backward()
optimizer_G.step()
optimizer_D.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [Flow Loss Short: %.8f] [Flow Loss Long: %.8f] [G Loss: %.4f] [D Loss: %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), loss_flow.item(), loss_flow_long.item(), loss_G.item(), loss_D.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_D)
def Train_No_GAN(opt): # w / o GAN
# ----------------------------------------
# Network training parameters
# ----------------------------------------
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
flownet = utils.create_pwcnet(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
flownet = nn.DataParallel(flownet)
flownet = flownet.cuda()
else:
generatorNet = generatorNet.cuda()
flownet = flownet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
#Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('videocolor_linux.txt')
classlist = utils.get_dirs(opt.baseroot)
'''
imgnumber = len(imglist) - (len(imglist) % opt.batch_size)
imglist = imglist[:imgnumber]
'''
# Define the dataset
trainset = dataset.MultiFramesDataset(opt, imglist, classlist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = utils.create_dataloader(trainset, opt)
# ----------------------------------------
# Training
# ----------------------------------------
# Count start time
prev_time = time.time()
# For loop training
for epoch in range(opt.epochs):
for iteration, (in_part, out_part) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
lstm_state = None
loss_flow = 0
loss_flow_long = 0
loss_L1 = 0
x_0 = in_part[0].cuda()
p_t_0 = in_part[0].cuda()
for iter_frame in range(opt.iter_frames):
# Read data
x_t = in_part[iter_frame].cuda()
y_t = out_part[iter_frame].cuda()
# Initialize the second input and compute flow loss
if iter_frame == 0:
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
elif iter_frame == 1:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_0 = p_t.detach()
p_t_last.requires_grad = False
p_t_0.requires_grad = False
# o_t_last_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
                    # x_t_warp / p_t_warp range is [0, 1]
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
else:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_last.requires_grad = False
# o_t_last_2_t o_t_first_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
                    o_t_first_2_t = pwcnet.PWCEstimate(flownet, x_t, x_0)
                    # x_t_warp, p_t_warp, x_t_warp_long, p_t_warp_long range is [0, 1]
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
x_t_warp_long = pwcnet.PWCNetBackward((x_0 + 1) / 2, o_t_first_2_t)
p_t_warp_long = pwcnet.PWCNetBackward((p_t_0 + 1) / 2, o_t_first_2_t)
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
lstm_state = utils.repackage_hidden(lstm_state)
if iter_frame == 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
elif iter_frame > 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
mask_flow_long = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp_long, dim=1).pow(2) ).unsqueeze(1)
loss_flow_long += criterion_L1(mask_flow_long * (p_t + 1) / 2, mask_flow_long * p_t_warp_long)
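                # Editorial note: mask_flow down-weights pixels where the warped previous
                # frame disagrees with the current frame (exp of the negative squared,
                # channel-summed difference scaled by mask_para), so occluded regions
                # contribute little to the short- and long-term temporal L1 terms.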
# Pixel-level loss
loss_L1 += criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1 + opt.lambda_flow * loss_flow + opt.lambda_flow_long * loss_flow_long
loss.backward()
optimizer_G.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [Flow Loss Short: %.8f] [Flow Loss Long: %.8f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), loss_flow.item(), loss_flow_long.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
|
[
"utils.repackage_hidden",
"numpy.ones",
"utils.create_generator",
"utils.create_dataloader",
"utils.create_discriminator",
"networks.pwcnet.PWCEstimate",
"torch.nn.MSELoss",
"dataset.ColorizationDataset",
"torch.utils.data.DataLoader",
"torch.zeros",
"networks.pwcnet.PWCNetBackward",
"utils.text_readlines",
"torch.sum",
"torch.nn.L1Loss",
"numpy.zeros",
"time.time",
"utils.create_pwcnet",
"dataset.MultiFramesDataset",
"torch.save",
"torch.nn.DataParallel",
"utils.get_dirs"
] |
[((702, 729), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (724, 729), False, 'import utils\n'), ((750, 781), 'utils.create_discriminator', 'utils.create_discriminator', (['opt'], {}), '(opt)\n', (776, 781), False, 'import utils\n'), ((4021, 4062), 'dataset.ColorizationDataset', 'dataset.ColorizationDataset', (['opt', 'imglist'], {}), '(opt, imglist)\n', (4048, 4062), False, 'import dataset\n'), ((4168, 4280), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)', 'num_workers': 'opt.num_workers', 'pin_memory': '(True)'}), '(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=\n opt.num_workers, pin_memory=True)\n', (4178, 4280), False, 'from torch.utils.data import DataLoader\n'), ((4509, 4520), 'time.time', 'time.time', ([], {}), '()\n', (4518, 4520), False, 'import time\n'), ((7444, 7471), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (7466, 7471), False, 'import utils\n'), ((10462, 10503), 'dataset.ColorizationDataset', 'dataset.ColorizationDataset', (['opt', 'imglist'], {}), '(opt, imglist)\n', (10489, 10503), False, 'import dataset\n'), ((10609, 10721), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)', 'num_workers': 'opt.num_workers', 'pin_memory': '(True)'}), '(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=\n opt.num_workers, pin_memory=True)\n', (10619, 10721), False, 'from torch.utils.data import DataLoader\n'), ((10950, 10961), 'time.time', 'time.time', ([], {}), '()\n', (10959, 10961), False, 'import time\n'), ((12954, 12981), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (12976, 12981), False, 'import utils\n'), ((13002, 13033), 'utils.create_discriminator', 'utils.create_discriminator', (['opt'], {}), '(opt)\n', (13028, 13033), False, 'import utils\n'), ((13048, 13072), 'utils.create_pwcnet', 'utils.create_pwcnet', (['opt'], {}), '(opt)\n', (13067, 13072), False, 'import utils\n'), ((16458, 16502), 'utils.text_readlines', 'utils.text_readlines', (['"""videocolor_linux.txt"""'], {}), "('videocolor_linux.txt')\n", (16478, 16502), False, 'import utils\n'), ((16519, 16547), 'utils.get_dirs', 'utils.get_dirs', (['opt.baseroot'], {}), '(opt.baseroot)\n', (16533, 16547), False, 'import utils\n'), ((16702, 16753), 'dataset.MultiFramesDataset', 'dataset.MultiFramesDataset', (['opt', 'imglist', 'classlist'], {}), '(opt, imglist, classlist)\n', (16728, 16753), False, 'import dataset\n'), ((16859, 16897), 'utils.create_dataloader', 'utils.create_dataloader', (['trainset', 'opt'], {}), '(trainset, opt)\n', (16882, 16897), False, 'import utils\n'), ((17123, 17134), 'time.time', 'time.time', ([], {}), '()\n', (17132, 17134), False, 'import time\n'), ((22936, 22963), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (22958, 22963), False, 'import utils\n'), ((22978, 23002), 'utils.create_pwcnet', 'utils.create_pwcnet', (['opt'], {}), '(opt)\n', (22997, 23002), False, 'import utils\n'), ((25997, 26041), 'utils.text_readlines', 'utils.text_readlines', (['"""videocolor_linux.txt"""'], {}), "('videocolor_linux.txt')\n", (26017, 26041), False, 'import utils\n'), ((26058, 26086), 'utils.get_dirs', 'utils.get_dirs', (['opt.baseroot'], {}), '(opt.baseroot)\n', (26072, 26086), False, 'import utils\n'), ((26241, 26292), 'dataset.MultiFramesDataset', 'dataset.MultiFramesDataset', (['opt', 'imglist', 'classlist'], {}), '(opt, 
imglist, classlist)\n', (26267, 26292), False, 'import dataset\n'), ((26398, 26436), 'utils.create_dataloader', 'utils.create_dataloader', (['trainset', 'opt'], {}), '(trainset, opt)\n', (26421, 26436), False, 'import utils\n'), ((26602, 26613), 'time.time', 'time.time', ([], {}), '()\n', (26611, 26613), False, 'import time\n'), ((844, 873), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (859, 873), True, 'import torch.nn as nn\n'), ((941, 971), 'torch.nn.DataParallel', 'nn.DataParallel', (['discriminator'], {}), '(discriminator)\n', (956, 971), True, 'import torch.nn as nn\n'), ((3916, 3969), 'utils.text_readlines', 'utils.text_readlines', (['"""ILSVRC2012_train_sal_name.txt"""'], {}), "('ILSVRC2012_train_sal_name.txt')\n", (3936, 3969), False, 'import utils\n'), ((7534, 7563), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (7549, 7563), True, 'import torch.nn as nn\n'), ((10357, 10410), 'utils.text_readlines', 'utils.text_readlines', (['"""ILSVRC2012_train_sal_name.txt"""'], {}), "('ILSVRC2012_train_sal_name.txt')\n", (10377, 10410), False, 'import utils\n'), ((13135, 13164), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (13150, 13164), True, 'import torch.nn as nn\n'), ((13232, 13262), 'torch.nn.DataParallel', 'nn.DataParallel', (['discriminator'], {}), '(discriminator)\n', (13247, 13262), True, 'import torch.nn as nn\n'), ((13326, 13350), 'torch.nn.DataParallel', 'nn.DataParallel', (['flownet'], {}), '(flownet)\n', (13341, 13350), True, 'import torch.nn as nn\n'), ((23065, 23094), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (23080, 23094), True, 'import torch.nn as nn\n'), ((23156, 23180), 'torch.nn.DataParallel', 'nn.DataParallel', (['flownet'], {}), '(flownet)\n', (23171, 23180), True, 'import torch.nn as nn\n'), ((584, 601), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (599, 601), False, 'import torch\n'), ((629, 647), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (645, 647), False, 'import torch\n'), ((6384, 6395), 'time.time', 'time.time', ([], {}), '()\n', (6393, 6395), False, 'import time\n'), ((7326, 7343), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (7341, 7343), False, 'import torch\n'), ((7371, 7389), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7387, 7389), False, 'import torch\n'), ((12072, 12083), 'time.time', 'time.time', ([], {}), '()\n', (12081, 12083), False, 'import time\n'), ((12836, 12853), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (12851, 12853), False, 'import torch\n'), ((12881, 12899), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (12897, 12899), False, 'import torch\n'), ((21847, 21858), 'time.time', 'time.time', ([], {}), '()\n', (21856, 21858), False, 'import time\n'), ((22864, 22881), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (22879, 22881), False, 'import torch\n'), ((30407, 30418), 'time.time', 'time.time', ([], {}), '()\n', (30416, 30418), False, 'import time\n'), ((4915, 4949), 'numpy.ones', 'np.ones', (['(x_t.shape[0], 1, 30, 30)'], {}), '((x_t.shape[0], 1, 30, 30))\n', (4922, 4949), True, 'import numpy as np\n'), ((4977, 5012), 'numpy.zeros', 'np.zeros', (['(x_t.shape[0], 1, 30, 30)'], {}), '((x_t.shape[0], 1, 30, 30))\n', (4985, 5012), True, 'import numpy as np\n'), ((11320, 11354), 'numpy.ones', 'np.ones', (['(x_t.shape[0], 1, 30, 30)'], {}), '((x_t.shape[0], 1, 30, 30))\n', (11327, 11354), True, 
'import numpy as np\n'), ((17683, 17724), 'numpy.ones', 'np.ones', (['(in_part[0].shape[0], 1, 30, 30)'], {}), '((in_part[0].shape[0], 1, 30, 30))\n', (17690, 17724), True, 'import numpy as np\n'), ((17752, 17794), 'numpy.zeros', 'np.zeros', (['(in_part[0].shape[0], 1, 30, 30)'], {}), '((in_part[0].shape[0], 1, 30, 30))\n', (17760, 17794), True, 'import numpy as np\n'), ((19800, 19834), 'utils.repackage_hidden', 'utils.repackage_hidden', (['lstm_state'], {}), '(lstm_state)\n', (19822, 19834), False, 'import utils\n'), ((29018, 29052), 'utils.repackage_hidden', 'utils.repackage_hidden', (['lstm_state'], {}), '(lstm_state)\n', (29040, 29052), False, 'import utils\n'), ((5038, 5111), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (5049, 5111), False, 'import torch\n'), ((11380, 11453), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (11391, 11453), False, 'import torch\n'), ((2456, 2551), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch,\n opt.batch_size))\n", (2466, 2551), False, 'import torch\n'), ((2804, 2902), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (2814, 2902), False, 'import torch\n'), ((3212, 3301), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.\n batch_size))\n", (3222, 3301), False, 'import torch\n'), ((3553, 3645), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (3563, 3645), False, 'import torch\n'), ((8897, 8992), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch,\n opt.batch_size))\n", (8907, 8992), False, 'import torch\n'), ((9245, 9343), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (9255, 9343), False, 'import torch\n'), ((9653, 9742), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.\n batch_size))\n", (9663, 9742), False, 'import torch\n'), ((9994, 10086), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (10004, 10086), False, 'import torch\n'), ((14856, 15028), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d_Gan%d_os%d_ol%d.pth' % (opt.task, epoch, opt.\n batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d_Gan%d_os%d_ol%d.pth' % (\n opt.task, epoch, 
opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.\n lambda_flow_long))\n", (14866, 15028), False, 'import torch\n'), ((15275, 15373), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (15285, 15373), False, 'import torch\n'), ((15683, 15847), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d_GAN%d_os%d_ol%d.pth' % (opt.task, epoch, opt.\n batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d_GAN%d_os%d_ol%d.pth' % (opt.task,\n epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.\n lambda_flow_long))\n", (15693, 15847), False, 'import torch\n'), ((16095, 16187), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (16105, 16187), False, 'import torch\n'), ((18585, 18627), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (18603, 18627), True, 'import networks.pwcnet as pwcnet\n'), ((18659, 18714), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (18680, 18714), True, 'import networks.pwcnet as pwcnet\n'), ((18793, 18848), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (18814, 18848), True, 'import networks.pwcnet as pwcnet\n'), ((19132, 19174), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (19150, 19174), True, 'import networks.pwcnet as pwcnet\n'), ((19211, 19248), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_0'], {}), '(flownet, x_t, x_0)\n', (19229, 19248), True, 'import networks.pwcnet as pwcnet\n'), ((19341, 19396), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (19362, 19396), True, 'import networks.pwcnet as pwcnet\n'), ((19428, 19483), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (19449, 19483), True, 'import networks.pwcnet as pwcnet\n'), ((19520, 19571), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((x_0 + 1) / 2, o_t_first_2_t)\n', (19541, 19571), True, 'import networks.pwcnet as pwcnet\n'), ((19608, 19661), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((p_t_0 + 1) / 2, o_t_first_2_t)\n', (19629, 19661), True, 'import networks.pwcnet as pwcnet\n'), ((24537, 24632), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch,\n opt.batch_size))\n", (24547, 24632), False, 'import torch\n'), ((24885, 24983), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (24895, 24983), False, 'import torch\n'), ((25293, 
25382), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.\n batch_size))\n", (25303, 25382), False, 'import torch\n'), ((25634, 25726), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (25644, 25726), False, 'import torch\n'), ((27803, 27845), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (27821, 27845), True, 'import networks.pwcnet as pwcnet\n'), ((27877, 27932), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (27898, 27932), True, 'import networks.pwcnet as pwcnet\n'), ((28011, 28066), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (28032, 28066), True, 'import networks.pwcnet as pwcnet\n'), ((28350, 28392), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (28368, 28392), True, 'import networks.pwcnet as pwcnet\n'), ((28429, 28466), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_0'], {}), '(flownet, x_t, x_0)\n', (28447, 28466), True, 'import networks.pwcnet as pwcnet\n'), ((28559, 28614), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (28580, 28614), True, 'import networks.pwcnet as pwcnet\n'), ((28646, 28701), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (28667, 28701), True, 'import networks.pwcnet as pwcnet\n'), ((28738, 28789), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((x_0 + 1) / 2, o_t_first_2_t)\n', (28759, 28789), True, 'import networks.pwcnet as pwcnet\n'), ((28826, 28879), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((p_t_0 + 1) / 2, o_t_first_2_t)\n', (28847, 28879), True, 'import networks.pwcnet as pwcnet\n'), ((6334, 6345), 'time.time', 'time.time', ([], {}), '()\n', (6343, 6345), False, 'import time\n'), ((12022, 12033), 'time.time', 'time.time', ([], {}), '()\n', (12031, 12033), False, 'import time\n'), ((18130, 18203), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (18141, 18203), False, 'import torch\n'), ((21797, 21808), 'time.time', 'time.time', ([], {}), '()\n', (21806, 21808), False, 'import time\n'), ((27348, 27421), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (27359, 27421), False, 'import torch\n'), ((30357, 30368), 'time.time', 'time.time', ([], {}), '()\n', (30366, 30368), False, 'import time\n'), ((19931, 19973), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (19940, 19973), False, 'import torch\n'), ((29149, 29191), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - 
x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (29158, 29191), False, 'import torch\n'), ((20188, 20230), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (20197, 20230), False, 'import torch\n'), ((20413, 20460), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp_long)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp_long, dim=1)\n', (20422, 20460), False, 'import torch\n'), ((29406, 29448), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (29415, 29448), False, 'import torch\n'), ((29631, 29678), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp_long)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp_long, dim=1)\n', (29640, 29678), False, 'import torch\n')]
|
# !!! Change: this is a new file
import os
import pathlib
import random
import time
import pprint
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from utils.conv_type import FixedSubnetConv, SampleSubnetConv
from utils.logging import AverageMeter, ProgressMeter
from utils.net_utils import (
set_model_prune_rate,
freeze_model_weights,
save_checkpoint,
get_lr,
LabelSmoothing,
)
from utils.schedulers import get_policy
from utils.feature_extractor import FeatureExtractor
from args import args
import importlib
import data
import models
import numpy as np
def main():
print(args)
# Simply call main_worker function
main_worker(args)
def main_worker(args):
args.gpu = None
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model and optimizer
model = get_model(args)
model = set_gpu(args, model)
if args.pretrained:
pretrained(args, model)
data = get_dataset(args)
output_path = args.pretrained + "_activations"
# setup feature extractor
feature_extractor = FeatureExtractor(model.module)
print(model.module)
target_layers = feature_extractor.parse_default_layers()
target_types = feature_extractor.parse_type("relu")
feature_extractor.append_target_layers(target_layers, target_types)
# print(feature_extractor.module_dict)
print(feature_extractor.target_outputs.keys())
predicate(data.val_loader, feature_extractor, output_path)
def predicate(data_loader, feature_extractor, output_path=None):
batch_time = AverageMeter("Time", ":6.3f", write_val=False)
model = feature_extractor.model
outputs_dict = dict()
# switch to evaluate mode
model.eval()
with torch.no_grad():
toc = time.time()
for batch_ind, (input, _) in enumerate(data_loader):
input = input.cuda(non_blocking=True)
# forward to get intermediate outputs
_ = model(input)
# synchronize so that everything is calculated
torch.cuda.synchronize()
# print(feature_extractor.target_outputs)
for target_layer, target_output in feature_extractor.target_outputs.items():
if target_layer in outputs_dict:
outputs_dict[target_layer].append(target_output.data.numpy())
else:
outputs_dict[target_layer] = [target_output.data.numpy()]
# measure elapsed time
batch_time.update(time.time() - toc)
toc = time.time()
if batch_ind % 10 == 0:
print('Predicate: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
batch_ind, len(data_loader), batch_time=batch_time))
if output_path is not None:
# def _squeeze_dict(d):
# for key, val in d.items():
# d[key] = np.concatenate(val, 0)
# return d
# outputs_dict = _squeeze_dict(outputs_dict)
# np.savez_compressed(output_path, **outputs_dict)
for key, val in outputs_dict.items():
np.save(output_path + '_' + key, np.concatenate(val, 0))
print(key, 'saved')
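# Editorial sketch (hypothetical layer key): each target layer's activations are written to
# '<output_path>_<layer_key>.npy' and can be reloaded later with, e.g.,
#   acts = np.load(args.pretrained + '_activations_layer1.0.relu.npy')
# where 'layer1.0.relu' stands in for whatever keys feature_extractor.target_outputs contains.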
def set_gpu(args, model):
assert torch.cuda.is_available(), "CPU-only experiments currently unsupported"
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
elif args.multigpu is None:
device = torch.device("cpu")
else:
# DataParallel will divide and allocate batch_size to all available GPUs
print(f"=> Parallelizing on {args.multigpu} gpus")
torch.cuda.set_device(args.multigpu[0])
args.gpu = args.multigpu[0]
model = torch.nn.DataParallel(model, device_ids=args.multigpu).cuda(
args.multigpu[0]
)
cudnn.benchmark = True
return model
def pretrained(args, model):
if os.path.isfile(args.pretrained):
print("=> loading pretrained weights from '{}'".format(args.pretrained))
pretrained = torch.load(
args.pretrained,
map_location=torch.device("cuda:{}".format(args.multigpu[0])),
)["state_dict"]
model_state_dict = model.state_dict()
for k, v in pretrained.items():
if k not in model_state_dict or v.size() != model_state_dict[k].size():
print("IGNORE:", k)
pretrained = {
k: v
for k, v in pretrained.items()
if (k in model_state_dict and v.size() == model_state_dict[k].size())
}
model_state_dict.update(pretrained)
model.load_state_dict(model_state_dict)
else:
print("=> no pretrained weights found at '{}'".format(args.pretrained))
for n, m in model.named_modules():
if isinstance(m, FixedSubnetConv):
m.set_subnet()
def get_dataset(args):
print(f"=> Getting {args.set} dataset")
dataset = getattr(data, args.set)(args)
return dataset
def get_model(args):
if args.first_layer_dense:
args.first_layer_type = "DenseConv"
print("=> Creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# applying sparsity to the network
if (
args.conv_type != "DenseConv"
and args.conv_type != "SampleSubnetConv"
and args.conv_type != "ContinuousSparseConv"
):
if args.prune_rate < 0:
raise ValueError("Need to set a positive prune rate")
set_model_prune_rate(model, prune_rate=args.prune_rate)
print(
f"=> Rough estimate model params {sum(int(p.numel() * (1-args.prune_rate)) for n, p in model.named_parameters() if not n.endswith('scores'))}"
)
# freezing the weights if we are only doing subnet training
if args.freeze_weights:
freeze_model_weights(model)
return model
if __name__ == "__main__":
main()
|
[
"torch.cuda.synchronize",
"utils.net_utils.set_model_prune_rate",
"utils.net_utils.freeze_model_weights",
"time.time",
"os.path.isfile",
"utils.logging.AverageMeter",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.set_device",
"torch.nn.DataParallel",
"torch.no_grad",
"numpy.concatenate",
"utils.feature_extractor.FeatureExtractor"
] |
[((1274, 1304), 'utils.feature_extractor.FeatureExtractor', 'FeatureExtractor', (['model.module'], {}), '(model.module)\n', (1290, 1304), False, 'from utils.feature_extractor import FeatureExtractor\n'), ((1761, 1807), 'utils.logging.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {'write_val': '(False)'}), "('Time', ':6.3f', write_val=False)\n", (1773, 1807), False, 'from utils.logging import AverageMeter, ProgressMeter\n'), ((3476, 3501), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3499, 3501), False, 'import torch\n'), ((4158, 4189), 'os.path.isfile', 'os.path.isfile', (['args.pretrained'], {}), '(args.pretrained)\n', (4172, 4189), False, 'import os\n'), ((1928, 1943), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1941, 1943), False, 'import torch\n'), ((1959, 1970), 'time.time', 'time.time', ([], {}), '()\n', (1968, 1970), False, 'import time\n'), ((3586, 3617), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (3607, 3617), False, 'import torch\n'), ((5738, 5793), 'utils.net_utils.set_model_prune_rate', 'set_model_prune_rate', (['model'], {'prune_rate': 'args.prune_rate'}), '(model, prune_rate=args.prune_rate)\n', (5758, 5793), False, 'from utils.net_utils import set_model_prune_rate, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((6075, 6102), 'utils.net_utils.freeze_model_weights', 'freeze_model_weights', (['model'], {}), '(model)\n', (6095, 6102), False, 'from utils.net_utils import set_model_prune_rate, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((2234, 2258), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2256, 2258), False, 'import torch\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import time\n'), ((3704, 3723), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3716, 3723), False, 'import torch\n'), ((3882, 3921), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.multigpu[0]'], {}), '(args.multigpu[0])\n', (3903, 3921), False, 'import torch\n'), ((3381, 3403), 'numpy.concatenate', 'np.concatenate', (['val', '(0)'], {}), '(val, 0)\n', (3395, 3403), True, 'import numpy as np\n'), ((2700, 2711), 'time.time', 'time.time', ([], {}), '()\n', (2709, 2711), False, 'import time\n'), ((3974, 4028), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'args.multigpu'}), '(model, device_ids=args.multigpu)\n', (3995, 4028), False, 'import torch\n')]
|
import networkx as nx
import numpy as np
from pyquil.api import QPUCompiler
from pyquil.gates import H, RY, CZ, CNOT, MEASURE
from pyquil.quil import Program
from pyquil.quilbase import Pragma
from forest.benchmarking.compilation import basic_compile
def create_ghz_program(tree: nx.DiGraph):
"""
Create a Bell/GHZ state with CNOTs described by tree.
:param tree: A tree that describes the CNOTs to perform to create a bell/GHZ state.
:return: the program
"""
assert nx.is_tree(tree), 'Needs to be a tree'
nodes = list(nx.topological_sort(tree))
n_qubits = len(nodes)
program = Program(H(nodes[0]))
for node in nodes:
for child in tree.successors(node):
program += CNOT(node, child)
ro = program.declare('ro', 'BIT', n_qubits)
for i, q in enumerate(nodes):
program += MEASURE(q, ro[i])
return program
def ghz_state_statistics(bitstrings):
"""
    Compute statistics for bitstrings sampled from a Bell/GHZ state
:param bitstrings: An array of bitstrings
:return: A dictionary where bell = number of bitstrings consistent with a bell/GHZ state;
total = total number of bitstrings.
"""
bitstrings = np.asarray(bitstrings)
bell = np.sum(np.logical_or(np.all(bitstrings == 0, axis=1),
np.all(bitstrings == 1, axis=1)))
total = len(bitstrings)
return {
'bell': int(bell),
'total': int(total),
}
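# Worked example: for a perfect 3-qubit GHZ state only the all-zero and all-one outcomes occur,
# so ghz_state_statistics([[0, 0, 0], [1, 1, 1], [0, 1, 0]]) returns {'bell': 2, 'total': 3}.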
def create_graph_state(graph: nx.Graph, use_pragmas=False):
"""
Write a program to create a graph state according to the specified graph
A graph state involves Hadamarding all your qubits and then applying a CZ for each
edge in the graph. A graph state and the ability to measure it however you want gives
you universal quantum computation. Some good references are [MBQC]_ and [MBCS]_.
Similar to a Bell state / GHZ state, we can try to prepare a graph state and measure
how well we've done according to expected parities.
.. [MBQC] A One-Way Quantum Computer.
<NAME>.
Phys. Rev. Lett. 86, 5188 (2001).
https://doi.org/10.1103/PhysRevLett.86.5188
https://arxiv.org/abs/quant-ph/0010033
.. [MBCS] Measurement-based quantum computation with cluster states.
<NAME>.
Phys. Rev. A 68, 022312 (2003).
https://dx.doi.org/10.1103/PhysRevA.68.022312
https://arxiv.org/abs/quant-ph/0301052
:param graph: The graph. Nodes are used as arguments to gates, so they should be qubit-like.
:param use_pragmas: Use COMMUTING_BLOCKS pragmas to hint at the compiler
:return: A program that constructs a graph state.
"""
program = Program()
for q in graph.nodes:
program += H(q)
if use_pragmas:
program += Pragma('COMMUTING_BLOCKS')
for a, b in graph.edges:
if use_pragmas:
program += Pragma('BLOCK')
program += CZ(a, b)
if use_pragmas:
program += Pragma('END_BLOCK')
if use_pragmas:
program += Pragma('END_COMMUTING_BLOCKS')
return program
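# Editorial sketch: a linear three-qubit graph state can be prepared with
#   prog = create_graph_state(nx.path_graph(3))
# which applies H to qubits 0, 1 and 2 and CZ to the edges (0, 1) and (1, 2).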
def measure_graph_state(graph: nx.Graph, focal_node: int):
"""
Given a graph state, measure a focal node and its neighbors with a particular measurement
angle.
:param graph: The graph state graph. This is needed to figure out what the neighbors are
:param focal_node: The node in the graph to serve as the focus. The focal node is measured
at an angle and all its neighbors are measured in the Z basis
:return: Program, list of classical offsets into the ``ro`` register.
"""
program = Program()
theta = program.declare('theta', 'REAL')
program += RY(theta, focal_node)
neighbors = sorted(graph[focal_node])
ro = program.declare('ro', 'BIT', len(neighbors) + 1)
program += MEASURE(focal_node, ro[0])
for i, neighbor in enumerate(neighbors):
program += MEASURE(neighbor, ro[i + 1])
classical_addresses = list(range(len(neighbors) + 1))
return program, classical_addresses
def compiled_parametric_graph_state(compiler: QPUCompiler, graph: nx.Graph, focal_node: int,
num_shots: int = 1000):
"""
Construct a program to create and measure a graph state, map it to qubits using ``addressing``,
and compile to an ISA.
Hackily implement a parameterized program by compiling a program with a particular angle,
finding where that angle appears in the results, and replacing it with ``"{angle}"`` so
the resulting compiled program can be run many times by using python's str.format method.
:param graph: A networkx graph defining the graph state
:param focal_node: The node of the graph to measure
:param compiler: The compiler to do the compiling.
:param num_shots: The number of shots to take when measuring the graph state.
:return: an executable that constructs and measures a graph state.
"""
program = create_graph_state(graph)
measure_prog, c_addrs = measure_graph_state(graph, focal_node)
program += measure_prog
program.wrap_in_numshots_loop(num_shots)
nq_program = basic_compile(program)
executable = compiler.native_quil_to_executable(nq_program)
return executable
|
[
"pyquil.quilbase.Pragma",
"pyquil.gates.CZ",
"pyquil.gates.MEASURE",
"numpy.asarray",
"pyquil.gates.H",
"networkx.topological_sort",
"pyquil.gates.RY",
"forest.benchmarking.compilation.basic_compile",
"pyquil.gates.CNOT",
"pyquil.quil.Program",
"networkx.is_tree",
"numpy.all"
] |
[((495, 511), 'networkx.is_tree', 'nx.is_tree', (['tree'], {}), '(tree)\n', (505, 511), True, 'import networkx as nx\n'), ((1210, 1232), 'numpy.asarray', 'np.asarray', (['bitstrings'], {}), '(bitstrings)\n', (1220, 1232), True, 'import numpy as np\n'), ((2729, 2738), 'pyquil.quil.Program', 'Program', ([], {}), '()\n', (2736, 2738), False, 'from pyquil.quil import Program\n'), ((3662, 3671), 'pyquil.quil.Program', 'Program', ([], {}), '()\n', (3669, 3671), False, 'from pyquil.quil import Program\n'), ((3732, 3753), 'pyquil.gates.RY', 'RY', (['theta', 'focal_node'], {}), '(theta, focal_node)\n', (3734, 3753), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((3871, 3897), 'pyquil.gates.MEASURE', 'MEASURE', (['focal_node', 'ro[0]'], {}), '(focal_node, ro[0])\n', (3878, 3897), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((5191, 5213), 'forest.benchmarking.compilation.basic_compile', 'basic_compile', (['program'], {}), '(program)\n', (5204, 5213), False, 'from forest.benchmarking.compilation import basic_compile\n'), ((551, 576), 'networkx.topological_sort', 'nx.topological_sort', (['tree'], {}), '(tree)\n', (570, 576), True, 'import networkx as nx\n'), ((626, 637), 'pyquil.gates.H', 'H', (['nodes[0]'], {}), '(nodes[0])\n', (627, 637), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((850, 867), 'pyquil.gates.MEASURE', 'MEASURE', (['q', 'ro[i]'], {}), '(q, ro[i])\n', (857, 867), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((2784, 2788), 'pyquil.gates.H', 'H', (['q'], {}), '(q)\n', (2785, 2788), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((2829, 2855), 'pyquil.quilbase.Pragma', 'Pragma', (['"""COMMUTING_BLOCKS"""'], {}), "('COMMUTING_BLOCKS')\n", (2835, 2855), False, 'from pyquil.quilbase import Pragma\n'), ((2967, 2975), 'pyquil.gates.CZ', 'CZ', (['a', 'b'], {}), '(a, b)\n', (2969, 2975), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((3082, 3112), 'pyquil.quilbase.Pragma', 'Pragma', (['"""END_COMMUTING_BLOCKS"""'], {}), "('END_COMMUTING_BLOCKS')\n", (3088, 3112), False, 'from pyquil.quilbase import Pragma\n'), ((3962, 3990), 'pyquil.gates.MEASURE', 'MEASURE', (['neighbor', 'ro[i + 1]'], {}), '(neighbor, ro[i + 1])\n', (3969, 3990), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((730, 747), 'pyquil.gates.CNOT', 'CNOT', (['node', 'child'], {}), '(node, child)\n', (734, 747), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((1265, 1296), 'numpy.all', 'np.all', (['(bitstrings == 0)'], {'axis': '(1)'}), '(bitstrings == 0, axis=1)\n', (1271, 1296), True, 'import numpy as np\n'), ((1330, 1361), 'numpy.all', 'np.all', (['(bitstrings == 1)'], {'axis': '(1)'}), '(bitstrings == 1, axis=1)\n', (1336, 1361), True, 'import numpy as np\n'), ((2932, 2947), 'pyquil.quilbase.Pragma', 'Pragma', (['"""BLOCK"""'], {}), "('BLOCK')\n", (2938, 2947), False, 'from pyquil.quilbase import Pragma\n'), ((3023, 3042), 'pyquil.quilbase.Pragma', 'Pragma', (['"""END_BLOCK"""'], {}), "('END_BLOCK')\n", (3029, 3042), False, 'from pyquil.quilbase import Pragma\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
conan.stacker was created on 2017/10/17.
Author: Charles_Lai
Email: <EMAIL>
"""
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from diego.ensemble_net.base import Ensemble
from diego.ensemble_net.combination import Combiner
from diego.classifier.logistic_regression_sk import LogisticRegressionSK
class EnsembleStack(object):
def __init__(self, mode='probs', cv=5):
self.mode = mode
self.layers = []
self.cv = cv
def add_layer(self, ensemble):
if isinstance(ensemble, Ensemble):
self.layers.append(ensemble)
else:
raise Exception('not an Ensemble object')
def fit_layer(self, layer_idx, X, y):
if layer_idx >= len(self.layers):
return
elif layer_idx == len(self.layers) - 1:
self.layers[layer_idx].fit(X, y)
else:
n_classes = len(set(y)) - 1
n_classifiers = len(self.layers[layer_idx])
output = np.zeros((X.shape[0], n_classes * n_classifiers))
skf = list(StratifiedKFold(self.cv).split(X, y))
for tra, tst in skf:
self.layers[layer_idx].fit(X[tra], y[tra])
out = self.layers[layer_idx].output(X[tst], mode=self.mode)
output[tst, :] = out[:, 1:, :].reshape(
out.shape[0], (out.shape[1] - 1) * out.shape[2])
self.layers[layer_idx].fit(X, y)
self.fit_layer(layer_idx + 1, output, y)
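    # Editorial note: in 'probs' mode each layer's output has shape
    # (n_samples, n_classes, n_classifiers); the first class column is dropped (its
    # probability is redundant because each row sums to 1) and the rest is flattened,
    # so e.g. 3 classes and 4 classifiers give 8 stacked features per sample.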
def fit(self, X, y):
if self.cv > 1:
self.fit_layer(0, X, y)
else:
X_ = X
for layer in self.layers:
layer.fit(X_, y)
out = layer.output(X_, mode=self.mode)
X_ = out[:, 1:, :].reshape(
out.shape[0], (out.shape[1] - 1) * out.shape[2])
return self
def output(self, X):
input_ = X
for layer in self.layers:
out = layer.output(input_, mode=self.mode)
input_ = out[:, 1:, :].reshape(
out.shape[0], (out.shape[1] - 1) * out.shape[2])
return input_
class EnsembleStackClassifier(object):
def __init__(self, stack, combiner=None):
self.stack = stack
if combiner is None:
self.combiner = Combiner(rule='mean')
elif isinstance(combiner, str):
if combiner == 'majority_vote':
raise ValueError('EnsembleStackClassifier '
                                 'does not support majority_vote')
self.combiner = Combiner(rule=combiner)
elif isinstance(combiner, Combiner):
self.combiner = combiner
else:
raise ValueError('Invalid combiner!')
self.clf = self._make_clf()
@staticmethod
def _make_clf():
import autosklearn.classification
import autosklearn.pipeline.components.classification
autosklearn.pipeline.components.classification.add_classifier(
LogisticRegressionSK)
clf = autosklearn.classification.AutoSklearnClassifier(
time_left_for_this_task=30,
per_run_time_limit=10,
include_estimators=['LogisticRegressionSK'],
)
return clf
def fit(self, X, y):
self.stack.fit(X, y)
return self
def refit(self, X, y):
out = self.stack.output(X)
print(out)
print(out.shape, y.shape)
self.clf.fit(out, y)
def predict(self, X):
out = self.stack.output(X)
try:
y_pred = self.clf.predict(out)
except:
raise Exception('You must refit ensemble stacker')
return y_pred
def output(self, X):
out = self.stack.output(X)
return self.combiner.combine(out)
def output_proba(self, X):
out = self.stack.output(X)
return np.mean(out, axis=2)
|
[
"diego.ensemble_net.combination.Combiner",
"numpy.mean",
"numpy.zeros",
"sklearn.model_selection.StratifiedKFold"
] |
[((3956, 3976), 'numpy.mean', 'np.mean', (['out'], {'axis': '(2)'}), '(out, axis=2)\n', (3963, 3976), True, 'import numpy as np\n'), ((2395, 2416), 'diego.ensemble_net.combination.Combiner', 'Combiner', ([], {'rule': '"""mean"""'}), "(rule='mean')\n", (2403, 2416), False, 'from diego.ensemble_net.combination import Combiner\n'), ((1076, 1125), 'numpy.zeros', 'np.zeros', (['(X.shape[0], n_classes * n_classifiers)'], {}), '((X.shape[0], n_classes * n_classifiers))\n', (1084, 1125), True, 'import numpy as np\n'), ((2654, 2677), 'diego.ensemble_net.combination.Combiner', 'Combiner', ([], {'rule': 'combiner'}), '(rule=combiner)\n', (2662, 2677), False, 'from diego.ensemble_net.combination import Combiner\n'), ((1149, 1173), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['self.cv'], {}), '(self.cv)\n', (1164, 1173), False, 'from sklearn.model_selection import StratifiedKFold\n')]
|
import numpy as np
import astropy.units as u
from astropy.table import QTable
from jdaviz import Specviz
from specutils import Spectrum1D
def test_line_lists():
viz = Specviz()
spec = Spectrum1D(flux=np.random.rand(100)*u.Jy,
spectral_axis=np.arange(6000, 7000, 10)*u.AA)
viz.load_spectrum(spec)
lt = QTable()
lt['linename'] = ['O III', 'Halpha']
lt['rest'] = [5007, 6563]*u.AA
lt['redshift'] = u.Quantity(0.046)
viz.load_line_list(lt)
assert len(viz.spectral_lines) == 2
assert viz.spectral_lines.loc["linename", "Halpha"]["listname"] == "Custom"
assert np.all(viz.spectral_lines["show"])
viz.erase_spectral_lines()
assert np.all(viz.spectral_lines["show"] == False) # noqa
viz.plot_spectral_line("Halpha")
viz.plot_spectral_line("O III 5007.0")
assert np.all(viz.spectral_lines["show"])
|
[
"astropy.units.Quantity",
"jdaviz.Specviz",
"astropy.table.QTable",
"numpy.arange",
"numpy.random.rand",
"numpy.all"
] |
[((174, 183), 'jdaviz.Specviz', 'Specviz', ([], {}), '()\n', (181, 183), False, 'from jdaviz import Specviz\n'), ((343, 351), 'astropy.table.QTable', 'QTable', ([], {}), '()\n', (349, 351), False, 'from astropy.table import QTable\n'), ((449, 466), 'astropy.units.Quantity', 'u.Quantity', (['(0.046)'], {}), '(0.046)\n', (459, 466), True, 'import astropy.units as u\n'), ((626, 660), 'numpy.all', 'np.all', (["viz.spectral_lines['show']"], {}), "(viz.spectral_lines['show'])\n", (632, 660), True, 'import numpy as np\n'), ((705, 748), 'numpy.all', 'np.all', (["(viz.spectral_lines['show'] == False)"], {}), "(viz.spectral_lines['show'] == False)\n", (711, 748), True, 'import numpy as np\n'), ((850, 884), 'numpy.all', 'np.all', (["viz.spectral_lines['show']"], {}), "(viz.spectral_lines['show'])\n", (856, 884), True, 'import numpy as np\n'), ((211, 230), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (225, 230), True, 'import numpy as np\n'), ((273, 298), 'numpy.arange', 'np.arange', (['(6000)', '(7000)', '(10)'], {}), '(6000, 7000, 10)\n', (282, 298), True, 'import numpy as np\n')]
|
"""
Simple utils to save and load from disk.
"""
import joblib
import gzip
import pickle
import os
import tempfile
import tarfile
import zipfile
import logging
from urllib.request import urlretrieve
from typing import Any, Iterator, List, Optional, Tuple, Union, cast, IO
import pandas as pd
import numpy as np
import deepchem as dc
logger = logging.getLogger(__name__)
def pad_array(x: np.ndarray,
shape: Union[Tuple, int],
fill: float = 0.0,
both: bool = False) -> np.ndarray:
"""
Pad an array with a fill value.
Parameters
----------
x: np.ndarray
A numpy array.
shape: Tuple or int
Desired shape. If int, all dimensions are padded to that size.
fill: float, optional (default 0.0)
The padded value.
both: bool, optional (default False)
If True, split the padding on both sides of each axis. If False,
padding is applied to the end of each axis.
Returns
-------
np.ndarray
A padded numpy array
"""
x = np.asarray(x)
if not isinstance(shape, tuple):
shape = tuple(shape for _ in range(x.ndim))
pad = []
for i in range(x.ndim):
diff = shape[i] - x.shape[i]
assert diff >= 0
if both:
a, b = divmod(diff, 2)
b += a
pad.append((a, b))
else:
pad.append((0, diff))
pad = tuple(pad) # type: ignore
x = np.pad(x, pad, mode='constant', constant_values=fill)
return x
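# Worked example: pad_array(np.ones((2, 2)), 4) returns a 4x4 array whose top-left 2x2 block
# is ones and whose remaining entries are 0.0; with both=True the ones block is centered
# instead, with one padded row/column on every side.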
def get_data_dir() -> str:
"""Get the DeepChem data directory.
Returns
-------
str
The default path to store DeepChem data. If you want to
change this path, please set your own path to `DEEPCHEM_DATA_DIR`
as an environment variable.
"""
if 'DEEPCHEM_DATA_DIR' in os.environ:
return os.environ['DEEPCHEM_DATA_DIR']
return tempfile.gettempdir()
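# Editorial note: download_url, untargz_file and unzip_file capture get_data_dir() as a
# default argument at import time, so DEEPCHEM_DATA_DIR must already be set in the
# environment when this module is imported for a custom path to take effect.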
def download_url(url: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Download a file to disk.
Parameters
----------
url: str
The URL to download from
dest_dir: str
The directory to save the file in
name: str
The file name to save it as. If omitted, it will try to extract a file name from the URL
"""
if name is None:
name = url
if '?' in name:
name = name[:name.find('?')]
if '/' in name:
name = name[name.rfind('/') + 1:]
urlretrieve(url, os.path.join(dest_dir, name))
def untargz_file(file: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Untar and unzip a .tar.gz file to disk.
Parameters
----------
file: str
The filepath to decompress
dest_dir: str
The directory to save the file in
name: str
    The name of the tar.gz archive to open. If omitted, the path given in `file` is used.
"""
if name is None:
name = file
tar = tarfile.open(name)
tar.extractall(path=dest_dir)
tar.close()
def unzip_file(file: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Unzip a .zip file to disk.
Parameters
----------
file: str
The filepath to decompress
dest_dir: str
The directory to save the file in
name: str
The directory name to unzip it to. If omitted, it will use the file name
"""
if name is None:
name = file
if dest_dir is None:
    dest_dir = os.path.join(get_data_dir(), name)
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(dest_dir)
def load_image_files(input_files: List[str]) -> np.ndarray:
"""Loads a set of images from disk.
Parameters
----------
input_files: List[str]
List of image filenames.
Returns
-------
np.ndarray
A numpy array that contains loaded images. The shape is, `(N,...)`.
Notes
-----
This method requires Pillow to be installed.
The supported file types are PNG and TIF.
"""
try:
from PIL import Image
except ModuleNotFoundError:
raise ImportError("This function requires Pillow to be installed.")
images = []
for input_file in input_files:
_, extension = os.path.splitext(input_file)
extension = extension.lower()
if extension == ".png":
image = np.array(Image.open(input_file))
images.append(image)
elif extension == ".tif":
im = Image.open(input_file)
imarray = np.array(im)
images.append(imarray)
else:
raise ValueError("Unsupported image filetype for %s" % input_file)
return np.array(images)
def load_sdf_files(input_files: List[str],
clean_mols: bool = True,
tasks: List[str] = [],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load SDF file into dataframe.
Parameters
----------
input_files: List[str]
List of filenames
clean_mols: bool, default True
Whether to sanitize molecules.
tasks: List[str], default []
Each entry in `tasks` is treated as a property in the SDF file and is
retrieved with `mol.GetProp(str(task))` where `mol` is the RDKit mol
loaded from a given SDF entry.
shard_size: int, default None
The shard size to yield at one time.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
df_rows = []
for input_file in input_files:
# Tasks are either in .sdf.csv file or in the .sdf file itself
has_csv = os.path.isfile(input_file + ".csv")
# Structures are stored in .sdf file
logger.info("Reading structures from %s." % input_file)
suppl = Chem.SDMolSupplier(str(input_file), clean_mols, False, False)
for ind, mol in enumerate(suppl):
if mol is None:
continue
smiles = Chem.MolToSmiles(mol)
df_row = [ind, smiles, mol]
if not has_csv: # Get task targets from .sdf file
for task in tasks:
df_row.append(mol.GetProp(str(task)))
df_rows.append(df_row)
if shard_size is not None and len(df_rows) == shard_size:
if has_csv:
mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))
raw_df = next(load_csv_files([input_file + ".csv"], shard_size=None))
yield pd.concat([mol_df, raw_df], axis=1, join='inner')
else:
mol_df = pd.DataFrame(
df_rows, columns=('mol_id', 'smiles', 'mol') + tuple(tasks))
yield mol_df
# Reset aggregator
df_rows = []
# Handle final leftovers for this file
if len(df_rows) > 0:
if has_csv:
mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))
raw_df = next(load_csv_files([input_file + ".csv"], shard_size=None))
yield pd.concat([mol_df, raw_df], axis=1, join='inner')
else:
mol_df = pd.DataFrame(
df_rows, columns=('mol_id', 'smiles', 'mol') + tuple(tasks))
yield mol_df
df_rows = []
def load_csv_files(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load data as pandas dataframe from CSV files.
Parameters
----------
input_files: List[str]
List of filenames
shard_size: int, default None
The shard size to yield at one time.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
"""
# First line of user-specified CSV *must* be header.
shard_num = 1
for input_file in input_files:
if shard_size is None:
yield pd.read_csv(input_file)
else:
logger.info("About to start loading CSV from %s" % input_file)
for df in pd.read_csv(input_file, chunksize=shard_size):
logger.info(
"Loading shard %d of size %s." % (shard_num, str(shard_size)))
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
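# Worked example: with shard_size=2, a CSV file with 5 data rows yields three DataFrames of
# 2, 2 and 1 rows; with shard_size=None the whole file is returned as a single DataFrame.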
def load_json_files(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load data as pandas dataframe.
Parameters
----------
input_files: List[str]
List of json filenames.
shard_size: int, default None
Chunksize for reading json files.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
Notes
-----
To load shards from a json file into a Pandas dataframe, the file
must be originally saved with ``df.to_json('filename.json', orient='records', lines=True)``
"""
shard_num = 1
for input_file in input_files:
if shard_size is None:
yield pd.read_json(input_file, orient='records', lines=True)
else:
logger.info("About to start loading json from %s." % input_file)
for df in pd.read_json(
input_file, orient='records', chunksize=shard_size, lines=True):
logger.info(
"Loading shard %d of size %s." % (shard_num, str(shard_size)))
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
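# Worked example: a DataFrame saved with df.to_json('mols.json', orient='records', lines=True)
# can be streamed back in shards with load_json_files(['mols.json'], shard_size=1000).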
def load_pickle_file(input_file: str) -> Any:
"""Load from single, possibly gzipped, pickle file.
Parameters
----------
input_file: str
The filename of pickle file. This function can load from
gzipped pickle file like `XXXX.pkl.gz`.
Returns
-------
Any
The object which is loaded from the pickle file.
"""
if ".gz" in input_file:
with gzip.open(input_file, "rb") as unzipped_file:
return pickle.load(cast(IO[bytes], unzipped_file))
else:
with open(input_file, "rb") as opened_file:
return pickle.load(opened_file)
def load_pickle_files(input_files: List[str]) -> Iterator[Any]:
"""Load dataset from pickle files.
Parameters
----------
input_files: List[str]
The list of filenames of pickle file. This function can load from
gzipped pickle file like `XXXX.pkl.gz`.
Returns
-------
Iterator[Any]
Generator which yields the objects which is loaded from each pickle file.
"""
for input_file in input_files:
yield load_pickle_file(input_file)
def load_data(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[Any]:
"""Loads data from files.
Parameters
----------
input_files: List[str]
List of filenames.
shard_size: int, default None
Size of shard to yield
Returns
-------
Iterator[Any]
Iterator which iterates over provided files.
Notes
-----
The supported file types are SDF, CSV and Pickle.
"""
if len(input_files) == 0:
raise ValueError("The length of `filenames` must be more than 1.")
file_type = _get_file_type(input_files[0])
if file_type == "sdf":
if shard_size is not None:
logger.info("Ignoring shard_size for sdf input.")
for value in load_sdf_files(input_files):
yield value
elif file_type == "csv":
for value in load_csv_files(input_files, shard_size):
yield value
elif file_type == "pickle":
if shard_size is not None:
logger.info("Ignoring shard_size for pickle input.")
for value in load_pickle_files(input_files):
yield value
def _get_file_type(input_file: str) -> str:
"""Get type of input file. Must be csv/pkl/sdf/joblib file."""
filename, file_extension = os.path.splitext(input_file)
# If gzipped, need to compute extension again
if file_extension == ".gz":
filename, file_extension = os.path.splitext(filename)
if file_extension == ".csv":
return "csv"
elif file_extension == ".pkl":
return "pickle"
elif file_extension == ".joblib":
return "joblib"
elif file_extension == ".sdf":
return "sdf"
else:
raise ValueError("Unrecognized extension %s" % file_extension)
def save_to_disk(dataset: Any, filename: str, compress: int = 3):
"""Save a dataset to file.
Parameters
----------
dataset: str
A data saved
filename: str
Path to save data.
compress: int, default 3
The compress option when dumping joblib file.
"""
if filename.endswith('.joblib'):
joblib.dump(dataset, filename, compress=compress)
elif filename.endswith('.npy'):
np.save(filename, dataset)
else:
raise ValueError("Filename with unsupported extension: %s" % filename)
def load_from_disk(filename: str) -> Any:
"""Load a dataset from file.
Parameters
----------
filename: str
A filename you want to load data.
Returns
-------
Any
A loaded object from file.
"""
name = filename
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
extension = os.path.splitext(name)[1]
if extension == ".pkl":
return load_pickle_file(filename)
elif extension == ".joblib":
return joblib.load(filename)
elif extension == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(filename, header=0)
df = df.replace(np.nan, str(""), regex=True)
return df
elif extension == ".npy":
return np.load(filename, allow_pickle=True)
else:
raise ValueError("Unrecognized filetype for %s" % filename)
def load_dataset_from_disk(save_dir: str) -> Tuple[bool, Optional[Tuple[
"dc.data.DiskDataset", "dc.data.DiskDataset", "dc.data.DiskDataset"]], List[
"dc.trans.Transformer"]]:
"""Loads MoleculeNet train/valid/test/transformers from disk.
Expects that data was saved using `save_dataset_to_disk` below. Expects the
following directory structure for `save_dir`:
save_dir/
|
---> train_dir/
|
---> valid_dir/
|
---> test_dir/
|
---> transformers.pkl
Parameters
----------
save_dir: str
Directory name to load datasets.
Returns
-------
loaded: bool
Whether the load succeeded
all_dataset: Tuple[DiskDataset, DiskDataset, DiskDataset]
The train, valid, test datasets
transformers: Transformer
The transformers used for this dataset
See Also
--------
save_dataset_to_disk
"""
train_dir = os.path.join(save_dir, "train_dir")
valid_dir = os.path.join(save_dir, "valid_dir")
test_dir = os.path.join(save_dir, "test_dir")
if not os.path.exists(train_dir) or not os.path.exists(
valid_dir) or not os.path.exists(test_dir):
return False, None, list()
loaded = True
train = dc.data.DiskDataset(train_dir)
valid = dc.data.DiskDataset(valid_dir)
test = dc.data.DiskDataset(test_dir)
train.memory_cache_size = 40 * (1 << 20) # 40 MB
all_dataset = (train, valid, test)
transformers = load_transformers(save_dir)
return loaded, all_dataset, transformers
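# Editorial sketch (hypothetical path): a split written with save_dataset_to_disk('/data/tox21', ...)
# can be restored with
#   loaded, all_dataset, transformers = load_dataset_from_disk('/data/tox21')
#   if loaded:
#     train, valid, test = all_dataset
# when any of the three split directories is missing, loaded is False and all_dataset is None.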
def save_dataset_to_disk(
save_dir: str, train: "dc.data.DiskDataset", valid: "dc.data.DiskDataset",
test: "dc.data.DiskDataset", transformers: List["dc.trans.Transformer"]):
"""Utility used by MoleculeNet to save train/valid/test datasets.
This utility function saves a train/valid/test split of a dataset along
with transformers in the same directory. The saved datasets will take the
following structure:
save_dir/
|
---> train_dir/
|
---> valid_dir/
|
---> test_dir/
|
---> transformers.pkl
Parameters
----------
save_dir: str
Directory name to save datasets to.
train: DiskDataset
Training dataset to save.
valid: DiskDataset
Validation dataset to save.
test: DiskDataset
Test dataset to save.
transformers: List[Transformer]
List of transformers to save to disk.
See Also
--------
load_dataset_from_disk
"""
train_dir = os.path.join(save_dir, "train_dir")
valid_dir = os.path.join(save_dir, "valid_dir")
test_dir = os.path.join(save_dir, "test_dir")
train.move(train_dir)
valid.move(valid_dir)
test.move(test_dir)
save_transformers(save_dir, transformers)
def load_transformers(save_dir: str) -> List["dc.trans.Transformer"]:
"""Load the transformers for a MoleculeNet dataset from disk."""
with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
return pickle.load(f)
def save_transformers(save_dir: str,
transformers: List["dc.trans.Transformer"]):
"""Save the transformers for a MoleculeNet dataset to disk."""
with open(os.path.join(save_dir, "transformers.pkl"), 'wb') as f:
pickle.dump(transformers, f)
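# A hedged usage sketch for the MoleculeNet helpers above (illustrative only;
# the directory name is a placeholder, and train/valid/test/transformers are
# assumed to be existing DiskDataset and Transformer objects):
#
#     save_dataset_to_disk("/tmp/molnet_split", train, valid, test, transformers)
#     loaded, (train2, valid2, test2), transformers2 = \
#         load_dataset_from_disk("/tmp/molnet_split")
#     assert loaded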
|
[
"pickle.dump",
"numpy.load",
"pandas.read_csv",
"typing.cast",
"joblib.dump",
"os.path.isfile",
"pickle.load",
"rdkit.Chem.MolToSmiles",
"os.path.join",
"numpy.pad",
"pandas.DataFrame",
"os.path.exists",
"tarfile.open",
"pandas.concat",
"numpy.save",
"numpy.asarray",
"zipfile.ZipFile",
"gzip.open",
"tempfile.gettempdir",
"deepchem.data.DiskDataset",
"pandas.read_json",
"PIL.Image.open",
"numpy.array",
"os.path.splitext",
"joblib.load",
"logging.getLogger"
] |
import sys
sys.path.append('/usr/lib/python3.6/site-packages/')
import argparse
import base64
from datetime import datetime
import os
import shutil
import numpy as np
import socketio
import eventlet.wsgi
import lycon  # used by lycon.save() below when recording images
from PIL import Image
from flask import Flask
from io import BytesIO
from keras.models import load_model
from utils import parse_position, preprocess_image
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
MAX_SPEED = 30
MIN_SPEED = 10
speed_limit = MAX_SPEED
recorded_points = []
lap_definition = None
no_manual_input = True
@sio.on('telemetry')
def telemetry(sid, data):
global recorded_points
global no_manual_input
if data:
x, y, z = parse_position(data["Position"])
if no_manual_input:
recorded_points.append([x, y, z])
speed = float(data["speed"])
image = Image.open(BytesIO(base64.b64decode(data["image"])))
try:
image = preprocess_image(image)
image = np.array([np.asarray(image)])
image = 2.0 * image / 255 - 1.0
steering_angle = float(model.predict(image, batch_size=1))
global speed_limit
if speed > speed_limit:
speed_limit = MIN_SPEED # slow down
else:
speed_limit = MAX_SPEED
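            # Heuristic throttle (the formula below): start from full throttle
            # and back off quadratically with both the steering angle and the
            # speed-to-speed-limit ratio, so the car slows in sharp turns and
            # as it approaches the speed cap.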
throttle = 1.0 - steering_angle ** 2 - (speed / speed_limit) ** 2
# print('{} {} {}'.format(steering_angle, throttle, speed))
send_control(steering_angle, throttle)
except Exception as e:
print(e)
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
lycon.save(path='{}.jpg'.format(image_filename), image=image)
else:
no_manual_input = False
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("Connection established (id: {}).".format(sid))
send_control(0, 0)
@sio.on('disconnect')
def disconnect(sid):
output_path = 'car_positions.npz'
print("\nConnection terminated - saving data to {}.".format(output_path))
np.savez_compressed(output_path, recorded_points=np.asarray(recorded_points))
print("\n**** Data saved; press ctrl-C to exit.\n")
def send_control(steering_angle, throttle):
sio.emit(
"steer",
        data={
            'steering_angle': str(steering_angle),
            'throttle': str(throttle)
        },
skip_sid=True)
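# Typical invocation, inferred from the argparse definitions below (the file
# and folder names here are only placeholders):
#     python <this_script>.py model.h5 --image_folder run1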
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'--lap_data',
type=str,
default='',
help='Path to lap data (required for progress).'
)
parser.add_argument(
'--image_folder',
type=str,
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
# load model
model = load_model(args.model)
if args.lap_data != '':
try:
lap_definition = np.load(args.lap_data)
        except Exception:
print("Failed to load " + args.lap_data + "; no progress reporting.")
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("**** Recording images from this run to {}.".format(args.image_folder))
else:
print("Image recording not enabled on this run.")
app = socketio.Middleware(sio, app)
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
|
[
"sys.path.append",
"keras.models.load_model",
"socketio.Middleware",
"utils.parse_position",
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"socketio.Server",
"flask.Flask",
"numpy.asarray",
"os.path.exists",
"base64.b64decode",
"datetime.datetime.utcnow",
"shutil.rmtree",
"utils.preprocess_image",
"os.path.join"
] |
# This file is used to configure the behavior of pytest when using the Astropy
# test infrastructure. It needs to live inside the package in order for it to
# get picked up when running the tests inside an interpreter using
# packagename.test
from copy import copy
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import models
from specutils.spectra import Spectrum1D
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
if ASTROPY_HEADER:
config.option.astropy_header = True
# Customize the following lines to add/remove entries from the list of
# packages for which version numbers are displayed when running the tests.
PYTEST_HEADER_MODULES.pop('Pandas', None)
PYTEST_HEADER_MODULES['gwcs'] = 'gwcs'
del PYTEST_HEADER_MODULES['h5py']
del PYTEST_HEADER_MODULES['Matplotlib']
# Use ASDF schema tester plugin if ASDF is installed
from importlib.util import find_spec
if find_spec('asdf') is not None:
PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
from specutils import __version__
TESTED_VERSIONS['specutils'] = __version__
class SpectraExamples:
"""
    The ``SpectraExamples`` class is a *container class* that has
several examples of simple spectra that are to be used in the tests
(e.g., arithmetic tests, smoothing tests etc).
The purpose of this being a test class instead of using a `Spectrum1D`
directly is that it contains both the `Spectrum1D` object and the flux
that was used to *create* the Spectrum. That's for tests that ensure
the simpler operations just on the flux arrays are carried through to
the `Spectrum1D` operations.
    Each of the spectra is created from a base noise-less spectrum
    constructed from 4 Gaussians and a ramp. Then several example spectra
    are created, and then Gaussian random noise is added.
    1. s1_um_mJy_e1 - 4 Gaussians + ramp with one instantiation of noise
dispersion: um, flux: mJy
2. s1_um_mJy_e2 - same as 1, but with a different instance of noise
dispersion: um, flux: mJy
3. s1_AA_mJy_e3 - same as 1, but with a third instance of noise
dispersion: Angstroms, flux: mJy
    4. s1_AA_nJy_e4 - same as 1, but with a fourth instance of noise
dispersion: Angstroms, flux: nJy
5. s1_um_mJy_e1_masked - same as 1, but with a random set of pixels
masked.
6. s1_um_mJy_e1_desc - same as 1, but with the spectral axis in
descending rather than ascending order.
"""
def __init__(self):
#
# Create the base wavelengths and flux
#
self.wavelengths_um = np.linspace(0.4, 1.05, 100)
g1 = models.Gaussian1D(amplitude=2000, mean=0.56, stddev=0.01)
g2 = models.Gaussian1D(amplitude=500, mean=0.62, stddev=0.02)
g3 = models.Gaussian1D(amplitude=-400, mean=0.80, stddev=0.02)
g4 = models.Gaussian1D(amplitude=-350, mean=0.52, stddev=0.01)
ramp = models.Linear1D(slope=300, intercept=0.0)
self.base_flux = (g1(self.wavelengths_um) + g2(self.wavelengths_um) +
g3(self.wavelengths_um) + g4(self.wavelengths_um) +
ramp(self.wavelengths_um) + 1000)
#
# Initialize the seed so the random numbers are not quite as random
#
np.random.seed(42)
#
# Create two spectra with the only difference in the instance of noise
#
self._flux_e1 = self.base_flux + 400 * np.random.random(self.base_flux.shape)
self._s1_um_mJy_e1 = Spectrum1D(spectral_axis=self.wavelengths_um * u.um,
flux=self._flux_e1 * u.mJy)
self._flux_e2 = self.base_flux + 400 * np.random.random(self.base_flux.shape)
self._s1_um_mJy_e2 = Spectrum1D(spectral_axis=self.wavelengths_um * u.um,
flux=self._flux_e2 * u.mJy)
#
# Create one spectrum with the same flux but in angstrom units
#
self.wavelengths_AA = self.wavelengths_um * 10000
self._s1_AA_mJy_e3 = Spectrum1D(spectral_axis=self.wavelengths_AA * u.AA,
flux=self._flux_e1 * u.mJy)
#
# Create one spectrum with the same flux but in angstrom units and nJy
#
self._flux_e4 = (self.base_flux + 400 * np.random.random(self.base_flux.shape)) * 1000000
self._s1_AA_nJy_e4 = Spectrum1D(spectral_axis=self.wavelengths_AA * u.AA,
flux=self._flux_e4 * u.nJy)
#
# Create one spectrum like 1 but with a mask
#
self._s1_um_mJy_e1_masked = copy(self._s1_um_mJy_e1) # SHALLOW copy - the data are shared with the above non-masked case # noqa
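        # The random mask below flags a bin wherever a standard-normal draw
        # exceeds -1, so roughly 84% of the spectral bins end up with mask=True.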
self._s1_um_mJy_e1_masked.mask = (np.random.randn(*self.base_flux.shape) + 1) > 0
# Create a spectrum like 1, but with descending spectral axis
self._s1_um_mJy_e1_desc = Spectrum1D(spectral_axis=self.wavelengths_um[::-1] * u.um,
flux=self._flux_e1[::-1] * u.mJy)
@property
def s1_um_mJy_e1(self):
return self._s1_um_mJy_e1
@property
def s1_um_mJy_e1_flux(self):
return self._flux_e1
@property
def s1_um_mJy_e2(self):
return self._s1_um_mJy_e2
@property
def s1_um_mJy_e2_flux(self):
return self._flux_e2
@property
def s1_AA_mJy_e3(self):
return self._s1_AA_mJy_e3
@property
def s1_AA_mJy_e3_flux(self):
return self._flux_e1
@property
def s1_AA_nJy_e4(self):
return self._s1_AA_nJy_e4
@property
def s1_AA_nJy_e4_flux(self):
return self._flux_e4
@property
def s1_um_mJy_e1_masked(self):
return self._s1_um_mJy_e1_masked
@property
def s1_um_mJy_e1_desc(self):
return self._s1_um_mJy_e1_desc
@pytest.fixture
def simulated_spectra():
"""
    This function is used as a fixture in the tests.
Parameters
----------
N/A
Return
------
    ``SpectraExamples``
An instance of the SpectraExamples class.
Examples
--------
This fixture can be used in a test as:
```
    def test_add_spectra(simulated_spectra):
        # Get the numpy array of data
        flux1 = simulated_spectra.s1_um_mJy_e1_flux
        flux2 = simulated_spectra.s1_um_mJy_e2_flux
        flux3 = flux1 + flux2
        # Calculate using the spectrum1d/nddata code
        spec3 = simulated_spectra.s1_um_mJy_e1 + simulated_spectra.s1_um_mJy_e2
        assert np.allclose(spec3.flux.value, flux3)
```
"""
return SpectraExamples()
|
[
"numpy.random.seed",
"astropy.modeling.models.Linear1D",
"numpy.random.randn",
"astropy.modeling.models.Gaussian1D",
"importlib.util.find_spec",
"specutils.spectra.Spectrum1D",
"copy.copy",
"numpy.random.random",
"numpy.linspace",
"pytest_astropy_header.display.PYTEST_HEADER_MODULES.pop"
] |
'''
Deep Q-learning approach to the cartpole problem
using OpenAI's gym environment.
As part of the basic series on reinforcement learning @
https://github.com/vmayoral/basic_reinforcement_learning
Inspired by https://github.com/VinF/deer
@author: <NAME> <<EMAIL>>
'''
import gym
import random
import pandas
import numpy as np
from keras.models import Model
from keras.layers import Input, Layer, Dense, Flatten, merge, Activation, Convolution2D, MaxPooling2D, Reshape
import theano.tensor as T
import sys
import logging
from theano import config
class QNetwork(object):
""" All the Q-networks classes should inherit this interface.
Parameters
-----------
environment : object from class Environment
The environment linked to the Q-network
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
"""
def __init__(self, environment, batch_size):
self._environment = environment
self._df = 0.9
self._lr = 0.0002
self._input_dimensions = self._environment.inputDimensions()
self._n_actions = self._environment.nActions()
self._batch_size = batch_size
def train(self, states, actions, rewards, nextStates, terminals):
""" This method performs the Bellman iteration for one batch of tuples.
"""
raise NotImplementedError()
def chooseBestAction(self, state):
""" Get the best action for a belief state
"""
raise NotImplementedError()
def qValues(self, state):
""" Get the q value for one belief state
"""
raise NotImplementedError()
def setLearningRate(self, lr):
""" Setting the learning rate
Parameters
-----------
lr : float
            The learning rate that has to be set
"""
self._lr = lr
def setDiscountFactor(self, df):
""" Setting the discount factor
Parameters
-----------
df : float
            The discount factor that has to be set
"""
if df < 0. or df > 1.:
            raise ValueError("The discount factor should be in [0,1]")
self._df = df
def learningRate(self):
""" Getting the learning rate
"""
return self._lr
def discountFactor(self):
""" Getting the discount factor
"""
return self._df
class NN():
"""
Deep Q-learning network using Keras
Parameters
-----------
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
input_dimensions :
n_actions :
random_state : numpy random number generator
"""
def __init__(self, batch_size, input_dimensions, n_actions, random_state):
self._input_dimensions=input_dimensions
self._batch_size=batch_size
self._random_state=random_state
self._n_actions=n_actions
def _buildDQN(self):
"""
Build a network consistent with each type of inputs
"""
layers=[]
outs_conv=[]
inputs=[]
for i, dim in enumerate(self._input_dimensions):
nfilter=[]
# - observation[i] is a FRAME
if len(dim) == 3: #FIXME
input = Input(shape=(dim[0],dim[1],dim[2]))
inputs.append(input)
#reshaped=Reshape((dim[0],dim[1],dim[2]), input_shape=(dim[0],dim[1]))(input)
x = Convolution2D(32, 8, 8, border_mode='valid')(input)
x = MaxPooling2D(pool_size=(4, 4), strides=None, border_mode='valid')(x)
x = Convolution2D(64, 4, 4, border_mode='valid')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid')(x)
x = Convolution2D(64, 3, 3)(x)
out = Flatten()(x)
# - observation[i] is a VECTOR
elif len(dim) == 2 and dim[0] > 3: #FIXME
input = Input(shape=(dim[0],dim[1]))
inputs.append(input)
reshaped=Reshape((1,dim[0],dim[1]), input_shape=(dim[0],dim[1]))(input)
x = Convolution2D(16, 2, 1, border_mode='valid')(reshaped)
x = Convolution2D(16, 2, 2)(x)
out = Flatten()(x)
# - observation[i] is a SCALAR -
else:
if dim[0] > 3:
# this returns a tensor
input = Input(shape=(dim[0],))
inputs.append(input)
reshaped=Reshape((1,1,dim[0]), input_shape=(dim[0],))(input)
x = Convolution2D(8, 1, 2, border_mode='valid')(reshaped)
x = Convolution2D(8, 1, 2)(x)
out = Flatten()(x)
else:
if(len(dim) == 2):
# this returns a tensor
input = Input(shape=(dim[1],dim[0]))
inputs.append(input)
out = Flatten()(input)
if(len(dim) == 1):
input = Input(shape=(dim[0],))
inputs.append(input)
out=input
outs_conv.append(out)
if len(outs_conv)>1:
x = merge(outs_conv, mode='concat')
else:
x= outs_conv [0]
# we stack a deep fully-connected network on top
x = Dense(50, activation='relu')(x)
x = Dense(20, activation='relu')(x)
out = Dense(self._n_actions)(x)
model = Model(input=inputs, output=out)
layers=model.layers
# Grab all the parameters together.
params = [ param
for layer in layers
for param in layer.trainable_weights ]
return model, params
from warnings import warn
from keras.optimizers import SGD,RMSprop
class MyQNetwork(QNetwork):
"""
Deep Q-learning network using Keras
Parameters
-----------
environment : object from class Environment
rho : float
Parameter for rmsprop. Default : 0.9
rms_epsilon : float
Parameter for rmsprop. Default : 0.0001
momentum : float
Default : 0
clip_delta : float
Not implemented.
freeze_interval : int
        Period during which the target network is kept frozen, after which it is updated. Default : 1000
batch_size : int
Number of tuples taken into account for each iteration of gradient descent. Default : 32
network_type : str
Not used. Default : None
update_rule: str
{sgd,rmsprop}. Default : rmsprop
batch_accumulator : str
{sum,mean}. Default : sum
random_state : numpy random number generator
double_Q : bool, optional
Activate or not the double_Q learning.
        More information in: <NAME> et al. (2015) - Deep Reinforcement Learning with Double Q-learning.
neural_network : object, optional
default is deer.qnetworks.NN_keras
"""
def __init__(self, environment, rho=0.9, rms_epsilon=0.0001, momentum=0, clip_delta=0, freeze_interval=1000, batch_size=32, network_type=None, update_rule="rmsprop", batch_accumulator="sum", random_state=np.random.RandomState(), double_Q=False, neural_network=NN):
""" Initialize environment
"""
QNetwork.__init__(self,environment, batch_size)
self._rho = rho
self._rms_epsilon = rms_epsilon
self._momentum = momentum
#self.clip_delta = clip_delta
self._freeze_interval = freeze_interval
self._double_Q = double_Q
self._random_state = random_state
self.update_counter = 0
Q_net = neural_network(self._batch_size, self._input_dimensions, self._n_actions, self._random_state)
self.q_vals, self.params = Q_net._buildDQN()
if update_rule == 'deepmind_rmsprop':
warn("The update_rule used is rmsprop")
update_rule='rmsprop'
if (update_rule=="sgd"):
optimizer = SGD(lr=self._lr, momentum=momentum, nesterov=False)
elif (update_rule=="rmsprop"):
optimizer = RMSprop(lr=self._lr, rho=self._rho, epsilon=self._rms_epsilon)
else:
            raise Exception('The update_rule ' + update_rule + ' is not implemented.')
self.q_vals.compile(optimizer=optimizer, loss='mse')
self.next_q_vals, self.next_params = Q_net._buildDQN()
self.next_q_vals.compile(optimizer='rmsprop', loss='mse') #The parameters do not matter since training is done on self.q_vals
self.q_vals.summary()
# self.next_q_vals.summary()
self._resetQHat()
def toDump(self):
# FIXME
return None,None
def train(self, states_val, actions_val, rewards_val, next_states_val, terminals_val):
"""
Train one batch.
1. Set shared variable in states_shared, next_states_shared, actions_shared, rewards_shared, terminals_shared
2. perform batch training
Parameters
-----------
states_val : list of batch_size * [list of max_num_elements* [list of k * [element 2D,1D or scalar]])
actions_val : b x 1 numpy array of integers
rewards_val : b x 1 numpy array
next_states_val : list of batch_size * [list of max_num_elements* [list of k * [element 2D,1D or scalar]])
terminals_val : b x 1 numpy boolean array (currently ignored)
Returns
-------
Average loss of the batch training
Individual losses for each tuple
"""
if self.update_counter % self._freeze_interval == 0:
self._resetQHat()
next_q_vals = self.next_q_vals.predict(next_states_val.tolist())
        if self._double_Q:
next_q_vals_current_qnet=self.q_vals.predict(next_states_val.tolist())
argmax_next_q_vals=np.argmax(next_q_vals_current_qnet, axis=1)
max_next_q_vals=next_q_vals[np.arange(self._batch_size),argmax_next_q_vals].reshape((-1, 1))
else:
max_next_q_vals=np.max(next_q_vals, axis=1, keepdims=True)
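        # Bellman target: r + df * max_a' Q_target(s', a') for non-terminal
        # transitions; with double_Q the argmax action comes from the online
        # network (above) and is evaluated with the frozen target network.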
not_terminals=np.ones_like(terminals_val) - terminals_val
target = rewards_val + not_terminals * self._df * max_next_q_vals.reshape((-1))
q_vals=self.q_vals.predict(states_val.tolist())
# In order to obtain the individual losses, we predict the current Q_vals and calculate the diff
q_val=q_vals[np.arange(self._batch_size), actions_val.reshape((-1,))]#.reshape((-1, 1))
diff = - q_val + target
loss_ind=0.5*pow(diff,2)
q_vals[ np.arange(self._batch_size), actions_val.reshape((-1,)) ] = target
# Is it possible to use something more flexible than this?
# Only some elements of next_q_vals are actual value that I target.
# My loss should only take these into account.
# Workaround here is that many values are already "exact" in this update
loss=self.q_vals.train_on_batch(states_val.tolist() , q_vals )
self.update_counter += 1
return np.sqrt(loss),loss_ind
def qValues(self, state_val):
""" Get the q values for one belief state
Arguments
---------
state_val : one belief state
Returns
-------
The q value for the provided belief state
"""
return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]
def chooseBestAction(self, state):
""" Get the best action for a belief state
Arguments
---------
state : one belief state
Returns
-------
The best action : int
"""
q_vals = self.qValues(state)
return np.argmax(q_vals)
def _resetQHat(self):
for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
next_param.set_value(param.get_value())
from deer.base_classes import Environment
import copy
class MyEnv(Environment):
def __init__(self, rng):
""" Initialize environment.
Arguments:
rng - the numpy random number generator
"""
# Defining the type of environment
self.env = gym.make('CartPole-v0')
self._last_observation = self.env.reset()
self.is_terminal=False
self._input_dim = [(1,), (1,), (1,), (1,)] # self.env.observation_space.shape is equal to 4
# and we use only the current value in the belief state
def act(self, action):
""" Simulate one time step in the environment.
"""
self._last_observation, reward, self.is_terminal, info = self.env.step(action)
if (self.mode==0): # Show the policy only at test time
self.env.render()
return reward
def reset(self, mode=0):
""" Reset environment for a new episode.
Arguments:
Mode : int
-1 corresponds to training and 0 to test
"""
# Reset initial observation to a random x and theta
self._last_observation = self.env.reset()
self.is_terminal=False
self.mode=mode
return self._last_observation
def inTerminalState(self):
"""Tell whether the environment reached a terminal state after the last transition (i.e. the last transition
        that occurred was terminal).
"""
return self.is_terminal
def inputDimensions(self):
return self._input_dim
def nActions(self):
return 2 #Would be useful to have this directly in gym : self.env.action_space.shape
def observe(self):
return copy.deepcopy(self._last_observation)
import deer.experiment.base_controllers as bc
from deer.default_parser import process_args
from deer.agent import NeuralAgent
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 200
EPOCHS = 300
STEPS_PER_TEST = 200
PERIOD_BTW_SUMMARY_PERFS = 10
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'sgd'
BATCH_ACCUMULATOR = 'sum'
LEARNING_RATE = 0.1
LEARNING_RATE_DECAY = 0.99
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.95
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_DELTA = 1.0
EPSILON_START = 1.0
EPSILON_MIN = 0.2
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
NETWORK_TYPE = "General_DQN_0"
FREEZE_INTERVAL = 100
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(12345)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = MyEnv(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_delta,
parameters.freeze_interval,
parameters.batch_size,
parameters.network_type,
parameters.update_rule,
parameters.batch_accumulator,
rng,
double_Q=True)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng)
# --- Bind controllers to the agent ---
# For comments, please refer to run_toy_env.py
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=False,
show_avg_Bellman_residual=False))
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
controllers_to_disable=[0, 1, 2, 3, 4],
periodicity=2,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
|
[
"deer.experiment.base_controllers.LearningRateController",
"numpy.argmax",
"keras.models.Model",
"numpy.arange",
"keras.layers.Input",
"keras.layers.Reshape",
"deer.experiment.base_controllers.InterleavedTestEpochController",
"keras.optimizers.SGD",
"deer.experiment.base_controllers.TrainerController",
"keras.layers.Flatten",
"numpy.random.RandomState",
"numpy.max",
"keras.layers.MaxPooling2D",
"copy.deepcopy",
"numpy.ones_like",
"deer.experiment.base_controllers.VerboseController",
"keras.layers.Convolution2D",
"deer.default_parser.process_args",
"keras.optimizers.RMSprop",
"gym.make",
"logging.basicConfig",
"numpy.expand_dims",
"deer.experiment.base_controllers.EpsilonController",
"keras.layers.Dense",
"warnings.warn",
"deer.experiment.base_controllers.DiscountFactorController",
"keras.layers.merge",
"numpy.sqrt"
] |
from Agent import Agent
from Gaussian_Reward import Gaussian_Reward
from erdos_renyl_model_without_duplicate import MAMAB
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt, log
import networkx as nx
no_iterations = 20000
no_agents = 5
no_bandits = 20
#mean = [5.2, 4.1, 9.5, 2.4, 5.9, 6.9, 7.4, 0.5, 4.7, 2.1, 10.5, 1.5, 2.8, 8.8, 3.7, 4.4, 7.8, 3.0, 11.9, 8.3]
#print(len(mean))
#variance = [2 for i in range(no_bandits)]
mean = [5.2, 4.1, 9.5, 2.4, 5.9, 6.9, 7.4, 0.5, 4.7, 2.1, 10.5, 1.5, 2.8, 8.8, 3.7, 4.4, 7.8, 3.0, 11.9, 8.3]
#print(len(mean))
variance = [2 for i in range(no_bandits)]
'''
mean = [np.random.random()*12 for i in range(no_bandits) ]
variance = [2 for i in range(no_bandits)]
'''
#mean = np.load("Mean.npy")
#variance = np.load("Variance.npy")
np.save("Agent/Mean.npy", np.array(mean))
np.save("Agent/Variance.npy", np.array(variance))
o = np.argmax(np.array(mean))
print(o)
print(mean)
bandits = [Gaussian_Reward(mean[i], variance[i]) for i in range(no_bandits)]
#COMPUTE DELTA
maxi_mean = np.argsort(mean)
maxi_mean = np.flip(maxi_mean, axis=0)
max_index = maxi_mean[0]
print(maxi_mean)
for i in range(1, len(maxi_mean)):
if maxi_mean[i] != maxi_mean[0]:
sec_max_index = maxi_mean[i]
break
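# delta is the suboptimality gap (best arm mean minus second-best arm mean);
# it is passed to the MAMAB model below.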
delta = mean[max_index]-mean[sec_max_index]
print(delta)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
P = [0, 2, 4, 6, 8, 10]
#for prob in range(0, 2):
for prob in P:
GLOBAL_TOT_REGRET = np.array([0 for _ in range(no_iterations+1)])
AGENT_TOT_REGRET = np.array([[0 for _ in range(no_iterations+1)] for i in range(no_agents)])
SELF_TOT_REGRET = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
COM_TOT_REGRET = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
AGENT_TOT_REWARD = np.array([[0 for _ in range(no_iterations+1)] for i in range(no_agents)])
SELF_TOT_REWARD = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
COM_TOT_REWARD = np.array([[0 for _ in range(no_iterations+1)] for __ in range(no_agents)])
SELF_F = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
COM_F = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
DUPLICATE = np.array([[[0 for _ in range(no_iterations+1)] for i in range(no_bandits)] for j in range(no_agents)])
Nij_T_s_MAIN = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
Nij_T_MAIN = [[[0 for _ in range(no_iterations+1)] for j in range(no_bandits)] for k in range(no_agents)]
for experiment in range(1,10):
p = prob/10
print("Experiment " + str(experiment) + "for probability " + str(p))
G = MAMAB(no_bandits=no_bandits, no_agents=no_agents, bandits=bandits,optimal_bandit_index=o , p=p, reward_vairance=variance, delta=delta,
no_iter=no_iterations)
for i in range(no_iterations):
G.Sample()
a = G.Pick()
G.Communicate(index=i, itr=i)
agent_tot_regret_T = G.get_agent_tot_regret_with_time()
self_tot_regret = G.get_agent_self_tot_regret_with_time()
com_tot_reget = G.get_agent_com_tot_regret_with_time()
agent_tot_reward_T = G.get_agent_tot_reward_with_time()
self_tot_reward = G.get_agent_self_tot_reward_with_time()
com_tot_reward = G.get_agent_com_tot_reward_with_time()
self_F = G.get_self_F()
com_F = G.get_com_F()
global_tot_regert_T = G.get_global_tot_regret()
duplicate = G.get_agent_duplicate_eliminator()
Nij_T_s = G.get_Nij_T_s()
Nij_T = G.get_Nij_T()
AGENT_TOT_REGRET = np.add(AGENT_TOT_REGRET, agent_tot_regret_T)
GLOBAL_TOT_REGRET = np.add(GLOBAL_TOT_REGRET, global_tot_regert_T)
SELF_TOT_REGRET = np.add(SELF_TOT_REGRET, self_tot_regret)
COM_TOT_REGRET = np.add(COM_TOT_REGRET, com_tot_reget)
AGENT_TOT_REWARD = np.add(AGENT_TOT_REWARD, agent_tot_reward_T)
SELF_TOT_REWARD = np.add(SELF_TOT_REWARD, self_tot_reward)
COM_TOT_REWARD = np.add(COM_TOT_REWARD, com_tot_reward)
Nij_T_MAIN = np.add(Nij_T_MAIN, Nij_T)
Nij_T_s_MAIN = np.add(Nij_T_s_MAIN, Nij_T_s)
SELF_F = np.add(SELF_F, self_F)
COM_F = np.add(COM_F, com_F)
DUPLICATE = np.add(DUPLICATE, duplicate)
AGENT_TOT_REGRET = (1/experiment)*AGENT_TOT_REGRET
SELF_TOT_REGRET = (1/experiment)*SELF_TOT_REGRET
COM_TOT_REGRET = (1/experiment)*COM_TOT_REGRET
AGENT_TOT_REWARD = (1/experiment)*AGENT_TOT_REWARD
SELF_TOT_REWARD = (1/experiment)*SELF_TOT_REWARD
COM_TOT_REWARD = (1/experiment)*COM_TOT_REWARD
SELF_F = (1/experiment)*SELF_F
COM_F = (1/experiment)*COM_F
GLOBAL_TOT_REGRET = (1/experiment)*GLOBAL_TOT_REGRET
DUPLICATE = (1/experiment)*DUPLICATE
ESTIMATE = G.get_estimate()
Nij_T_MAIN = (1/experiment)*Nij_T_MAIN
Nij_T_s_MAIN = (1/experiment)*Nij_T_s_MAIN
np.save("Agent/Total/Agent_tot_regret" + str(prob), AGENT_TOT_REGRET)
np.save("Agent/Total/Agent_tot_reward" + str(prob), AGENT_TOT_REWARD)
np.save("Agent/Total/Estimate" + str(prob), ESTIMATE)
np.save("Agent/Total/Duplicate" + str(prob), DUPLICATE)
np.save("Self/Total/Self_tot_regret" + str(prob), SELF_TOT_REGRET)
np.save("Self/Total/Self_tot_reward" + str(prob), SELF_TOT_REWARD)
np.save("Self/Total/Self_F" + str(prob), SELF_F)
np.save("Com/Total/Com_tot_regret" + str(prob), COM_TOT_REGRET)
np.save("Com/Total/Com_tot_reward" + str(prob), COM_TOT_REWARD)
np.save("Com/Total/Com_F" + str(prob), COM_F)
np.save("Global/Total/Global_tot_regret" + str(prob), GLOBAL_TOT_REGRET)
np.save("Com/Total/N_ij_T" + str(prob), Nij_T_MAIN)
np.save("Com/Total/N_ij_T_s" + str(prob), Nij_T_s_MAIN)
fig_network = plt.figure()
plt.close(fig_network)
for prob in range(11):
#Network Visualization
p = prob/10
fig, ax = plt.subplots(1, 1, figsize=(30, 30))
nx.draw(G=nx.erdos_renyi_graph(no_agents, p, directed=True), with_labels=True, ax=ax)
plt.savefig("Network/" + str(prob))
    plt.close(fig)  # close the figure created in this iteration (fig_network was already closed above)
|
[
"numpy.flip",
"networkx.erdos_renyi_graph",
"matplotlib.pyplot.close",
"numpy.argsort",
"Gaussian_Reward.Gaussian_Reward",
"matplotlib.pyplot.figure",
"numpy.array",
"erdos_renyl_model_without_duplicate.MAMAB",
"numpy.add",
"matplotlib.pyplot.subplots"
] |
[((1089, 1105), 'numpy.argsort', 'np.argsort', (['mean'], {}), '(mean)\n', (1099, 1105), True, 'import numpy as np\n'), ((1119, 1145), 'numpy.flip', 'np.flip', (['maxi_mean'], {'axis': '(0)'}), '(maxi_mean, axis=0)\n', (1126, 1145), True, 'import numpy as np\n'), ((1387, 1423), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(15, 10)'}), '(1, 1, figsize=(15, 10))\n', (1399, 1423), True, 'import matplotlib.pyplot as plt\n'), ((6486, 6498), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6496, 6498), True, 'import matplotlib.pyplot as plt\n'), ((6500, 6522), 'matplotlib.pyplot.close', 'plt.close', (['fig_network'], {}), '(fig_network)\n', (6509, 6522), True, 'import matplotlib.pyplot as plt\n'), ((851, 865), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (859, 865), True, 'import numpy as np\n'), ((898, 916), 'numpy.array', 'np.array', (['variance'], {}), '(variance)\n', (906, 916), True, 'import numpy as np\n'), ((937, 951), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (945, 951), True, 'import numpy as np\n'), ((992, 1029), 'Gaussian_Reward.Gaussian_Reward', 'Gaussian_Reward', (['mean[i]', 'variance[i]'], {}), '(mean[i], variance[i])\n', (1007, 1029), False, 'from Gaussian_Reward import Gaussian_Reward\n'), ((6609, 6645), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(30, 30)'}), '(1, 1, figsize=(30, 30))\n', (6621, 6645), True, 'import matplotlib.pyplot as plt\n'), ((6783, 6805), 'matplotlib.pyplot.close', 'plt.close', (['fig_network'], {}), '(fig_network)\n', (6792, 6805), True, 'import matplotlib.pyplot as plt\n'), ((2938, 3103), 'erdos_renyl_model_without_duplicate.MAMAB', 'MAMAB', ([], {'no_bandits': 'no_bandits', 'no_agents': 'no_agents', 'bandits': 'bandits', 'optimal_bandit_index': 'o', 'p': 'p', 'reward_vairance': 'variance', 'delta': 'delta', 'no_iter': 'no_iterations'}), '(no_bandits=no_bandits, no_agents=no_agents, bandits=bandits,\n optimal_bandit_index=o, p=p, reward_vairance=variance, delta=delta,\n no_iter=no_iterations)\n', (2943, 3103), False, 'from erdos_renyl_model_without_duplicate import MAMAB\n'), ((4109, 4153), 'numpy.add', 'np.add', (['AGENT_TOT_REGRET', 'agent_tot_regret_T'], {}), '(AGENT_TOT_REGRET, agent_tot_regret_T)\n', (4115, 4153), True, 'import numpy as np\n'), ((4187, 4233), 'numpy.add', 'np.add', (['GLOBAL_TOT_REGRET', 'global_tot_regert_T'], {}), '(GLOBAL_TOT_REGRET, global_tot_regert_T)\n', (4193, 4233), True, 'import numpy as np\n'), ((4265, 4305), 'numpy.add', 'np.add', (['SELF_TOT_REGRET', 'self_tot_regret'], {}), '(SELF_TOT_REGRET, self_tot_regret)\n', (4271, 4305), True, 'import numpy as np\n'), ((4336, 4373), 'numpy.add', 'np.add', (['COM_TOT_REGRET', 'com_tot_reget'], {}), '(COM_TOT_REGRET, com_tot_reget)\n', (4342, 4373), True, 'import numpy as np\n'), ((4408, 4452), 'numpy.add', 'np.add', (['AGENT_TOT_REWARD', 'agent_tot_reward_T'], {}), '(AGENT_TOT_REWARD, agent_tot_reward_T)\n', (4414, 4452), True, 'import numpy as np\n'), ((4485, 4525), 'numpy.add', 'np.add', (['SELF_TOT_REWARD', 'self_tot_reward'], {}), '(SELF_TOT_REWARD, self_tot_reward)\n', (4491, 4525), True, 'import numpy as np\n'), ((4558, 4596), 'numpy.add', 'np.add', (['COM_TOT_REWARD', 'com_tot_reward'], {}), '(COM_TOT_REWARD, com_tot_reward)\n', (4564, 4596), True, 'import numpy as np\n'), ((4626, 4651), 'numpy.add', 'np.add', (['Nij_T_MAIN', 'Nij_T'], {}), '(Nij_T_MAIN, Nij_T)\n', (4632, 4651), True, 'import numpy as np\n'), ((4680, 4709), 'numpy.add', 'np.add', (['Nij_T_s_MAIN', 'Nij_T_s'], 
{}), '(Nij_T_s_MAIN, Nij_T_s)\n', (4686, 4709), True, 'import numpy as np\n'), ((4736, 4758), 'numpy.add', 'np.add', (['SELF_F', 'self_F'], {}), '(SELF_F, self_F)\n', (4742, 4758), True, 'import numpy as np\n'), ((4783, 4803), 'numpy.add', 'np.add', (['COM_F', 'com_F'], {}), '(COM_F, com_F)\n', (4789, 4803), True, 'import numpy as np\n'), ((4831, 4859), 'numpy.add', 'np.add', (['DUPLICATE', 'duplicate'], {}), '(DUPLICATE, duplicate)\n', (4837, 4859), True, 'import numpy as np\n'), ((6661, 6710), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', (['no_agents', 'p'], {'directed': '(True)'}), '(no_agents, p, directed=True)\n', (6681, 6710), True, 'import networkx as nx\n')]
|
import os
import glob
import pickle
import zipfile
import warnings
import numpy as np
import pandas as pd
from urllib import request
def trim_eol_whitespace(data_file):
with open(data_file, 'r') as f:
lines = f.readlines()
lines = [line.replace(' \n', '\n') for line in lines]
with open(data_file, 'w') as f:
f.writelines(lines)
def decimal_comma_to_decimal_point(data_file):
with open(data_file, 'r') as f:
lines = f.readlines()
lines = [line.replace(',', '.') for line in lines]
with open(data_file, 'w') as f:
f.writelines(lines)
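# The two helpers above repair formatting quirks in the raw files before parsing: trailing
# end-of-line spaces (yacht data) and decimal commas instead of points (carbon data); they are
# wired in through the 'formatter' entries of REGRESSION_DATA below.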
REGRESSION_DATA = {
'boston':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data',
'dir_after_unzip': None,
'data_file': 'housing.data',
'parse_args': {'sep': ' ', 'header': None, 'skipinitialspace': True},
'target_cols': [-1]},
'carbon':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv',
'dir_after_unzip': None,
'data_file': 'carbon_nanotubes.csv',
'formatter': decimal_comma_to_decimal_point,
'parse_args': {'sep': ';'},
'target_cols': [-1, -2, -3]},
'concrete':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls',
'dir_after_unzip': None,
'data_file': 'Concrete_Data.xls',
'parse_args': dict(),
'target_cols': [-1]},
'energy':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx',
'dir_after_unzip': None,
'data_file': 'ENB2012_data.xlsx',
'parse_args': dict(),
'target_cols': [-1, -2]},
'naval':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI%20CBM%20Dataset.zip',
'dir_after_unzip': 'UCI CBM Dataset',
'data_file': 'data.txt',
'parse_args': {'sep': ' ', 'header': None, 'skipinitialspace': True},
'target_cols': [-1, -2]},
'power plant':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip',
'dir_after_unzip': 'CCPP',
'data_file': 'Folds5x2_pp.xlsx',
'parse_args': dict(),
'target_cols': [-1]},
'protein':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv',
'dir_after_unzip': None,
'data_file': 'CASP.csv',
'parse_args': dict(),
'target_cols': [1]},
'superconductivity':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00464/superconduct.zip',
'dir_after_unzip': None,
'data_file': 'train.csv',
'parse_args': dict(),
'target_cols': [-1]},
'wine-red':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv',
'dir_after_unzip': None,
'data_file': 'winequality-red.csv',
'parse_args': {'sep': ';'},
'target_cols': [-1]},
'wine-white':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv',
'dir_after_unzip': None,
'data_file': 'winequality-white.csv',
'parse_args': {'sep': ';'},
'target_cols': [-1]},
'yacht':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data',
'dir_after_unzip': None,
'data_file': 'yacht_hydrodynamics.data',
'formatter': trim_eol_whitespace,
'parse_args': {'sep': ' ', 'header': None, 'skipinitialspace': True},
'target_cols': [-1]},
'year':
{'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip',
'dir_after_unzip': None,
'data_file': 'YearPredictionMSD.txt',
'parse_args': dict(),
'target_cols': [1]},
}
def download_all(force_download=False):
# make data directory if it doesn't yet exist
if not os.path.exists('data'):
os.mkdir('data')
# download all regression data experiments
for key in REGRESSION_DATA.keys():
data_dir = os.path.join('data', key)
if not os.path.exists(data_dir):
os.mkdir(data_dir)
file = os.path.join(data_dir, REGRESSION_DATA[key]['url'].split('/')[-1])
if os.path.exists(file) and force_download:
os.remove(file)
elif os.path.exists(file) and not force_download:
print(file.split(os.sep)[-1], 'already exists.')
continue
print('Downloading', file.split(os.sep)[-1])
request.urlretrieve(REGRESSION_DATA[key]['url'], file)
print('Downloads complete!')
def load_data(data_dir, dir_after_unzip, data_file, parse_args, **kwargs):
# save the base data directory as the save directory, since data_dir might be modified below
save_dir = data_dir
# find any zip files
zip_files = glob.glob(os.path.join(data_dir, '*.zip'))
assert len(zip_files) <= 1
# do we need to unzip?
if len(zip_files) or dir_after_unzip is not None:
# unzip it
with zipfile.ZipFile(zip_files[0], 'r') as f:
f.extractall(data_dir)
# update data directory if required
if dir_after_unzip is not None:
data_dir = os.path.join(data_dir, dir_after_unzip)
# correct formatting issues if necessary
if 'formatter' in kwargs.keys() and kwargs['formatter'] is not None:
kwargs['formatter'](os.path.join(data_dir, data_file))
# process files according to type
if os.path.splitext(data_file)[-1] in {'.csv', '.data', '.txt'}:
df = pd.read_csv(os.path.join(data_dir, data_file), **parse_args)
elif os.path.splitext(data_file)[-1] in {'.xls', '.xlsx'}:
df = pd.read_excel(os.path.join(data_dir, data_file))
else:
warnings.warn('Type Not Supported: ' + data_file)
return
# convert to numpy arrays
xy = df.to_numpy(dtype=np.float32)
y = xy[:, kwargs['target_cols']]
    # build the feature column indices by dropping the target columns
    # (resolve negative indices first so multi-target lists like [-1, -2] are removed correctly)
    x_indices = list(range(xy.shape[1]))
    for i in kwargs['target_cols']:
        x_indices.remove(i % xy.shape[1])
x = xy[:, x_indices]
# save data
with open(os.path.join(save_dir, save_dir.split(os.sep)[-1] + '.pkl'), 'wb') as f:
pickle.dump({'data': x, 'target': y}, f)
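# Hedged companion helper (not part of the original script): reads back one of the pickles
# written by load_data() above; the data/<name>/<name>.pkl layout and the 'data'/'target'
# keys mirror the save call directly above.
def load_processed(name):
    with open(os.path.join('data', name, name + '.pkl'), 'rb') as f:
        d = pickle.load(f)
    return d['data'], d['target']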
def generate_toy_data(num_samples=500):
def data_mean(x):
return x * np.sin(x)
def data_std(x):
return np.abs(0.3 * (1 + x))
# sample training data
x_data = np.random.uniform(0, 10, size=num_samples)
y_data = data_mean(x_data) + np.random.normal(scale=data_std(x_data))
# generate evaluation points with the associated actual mean and standard deviation
x_eval = np.linspace(-4, 14, 250)
true_mean = data_mean(x_eval)
true_std = data_std(x_eval)
# process return tuple
return_tuple = (x_data, y_data, x_eval, true_mean, true_std)
    return_tuple = tuple(np.expand_dims(np.float32(x), axis=-1) for x in return_tuple)  # materialize as a tuple rather than a single-use generator
return return_tuple
if __name__ == '__main__':
# download all the data
download_all()
# process all the data
for key in REGRESSION_DATA.keys():
load_data(data_dir=os.path.join('data', key), **REGRESSION_DATA[key])
print('Processing complete!')
|
[
"numpy.random.uniform",
"os.mkdir",
"pickle.dump",
"numpy.abs",
"os.remove",
"zipfile.ZipFile",
"numpy.float32",
"os.path.exists",
"urllib.request.urlretrieve",
"numpy.sin",
"os.path.splitext",
"numpy.linspace",
"warnings.warn",
"os.path.join"
] |
[((6613, 6655), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': 'num_samples'}), '(0, 10, size=num_samples)\n', (6630, 6655), True, 'import numpy as np\n'), ((6832, 6856), 'numpy.linspace', 'np.linspace', (['(-4)', '(14)', '(250)'], {}), '(-4, 14, 250)\n', (6843, 6856), True, 'import numpy as np\n'), ((4103, 4125), 'os.path.exists', 'os.path.exists', (['"""data"""'], {}), "('data')\n", (4117, 4125), False, 'import os\n'), ((4135, 4151), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (4143, 4151), False, 'import os\n'), ((4258, 4283), 'os.path.join', 'os.path.join', (['"""data"""', 'key'], {}), "('data', key)\n", (4270, 4283), False, 'import os\n'), ((4719, 4773), 'urllib.request.urlretrieve', 'request.urlretrieve', (["REGRESSION_DATA[key]['url']", 'file'], {}), "(REGRESSION_DATA[key]['url'], file)\n", (4738, 4773), False, 'from urllib import request\n'), ((5058, 5089), 'os.path.join', 'os.path.join', (['data_dir', '"""*.zip"""'], {}), "(data_dir, '*.zip')\n", (5070, 5089), False, 'import os\n'), ((6379, 6419), 'pickle.dump', 'pickle.dump', (["{'data': x, 'target': y}", 'f'], {}), "({'data': x, 'target': y}, f)\n", (6390, 6419), False, 'import pickle\n'), ((6550, 6571), 'numpy.abs', 'np.abs', (['(0.3 * (1 + x))'], {}), '(0.3 * (1 + x))\n', (6556, 6571), True, 'import numpy as np\n'), ((4299, 4323), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (4313, 4323), False, 'import os\n'), ((4337, 4355), 'os.mkdir', 'os.mkdir', (['data_dir'], {}), '(data_dir)\n', (4345, 4355), False, 'import os\n'), ((4449, 4469), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (4463, 4469), False, 'import os\n'), ((4502, 4517), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (4511, 4517), False, 'import os\n'), ((5237, 5271), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_files[0]', '"""r"""'], {}), "(zip_files[0], 'r')\n", (5252, 5271), False, 'import zipfile\n'), ((5421, 5460), 'os.path.join', 'os.path.join', (['data_dir', 'dir_after_unzip'], {}), '(data_dir, dir_after_unzip)\n', (5433, 5460), False, 'import os\n'), ((5608, 5641), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (5620, 5641), False, 'import os\n'), ((5689, 5716), 'os.path.splitext', 'os.path.splitext', (['data_file'], {}), '(data_file)\n', (5705, 5716), False, 'import os\n'), ((5776, 5809), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (5788, 5809), False, 'import os\n'), ((5968, 6017), 'warnings.warn', 'warnings.warn', (["('Type Not Supported: ' + data_file)"], {}), "('Type Not Supported: ' + data_file)\n", (5981, 6017), False, 'import warnings\n'), ((6503, 6512), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (6509, 6512), True, 'import numpy as np\n'), ((7051, 7064), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (7061, 7064), True, 'import numpy as np\n'), ((4531, 4551), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (4545, 4551), False, 'import os\n'), ((5834, 5861), 'os.path.splitext', 'os.path.splitext', (['data_file'], {}), '(data_file)\n', (5850, 5861), False, 'import os\n'), ((5915, 5948), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (5927, 5948), False, 'import os\n'), ((7294, 7319), 'os.path.join', 'os.path.join', (['"""data"""', 'key'], {}), "('data', key)\n", (7306, 7319), False, 'import os\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Sequential, Model
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# In[2]:
path_all_tfrecord = "fp56.tfrecord"
# In[3]:
dir_from = "/data/fp_img_processed/"
# In[4]:
dir_model = "vgg_cam/"
path_best = dir_model + "model-17-1.17-53.3%.hdf5"
path_best
# # model
# In[5]:
from fp_tensorflow import create_pair_56_dataset, create_single_dataset
from fp_tensorflow import create_vgg_5y_model
model = create_vgg_5y_model()
model.load_weights(path_best)
model.summary()
# # class activation map
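# The class activation map (CAM) built below is the weighted sum of the final conv layer's
# ("conv5_3") feature maps, with each channel weighted by the predicted class's weight in the
# last dense layer; that is why the 512 softmax-input weights are pulled out in the next cell.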
# In[6]:
# Get the 512 input weights to the softmax.
class_weights = model.layers[-1].get_weights()[0]
# In[7]:
class_weights.shape
# In[8]:
class_weights.mean(), class_weights.std()
# In[9]:
def get_fp_output(fp, model=model):
final_conv_layer = model.get_layer("conv5_3")
get_output = K.function(
[model.layers[0].input], [final_conv_layer.output, model.layers[-1].output]
)
conv_output, prediction = get_output(np.expand_dims(fp, 0))
return np.squeeze(conv_output, axis=0), np.argmax(prediction)
# In[10]:
def get_fp_cam(fp, model=model):
class_weights = model.layers[-1].get_weights()[0]
conv_output, prediction = get_fp_output(fp, model)
true_class_weights = class_weights[:, prediction]
cam = np.zeros(dtype=np.float32, shape=conv_output.shape[0:2])
for i, w in enumerate(true_class_weights):
cam += w * conv_output[:, :, i]
return cam
# # biclust CAM
# In[11]:
biclusts = np.loadtxt("biclust_col.txt", int)
biclusts
# In[12]:
def get_biclust_cam(fp, biclust, model=model, labels=biclusts):
conv_output, _ = get_fp_output(fp, model)
return conv_output[..., biclusts == biclust].sum(axis=2)
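# Unlike the class-weighted CAM above, the bicluster activation map simply sums the conv5_3
# feature maps whose channel labels in biclust_col.txt match the requested bicluster.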
# # plot
# In[13]:
def plot_bgr(img):
fig = plt.figure(figsize=(2, 2), dpi=300)
plt.axes().axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.tight_layout()
# In[14]:
def plot_rgb(img):
fig = plt.figure(figsize=(2, 2), dpi=300)
plt.axes().axis("off")
plt.imshow(img)
plt.tight_layout()
# In[15]:
def plot_gray(img, cmap=plt.cm.gray):
fig = plt.figure(figsize=(2, 2), dpi=300)
plt.axes().axis("off")
plt.imshow(img, cmap=cmap)
plt.tight_layout()
# # run
# In[16]:
from floorplan_analysis import read_mono_from_image_unicode
from floorplan_analysis import fp_float_from_mono
from floorplan_analysis import pad_fp
# In[17]:
mono = read_mono_from_image_unicode(dir_from + "2888_118A" + ".png")
fp_full = fp_float_from_mono(mono)
fp = pad_fp(fp_full, 56, 56)
conv_output, prediction = get_fp_output(fp)
# In[18]:
fp_full.shape
# In[19]:
conv_output.shape, prediction.shape
# In[20]:
prediction
# In[21]:
cam = get_fp_cam(fp)
cam = cv2.resize(cam, (56, 56))
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
# heatmap[cam < 0.2] = 0
plot_bgr(heatmap)
# In[22]:
cam = get_biclust_cam(fp, 3)
cam = cv2.resize(cam, (56, 56))
print(cam.max())
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
# heatmap[cam < 0.4] = 0
plot_bgr(heatmap)
# In[23]:
def visualize_fp(fps):
# adjusted for different luminance
channel_to_rgba = np.array(
[
[0.0, 0.0, 0.0, 0.0], # wall to black L0
[0.0, 0.33, 0.0, 0.0], # entrance to green L30
[1.0, 0.25, 0.0, 0.0], # LDK to red L57
[0.83, 0.87, 0.0, 0.0], # bedroom to yellow L85
[0.0, 0.26, 1.0, 0.0], # balcony to blue L40
[0.0, 0.81, 0.76, 0.0], # bathroom to cyan L75
]
)
# make colors subtractive
channel_to_rgba[:, 0:3] -= 1
# put it on white
fps_rgba = np.clip(
np.array([1.0, 1.0, 1.0, 1.0]) + (np.array(fps) @ channel_to_rgba), 0, 1
)
return fps_rgba.astype(np.float32)
# In[24]:
rgba = visualize_fp(fp_full)
plot_rgb(rgba)
# In[25]:
def visualize_fp_cam(fp):
fp_rgba = visualize_fp(fp)
fp_light = cv2.cvtColor(fp_rgba, cv2.COLOR_RGB2Lab)[:, :, 0] / 100
fp_pad = pad_fp(fp, 56, 56)
cam = get_fp_cam(fp_pad)
cam = cv2.resize(cam, (56, 56))
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
heatmap = pad_fp(heatmap, fp_light.shape[1], fp_light.shape[0])
heatmap[fp_light == 0] = 0
heatmap = heatmap.astype(np.float32) / 255
return 0.7 * heatmap + 0.3 * np.expand_dims(fp_light, 2)
# In[26]:
def visualize_biclust_cam(fp, biclust):
fp_rgba = visualize_fp(pad_fp(fp, max(56, fp.shape[1]), max(56, fp.shape[0])))
fp_light = cv2.cvtColor(fp_rgba, cv2.COLOR_RGB2Lab)[:, :, 0] / 100
fp_pad = pad_fp(fp, 56, 56)
cam = get_biclust_cam(fp_pad, biclust)
cam = cv2.resize(cam, (56, 56))
cam /= cam.max()
cam[cam <= 0] = 0
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_VIRIDIS)
# heatmap = pad_fp(heatmap, fp_light.shape[1], fp_light.shape[0])
heatmap = pad_fp(heatmap, max(56, fp_light.shape[1]), max(56, fp_light.shape[0]))
heatmap = heatmap.astype(np.float32) / 255
return 0.7 * heatmap + 0.3 * np.expand_dims(fp_light, 2)
# In[27]:
plot_bgr(visualize_fp_cam(fp_full))
# In[28]:
plot_bgr(visualize_biclust_cam(fp_full, 3))
# # process representative floorplans
# In[29]:
df = pd.read_csv("biclust.csv")
df["area_group"] = pd.cut(df.Area, [0, 50, 60, 85, np.inf], labels=False)
df
# In[30]:
df_sample = df.groupby(["cluster", "area_group"]).sample(frac=0.005, random_state=1106)
df_sample = df_sample.sort_values(["cluster", "area_group", "year"])
df_sample
# In[31]:
pd.crosstab(df_sample.cluster, df_sample.area_group)
# In[32]:
pd.crosstab(df_sample.cluster, df_sample.area_group).max(axis=0)
# In[33]:
widths = np.asarray([3, 4, 8, 7])
coords_col = np.insert(np.cumsum(widths), 0, 0)[:-1]
coords_col
# In[34]:
heights = np.maximum(
np.ceil(
pd.crosstab(df_sample.cluster, df_sample.area_group).to_numpy() / widths
).astype(int),
1,
).max(axis=1)
heights
# In[35]:
coords_row = np.insert(np.cumsum(heights), 0, 0)[:-1]
coords_row
# In[36]:
sum(heights)
# In[37]:
sum(widths)
# 31 rows and 19 columns in total
# In[38]:
u = 84 # unit size
flip = False
# In[39]:
if not flip:
img_size = (sum(heights) * u, sum(widths) * u)
else:
img_size = (sum(widths) * u, sum(heights) * u)
# In[40]:
img_size
# In[41]:
img = np.ones(img_size + (3,), np.float32)
# img = np.zeros(img_size + (3,), np.float32)
# In[42]:
plot_bgr(pad_fp(visualize_biclust_cam(fp_full, 3), u, u, 1))
# In[43]:
df_sample[(df_sample.cluster == 0) & (df_sample.area_group == 2)]
# In[44]:
df_sample[(df_sample.cluster == 0) & (df_sample.area_group == 2)].ID.iloc[1]
# In[45]:
for ir, rr in enumerate(coords_row):
for ic, cc in enumerate(coords_col):
df_clust = df_sample[(df_sample.cluster == ir) & (df_sample.area_group == ic)]
for i in range(len(df_clust)):
r = i // widths[ic]
c = i - r * widths[ic]
id_ = df_clust.iloc[i].ID
clust = df_clust.iloc[i].cluster
img[
(rr + r) * u : (rr + r + 1) * u, (cc + c) * u : (cc + c + 1) * u
] = pad_fp(
visualize_biclust_cam(
fp_float_from_mono(
read_mono_from_image_unicode(dir_from + id_ + ".png")
),
clust,
),
u,
u,
1,
)
# In[46]:
fig = plt.figure(figsize=(11, 13), dpi=300)
ax = fig.gca()
im = plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax.set_xticks((coords_col + widths / 2) * u)
ax.set_xticklabels(
[
"One-room\n($\leq50\mathrm{m^2}$)",
"Small\n($\leq60\mathrm{m^2}$)",
"Medium\n($\leq85\mathrm{m^2}$)",
"Large\n($>85\mathrm{m^2}$)",
]
)
ax.set_yticks((coords_row + heights / 2 + 1 / 6) * u)
ax.set_yticklabels(range(1, biclusts.max() + 2))
ax.vlines(coords_col * u, 0, heights.sum() * u - 1, colors="k", lw=0.3)
ax.hlines(coords_row * u, 0, widths.sum() * u - 1, colors="k", lw=0.3)
fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# In[47]:
df_sample[(df_sample.cluster == 0)]
# 101160_113E
# 103915_112C
# 104127_107B
# 107903_113G
# 108838_117B
# In[48]:
def plot_bgr_scale(img):
size_x, size_y = img.shape[:2]
fig = plt.figure(figsize=(2 * size_x / 112, 2 * size_y / 112), dpi=300)
plt.axes().axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.tight_layout()
# In[49]:
ir, ic, i = 15, 0, 0
u_single = 84 # 56 84 112
df_clust = df_sample[(df_sample.cluster == ir) & (df_sample.area_group == ic)]
id_ = df_clust.iloc[i].ID
print(id_)
clust = df_clust.iloc[i].cluster
plot_bgr_scale(
pad_fp(
visualize_biclust_cam(
fp_float_from_mono(read_mono_from_image_unicode(dir_from + id_ + ".png")),
clust,
),
u_single,
u_single,
1,
)
)
# In[53]:
def plot_bams(id_, types):
print(id_)
fp = fp_float_from_mono(read_mono_from_image_unicode(dir_from + id_ + ".png"))
size_y, size_x = np.fmax(fp.shape[:2], 56)
clust_name = [
"8090-1",
"8090-2",
"8090-3",
"9000-1",
"9000-2",
"9000-3",
"9000-4",
"00-1",
"00-2",
"00-3",
"00-4",
"0010-1",
"0010-2",
"0010-3",
"10-1",
"10-2",
]
clusts = [type - 1 for type in types]
fig, axs = plt.subplots(1, len(clusts), figsize=(11 / 4 * len(clusts), 5), dpi=300)
for i, clust in enumerate(clusts):
ax = axs[i]
img = pad_fp(visualize_biclust_cam(fp, clust), size_x, size_y, 1)
ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax.axis("off")
# title on bottom
# ax.set_title(clust_name[clust], y=-size_x / 56 / 10)
# title on top
ax.set_title(clust_name[clust], y=1)
plt.tight_layout()
return fig
# # 1980s: slab-type blocks, corridor access
# In[54]:
df_sample[
(df_sample.year >= 1980)
& (df_sample.year < 1990)
& (df_sample.cluster.isin([0, 1, 2]))
& (df_sample.area_group == 2)
]
# Corridor-access, medium size
#
# Seoul Guro Jugong 1st complex, 73.08㎡, 1986
# In[55]:
fig = plot_bams("137_96", [1, 2, 3])
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# In[56]:
df_sample[
(df_sample.year >= 1980)
& (df_sample.year < 1990)
& (df_sample.cluster.isin([0, 1, 2]))
& (df_sample.area_group == 3)
]
# Corridor-access, large size
#
# Seoul Apgujeong Hanyang 7th complex, 106.22㎡, 1981
# In[57]:
fig = plot_bams("501_114A", [1, 2, 3])
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# # 1990s: slab-type blocks, staircase-core access
# In[58]:
df_sample[(df_sample.cluster.isin([3])) & (df_sample.area_group == 2)]
# 3LDK, medium size
#
# Cheonan Ilseong 3rd complex, 84.82㎡, 1994
# In[59]:
id_ = "7479_106"
fig1 = plot_bams(id_, [1, 2, 3])
fig2 = plot_bams(id_, range(4, 7 + 1))
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# In[60]:
df_sample[
(df_sample.year >= 1990)
& (df_sample.year < 2000)
& (df_sample.cluster.isin(range(7 + 1)))
& (df_sample.Rooms == 4)
]
# 4LDK, large size
#
# Incheon Yeonsu Hana 2nd complex, 99.42㎡, 1994
# In[61]:
id_ = "2292_116"
fig1 = plot_bams(id_, [1, 2, 3])
fig2 = plot_bams(id_, range(4, 7 + 1))
# fig.savefig("bam.png", bbox_inches="tight", pad_inches=0)
# fig.savefig("bam.pdf", bbox_inches="tight", pad_inches=0)
# # 2000s: balconies, core placed at the rear
# In[62]:
df_sample[
(df_sample.year >= 2000)
& (df_sample.year < 2010)
& (df_sample.cluster.isin([9]))
& (df_sample.area_group == 2)
]
# Slab-type
# Medium size
#
# Gyeonggi
# Donghwa Oxizone 5th complex,
# 84.58㎡,
# 2005
# In[63]:
id_ = "17566_118"
fig1 = plot_bams(id_, range(8, 11 + 1))
fig2 = plot_bams(id_, range(12, 14 + 1))
# # 2010s: tower-type blocks, studio units
# In[64]:
df_sample[(df_sample.cluster.isin([13])) & (df_sample.area_group == 2)]
# Tower-type, central unit
# Medium size
#
# Seoul Seocho Foresta 5,
# 84.4㎡,
# 2014
# In[65]:
id_ = "107903_112B3"
fig1 = plot_bams(id_, range(12, 14 + 1))
fig2 = plot_bams(id_, range(15, 16 + 1))
# In[66]:
df_sample[(df_sample.cluster.isin([15])) & (df_sample.area_group == 2)]
# Tower-type end unit, medium size
#
# Cheonan Baekseok The Sharp,
# 84.25㎡,
# 2016
# In[67]:
id_ = "108523_111C"
fig1 = plot_bams(id_, range(12, 14 + 1))
fig2 = plot_bams(id_, range(15, 16 + 1))
# In[68]:
df_sample[
(df_sample.cluster.isin([15]))
& (df_sample.year >= 2010)
& (df_sample.area_group == 2)
]
# Mixed type
# (L-shaped building with a staircase core)
#
# Sejong Garak 4th complex EG-the-1,
# 79.59㎡,
# 2014
# In[69]:
id_ = "107076_106C"
fig1 = plot_bams(id_, range(12, 14 + 1))
fig2 = plot_bams(id_, range(15, 16 + 1))
# In[70]:
df[(df.cluster.isin([14]))].Area.mean()
# In[71]:
df[(df.cluster.isin([14]))].Area.median()
# In[72]:
df[(df.cluster.isin([14])) & (df.year >= 2010) & (df.Area >= 23) & (df.Area <= 29)]
# Studio-type urban lifestyle housing
#
# Seoul Yeoksam Daemyung Bellion,
# 23.62㎡,
# 2012
# In[73]:
id_ = "104259_36G"
fig = plot_bams(id_, [3, 6, 15])
|
[
"numpy.fmax",
"fp_tensorflow.create_vgg_5y_model",
"numpy.argmax",
"pandas.read_csv",
"matplotlib.pyplot.axes",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"floorplan_analysis.read_mono_from_image_unicode",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"numpy.cumsum",
"numpy.loadtxt",
"cv2.resize",
"floorplan_analysis.fp_float_from_mono",
"numpy.uint8",
"numpy.asarray",
"pandas.cut",
"numpy.squeeze",
"floorplan_analysis.pad_fp",
"pandas.crosstab",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.keras.backend.function",
"numpy.array"
] |
[((598, 619), 'fp_tensorflow.create_vgg_5y_model', 'create_vgg_5y_model', ([], {}), '()\n', (617, 619), False, 'from fp_tensorflow import create_vgg_5y_model\n'), ((1659, 1693), 'numpy.loadtxt', 'np.loadtxt', (['"""biclust_col.txt"""', 'int'], {}), "('biclust_col.txt', int)\n", (1669, 1693), True, 'import numpy as np\n'), ((2601, 2662), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + '2888_118A' + '.png')"], {}), "(dir_from + '2888_118A' + '.png')\n", (2629, 2662), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((2673, 2697), 'floorplan_analysis.fp_float_from_mono', 'fp_float_from_mono', (['mono'], {}), '(mono)\n', (2691, 2697), False, 'from floorplan_analysis import fp_float_from_mono\n'), ((2703, 2726), 'floorplan_analysis.pad_fp', 'pad_fp', (['fp_full', '(56)', '(56)'], {}), '(fp_full, 56, 56)\n', (2709, 2726), False, 'from floorplan_analysis import pad_fp\n'), ((2916, 2941), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (2926, 2941), False, 'import cv2\n'), ((3142, 3167), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (3152, 3167), False, 'import cv2\n'), ((5553, 5579), 'pandas.read_csv', 'pd.read_csv', (['"""biclust.csv"""'], {}), "('biclust.csv')\n", (5564, 5579), True, 'import pandas as pd\n'), ((5599, 5653), 'pandas.cut', 'pd.cut', (['df.Area', '[0, 50, 60, 85, np.inf]'], {'labels': '(False)'}), '(df.Area, [0, 50, 60, 85, np.inf], labels=False)\n', (5605, 5653), True, 'import pandas as pd\n'), ((5852, 5904), 'pandas.crosstab', 'pd.crosstab', (['df_sample.cluster', 'df_sample.area_group'], {}), '(df_sample.cluster, df_sample.area_group)\n', (5863, 5904), True, 'import pandas as pd\n'), ((6007, 6031), 'numpy.asarray', 'np.asarray', (['[3, 4, 8, 7]'], {}), '([3, 4, 8, 7])\n', (6017, 6031), True, 'import numpy as np\n'), ((6646, 6682), 'numpy.ones', 'np.ones', (['(img_size + (3,))', 'np.float32'], {}), '(img_size + (3,), np.float32)\n', (6653, 6682), True, 'import numpy as np\n'), ((7783, 7820), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 13)', 'dpi': '(300)'}), '(figsize=(11, 13), dpi=300)\n', (7793, 7820), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1095), 'tensorflow.keras.backend.function', 'K.function', (['[model.layers[0].input]', '[final_conv_layer.output, model.layers[-1].output]'], {}), '([model.layers[0].input], [final_conv_layer.output, model.layers[\n -1].output])\n', (1013, 1095), True, 'import tensorflow.keras.backend as K\n'), ((1458, 1514), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'np.float32', 'shape': 'conv_output.shape[0:2]'}), '(dtype=np.float32, shape=conv_output.shape[0:2])\n', (1466, 1514), True, 'import numpy as np\n'), ((1942, 1977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (1952, 1977), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2080), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2078, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2159), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (2134, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2206), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2201, 2206), True, 'import matplotlib.pyplot as plt\n'), ((2211, 2229), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2227, 2229), True, 'import 
matplotlib.pyplot as plt\n'), ((2292, 2327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (2302, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2359, 2385), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': 'cmap'}), '(img, cmap=cmap)\n', (2369, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2390, 2408), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2406, 2408), True, 'import matplotlib.pyplot as plt\n'), ((3006, 3025), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (3014, 3025), True, 'import numpy as np\n'), ((3249, 3268), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (3257, 3268), True, 'import numpy as np\n'), ((3433, 3587), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 0.33, 0.0, 0.0], [1.0, 0.25, 0.0, 0.0], [0.83,\n 0.87, 0.0, 0.0], [0.0, 0.26, 1.0, 0.0], [0.0, 0.81, 0.76, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 0.33, 0.0, 0.0], [1.0, 0.25, 0.0, 0.0\n ], [0.83, 0.87, 0.0, 0.0], [0.0, 0.26, 1.0, 0.0], [0.0, 0.81, 0.76, 0.0]])\n', (3441, 3587), True, 'import numpy as np\n'), ((4266, 4284), 'floorplan_analysis.pad_fp', 'pad_fp', (['fp', '(56)', '(56)'], {}), '(fp, 56, 56)\n', (4272, 4284), False, 'from floorplan_analysis import pad_fp\n'), ((4325, 4350), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (4335, 4350), False, 'import cv2\n'), ((4484, 4537), 'floorplan_analysis.pad_fp', 'pad_fp', (['heatmap', 'fp_light.shape[1]', 'fp_light.shape[0]'], {}), '(heatmap, fp_light.shape[1], fp_light.shape[0])\n', (4490, 4537), False, 'from floorplan_analysis import pad_fp\n'), ((4900, 4918), 'floorplan_analysis.pad_fp', 'pad_fp', (['fp', '(56)', '(56)'], {}), '(fp, 56, 56)\n', (4906, 4918), False, 'from floorplan_analysis import pad_fp\n'), ((4973, 4998), 'cv2.resize', 'cv2.resize', (['cam', '(56, 56)'], {}), '(cam, (56, 56))\n', (4983, 4998), False, 'import cv2\n'), ((7852, 7888), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7864, 7888), False, 'import cv2\n'), ((8703, 8768), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * size_x / 112, 2 * size_y / 112)', 'dpi': '(300)'}), '(figsize=(2 * size_x / 112, 2 * size_y / 112), dpi=300)\n', (8713, 8768), True, 'import matplotlib.pyplot as plt\n'), ((8853, 8871), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8869, 8871), True, 'import matplotlib.pyplot as plt\n'), ((9474, 9499), 'numpy.fmax', 'np.fmax', (['fp.shape[:2]', '(56)'], {}), '(fp.shape[:2], 56)\n', (9481, 9499), True, 'import numpy as np\n'), ((10310, 10328), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10326, 10328), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1168), 'numpy.expand_dims', 'np.expand_dims', (['fp', '(0)'], {}), '(fp, 0)\n', (1161, 1168), True, 'import numpy as np\n'), ((1181, 1212), 'numpy.squeeze', 'np.squeeze', (['conv_output'], {'axis': '(0)'}), '(conv_output, axis=0)\n', (1191, 1212), True, 'import numpy as np\n'), ((1214, 1235), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1223, 1235), True, 'import numpy as np\n'), ((2020, 2056), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2032, 2056), False, 'import cv2\n'), ((4427, 4446), 'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (4435, 4446), True, 'import numpy as np\n'), ((5075, 5094), 
'numpy.uint8', 'np.uint8', (['(255 * cam)'], {}), '(255 * cam)\n', (5083, 5094), True, 'import numpy as np\n'), ((5919, 5971), 'pandas.crosstab', 'pd.crosstab', (['df_sample.cluster', 'df_sample.area_group'], {}), '(df_sample.cluster, df_sample.area_group)\n', (5930, 5971), True, 'import pandas as pd\n'), ((6056, 6073), 'numpy.cumsum', 'np.cumsum', (['widths'], {}), '(widths)\n', (6065, 6073), True, 'import numpy as np\n'), ((6312, 6330), 'numpy.cumsum', 'np.cumsum', (['heights'], {}), '(heights)\n', (6321, 6330), True, 'import numpy as np\n'), ((8811, 8847), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8823, 8847), False, 'import cv2\n'), ((9398, 9451), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + id_ + '.png')"], {}), "(dir_from + id_ + '.png')\n", (9426, 9451), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((1982, 1992), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1990, 1992), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2174), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2172, 2174), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2342), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2340, 2342), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3964), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (3942, 3964), True, 'import numpy as np\n'), ((4196, 4236), 'cv2.cvtColor', 'cv2.cvtColor', (['fp_rgba', 'cv2.COLOR_RGB2Lab'], {}), '(fp_rgba, cv2.COLOR_RGB2Lab)\n', (4208, 4236), False, 'import cv2\n'), ((4650, 4677), 'numpy.expand_dims', 'np.expand_dims', (['fp_light', '(2)'], {}), '(fp_light, 2)\n', (4664, 4677), True, 'import numpy as np\n'), ((4830, 4870), 'cv2.cvtColor', 'cv2.cvtColor', (['fp_rgba', 'cv2.COLOR_RGB2Lab'], {}), '(fp_rgba, cv2.COLOR_RGB2Lab)\n', (4842, 4870), False, 'import cv2\n'), ((5359, 5386), 'numpy.expand_dims', 'np.expand_dims', (['fp_light', '(2)'], {}), '(fp_light, 2)\n', (5373, 5386), True, 'import numpy as np\n'), ((8773, 8783), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (8781, 8783), True, 'import matplotlib.pyplot as plt\n'), ((10085, 10121), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (10097, 10121), False, 'import cv2\n'), ((3968, 3981), 'numpy.array', 'np.array', (['fps'], {}), '(fps)\n', (3976, 3981), True, 'import numpy as np\n'), ((9173, 9226), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + id_ + '.png')"], {}), "(dir_from + id_ + '.png')\n", (9201, 9226), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((7569, 7622), 'floorplan_analysis.read_mono_from_image_unicode', 'read_mono_from_image_unicode', (["(dir_from + id_ + '.png')"], {}), "(dir_from + id_ + '.png')\n", (7597, 7622), False, 'from floorplan_analysis import read_mono_from_image_unicode\n'), ((6154, 6206), 'pandas.crosstab', 'pd.crosstab', (['df_sample.cluster', 'df_sample.area_group'], {}), '(df_sample.cluster, df_sample.area_group)\n', (6165, 6206), True, 'import pandas as pd\n')]
|
from sklearn import linear_model as lm
import scipy
import numpy as np
import pandas as pd
from skorecard.utils import convert_sparse_matrix
from sklearn.utils.validation import check_is_fitted
class LogisticRegression(lm.LogisticRegression):
"""Extended Logistic Regression.
Extends [sklearn.linear_model.LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
This class provides the following extra statistics, calculated on `.fit()` and accessible via `.get_stats()`:
- `cov_matrix_`: covariance matrix for the estimated parameters.
- `std_err_intercept_`: estimated uncertainty for the intercept
- `std_err_coef_`: estimated uncertainty for the coefficients
- `z_intercept_`: estimated z-statistic for the intercept
- `z_coef_`: estimated z-statistic for the coefficients
- `p_value_intercept_`: estimated p-value for the intercept
- `p_value_coef_`: estimated p-value for the coefficients
Example:
```python
from skorecard.datasets import load_uci_credit_card
from skorecard.bucketers import EqualFrequencyBucketer
from skorecard.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
X, y = load_uci_credit_card(return_X_y=True)
pipeline = Pipeline([
('bucketer', EqualFrequencyBucketer(n_bins=10)),
('clf', LogisticRegression(calculate_stats=True))
])
pipeline.fit(X, y)
assert pipeline.named_steps['clf'].p_val_coef_[0][0] > 0
pipeline.named_steps['clf'].get_stats()
```
An example output of `.get_stats()`:
Index | Coef. | Std.Err | z | Pz
--------- | ----------| ---------| ----------| ------------
const | -0.537571 | 0.096108 | -5.593394 | 2.226735e-08
EDUCATION | 0.010091 | 0.044874 | 0.224876 | 8.220757e-01
""" # noqa
def __init__(
self,
penalty="l2",
calculate_stats=False,
dual=False,
tol=0.0001,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
random_state=None,
solver="lbfgs",
max_iter=100,
multi_class="auto",
verbose=0,
warm_start=False,
n_jobs=None,
l1_ratio=None,
):
"""
Extends [sklearn.linear_model.LogisticRegression.fit()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
Args:
calculate_stats (bool): If true, calculate statistics like standard error during fit, accessible with .get_stats()
""" # noqa
super(LogisticRegression, self).__init__(
penalty=penalty,
dual=dual,
tol=tol,
C=C,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight,
random_state=random_state,
solver=solver,
max_iter=max_iter,
multi_class=multi_class,
verbose=verbose,
warm_start=warm_start,
n_jobs=n_jobs,
l1_ratio=l1_ratio,
)
self.calculate_stats = calculate_stats
def fit(self, X, y, sample_weight=None, calculate_stats=False, **kwargs):
"""
Fit the model.
Overwrites [sklearn.linear_model.LogisticRegression.fit()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
In addition to the standard fit by sklearn, this function will compute the covariance of the coefficients.
Args:
X (array-like, sparse matrix): Matrix of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like): of shape (n_samples,)
Target vector relative to X.
sample_weight (array-like): of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
calculate_stats (bool): If true, calculate statistics like standard error during fit, accessible with .get_stats()
Returns:
self (LogisticRegression): Fitted estimator.
""" # noqa
if not self.calculate_stats and not calculate_stats:
return super().fit(X, y, sample_weight=sample_weight, **kwargs)
X = convert_sparse_matrix(X)
if isinstance(X, pd.DataFrame):
self.names_ = ["const"] + [f for f in X.columns]
else:
self.names_ = ["const"] + [f"x{i}" for i in range(X.shape[1])]
lr = super().fit(X, y, sample_weight=sample_weight, **kwargs)
predProbs = self.predict_proba(X)
# Design matrix -- add column of 1's at the beginning of your X matrix
if lr.fit_intercept:
X_design = np.hstack([np.ones((X.shape[0], 1)), X])
else:
X_design = X
p = np.product(predProbs, axis=1)
self.cov_matrix_ = np.linalg.inv((X_design * p[..., np.newaxis]).T @ X_design)
std_err = np.sqrt(np.diag(self.cov_matrix_)).reshape(1, -1)
# In case fit_intercept is set to True, then in the std_error array
# Index 0 corresponds to the intercept, from index 1 onwards it relates to the coefficients
# If fit intercept is False, then all the values are related to the coefficients
if lr.fit_intercept:
self.std_err_intercept_ = std_err[:, 0]
self.std_err_coef_ = std_err[:, 1:][0]
self.z_intercept_ = self.intercept_ / self.std_err_intercept_
# Get p-values under the gaussian assumption
self.p_val_intercept_ = scipy.stats.norm.sf(abs(self.z_intercept_)) * 2
else:
self.std_err_intercept_ = np.array([np.nan])
self.std_err_coef_ = std_err[0]
self.z_intercept_ = np.array([np.nan])
# Get p-values under the gaussian assumption
self.p_val_intercept_ = np.array([np.nan])
self.z_coef_ = self.coef_ / self.std_err_coef_
self.p_val_coef_ = scipy.stats.norm.sf(abs(self.z_coef_)) * 2
return self
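    # Note on the statistics computed in fit(): with W = diag(p_i * (1 - p_i)) built from the
    # predicted probabilities, cov_matrix_ = (X_design.T @ W @ X_design)^-1, the standard errors
    # are the square roots of its diagonal, z = coefficient / standard error, and the two-sided
    # p-value is 2 * (1 - Phi(|z|)) under the Gaussian approximation used above.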
def get_stats(self) -> pd.DataFrame:
"""
Puts the summary statistics of the fit() function into a pandas DataFrame.
Returns:
data (pandas DataFrame): The statistics dataframe, indexed by
the column name
"""
check_is_fitted(self)
if not hasattr(self, "std_err_coef_"):
msg = "Summary statistics were not calculated on .fit(). Options to fix:\n"
msg += "\t- Re-fit using .fit(X, y, calculate_stats=True)\n"
msg += "\t- Re-inititialize using LogisticRegression(calculate_stats=True)"
raise AssertionError(msg)
data = {
"Coef.": (self.intercept_.tolist() + self.coef_.tolist()[0]),
"Std.Err": (self.std_err_intercept_.tolist() + self.std_err_coef_.tolist()),
"z": (self.z_intercept_.tolist() + self.z_coef_.tolist()[0]),
"P>|z|": (self.p_val_intercept_.tolist() + self.p_val_coef_.tolist()[0]),
}
return pd.DataFrame(data, index=self.names_)
|
[
"pandas.DataFrame",
"numpy.ones",
"sklearn.utils.validation.check_is_fitted",
"numpy.product",
"numpy.linalg.inv",
"numpy.array",
"numpy.diag",
"skorecard.utils.convert_sparse_matrix"
] |
[((4592, 4616), 'skorecard.utils.convert_sparse_matrix', 'convert_sparse_matrix', (['X'], {}), '(X)\n', (4613, 4616), False, 'from skorecard.utils import convert_sparse_matrix\n'), ((5146, 5175), 'numpy.product', 'np.product', (['predProbs'], {'axis': '(1)'}), '(predProbs, axis=1)\n', (5156, 5175), True, 'import numpy as np\n'), ((5203, 5262), 'numpy.linalg.inv', 'np.linalg.inv', (['((X_design * p[..., np.newaxis]).T @ X_design)'], {}), '((X_design * p[..., np.newaxis]).T @ X_design)\n', (5216, 5262), True, 'import numpy as np\n'), ((6656, 6677), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (6671, 6677), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((7380, 7417), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'self.names_'}), '(data, index=self.names_)\n', (7392, 7417), True, 'import pandas as pd\n'), ((6000, 6018), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6008, 6018), True, 'import numpy as np\n'), ((6096, 6114), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6104, 6114), True, 'import numpy as np\n'), ((6209, 6227), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6217, 6227), True, 'import numpy as np\n'), ((5064, 5088), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (5071, 5088), True, 'import numpy as np\n'), ((5289, 5314), 'numpy.diag', 'np.diag', (['self.cov_matrix_'], {}), '(self.cov_matrix_)\n', (5296, 5314), True, 'import numpy as np\n')]
|
"""
Provides concrete tools for dealing with two of the most useful types of surfaces we have
"""
import numpy as np
from collections import namedtuple
from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader
from McUtils.Zachary import Surface, MultiSurface, InterpolatedSurface, TaylorSeriesSurface
__all__=[
"DipoleSurface",
"PotentialSurface"
]
class DipoleSurface(MultiSurface):
"""
Provides a unified interface to working with dipole surfaces.
Currently basically no fancier than a regular surface (although with convenient loading functions), but dipole-specific
stuff could come
"""
def __init__(self, mu_x, mu_y, mu_z):
"""
:param mu_x: X-component of dipole moment
:type mu_x: Surface
:param mu_y: Y-component of dipole moment
:type mu_y: Surface
:param mu_z: Z-component of dipole moment
:type mu_z: Surface
"""
if isinstance(mu_x.base, TaylorSeriesSurface):
self.mode = "taylor"
else:
self.mode = "interp"
super().__init__(
mu_x,
mu_y,
mu_z
)
@staticmethod
def get_log_values(log_file, keys=("StandardCartesianCoordinates", "DipoleMoments")):
with GaussianLogReader(log_file) as parser:
parse_data = parser.parse(keys)
carts = parse_data[keys[0]][1]
dipoles = np.array(parse_data[keys[1]])
return namedtuple('dipole_log_values', ['cartesians', 'dipoles'])(carts, dipoles)
@classmethod
def from_log_file(cls, log_file, coord_transf, keys=("StandardCartesianCoordinates", "DipoleMoments"), tol = .001, **opts):
"""
Loads dipoles from a Gaussian log file and builds a dipole surface by interpolating.
Obviously this only really works if we have a subset of "scan" coordinates, so at this stage the user is obligated
to furnish a function that'll take a set of Cartesian coordinates and convert them to "scan" coordinates.
Coordinerds can be helpful with this, as it provides a convenient syntax for Cartesian <-> ZMatrix conversions
:param log_file: a Gaussian log file to pull from
:type log_file: str
:return:
:rtype:
"""
carts, dipoles = cls.get_log_values(log_file, keys=keys)
scan_coords = coord_transf(carts)
if len(dipoles) != len(scan_coords):
raise ValueError(
"mismatch between number of dipoles ({}) and number of coordinates ({})".format(
len(dipoles),
len(scan_coords)
)
)
if scan_coords.ndim == 1:
scan_sort = np.argsort(scan_coords)
else:
scan_sort = np.lexsort(tuple(reversed(tuple(scan_coords.T))))
scan_coords = scan_coords[scan_sort]
dipoles = dipoles[scan_sort]
# this is particularly relevant for optimization scans...but we pull all the "unique" indices
# then we pull the indices right _before_ each unique one since that's the final one in the block of "uniques"
# finally we do a "roll" to make sure the order from the sort is preserved
tol_coords = np.floor(scan_coords/tol)
if tol_coords.ndim == 1:
diffs = np.diff(tol_coords)
else:
diffs = np.sum(abs(np.diff(tol_coords, axis=0)), axis=1)
inds = np.where(diffs != 0)[0]
        inds = np.concatenate((inds, [len(scan_coords) - 1]))  # also keep the final point of the last block
scan_coords = scan_coords[inds]
dipoles = dipoles[inds]
dipoles = list(np.transpose(dipoles))
return DipoleSurface(*(
Surface(
((scan_coords, d), opts),
base = InterpolatedSurface,
dipole_component = "x" if i == 0 else "y" if i == 1 else "z"
) for i,d in enumerate(dipoles)
))
@staticmethod
def get_fchk_values(fchk_file):
with GaussianFChkReader(fchk_file) as parser:
parse_data = parser.parse(["Coordinates", "Dipole Moment", "Dipole Derivatives"])
center = parse_data["Coordinates"]
const_dipole = parse_data["Dipole Moment"]
derivs = parse_data["Dipole Derivatives"]
derivs = np.reshape(derivs, (int(len(derivs) / 3), 3))
return namedtuple('dipole_fchk_values', ['center', 'const', 'derivs'])(center, const_dipole, derivs)
@classmethod
def from_fchk_file(cls, fchk_file, **opts):
"""
Loads dipoles from a Gaussian formatted checkpoint file and builds a dipole surface via a linear approximation
:param fchk_file: a Gaussian fchk file to pull from
        :type fchk_file: str
:return:
:rtype:
"""
center, const_dipole, derivs = cls.get_fchk_values(fchk_file)
derivs = list(np.transpose(derivs))
opts['center'] = center.flatten()
surfs = [None]*3
for i, d in enumerate(zip(derivs, list(const_dipole))):
d, r = d
opts = opts.copy()
opts["ref"] = r
surfs[i] = Surface(
((d,), opts),
base = TaylorSeriesSurface,
dipole_component="x" if i == 0 else "y" if i == 1 else "z"
)
return cls(*surfs)
def __call__(self, gridpoints, **opts):
"""
Explicitly overrides the Surface-level evaluation because we know the Taylor surface needs us to flatten our gridpoints
:param gridpoints:
:type gridpoints:
:param opts:
:type opts:
:return:
:rtype:
"""
gps = np.asarray(gridpoints)
if self.mode == "taylor":
if gps.ndim == 2:
gps = gps.flatten()
elif gps.ndim > 2:
gps = np.reshape(gps, gps.shape[:-2] + (np.product(gps.shape[-2:]),))
return super().__call__(gps, **opts)
class PotentialSurface(Surface):
"""
A potential surface structure to go along with the DipoleSurface.
Provides convenient access to dipole data + a unified interface to things like energy minimization
"""
@staticmethod
def get_log_values(log_file, keys=("StandardCartesianCoordinates", "ScanEnergies")):
with GaussianLogReader(log_file) as parser:
parse_data = parser.parse(keys)
# Need to be smarter about this. At some point we might be able to infer what type of log file we have...
coord_key = keys[0]
coords = parse_data[coord_key][1]
eng_key = keys[1]
if eng_key == "ScanEnergies":
energies = np.array(parse_data[eng_key].energies[:, -1])
else:
raise Exception("Haven't dealt with scan types beyond rigid ones...")
return namedtuple('potential_log_values', ['coords', 'energies'])(coords, energies)
@classmethod
def from_log_file(cls, log_file, coord_transf, keys=("StandardCartesianCoordinates", "ScanEnergies"), tol = .001, **opts):
"""
Loads dipoles from a Gaussian log file and builds a potential surface by interpolating.
Obviously this only really works if we have a subset of "scan" coordinates, so at this stage the user is obligated
to furnish a function that'll take a set of Cartesian coordinates and convert them to "scan" coordinates.
Coordinerds can be helpful with this, as it provides a convenient syntax for Cartesian <-> ZMatrix conversions.
:param log_file: a Gaussian log file to pull from
:type log_file: str
:return:
:rtype:
"""
dat = cls.get_log_values(log_file, keys=keys)
carts = dat.coords
pots = dat.energies
# raise Exception(carts, pots)
scan_coords = coord_transf(carts)
if len(pots) != len(scan_coords):
raise ValueError(
"mismatch between number of potential values ({}) and number of coordinates ({})".format(
len(pots),
len(scan_coords)
)
)
if scan_coords.ndim == 1:
scan_sort = np.argsort(scan_coords)
else:
scan_sort = np.lexsort(tuple(reversed(tuple(scan_coords.T))))
scan_coords = scan_coords[scan_sort]
pots = pots[scan_sort]
# this is particularly relevant for optimization scans...but we pull all the "unique" indices
# then we pull the indices right _before_ each unique one since that's the final one in the block of "uniques"
# finally we do a "roll" to make sure the order from the sort is preserved
tol_coords = np.floor(scan_coords/tol)
if tol_coords.ndim == 1:
diffs = np.diff(tol_coords)
else:
diffs = np.sum(abs(np.diff(tol_coords, axis=0)), axis=1)
inds = np.where(diffs != 0)[0]
        inds = np.concatenate((inds, [len(scan_coords) - 1]))  # also keep the final point of the last block
scan_coords = scan_coords[inds].squeeze()
pots = pots[inds]
return cls(
((scan_coords, pots), opts),
base=InterpolatedSurface
)
@staticmethod
def get_fchk_values(fchk_file):
# TODO: I know I probably didn't do this right but I'm just getting a thing out for now
with GaussianFChkReader(fchk_file) as parser:
parse_data = parser.parse(["Coordinates", "Total Energy", "Gradient", "ForceConstants", "ForceDerivatives"])
center = parse_data["Coordinates"]
eng = parse_data["Total Energy"]
derivs = [parse_data['Gradient'], parse_data["ForceConstants"], parse_data["ForceDerivatives"]]
return namedtuple('potential_fchk_values', ['center', 'energy', 'derivs'])(
center, eng, derivs
)
@classmethod
def from_fchk_file(cls, fchk_file, **opts):
"""
Loads potential from a Gaussian formatted checkpoint file and builds a potential surface via a quartic approximation
:param fchk_file: a Gaussian fchk file to pull from
        :type fchk_file: str
:return:
:rtype:
"""
center, energy, derivs = cls.get_fchk_values(fchk_file)
return cls((derivs, dict(ref=energy, center=center.flatten())), base=TaylorSeriesSurface, **opts)
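# Hedged usage sketch (not part of the original module): from_log_file needs a user-supplied
# transform from Cartesian coordinates to the scanned coordinate(s). The log-file name and the
# bond-length transform below are illustrative assumptions, not part of this package's API.
def _example_dipole_from_scan(log_file="oh_scan.log"):
    def bond_length(carts):
        # carts has shape (n_geometries, n_atoms, 3); return the scanned O-H distance per geometry
        return np.linalg.norm(carts[:, 1] - carts[:, 0], axis=-1)
    mu = DipoleSurface.from_log_file(log_file, bond_length)
    # evaluate the interpolated dipole components on a grid of scan coordinates
    return mu(np.linspace(1.6, 2.2, 25))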
|
[
"McUtils.GaussianInterface.GaussianFChkReader",
"numpy.asarray",
"numpy.floor",
"numpy.transpose",
"McUtils.Zachary.Surface",
"numpy.argsort",
"numpy.product",
"McUtils.GaussianInterface.GaussianLogReader",
"numpy.array",
"collections.namedtuple",
"numpy.diff",
"numpy.where"
] |
[((1430, 1459), 'numpy.array', 'np.array', (['parse_data[keys[1]]'], {}), '(parse_data[keys[1]])\n', (1438, 1459), True, 'import numpy as np\n'), ((3252, 3279), 'numpy.floor', 'np.floor', (['(scan_coords / tol)'], {}), '(scan_coords / tol)\n', (3260, 3279), True, 'import numpy as np\n'), ((5657, 5679), 'numpy.asarray', 'np.asarray', (['gridpoints'], {}), '(gridpoints)\n', (5667, 5679), True, 'import numpy as np\n'), ((8662, 8689), 'numpy.floor', 'np.floor', (['(scan_coords / tol)'], {}), '(scan_coords / tol)\n', (8670, 8689), True, 'import numpy as np\n'), ((1289, 1316), 'McUtils.GaussianInterface.GaussianLogReader', 'GaussianLogReader', (['log_file'], {}), '(log_file)\n', (1306, 1316), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((1476, 1534), 'collections.namedtuple', 'namedtuple', (['"""dipole_log_values"""', "['cartesians', 'dipoles']"], {}), "('dipole_log_values', ['cartesians', 'dipoles'])\n", (1486, 1534), False, 'from collections import namedtuple\n'), ((2732, 2755), 'numpy.argsort', 'np.argsort', (['scan_coords'], {}), '(scan_coords)\n', (2742, 2755), True, 'import numpy as np\n'), ((3331, 3350), 'numpy.diff', 'np.diff', (['tol_coords'], {}), '(tol_coords)\n', (3338, 3350), True, 'import numpy as np\n'), ((3449, 3469), 'numpy.where', 'np.where', (['(diffs != 0)'], {}), '(diffs != 0)\n', (3457, 3469), True, 'import numpy as np\n'), ((3620, 3641), 'numpy.transpose', 'np.transpose', (['dipoles'], {}), '(dipoles)\n', (3632, 3641), True, 'import numpy as np\n'), ((3984, 4013), 'McUtils.GaussianInterface.GaussianFChkReader', 'GaussianFChkReader', (['fchk_file'], {}), '(fchk_file)\n', (4002, 4013), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((4343, 4406), 'collections.namedtuple', 'namedtuple', (['"""dipole_fchk_values"""', "['center', 'const', 'derivs']"], {}), "('dipole_fchk_values', ['center', 'const', 'derivs'])\n", (4353, 4406), False, 'from collections import namedtuple\n'), ((4860, 4880), 'numpy.transpose', 'np.transpose', (['derivs'], {}), '(derivs)\n', (4872, 4880), True, 'import numpy as np\n'), ((5117, 5228), 'McUtils.Zachary.Surface', 'Surface', (['((d,), opts)'], {'base': 'TaylorSeriesSurface', 'dipole_component': "('x' if i == 0 else 'y' if i == 1 else 'z')"}), "(((d,), opts), base=TaylorSeriesSurface, dipole_component='x' if i ==\n 0 else 'y' if i == 1 else 'z')\n", (5124, 5228), False, 'from McUtils.Zachary import Surface, MultiSurface, InterpolatedSurface, TaylorSeriesSurface\n'), ((6288, 6315), 'McUtils.GaussianInterface.GaussianLogReader', 'GaussianLogReader', (['log_file'], {}), '(log_file)\n', (6305, 6315), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((6643, 6688), 'numpy.array', 'np.array', (['parse_data[eng_key].energies[:, -1]'], {}), '(parse_data[eng_key].energies[:, -1])\n', (6651, 6688), True, 'import numpy as np\n'), ((6801, 6859), 'collections.namedtuple', 'namedtuple', (['"""potential_log_values"""', "['coords', 'energies']"], {}), "('potential_log_values', ['coords', 'energies'])\n", (6811, 6859), False, 'from collections import namedtuple\n'), ((8148, 8171), 'numpy.argsort', 'np.argsort', (['scan_coords'], {}), '(scan_coords)\n', (8158, 8171), True, 'import numpy as np\n'), ((8741, 8760), 'numpy.diff', 'np.diff', (['tol_coords'], {}), '(tol_coords)\n', (8748, 8760), True, 'import numpy as np\n'), ((8859, 8879), 'numpy.where', 'np.where', (['(diffs != 0)'], {}), '(diffs != 0)\n', (8867, 8879), True, 'import numpy as np\n'), 
((9291, 9320), 'McUtils.GaussianInterface.GaussianFChkReader', 'GaussianFChkReader', (['fchk_file'], {}), '(fchk_file)\n', (9309, 9320), False, 'from McUtils.GaussianInterface import GaussianLogReader, GaussianFChkReader\n'), ((9658, 9725), 'collections.namedtuple', 'namedtuple', (['"""potential_fchk_values"""', "['center', 'energy', 'derivs']"], {}), "('potential_fchk_values', ['center', 'energy', 'derivs'])\n", (9668, 9725), False, 'from collections import namedtuple\n'), ((3396, 3423), 'numpy.diff', 'np.diff', (['tol_coords'], {'axis': '(0)'}), '(tol_coords, axis=0)\n', (3403, 3423), True, 'import numpy as np\n'), ((3688, 3811), 'McUtils.Zachary.Surface', 'Surface', (['((scan_coords, d), opts)'], {'base': 'InterpolatedSurface', 'dipole_component': "('x' if i == 0 else 'y' if i == 1 else 'z')"}), "(((scan_coords, d), opts), base=InterpolatedSurface,\n dipole_component='x' if i == 0 else 'y' if i == 1 else 'z')\n", (3695, 3811), False, 'from McUtils.Zachary import Surface, MultiSurface, InterpolatedSurface, TaylorSeriesSurface\n'), ((8806, 8833), 'numpy.diff', 'np.diff', (['tol_coords'], {'axis': '(0)'}), '(tol_coords, axis=0)\n', (8813, 8833), True, 'import numpy as np\n'), ((5867, 5893), 'numpy.product', 'np.product', (['gps.shape[-2:]'], {}), '(gps.shape[-2:])\n', (5877, 5893), True, 'import numpy as np\n')]
|
from whatthefood.graph.node import Node
from collections.abc import Sequence
import numpy as np
class Grad(Node):
def __init__(self, y, xs):
self.unwrap = False
if not isinstance(xs, Sequence):
xs = [xs]
self.unwrap = True
self.outputs_graph = self._build_outputs_graph(y, xs)
self.outputs_graph_flattened = self._flatten_outputs_graph(self.outputs_graph)
super(Grad, self).__init__(None, False, *self._get_inputs(self.outputs_graph))
self.y = y
self.xs = xs
def do(self, *inputs):
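        # Reverse-mode accumulation over the flattened graph: ops are visited with y
        # first and every other op only after all of the ops that consume it. The seed
        # gradient at y is ones; each remaining op receives the sum of the gradients
        # its consumers propagate back to it through `backpropagate`.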
values = {n: v for n, v in zip(self.inputs, inputs)}
inputs_grads = {}
ret = [None for _ in self.xs]
for op in self.outputs_graph_flattened:
if op is self.y:
grad = np.ones_like(values[self.y])
else:
grad = np.sum([
inputs_grads[o][o.inputs.index(op)]
for o in self.outputs_graph[op]
], axis=0)
input_values = [values[i] for i in op.inputs]
inputs_grads[op] = op.backpropagate(grad, *input_values)
if op in self.xs:
ret[self.xs.index(op)] = grad
return ret[0] if self.unwrap else ret
def _build_outputs_graph(self, y, xs):
outputs = {}
keep = y in xs
for i in y.inputs:
if i in xs:
keep = True
from_i = self._build_outputs_graph(i, xs)
if from_i is not None:
keep = True
for k, v in from_i.items():
if k in outputs:
outputs[k].update(v)
else:
outputs[k] = v
if i in outputs:
outputs[i].add(y)
else:
outputs[i] = {y}
if y not in outputs:
outputs[y] = set()
return outputs if keep else None
def _get_inputs(self, outputs_graph):
inputs = set(outputs_graph)
inputs.update(i for e in outputs_graph for i in e.inputs)
return inputs
def _flatten_outputs_graph(self, outputs):
flattened = []
# copy outputs
outputs = {k: set(v) for k, v in outputs.items()}
while outputs:
lasts = [k for k, v in outputs.items() if not v]
for l in lasts:
outputs.pop(l)
for l in lasts:
for v in outputs.values():
if l in v:
v.remove(l)
flattened.extend(lasts)
return flattened
def _build_tf(self, tf, *inputs):
y_tensors = [t for i, t in zip(self.inputs, inputs) if i is self.y]
assert len(y_tensors) == 1
x_tensors = [t for x in self.xs for i, t in zip(self.inputs, inputs) if i is x]
assert len(x_tensors) == len(self.xs)
return tf.gradients(y_tensors[0], x_tensors)
|
[
"numpy.ones_like"
] |
[((805, 833), 'numpy.ones_like', 'np.ones_like', (['values[self.y]'], {}), '(values[self.y])\n', (817, 833), True, 'import numpy as np\n')]
|
import datetime
import pandas as pd
import numpy as np
import subprocess
import pickle
from numpy import array
import seaborn as sn
import tensorflow as tf
import matplotlib.pyplot as plt
#combining predictions and generating a plot of the predicted-class distribution
def predict(data):
with open('rolf.pickle', 'rb') as handle:
rolf = pickle.load(handle)
    #preprocessed data
data = pd.DataFrame.from_dict(data)
del data['epoch']
data = data.to_numpy()
data = data.astype('float64')
results = []
for i in range(len(data)):
results.append(rolf.predict(data[i]))
result1 = results
model = tf.keras.models.load_model('model')
    # `data` was already converted to a float64 numpy array above; repeating the
    # DataFrame conversion here would fail (the 'epoch' column no longer exists),
    # so the preprocessed array is reused directly for the Keras model.
results = []
for i in range(len(data)):
results.append(np.argmax(model.predict(array([data[i]])))-1)
result2 = results
result1 = np.array(result1)
result2 = np.array(result2)
assert len(result1) == len(result2)
    #counting through the arrays and generating plotting info
counter1 = np.zeros(29)
counter2 = np.zeros(29)
    for label in np.unique(result1):
        for i in range(len(result1)):
            if result1[i] == label:
                counter1[label] = counter1[label] + 1
    for label in np.unique(result2):
        for i in range(len(result2)):
            if result2[i] == label:
                counter2[label] = counter2[label] + 1
colors1 = []
for i in range(29):
color = list(np.random.choice(range(256), size=3) / 256)
colors1.append(color)
colors2 = []
for i in range(29):
color = list(np.random.choice(range(256), size=3) / 256)
colors2.append(color)
plt.bar(x=np.arange(29), height=counter1, color=colors1)
plt.bar(x=np.arange(29), height=counter2, color=colors2)
    plt.title('occurrence of predicted clusters')
plt.xlabel('cluster/class')
plt.xticks(np.arange(29), rotation=90)
    plt.ylabel('occurrence')
plt.savefig('prediction.png')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"tensorflow.keras.models.load_model",
"pandas.DataFrame.from_dict",
"numpy.zeros",
"pickle.load",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.unique"
] |
[((394, 422), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (416, 422), True, 'import pandas as pd\n'), ((634, 669), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model"""'], {}), "('model')\n", (660, 669), True, 'import tensorflow as tf\n'), ((681, 709), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (703, 709), True, 'import pandas as pd\n'), ((946, 963), 'numpy.array', 'np.array', (['result1'], {}), '(result1)\n', (954, 963), True, 'import numpy as np\n'), ((978, 995), 'numpy.array', 'np.array', (['result2'], {}), '(result2)\n', (986, 995), True, 'import numpy as np\n'), ((1112, 1124), 'numpy.zeros', 'np.zeros', (['(29)'], {}), '(29)\n', (1120, 1124), True, 'import numpy as np\n'), ((1140, 1152), 'numpy.zeros', 'np.zeros', (['(29)'], {}), '(29)\n', (1148, 1152), True, 'import numpy as np\n'), ((1168, 1186), 'numpy.unique', 'np.unique', (['result1'], {}), '(result1)\n', (1177, 1186), True, 'import numpy as np\n'), ((1325, 1343), 'numpy.unique', 'np.unique', (['result2'], {}), '(result2)\n', (1334, 1343), True, 'import numpy as np\n'), ((1865, 1909), 'matplotlib.pyplot.title', 'plt.title', (['"""occurance of predicted clusters"""'], {}), "('occurance of predicted clusters')\n", (1874, 1909), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""cluster/class"""'], {}), "('cluster/class')\n", (1924, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1989, 2012), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""occurance"""'], {}), "('occurance')\n", (1999, 2012), True, 'import matplotlib.pyplot as plt\n'), ((2017, 2046), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""prediction.png"""'], {}), "('prediction.png')\n", (2028, 2046), True, 'import matplotlib.pyplot as plt\n'), ((339, 358), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (350, 358), False, 'import pickle\n'), ((1957, 1970), 'numpy.arange', 'np.arange', (['(29)'], {}), '(29)\n', (1966, 1970), True, 'import numpy as np\n'), ((1753, 1766), 'numpy.arange', 'np.arange', (['(29)'], {}), '(29)\n', (1762, 1766), True, 'import numpy as np\n'), ((1814, 1827), 'numpy.arange', 'np.arange', (['(29)'], {}), '(29)\n', (1823, 1827), True, 'import numpy as np\n'), ((888, 904), 'numpy.array', 'array', (['[data[i]]'], {}), '([data[i]])\n', (893, 904), False, 'from numpy import array\n')]
|
import ast
import argparse
import logging
import warnings
import os
import json
import glob
import subprocess
import sys
import boto3
import pickle
import pandas as pd
from collections import Counter
from timeit import default_timer as timer
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import shutil
import networkx as nx
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
from prettytable import PrettyTable
import autogluon as ag
from autogluon import TabularPrediction as task
from autogluon.task.tabular_prediction import TabularDataset
from autogluon.utils.tabular.ml.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS
print(f'DEBUG AutoGluon version : {ag.__version__}')
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du','-sh', path]).split()[0].decode('utf-8')
def __load_input_data(path: str) -> TabularDataset:
"""
Load training data as dataframe
:param path:
:return: DataFrame
"""
input_data_files = os.listdir(path)
try:
input_dfs = [pd.read_csv(f'{path}/{data_file}') for data_file in input_data_files]
return task.Dataset(df=pd.concat(input_dfs))
except:
print(f'No csv data in {path}!')
return None
def format_for_print(df):
table = PrettyTable(list(df.columns))
for row in df.itertuples():
table.add_row(row[1:])
return str(table)
def get_roc_auc(y_test_true, y_test_pred, labels, class_labels_internal, model_output_dir):
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
from itertools import cycle
y_test_true_binalized = label_binarize(y_test_true, classes=labels)
if len(labels) == 2:
# binary classification
true_label_index = class_labels_internal.index(1)
y_test_pred = y_test_pred[:,true_label_index]
y_test_pred = np.reshape(y_test_pred, (-1, 1))
labels = labels[true_label_index:true_label_index+1]
n_classes = 1
else:
# multiclass classification
n_classes = len(labels)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_true_binalized[:, i], y_test_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_true_binalized.ravel(), y_test_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
sns.set(font_scale=1)
plt.figure()
lw = 2
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color,
lw=lw, label=f'ROC curve for {labels[i]} (area = %0.2f)' % roc_auc[i])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
plt.savefig(f'{model_output_dir}/roc_auc_curve.png')
def train(args):
model_output_dir = f'{args.output_dir}/data'
is_distributed = len(args.hosts) > 1
host_rank = args.hosts.index(args.current_host)
dist_ip_addrs = args.hosts
dist_ip_addrs.pop(host_rank)
# Load training and validation data
print(f'Train files: {os.listdir(args.train)}')
train_data = __load_input_data(args.train)
# Extract column info
target = args.fit_args['label']
columns = train_data.columns.tolist()
column_dict = {"columns":columns}
with open('columns.pkl', 'wb') as f:
pickle.dump(column_dict, f)
# Train models
predictor = task.fit(
train_data=train_data,
output_directory=args.model_dir,
**args.fit_args,
)
# Results summary
predictor.fit_summary(verbosity=3)
model_summary_fname_src = os.path.join(predictor.output_directory, 'SummaryOfModels.html')
model_summary_fname_tgt = os.path.join(model_output_dir, 'SummaryOfModels.html')
if os.path.exists(model_summary_fname_src):
shutil.copy(model_summary_fname_src, model_summary_fname_tgt)
# ensemble visualization
G = predictor._trainer.model_graph
remove = [node for node,degree in dict(G.degree()).items() if degree < 1]
G.remove_nodes_from(remove)
A = nx.nx_agraph.to_agraph(G)
A.graph_attr.update(rankdir='BT')
A.node_attr.update(fontsize=10)
for node in A.iternodes():
        node.attr['shape'] = 'rectangle'
A.draw(os.path.join(model_output_dir, 'ensemble-model.png'), format='png', prog='dot')
# Optional test data
if args.test:
print(f'Test files: {os.listdir(args.test)}')
test_data = __load_input_data(args.test)
# Test data must be labeled for scoring
if args.fit_args['label'] in test_data:
# Leaderboard on test data
print('Running model on test data and getting Leaderboard...')
leaderboard = predictor.leaderboard(dataset=test_data, silent=True)
print(format_for_print(leaderboard), end='\n\n')
leaderboard.to_csv(f'{model_output_dir}/leaderboard.csv', index=False)
# Feature importance on test data
# Note: Feature importance must be calculated on held-out (test) data.
# If calculated on training data it will be biased due to overfitting.
if args.feature_importance:
print('Feature importance:')
# Increase rows to print feature importance
pd.set_option('display.max_rows', 500)
feature_importance = predictor.feature_importance(test_data)
feature_importance_df = pd.DataFrame(feature_importance, columns=['Importance score']).rename_axis(index='Feature')
print(feature_importance_df)
feature_importance_df.to_csv(f'{model_output_dir}/feature_importance.csv', index=True)
# Classification report and confusion matrix for classification model
if predictor.problem_type in [BINARY, MULTICLASS]:
from sklearn.metrics import classification_report, confusion_matrix
X_test = test_data.drop(args.fit_args['label'], axis=1)
y_test_true = test_data[args.fit_args['label']]
y_test_pred = predictor.predict(X_test)
y_test_pred_prob = predictor.predict_proba(X_test, as_multiclass=True)
report_dict = classification_report(y_test_true, y_test_pred, output_dict=True, labels=predictor.class_labels)
report_dict_df = pd.DataFrame(report_dict).T
report_dict_df.to_csv(f'{model_output_dir}/classification_report.csv', index=True)
cm = confusion_matrix(y_test_true, y_test_pred, labels=predictor.class_labels)
cm_df = pd.DataFrame(cm, predictor.class_labels, predictor.class_labels)
sns.set(font_scale=1)
cmap = 'coolwarm'
sns.heatmap(cm_df, annot=True, fmt='d', cmap=cmap)
plt.title('Confusion Matrix')
plt.ylabel('true label')
plt.xlabel('predicted label')
plt.show()
plt.savefig(f'{model_output_dir}/confusion_matrix.png')
get_roc_auc(y_test_true, y_test_pred_prob, predictor.class_labels, predictor.class_labels_internal, model_output_dir)
else:
warnings.warn('Skipping eval on test data since label column is not included.')
# Files summary
print(f'Model export summary:')
print(f"/opt/ml/model/: {os.listdir('/opt/ml/model/')}")
models_contents = os.listdir('/opt/ml/model/models')
print(f"/opt/ml/model/models: {models_contents}")
print(f"/opt/ml/model directory size: {du('/opt/ml/model/')}\n")
# ------------------------------------------------------------ #
# Training execution #
# ------------------------------------------------------------ #
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.register('type','bool', lambda v: v.lower() in ('yes', 'true', 't', '1'))
# Environment parameters
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--output-dir', type=str, default=os.environ['SM_OUTPUT_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
# Arguments to be passed to task.fit()
parser.add_argument('--fit_args', type=lambda s: ast.literal_eval(s),
default="{'presets': ['optimize_for_deployment']}",
help='https://autogluon.mxnet.io/api/autogluon.task.html#tabularprediction')
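    # Example invocation (the label name 'target' is hypothetical):
    #   --fit_args "{'label': 'target', 'presets': ['optimize_for_deployment']}"
    # The string is parsed with ast.literal_eval, and 'label' is validated as a
    # required key in __main__ below.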
# Additional options
parser.add_argument('--feature_importance', type='bool', default=True)
return parser.parse_args()
if __name__ == "__main__":
start = timer()
args = parse_args()
# Verify label is included
if 'label' not in args.fit_args:
raise ValueError('"label" is a required parameter of "fit_args"!')
# Convert optional fit call hyperparameters from strings
if 'hyperparameters' in args.fit_args:
for model_type,options in args.fit_args['hyperparameters'].items():
assert isinstance(options, dict)
for k,v in options.items():
args.fit_args['hyperparameters'][model_type][k] = eval(v)
# Print SageMaker args
print('fit_args:')
for k,v in args.fit_args.items():
print(f'{k}, type: {type(v)}, value: {v}')
# Make test data optional
if os.environ.get('SM_CHANNEL_TESTING'):
args.test = os.environ['SM_CHANNEL_TESTING']
else:
args.test = None
train(args)
# Package inference code with model export
subprocess.call('mkdir /opt/ml/model/code'.split())
subprocess.call('cp /opt/ml/code/inference.py /opt/ml/model/code/'.split())
subprocess.call('cp columns.pkl /opt/ml/model/code/'.split())
elapsed_time = round(timer()-start,3)
print(f'Elapsed time: {elapsed_time} seconds. Training Completed!')
|
[
"matplotlib.pyplot.title",
"sklearn.metrics.confusion_matrix",
"pickle.dump",
"seaborn.heatmap",
"argparse.ArgumentParser",
"autogluon.TabularPrediction.fit",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"itertools.cycle",
"os.path.join",
"pandas.set_option",
"shutil.copy",
"pandas.DataFrame",
"json.loads",
"os.path.exists",
"warnings.catch_warnings",
"numpy.reshape",
"seaborn.set",
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"subprocess.check_output",
"sklearn.preprocessing.label_binarize",
"matplotlib.pyplot.ylabel",
"os.listdir",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"sklearn.metrics.roc_curve",
"timeit.default_timer",
"networkx.nx_agraph.to_agraph",
"os.environ.get",
"sklearn.metrics.auc",
"ast.literal_eval",
"warnings.warn",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((358, 383), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (381, 383), False, 'import warnings\n'), ((389, 451), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (412, 451), False, 'import warnings\n'), ((1316, 1332), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1326, 1332), False, 'import os\n'), ((1980, 2023), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['y_test_true'], {'classes': 'labels'}), '(y_test_true, classes=labels)\n', (1994, 2023), False, 'from sklearn.preprocessing import label_binarize\n'), ((2858, 2889), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (2861, 2889), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2899, 2920), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1)'}), '(font_scale=1)\n', (2906, 2920), True, 'import seaborn as sns\n'), ((2925, 2937), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2935, 2937), True, 'import matplotlib.pyplot as plt\n'), ((2962, 3009), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (2967, 3009), False, 'from itertools import cycle\n'), ((3200, 3261), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (3208, 3261), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3286), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (3274, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3291, 3312), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (3299, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3350), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (3327, 3350), True, 'import matplotlib.pyplot as plt\n'), ((3355, 3387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (3365, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3446), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver operating characteristic example"""'], {}), "('Receiver operating characteristic example')\n", (3401, 3446), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3480), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3461, 3480), True, 'import matplotlib.pyplot as plt\n'), ((3485, 3495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3493, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3552), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{model_output_dir}/roc_auc_curve.png"""'], {}), "(f'{model_output_dir}/roc_auc_curve.png')\n", (3511, 3552), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4276), 'autogluon.TabularPrediction.fit', 'task.fit', ([], {'train_data': 'train_data', 'output_directory': 'args.model_dir'}), '(train_data=train_data, output_directory=args.model_dir, **args.\n fit_args)\n', (4198, 4276), True, 'from autogluon import TabularPrediction as task\n'), ((4399, 4463), 'os.path.join', 'os.path.join', (['predictor.output_directory', '"""SummaryOfModels.html"""'], {}), "(predictor.output_directory, 'SummaryOfModels.html')\n", (4411, 4463), False, 'import os\n'), ((4494, 4548), 'os.path.join', 'os.path.join', 
(['model_output_dir', '"""SummaryOfModels.html"""'], {}), "(model_output_dir, 'SummaryOfModels.html')\n", (4506, 4548), False, 'import os\n'), ((4561, 4600), 'os.path.exists', 'os.path.exists', (['model_summary_fname_src'], {}), '(model_summary_fname_src)\n', (4575, 4600), False, 'import os\n'), ((4863, 4888), 'networkx.nx_agraph.to_agraph', 'nx.nx_agraph.to_agraph', (['G'], {}), '(G)\n', (4885, 4888), True, 'import networkx as nx\n'), ((8309, 8343), 'os.listdir', 'os.listdir', (['"""/opt/ml/model/models"""'], {}), "('/opt/ml/model/models')\n", (8319, 8343), False, 'import os\n'), ((8695, 8774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (8718, 8774), False, 'import argparse\n'), ((9895, 9902), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9900, 9902), True, 'from timeit import default_timer as timer\n'), ((10605, 10641), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TESTING"""'], {}), "('SM_CHANNEL_TESTING')\n", (10619, 10641), False, 'import os\n'), ((2220, 2252), 'numpy.reshape', 'np.reshape', (['y_test_pred', '(-1, 1)'], {}), '(y_test_pred, (-1, 1))\n', (2230, 2252), True, 'import numpy as np\n'), ((2586, 2643), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test_true_binalized[:, i]', 'y_test_pred[:, i]'], {}), '(y_test_true_binalized[:, i], y_test_pred[:, i])\n', (2595, 2643), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2665, 2684), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (2668, 2684), False, 'from sklearn.metrics import roc_curve, auc\n'), ((3070, 3183), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr[i]', 'tpr[i]'], {'color': 'color', 'lw': 'lw', 'label': "(f'ROC curve for {labels[i]} (area = %0.2f)' % roc_auc[i])"}), "(fpr[i], tpr[i], color=color, lw=lw, label=\n f'ROC curve for {labels[i]} (area = %0.2f)' % roc_auc[i])\n", (3078, 3183), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4149), 'pickle.dump', 'pickle.dump', (['column_dict', 'f'], {}), '(column_dict, f)\n', (4133, 4149), False, 'import pickle\n'), ((4610, 4671), 'shutil.copy', 'shutil.copy', (['model_summary_fname_src', 'model_summary_fname_tgt'], {}), '(model_summary_fname_src, model_summary_fname_tgt)\n', (4621, 4671), False, 'import shutil\n'), ((5045, 5097), 'os.path.join', 'os.path.join', (['model_output_dir', '"""ensemble-model.png"""'], {}), "(model_output_dir, 'ensemble-model.png')\n", (5057, 5097), False, 'import os\n'), ((1363, 1397), 'pandas.read_csv', 'pd.read_csv', (['f"""{path}/{data_file}"""'], {}), "(f'{path}/{data_file}')\n", (1374, 1397), True, 'import pandas as pd\n'), ((8089, 8168), 'warnings.warn', 'warnings.warn', (['"""Skipping eval on test data since label column is not included."""'], {}), "('Skipping eval on test data since label column is not included.')\n", (8102, 8168), False, 'import warnings\n'), ((8958, 8992), 'json.loads', 'json.loads', (["os.environ['SM_HOSTS']"], {}), "(os.environ['SM_HOSTS'])\n", (8968, 8992), False, 'import json\n'), ((11024, 11031), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11029, 11031), True, 'from timeit import default_timer as timer\n'), ((1464, 1484), 'pandas.concat', 'pd.concat', (['input_dfs'], {}), '(input_dfs)\n', (1473, 1484), True, 'import pandas as pd\n'), ((3853, 3875), 'os.listdir', 'os.listdir', (['args.train'], {}), '(args.train)\n', (3863, 3875), False, 'import os\n'), ((6102, 6140), 'pandas.set_option', 'pd.set_option', 
(['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (6115, 6140), True, 'import pandas as pd\n'), ((7083, 7184), 'sklearn.metrics.classification_report', 'classification_report', (['y_test_true', 'y_test_pred'], {'output_dict': '(True)', 'labels': 'predictor.class_labels'}), '(y_test_true, y_test_pred, output_dict=True, labels=\n predictor.class_labels)\n', (7104, 7184), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((7378, 7451), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test_true', 'y_test_pred'], {'labels': 'predictor.class_labels'}), '(y_test_true, y_test_pred, labels=predictor.class_labels)\n', (7394, 7451), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((7476, 7540), 'pandas.DataFrame', 'pd.DataFrame', (['cm', 'predictor.class_labels', 'predictor.class_labels'], {}), '(cm, predictor.class_labels, predictor.class_labels)\n', (7488, 7540), True, 'import pandas as pd\n'), ((7557, 7578), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1)'}), '(font_scale=1)\n', (7564, 7578), True, 'import seaborn as sns\n'), ((7629, 7679), 'seaborn.heatmap', 'sns.heatmap', (['cm_df'], {'annot': '(True)', 'fmt': '"""d"""', 'cmap': 'cmap'}), "(cm_df, annot=True, fmt='d', cmap=cmap)\n", (7640, 7679), True, 'import seaborn as sns\n'), ((7696, 7725), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (7705, 7725), True, 'import matplotlib.pyplot as plt\n'), ((7742, 7766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""true label"""'], {}), "('true label')\n", (7752, 7766), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7812), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted label"""'], {}), "('predicted label')\n", (7793, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7829, 7839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7837, 7839), True, 'import matplotlib.pyplot as plt\n'), ((7856, 7911), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{model_output_dir}/confusion_matrix.png"""'], {}), "(f'{model_output_dir}/confusion_matrix.png')\n", (7867, 7911), True, 'import matplotlib.pyplot as plt\n'), ((8255, 8283), 'os.listdir', 'os.listdir', (['"""/opt/ml/model/"""'], {}), "('/opt/ml/model/')\n", (8265, 8283), False, 'import os\n'), ((9524, 9543), 'ast.literal_eval', 'ast.literal_eval', (['s'], {}), '(s)\n', (9540, 9543), False, 'import ast\n'), ((5198, 5219), 'os.listdir', 'os.listdir', (['args.test'], {}), '(args.test)\n', (5208, 5219), False, 'import os\n'), ((7213, 7238), 'pandas.DataFrame', 'pd.DataFrame', (['report_dict'], {}), '(report_dict)\n', (7225, 7238), True, 'import pandas as pd\n'), ((1077, 1121), 'subprocess.check_output', 'subprocess.check_output', (["['du', '-sh', path]"], {}), "(['du', '-sh', path])\n", (1100, 1121), False, 'import subprocess\n'), ((6258, 6320), 'pandas.DataFrame', 'pd.DataFrame', (['feature_importance'], {'columns': "['Importance score']"}), "(feature_importance, columns=['Importance score'])\n", (6270, 6320), True, 'import pandas as pd\n')]
|
'''
Adjusted Kapre Spectrogram and Melspectrogram classes which do not
apply batch-wise range compression when converting to decibels
(i.e. when return_decibel_spectrogram / return_decibel_melgram is True)
'''
from kapre.time_frequency import Spectrogram, Melspectrogram
#from __future__ import absolute_import
import numpy as np
from keras import backend as K
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
"""[K] Convert (linear) amplitude to decibel (log10(x)).
x: Keras tensor or variable.
amin: minimum amplitude. amplitude smaller than `amin` is set to this.
dynamic_range: dynamic_range in decibel
"""
log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    #### These lines, which do batch-wise range compression, have been removed:
## log_spec = log_spec - K.max(log_spec) # [-?, 0]
## log_spec = K.maximum(log_spec, -1 * dynamic_range) # [-80, 0]
return log_spec
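# Worked example (values chosen for illustration) with the default amin=1e-10:
#   x = 1.0   -> 10 * log10(1.0)   =    0 dB
#   x = 0.01  -> 10 * log10(0.01)  =  -20 dB
#   x = 1e-12 -> clamped to amin   = -100 dB
# Unlike the stock Kapre helper, no per-batch maximum subtraction or
# dynamic-range clipping is applied afterwards.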
class SpectrogramModified(Spectrogram):
def call(self, x):
output = self._spectrogram_mono(x[:, 0:1, :])
if self.is_mono is False:
for ch_idx in range(1, self.n_ch):
output = K.concatenate((output,
self._spectrogram_mono(x[:, ch_idx:ch_idx + 1, :])),
axis=self.ch_axis_idx)
if self.power_spectrogram != 2.0:
output = K.pow(K.sqrt(output), self.power_spectrogram)
if self.return_decibel_spectrogram:
output = amplitude_to_decibel(output) ## only difference from non-modified class
return output
class MelspectrogramModified(Melspectrogram):
def call(self, x):
power_spectrogram = super(Melspectrogram, self).call(x)
# now, channels_first: (batch_sample, n_ch, n_freq, n_time)
# channels_last: (batch_sample, n_freq, n_time, n_ch)
if self.image_data_format == 'channels_first':
power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 1, 3, 2])
else:
power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 3, 2, 1])
# now, whatever image_data_format, (batch_sample, n_ch, n_time, n_freq)
output = K.dot(power_spectrogram, self.freq2mel)
if self.image_data_format == 'channels_first':
output = K.permute_dimensions(output, [0, 1, 3, 2])
else:
output = K.permute_dimensions(output, [0, 3, 2, 1])
if self.power_melgram != 2.0:
output = K.pow(K.sqrt(output), self.power_melgram)
if self.return_decibel_melgram:
output = amplitude_to_decibel(output) ## only difference from non-modified class
return output
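# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes the Kapre 0.1.x constructor arguments (n_dft, n_hop, sr, n_mels,
# return_decibel_melgram) and a mono (1, 16000) waveform input; adjust to the
# actual front-end configuration.
if __name__ == '__main__':
    from keras.models import Sequential

    demo = Sequential()
    demo.add(MelspectrogramModified(n_dft=512, n_hop=256, sr=16000, n_mels=96,
                                    return_decibel_melgram=True,
                                    input_shape=(1, 16000)))
    demo.summary()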
|
[
"keras.backend.dot",
"keras.backend.sqrt",
"numpy.log",
"keras.backend.floatx",
"keras.backend.maximum",
"keras.backend.permute_dimensions"
] |
[((2148, 2187), 'keras.backend.dot', 'K.dot', (['power_spectrogram', 'self.freq2mel'], {}), '(power_spectrogram, self.freq2mel)\n', (2153, 2187), True, 'from keras import backend as K\n'), ((636, 646), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (644, 646), True, 'from keras import backend as K\n'), ((1897, 1950), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['power_spectrogram', '[0, 1, 3, 2]'], {}), '(power_spectrogram, [0, 1, 3, 2])\n', (1917, 1950), True, 'from keras import backend as K\n'), ((1997, 2050), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['power_spectrogram', '[0, 3, 2, 1]'], {}), '(power_spectrogram, [0, 3, 2, 1])\n', (2017, 2050), True, 'from keras import backend as K\n'), ((2264, 2306), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['output', '[0, 1, 3, 2]'], {}), '(output, [0, 1, 3, 2])\n', (2284, 2306), True, 'from keras import backend as K\n'), ((2342, 2384), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['output', '[0, 3, 2, 1]'], {}), '(output, [0, 3, 2, 1])\n', (2362, 2384), True, 'from keras import backend as K\n'), ((596, 614), 'keras.backend.maximum', 'K.maximum', (['x', 'amin'], {}), '(x, amin)\n', (605, 614), True, 'from keras import backend as K\n'), ((618, 628), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (624, 628), True, 'import numpy as np\n'), ((1338, 1352), 'keras.backend.sqrt', 'K.sqrt', (['output'], {}), '(output)\n', (1344, 1352), True, 'from keras import backend as K\n'), ((2450, 2464), 'keras.backend.sqrt', 'K.sqrt', (['output'], {}), '(output)\n', (2456, 2464), True, 'from keras import backend as K\n')]
|
import numpy
import joblib
import json
from azureml.core.model import Model
# from inference_schema.schema_decorators import input_schema, output_schema
def init():
# load the model from file into a global object
global model
# we assume that we have just one model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder
# (./azureml-models/$MODEL_NAME/$VERSION)
model_path = Model.get_model_path(
model_name="driver_training_model.pkl")
model = joblib.load(model_path)
# input_sample = numpy.array([
# [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
# [10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]])
# output_sample = numpy.array([
# 5021.509689995557,
# 3693.645386402646])
# Inference_schema generates a schema for your web service
# It then creates an OpenAPI (Swagger) specification for the web service
# at http://<scoring_base_url>/swagger.json
# @input_schema('data', NumpyParameterType(input_sample))
# @output_schema(NumpyParameterType(output_sample))
def run(raw_data, request_headers):
data = json.loads(raw_data)["data"]
    data = numpy.array(data)  # convert the deserialised list of rows into a numpy array for prediction
result = model.predict(data)
# Demonstrate how we can log custom data into the Application Insights
# traces collection.
# The 'X-Ms-Request-id' value is generated internally and can be used to
# correlate a log entry with the Application Insights requests collection.
# The HTTP 'traceparent' header may be set by the caller to implement
# distributed tracing (per the W3C Trace Context proposed specification)
# and can be used to correlate the request to external systems.
print(
(
'{{"RequestId":"{0}", '
'"TraceParent":"{1}", '
'"NumberOfPredictions":{2}}}'
).format(
request_headers.get("X-Ms-Request-Id", ""),
request_headers.get("Traceparent", ""),
len(result),
)
)
return {"result": result.tolist()}
if __name__ == "__main__":
# Test scoring
init()
test_row = '{"data": [[0,1,8,1,0,0,1,0,0,0,0,0,0,0,12,1,0,0,0.5,0.3,0.610327781,7,1,-1,0,-1,1,1,1,2,1,65,1,0.316227766,0.669556409,0.352136337,3.464101615,0.1,0.8,0.6,1,1,6,3,6,2,9,1,1,1,12,0,1,1,0,0,1],[4,2,5,1,0,0,0,0,1,0,0,0,0,0,5,1,0,0,0.9,0.5,0.771362431,4,1,-1,0,0,11,1,1,0,1,103,1,0.316227766,0.60632002,0.358329457,2.828427125,0.4,0.5,0.4,3,3,8,4,10,2,7,2,0,3,10,0,0,1,1,0,1]]}'
prediction = run(test_row, {})
print("Test result: ", prediction)
|
[
"joblib.load",
"numpy.array",
"json.loads",
"azureml.core.model.Model.get_model_path"
] |
[((464, 524), 'azureml.core.model.Model.get_model_path', 'Model.get_model_path', ([], {'model_name': '"""driver_training_model.pkl"""'}), "(model_name='driver_training_model.pkl')\n", (484, 524), False, 'from azureml.core.model import Model\n'), ((546, 569), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (557, 569), False, 'import joblib\n'), ((1186, 1203), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (1197, 1203), False, 'import numpy\n'), ((1146, 1166), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (1156, 1166), False, 'import json\n')]
|
'''
The whole reading model.
CNN + LSTM + A classifier with multinomial distribution.
'''
import torch
from torch import optim
from torch import nn
import torch.nn.functional as F
from torch.distributions import Bernoulli, Categorical
from torchtext import datasets
from torchtext import data
import os
import time
import numpy as np
import random
import argparse
from sklearn.metrics import accuracy_score
from networks import CNN_LSTM, Policy_C, Policy_N, Policy_S, ValueNetwork
from utils.utils import sample_policy_c, sample_policy_n, sample_policy_s, evaluate_earlystop, compute_policy_value_losses
from utils.utils import cnn_cost, lstm_cost, c_cost, n_cost, s_cost, cnn_whole
desc = '''
The whole reading model.
CNN + LSTM + A classifier with multinomial distribution.
'''
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--seed', type=int, default=2019, metavar='S',
help='random seed (default: 2019)')
args = parser.parse_args()
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
set_seed(args.seed)
TEXT = data.Field(sequential=True, tokenize='spacy', lower=True, fix_length=400) #
LABEL = data.LabelField(dtype=torch.float)
print('Splitting data...')
# download the IMDB dataset
train, test_data = datasets.IMDB.splits(TEXT, LABEL) # 25,000 training and 25,000 testing data
train_data, valid_data = train.split(split_ratio=0.8) # split training data into 20,000 training and 5,000 validation samples
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
MAX_VOCAB_SIZE = 25000
# use pretrained embedding of glove
print('Building vocabulary...')
TEXT.build_vocab(train_data, max_size=MAX_VOCAB_SIZE, vectors="glove.6B.100d", unk_init = torch.Tensor.normal_)
LABEL.build_vocab(train_data)
# split the datasets into batches
BATCH_SIZE = 64 # the batch size for a dataset iterator
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'device: {device}')
print('Building iterators...')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device)
# set up parameters
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
KER_SIZE = 5
HIDDEN_DIM = 128
LABEL_DIM = 2
N_FILTERS = 128
learning_rate = 0.001
# the number of training epoches
num_of_epoch = 10
# set up the criterion
criterion = nn.CrossEntropyLoss().to(device)
# set up models
clstm = CNN_LSTM(INPUT_DIM, EMBEDDING_DIM, KER_SIZE, N_FILTERS, HIDDEN_DIM).to(device)
policy_c = Policy_C(HIDDEN_DIM, HIDDEN_DIM, LABEL_DIM).to(device)
# set up optimiser
params = list(clstm.parameters()) + list(policy_c.parameters())
optimizer = optim.Adam(params, lr=learning_rate)
# add pretrained embeddings
pretrained_embeddings = TEXT.vocab.vectors
clstm.embedding.weight.data.copy_(pretrained_embeddings)
clstm.embedding.weight.requires_grad = True # update the initial weights
def evaluate(iterator):
clstm.eval()
policy_c.eval()
true_labels = []
pred_labels = []
eval_loss = 0
for index, valid in enumerate(iterator):
label = valid.label
text = valid.text.transpose(0,1)
batch_size = label.size()[0]
h_0 = torch.zeros([1, batch_size, 128]).to(device)
ht = clstm(text, h_0)
label_raws = policy_c(ht)
label_probs = F.softmax(label_raws, dim=1)
m = Categorical(label_probs)
pred_label = m.sample()
true_labels.extend(label.cpu().numpy())
pred_labels.extend(pred_label.cpu().squeeze().numpy())
loss = criterion(label_raws.squeeze(), label.to(torch.long))
eval_loss += loss/len(iterator)
eval_accuracy = accuracy_score(true_labels, pred_labels)
return eval_loss, eval_accuracy
def main():
'''
Training and evaluation of the model.
'''
print('training starts...')
for epoch in range(num_of_epoch):
clstm.train()
policy_c.train()
true_labels = []
pred_labels = []
train_loss = 0
for index, train in enumerate(train_iterator):
label = train.label # output_dim:64
text = train.text.transpose(0,1) #: 64, 400
batch_size = label.size()[0]
h_0 = torch.zeros([1, batch_size, 128]).to(device)
ht = clstm(text, h_0) #: 64, 128
label_raws = policy_c(ht)
optimizer.zero_grad()
loss = criterion(label_raws.squeeze(), label.to(torch.long))
loss.backward()
optimizer.step()
# draw a prediction label
label_probs = F.softmax(label_raws.detach(), dim=1)
m = Categorical(label_probs)
pred_label = m.sample()
true_labels.extend(label.cpu().numpy())
pred_labels.extend(pred_label.cpu().squeeze().numpy())
train_loss += loss/len(train_iterator)
train_accuracy = accuracy_score(true_labels, pred_labels)
print('epoch:{0}, train accuracy:{1}, train_loss:{2}'.format(epoch, train_accuracy, train_loss))
eval_loss, eval_accuracy = evaluate(valid_iterator)
print('epoch:{0}, eval accuracy:{1}, eval_loss:{2}'.format(epoch, eval_accuracy, eval_loss))
# testing
test_loss, test_accuracy = evaluate(test_iterator)
    print('\nTest accuracy: {0}, test loss: {1}'.format(test_accuracy, test_loss))
if __name__ == '__main__':
main()
cost = cnn_whole + c_cost + lstm_cost * 24
print('whole reading FLOPs per data: ', cost)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.distributions.Categorical",
"torchtext.datasets.IMDB.splits",
"torch.manual_seed",
"sklearn.metrics.accuracy_score",
"torch.cuda.manual_seed",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torchtext.data.LabelField",
"torch.optim.Adam",
"random.seed",
"torch.cuda.is_available",
"networks.CNN_LSTM",
"networks.Policy_C",
"torch.zeros",
"torchtext.data.BucketIterator.splits",
"torchtext.data.Field"
] |
[((795, 836), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (818, 836), False, 'import argparse\n'), ((1238, 1311), 'torchtext.data.Field', 'data.Field', ([], {'sequential': '(True)', 'tokenize': '"""spacy"""', 'lower': '(True)', 'fix_length': '(400)'}), "(sequential=True, tokenize='spacy', lower=True, fix_length=400)\n", (1248, 1311), False, 'from torchtext import data\n'), ((1323, 1357), 'torchtext.data.LabelField', 'data.LabelField', ([], {'dtype': 'torch.float'}), '(dtype=torch.float)\n', (1338, 1357), False, 'from torchtext import data\n'), ((1433, 1466), 'torchtext.datasets.IMDB.splits', 'datasets.IMDB.splits', (['TEXT', 'LABEL'], {}), '(TEXT, LABEL)\n', (1453, 1466), False, 'from torchtext import datasets\n'), ((2308, 2414), 'torchtext.data.BucketIterator.splits', 'data.BucketIterator.splits', (['(train_data, valid_data, test_data)'], {'batch_size': 'BATCH_SIZE', 'device': 'device'}), '((train_data, valid_data, test_data), batch_size=\n BATCH_SIZE, device=device)\n', (2334, 2414), False, 'from torchtext import data\n'), ((2961, 2997), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'learning_rate'}), '(params, lr=learning_rate)\n', (2971, 2997), False, 'from torch import optim\n'), ((1012, 1029), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1023, 1029), False, 'import random\n'), ((1034, 1054), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1048, 1054), True, 'import numpy as np\n'), ((1059, 1082), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1076, 1082), False, 'import torch\n'), ((1087, 1115), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1109, 1115), False, 'import torch\n'), ((3958, 3998), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (3972, 3998), False, 'from sklearn.metrics import accuracy_score\n'), ((2164, 2189), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2187, 2189), False, 'import torch\n'), ((2663, 2684), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2682, 2684), False, 'from torch import nn\n'), ((2720, 2787), 'networks.CNN_LSTM', 'CNN_LSTM', (['INPUT_DIM', 'EMBEDDING_DIM', 'KER_SIZE', 'N_FILTERS', 'HIDDEN_DIM'], {}), '(INPUT_DIM, EMBEDDING_DIM, KER_SIZE, N_FILTERS, HIDDEN_DIM)\n', (2728, 2787), False, 'from networks import CNN_LSTM, Policy_C, Policy_N, Policy_S, ValueNetwork\n'), ((2810, 2853), 'networks.Policy_C', 'Policy_C', (['HIDDEN_DIM', 'HIDDEN_DIM', 'LABEL_DIM'], {}), '(HIDDEN_DIM, HIDDEN_DIM, LABEL_DIM)\n', (2818, 2853), False, 'from networks import CNN_LSTM, Policy_C, Policy_N, Policy_S, ValueNetwork\n'), ((3620, 3648), 'torch.nn.functional.softmax', 'F.softmax', (['label_raws'], {'dim': '(1)'}), '(label_raws, dim=1)\n', (3629, 3648), True, 'import torch.nn.functional as F\n'), ((3661, 3685), 'torch.distributions.Categorical', 'Categorical', (['label_probs'], {}), '(label_probs)\n', (3672, 3685), False, 'from torch.distributions import Bernoulli, Categorical\n'), ((5209, 5249), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (5223, 5249), False, 'from sklearn.metrics import accuracy_score\n'), ((4953, 4977), 'torch.distributions.Categorical', 'Categorical', (['label_probs'], {}), '(label_probs)\n', (4964, 4977), False, 'from torch.distributions import Bernoulli, Categorical\n'), ((3489, 3522), 
'torch.zeros', 'torch.zeros', (['[1, batch_size, 128]'], {}), '([1, batch_size, 128])\n', (3500, 3522), False, 'import torch\n'), ((4527, 4560), 'torch.zeros', 'torch.zeros', (['[1, batch_size, 128]'], {}), '([1, batch_size, 128])\n', (4538, 4560), False, 'import torch\n')]
|
# DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
#
# This material is based upon work supported by the Assistant Secretary of Defense for Research and
# Engineering under Air Force Contract No. FA8721-05-C-0002 and/or FA8702-15-D-0001. Any opinions,
# findings, conclusions or recommendations expressed in this material are those of the author(s) and
# do not necessarily reflect the views of the Assistant Secretary of Defense for Research and
# Engineering.
#
# © 2017 Massachusetts Institute of Technology.
#
# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
#
# The software/firmware is provided to you on an As-Is basis
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or
# 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are
# defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than
# as specifically authorized by the U.S. Government may violate any copyrights that exist in this
# work.
import numpy as np
import json
import pickle
import torch
import math
import h5py
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import random
def invert_dict(d):
return {v: k for k, v in d.items()}
def load_vocab(path):
with open(path, 'r') as f:
vocab = json.load(f)
vocab['question_idx_to_token'] = invert_dict(vocab['question_token_to_idx'])
vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
return vocab
class GQADataset(Dataset):
def __init__(self, vocab, answers, questions, questions_len, q_image_indices, question_id, object_feature, spatial_feature,
img_info, num_answer):
# convert data to tensor
self.all_answers = answers
self.all_questions = torch.from_numpy(np.asarray(questions)).long()
self.all_questions_len = torch.from_numpy(
np.asarray(questions_len)).long()
self.all_q_image_idxs = np.asarray(q_image_indices)
self.all_question_idxs = torch.from_numpy(np.asarray(question_id)).long()
self.spatial_feature = spatial_feature
self.object_feature = object_feature
self.img_info = img_info
self.num_answer = num_answer
self.vocab = vocab
def __getitem__(self, index):
answer = self.all_answers[index] if self.all_answers is not None else None
question = self.all_questions[index]
question_len = self.all_questions_len[index]
image_idx = self.all_q_image_idxs[index].item()
question_idx = self.all_question_idxs[index].item()
index = self.img_info[str(image_idx)]['index']
w = self.img_info[str(image_idx)]['width']
h = self.img_info[str(image_idx)]['height']
image_idx = torch.from_numpy(np.array([1]))
with h5py.File(self.object_feature, 'r') as fObject:
node_feat = fObject['features'][index] # (100, 2048)
boxes = fObject['bboxes'][index] # (4, 100)
with h5py.File(self.spatial_feature, 'r') as fSpatial:
scene_feat = fSpatial['features'][index] # (2048, 7, 7)
scene_feat = scene_feat.mean(2).mean(1)
scene_feat = np.expand_dims(scene_feat, axis=0)
scene_box = np.array([0, 0, w, h])
scene_box = np.expand_dims(scene_box, axis=0)
node_feat = np.concatenate([scene_feat, node_feat], axis=0) # (101, 2053)
boxes = np.concatenate([scene_box, boxes], axis=0)
spatial_feat = [0] * boxes.shape[0]
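        # Encode each box as a 7-d spatial feature:
        # [x1/w, y1/h, x2/w, y2/h, box_width/w, box_height/h, box_area/(w*h)],
        # i.e. corner coordinates and box size normalised by the image dimensions.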
for i in range(boxes.shape[0]):
bbox = np.copy(boxes[i])
bbox_x = bbox[2] - bbox[0]
bbox_y = bbox[3] - bbox[1]
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) / (w * h)
bbox[0] /= w
bbox[1] /= h
bbox[2] /= w
bbox[3] /= h
spatial_feat[i] = np.array([bbox[0], bbox[1], bbox[2], bbox[3], bbox_x / w, bbox_y / h, area])
spatial_feat = torch.from_numpy(np.array(spatial_feat)).float()
node_feat = torch.from_numpy(node_feat)
return (question_idx, image_idx, answer, question, question_len, node_feat, spatial_feat)
def __len__(self):
return len(self.all_questions)
class GQADataLoader(DataLoader):
def __init__(self, **kwargs):
vocab_json_path = str(kwargs.pop('vocab_json'))
print('loading vocab from %s' % (vocab_json_path))
vocab = load_vocab(vocab_json_path)
question_pt_path = str(kwargs.pop('question_pt'))
print('loading questions from %s' % (question_pt_path))
with open(question_pt_path, 'rb') as f:
obj = pickle.load(f)
questions = obj['questions']
questions_len = obj['questions_len']
q_image_indices = obj['image_ids']
question_id = obj['question_ids'].astype(np.int)
answers = np.asarray(obj['answers'])
glove_matrix = obj['glove']
# print(q_image_indices)
# exit()
if 'train_num' in kwargs:
train_num = kwargs.pop('train_num')
if train_num > 0:
choices = random.choices(range(len(questions)), k=train_num)
questions = questions[choices]
questions_len = questions_len[choices]
q_image_indices = q_image_indices[choices]
question_id = question_id[choices]
answers = answers[choices]
if 'val_num' in kwargs:
val_num = kwargs.pop('val_num')
if val_num > 0:
choices = random.choices(range(len(questions)), k=val_num)
questions = questions[choices]
questions_len = questions_len[choices]
q_image_indices = q_image_indices[choices]
question_id = question_id[choices]
if 'test_num' in kwargs:
test_num = kwargs.pop('test_num')
if test_num > 0:
choices = random.choices(range(len(questions)), k=test_num)
questions = questions[choices]
questions_len = questions_len[choices]
q_image_indices = q_image_indices[choices]
question_id = question_id[choices]
self.object_feature = kwargs.pop('object_feature')
print('loading object feature from %s' % (self.object_feature))
self.spatial_feature = kwargs.pop('spatial_feature')
print('loading spatial feature from %s' % (self.spatial_feature))
self.img_info = kwargs.pop('img_info')
with open(self.img_info, "r") as file:
self.img_info = json.load(file)
self.dataset = GQADataset(vocab, answers, questions, questions_len, q_image_indices, question_id, self.object_feature,
self.spatial_feature, self.img_info, len(vocab['answer_token_to_idx']))
self.vocab = vocab
self.batch_size = kwargs['batch_size']
self.glove_matrix = glove_matrix
kwargs['collate_fn'] = default_collate
super().__init__(self.dataset, **kwargs)
def __len__(self):
return math.ceil(len(self.dataset) / self.batch_size)
|
[
"h5py.File",
"json.load",
"numpy.copy",
"numpy.asarray",
"numpy.expand_dims",
"pickle.load",
"numpy.array",
"numpy.concatenate",
"torch.from_numpy"
] |
[((1444, 1456), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1453, 1456), False, 'import json\n'), ((2111, 2138), 'numpy.asarray', 'np.asarray', (['q_image_indices'], {}), '(q_image_indices)\n', (2121, 2138), True, 'import numpy as np\n'), ((3505, 3552), 'numpy.concatenate', 'np.concatenate', (['[scene_feat, node_feat]'], {'axis': '(0)'}), '([scene_feat, node_feat], axis=0)\n', (3519, 3552), True, 'import numpy as np\n'), ((3584, 3626), 'numpy.concatenate', 'np.concatenate', (['[scene_box, boxes]'], {'axis': '(0)'}), '([scene_box, boxes], axis=0)\n', (3598, 3626), True, 'import numpy as np\n'), ((4198, 4225), 'torch.from_numpy', 'torch.from_numpy', (['node_feat'], {}), '(node_feat)\n', (4214, 4225), False, 'import torch\n'), ((2938, 2951), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2946, 2951), True, 'import numpy as np\n'), ((2966, 3001), 'h5py.File', 'h5py.File', (['self.object_feature', '"""r"""'], {}), "(self.object_feature, 'r')\n", (2975, 3001), False, 'import h5py\n'), ((3150, 3186), 'h5py.File', 'h5py.File', (['self.spatial_feature', '"""r"""'], {}), "(self.spatial_feature, 'r')\n", (3159, 3186), False, 'import h5py\n'), ((3345, 3379), 'numpy.expand_dims', 'np.expand_dims', (['scene_feat'], {'axis': '(0)'}), '(scene_feat, axis=0)\n', (3359, 3379), True, 'import numpy as np\n'), ((3404, 3426), 'numpy.array', 'np.array', (['[0, 0, w, h]'], {}), '([0, 0, w, h])\n', (3412, 3426), True, 'import numpy as np\n'), ((3451, 3484), 'numpy.expand_dims', 'np.expand_dims', (['scene_box'], {'axis': '(0)'}), '(scene_box, axis=0)\n', (3465, 3484), True, 'import numpy as np\n'), ((3731, 3748), 'numpy.copy', 'np.copy', (['boxes[i]'], {}), '(boxes[i])\n', (3738, 3748), True, 'import numpy as np\n'), ((4028, 4104), 'numpy.array', 'np.array', (['[bbox[0], bbox[1], bbox[2], bbox[3], bbox_x / w, bbox_y / h, area]'], {}), '([bbox[0], bbox[1], bbox[2], bbox[3], bbox_x / w, bbox_y / h, area])\n', (4036, 4104), True, 'import numpy as np\n'), ((4805, 4819), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4816, 4819), False, 'import pickle\n'), ((5040, 5066), 'numpy.asarray', 'np.asarray', (["obj['answers']"], {}), "(obj['answers'])\n", (5050, 5066), True, 'import numpy as np\n'), ((6778, 6793), 'json.load', 'json.load', (['file'], {}), '(file)\n', (6787, 6793), False, 'import json\n'), ((1952, 1973), 'numpy.asarray', 'np.asarray', (['questions'], {}), '(questions)\n', (1962, 1973), True, 'import numpy as np\n'), ((2045, 2070), 'numpy.asarray', 'np.asarray', (['questions_len'], {}), '(questions_len)\n', (2055, 2070), True, 'import numpy as np\n'), ((2189, 2212), 'numpy.asarray', 'np.asarray', (['question_id'], {}), '(question_id)\n', (2199, 2212), True, 'import numpy as np\n'), ((4146, 4168), 'numpy.array', 'np.array', (['spatial_feat'], {}), '(spatial_feat)\n', (4154, 4168), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.rnn import _transpose_batch_time
class Decoder:
def __init__(self, **kwargs):
self.encodings = None
self.num_sentence_characters = kwargs['num_sentence_characters']
self.dict_length = kwargs['dict_length']
self.max_num_words = kwargs['max_num_words']
self.batch_size = kwargs['batch_size']
self.simple_decoder = True
self.global_lat_decoder = False
self.decoder_units = kwargs['decoder_units']
self.units_encoder_lstm = kwargs['encoder_dim']
self.lat_word_dim = kwargs['lat_word_dim']
self.global_lat_dim = kwargs['global_lat_dim']
self.decoder_p3_units = kwargs['decoder_p3_units']
def make_global_latent(self, values, units_dense):
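        # Mean-pool the encodings over their last axis, then use a small MLP to
        # predict 2 * units_dense values that are split into the mean and log-sigma
        # of a (presumably diagonal Gaussian) global latent variable.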
mean_pool = tf.reduce_mean(values, axis=-1)
pre_dist1 = tf.layers.dense(inputs=mean_pool, activation=tf.nn.relu, units=units_dense)
pre_dist2 = tf.layers.dense(inputs=pre_dist1, activation=None, units=units_dense * 2)
mu, log_sig = tf.split(tf.cast(pre_dist2, dtype=tf.float32), axis=-1, num_or_size_splits=2)
return mu, log_sig
def decoder1_p1(self, reuse, units_bilstm, encodings=None):
if encodings is None:
encodings = self.encodings
with tf.variable_scope('decoder_p1', reuse=reuse):
cell1 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
cell2 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
values, states = tf.nn.bidirectional_dynamic_rnn(inputs=encodings, dtype=tf.float32, cell_bw=cell1,
cell_fw=cell2, sequence_length=self.sentence_lens)
values = tf.concat(values, 2)
return values
def decoder2_p1(self, reuse, units_bilstm, global_latent):
        # needs some work: `input` (the Python builtin) and `hap_lens` used below are
        # undefined placeholders; the intended input appears to be the global latent
        # tiled across the sentence, as sketched in the commented-out line.
        # input = [global_latent for i in range(self.num_sentence_characters)]
with tf.variable_scope('decoder_p1', reuse=reuse):
cell1 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
cell2 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
values, states = tf.nn.bidirectional_dynamic_rnn(inputs=input, dtype=tf.float32, cell_bw=cell1,
cell_fw=cell2, sequence_length=tf.cast(hap_lens, tf.int32))
values = tf.concat(values, 2)
return values
def bahd_attention(self, queries, values, reuse):
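        # Additive (Bahdanau-style) attention over the latent word representations:
        #   score_j = v . tanh(W1 @ query + W2 @ values_j)
        # The scores are softmax-normalised over the word axis, the context vector is
        # the attention-weighted sum of `values`, and it is returned concatenated
        # with the query.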
with tf.variable_scope('attention_layer', reuse=reuse):
w1 = tf.get_variable(name='query_w', shape=[self.decoder_units, self.lat_word_dim])
w2 = tf.get_variable(name='value_w', shape=[self.lat_word_dim, self.lat_word_dim])
v = tf.get_variable(name='v', shape=[self.lat_word_dim])
print('here')
conv_q = tf.reshape(tf.einsum('ij,jk->ik', queries, w1), [-1, 1, self.lat_word_dim])
print('here1')
a_p1 = tf.reshape(tf.tile(conv_q, [1, 1, self.max_num_words]),
[self.batch_size, self.max_num_words, self.lat_word_dim])
print('here2')
print(w2)
print('a p1 {}'.format(a_p1))
a_p2 = tf.einsum('ijk,kl->ijl', values, w2)
print('a p2 {}'.format(a_p2))
print('here3')
out = tf.einsum('k,ijk->ij', v, tf.nn.tanh(name='combine', x=a_p1 + a_p2))
print('MAT for softmax {}'.format(out))
out_norm = tf.nn.softmax(out, dim=-1)
context = tf.reduce_sum(values * tf.reshape(tf.stack([out_norm for _ in range(self.lat_word_dim)], -1),
[self.batch_size, self.max_num_words, self.lat_word_dim]),
axis=-2)
# context2 = tf.matmul(tf.reshape(tf.diag(out_norm),[-1,self.max_num_words]),tf.transpose(values,[-1,self.max_num_words]))
# is this the same
# print('ALT CONTEXT {}'.format(context2))
            print('CONTEXT SHAPE {}'.format(context))
l1 = tf.concat([context, queries], axis=-1)
l1 = tf.reshape(l1, [self.batch_size, self.lat_word_dim + self.decoder_units])
return l1
def decoder_p2(self, num_hidden_word_units, inputs, sequence_length, global_latent, reuse, context_dim, max_time):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
cell = tf.contrib.rnn.LSTMCell(self.decoder_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = outputs_ta
context = self.bahd_attention(
queries=tf.zeros(shape=[self.batch_size, num_hidden_word_units], dtype=tf.float32), values=inputs,
reuse=None)
# next_input = tf.concat([tf.zeros(shape=[self.batch_size,self.lat_word_dim],dtype=tf.float32),tf.zeros(shape=[self.batch_size,self.global_lat_dim],dtype=tf.float32)],axis=-1)
next_input = tf.zeros(shape=[self.batch_size, self.lat_word_dim * 2 + self.global_lat_dim],
dtype=tf.float32)
else:
next_cell_state = cell_state
context = self.bahd_attention(queries=cell_output, values=inputs, reuse=True)
# should try passing in logits
                # should also try doing the final decoding in a separate RNN
# should try using a global latent vector here asap
# prediction = tf.layers.dense(inputs=context,activation=None,units=self.dict_length)
# took context out of decoder loop because softmax may be saturating
next_input = tf.concat([context, global_latent], axis=-1)
next_loop_state = loop_state.write(time - 1, context)
elements_finished = (time >= sequence_length)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('decoder_p2', reuse=reuse):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
# loop_state_out = _transpose_batch_time(loop_state_ta.stack())
return loop_state_ta
def decoder_p3(self, inputs, reuse, max_time, sequence_length):
# _inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time,name='context_array')
# _inputs_ta = _inputs_ta.unstack(tf.transpose(inputs,[1,0,2]))
_inputs_ta = inputs
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time, name='pred_char_array')
cell = tf.contrib.rnn.LSTMCell(self.decoder_p3_units)
def loop_fn(time, cell_output, cell_state, loop_state):
next_loop_state = loop_state
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_input = tf.concat(
[tf.zeros(shape=[self.batch_size, self.dict_length], dtype=tf.float32), _inputs_ta.read(time)],
axis=-1)
next_loop_state = outputs_ta
else:
next_cell_state = cell_state
prediction = tf.layers.dense(inputs=cell_output, activation=None, units=self.dict_length)
next_loop_state = loop_state.write(time - 1, prediction)
next_input = tf.concat([prediction, _inputs_ta.read(time)], axis=-1)
elements_finished = (time >= sequence_length)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('decoder_p3', reuse=reuse):
_, _, loop_ta = tf.nn.raw_rnn(cell, loop_fn)
output = _transpose_batch_time(loop_ta.stack())
return output
def run_decoder(self, units_lstm_decoder, sequence_length, units_dense_global, lat_words, reuse):
if self.simple_decoder:
global_mu, global_logsig = self.make_global_latent(values=lat_words, units_dense=units_dense_global)
eps = tf.random_normal(shape=[self.batch_size, units_dense_global], dtype=tf.float32)
global_latent = eps * tf.exp(global_logsig) + global_mu
out_2 = self.decoder_p2(sequence_length=sequence_length, num_hidden_word_units=units_lstm_decoder,
inputs=lat_words, reuse=reuse, global_latent=global_latent,
context_dim=units_lstm_decoder, max_time=self.num_sentence_characters)
out = self.decoder_p3(inputs=out_2, reuse=reuse, max_time=self.num_sentence_characters,
sequence_length=sequence_length)
return out, global_latent, global_logsig, global_mu
def prior(self, values, num_units, global_latent, word_lens, reuse):
global_latent = tf.transpose(tf.stack([global_latent for _ in range(self.max_num_words)]), [1, 0, 2])
print(' PRIOR input dim from post {}'.format(values))
values = tf.concat([tf.zeros(shape=[self.batch_size, 1, self.lat_word_dim], dtype=tf.float32), values], axis=1)
values = values[:, 0:-1, :]
values = tf.concat([tf.cast(values, dtype=tf.float32), global_latent], axis=-1)
print('PRIOR input dim to prior {}'.format(values))
with tf.variable_scope('prior', reuse=reuse):
cell = tf.contrib.rnn.LSTMCell(num_units)
values, _ = tf.nn.dynamic_rnn(cell=cell, inputs=values, sequence_length=word_lens, dtype=tf.float32)
with tf.variable_scope('prior/rnn', reuse=reuse):
w = tf.get_variable(name='prior_dense_w', shape=[self.lat_word_dim, self.lat_word_dim * 2],
dtype=tf.float32)
b = tf.get_variable(name='prior_dense_b', shape=self.lat_word_dim * 2, dtype=tf.float32)
out = tf.reshape(tf.matmul(tf.reshape(values, [-1, self.lat_word_dim]), w) + b,
[self.batch_size, self.max_num_words, self.lat_word_dim * 2])
mu, log_sig = tf.split(out, axis=-1, num_or_size_splits=2, name='prior_dense')
print('MU{}'.format(mu))
return [mu, log_sig]
def cost_function(self, predictions, true_input, global_mu, global_logsig, prior_mu, prior_logsig, posterior_mu,
posterior_logsig, shift, total_steps, global_step, kl=True):
mask = tf.reduce_sum(true_input, -1)
# reconstruction = tf.reduce_sum(tf.reduce_sum(-true_input*tf.log(predictions+1e-9),axis=-1),-1)
reconstruction = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(true_input, -1), logits=predictions) * mask,
-1)
        # have to be very careful of order of the mean/stddev parameters
# outer reduce sum for each KL term
'''
kl_p1 = 0.5 * (tf.reduce_sum(tf.exp(posterior_logsig - prior_logsig), axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1) - tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
(prior_logsig - posterior_logsig), axis=-1))
'''
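        # For reference: the closed-form KL between diagonal Gaussians
        # q = N(mu_q, diag(sig_q^2)) and p = N(mu_p, diag(sig_p^2)) is
        #   KL(q || p) = 0.5 * sum(log(sig_p^2) - log(sig_q^2) + (sig_q^2 + (mu_q - mu_p)^2) / sig_p^2 - 1)
        # kl_p1 below evaluates these terms, summed over the word and latent dimensions,
        # with posterior_logsig / prior_logsig treated as log-variances.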
kl_p1 = 0.5 * (tf.reduce_sum(tf.reduce_sum(prior_logsig, axis=1) - tf.reduce_sum(posterior_logsig, axis=1),
axis=-1) -
tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) * tf.cast(tf.shape(prior_mu)[1],
dtype=tf.float32) +
tf.reduce_sum(tf.reduce_sum(tf.divide(1, tf.exp(prior_logsig)) * tf.exp(posterior_logsig),
axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1), axis=-1))
'''
kl_global_lat = 0.5 * (
tf.reduce_sum(tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1) - tf.cast(
tf.shape(global_mu)[-1], dtype=tf.float32) - tf.reduce_sum(global_logsig))
'''
kl_global_lat = 0.5 * (-tf.reduce_sum(global_logsig, axis=-1) - tf.cast(tf.shape(global_mu)[-1],
dtype=tf.float32) + tf.reduce_sum(
tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1))
kl_p2 = kl_p1
# kl_p2 = tf.reduce_sum(kl_p1, -1)
if kl:
kl_p3 = kl_p2 + kl_global_lat
anneal_c = tf.cast(tf.minimum(tf.maximum(tf.divide((global_step - shift), total_steps), 0), 1),
dtype=tf.float32)
kl_p3 = kl_p3 * anneal_c
else:
anneal_c = 0
kl_p3 = tf.constant(0, dtype=tf.float32)
        # sum over all separate KLs for each lat var
cost = tf.reduce_mean(kl_p3 + reconstruction)
return cost, reconstruction, kl_p3, kl_p1, kl_global_lat, kl_p2, anneal_c
def test_cost_function(self, predictions, true_input, global_mu, global_logsig, prior_mu, prior_logsig,
posterior_mu, posterior_logsig):
mask = tf.reduce_sum(true_input, -1)
reconstruction = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(true_input, -1), logits=predictions) * mask,
-1)
# reconstruction = tf.reduce_sum(-true_input*tf.log(predictions+1e-9),axis=-1)
        # have to be very careful of order of the mean/stddev parameters
# outer reduce sum for each KL term
kl_p1 = 0.5 * (tf.reduce_sum(tf.exp(posterior_logsig - prior_logsig), axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1) - tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
(prior_logsig - posterior_logsig), axis=-1))
kl_global_lat = 0.5 * (tf.reduce_sum(tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu),
axis=-1) - tf.cast(
tf.shape(global_mu)[-1], dtype=tf.float32) - tf.reduce_sum(global_logsig))
        # sum over all separate KLs for each lat var
kl_p2 = tf.reduce_sum(kl_p1, -1)
kl_p3 = kl_p2 + kl_global_lat
cost = tf.reduce_mean(kl_p3 + tf.reduce_sum(reconstruction, -1))
return cost, reconstruction, kl_p3, kl_p1
def calc_cost(self, kl, posterior_logsig, post_samples, global_mu, global_logsig, global_latent_sample,
posterior_mu, true_input, sentence_word_lens, predictions, shift, total_steps, global_step, reuse):
prior_mu, prior_logsig = self.prior(values=post_samples, num_units=self.units_encoder_lstm,
global_latent=global_latent_sample, word_lens=sentence_word_lens,
reuse=reuse)
cost, reconstruction, kl_p3, kl_p1, kl_global, kl_p2, anneal_c = self.cost_function(kl=kl,
predictions=predictions,
true_input=true_input,
global_mu=global_mu,
global_logsig=global_logsig,
prior_mu=prior_mu,
prior_logsig=prior_logsig,
posterior_mu=posterior_mu,
posterior_logsig=posterior_logsig,
shift=shift,
total_steps=total_steps,
global_step=global_step)
self.kls_hist = tf.summary.histogram('kls', tf.reduce_mean(kl_p1, 0))
self.global_kl_scalar = tf.summary.scalar('kls_global', tf.reduce_mean(kl_global))
self.rec_scalar = tf.summary.scalar('rec', tf.reduce_mean(reconstruction))
self.cost_scalar = tf.summary.scalar('full_cost', cost)
var_all = tf.nn.moments(x=posterior_mu, axes=0)
var_all = var_all[-1]
kl = tf.reduce_mean(kl_p3)
self.full_kl_scalar = tf.summary.scalar('full_kl', kl)
self.sum_all_activ_hist = tf.summary.histogram('active_lats_all', var_all)
var_g = tf.nn.moments(x=global_mu, axes=0)
var_g = var_g[-1]
self.sum_global_activ_hist = tf.summary.histogram('active_lats_global', var_g)
return cost, reconstruction, kl_p3, kl_p1, kl_global, kl_p2, anneal_c
def test_calc_cost(self, posterior_logsig, post_samples, global_mu, global_logsig, global_latent_sample,
posterior_mu, true_input, predictions, sentence_word_lens):
prior_mu, prior_logsig = self.prior(values=post_samples, num_units=self.units_encoder_lstm,
global_latent=global_latent_sample, word_lens=sentence_word_lens,
reuse=True)
cost, _, _, _ = self.test_cost_function(predictions=predictions, true_input=true_input, global_mu=global_mu,
global_logsig=global_logsig, prior_mu=prior_mu,
prior_logsig=prior_logsig, posterior_mu=posterior_mu,
posterior_logsig=posterior_logsig)
return cost
def generation(self, samples):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=self.max_num_words)
cell = tf.contrib.rnn.LSTMCell(self.decoder_units)
print('GENER samples {}'.format(np.shape(samples)))
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = outputs_ta
# self.lat_word_dim is very important, need from kevin
next_input = tf.concat(
[samples, tf.zeros(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)], axis=-1)
else:
next_cell_state = cell_state
w = tf.get_variable(name='prior_dense_w')
b = tf.get_variable(name='prior_dense_b')
print(cell_output)
cell_output = tf.reshape(tf.matmul(cell_output, w) + b, [self.batch_size, self.lat_word_dim * 2])
mu, logsig = tf.split(cell_output, axis=-1, num_or_size_splits=2)
eps = tf.random_normal(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)
samples_word = eps * tf.exp(logsig) + mu
next_input = tf.concat([samples, samples_word], axis=-1)
next_loop_state = loop_state.write(time - 1, samples_word)
elements_finished = (time >= self.max_num_words)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('prior', reuse=True):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
loop_state_out = _transpose_batch_time(loop_state_ta.stack())
context = self.decoder_p2(num_hidden_word_units=self.lat_word_dim, inputs=loop_state_out,
sequence_length=np.repeat(self.num_sentence_characters, self.batch_size, axis=-1),
global_latent=samples, reuse=True, context_dim=self.decoder_units,
max_time=self.num_sentence_characters)
predictions = self.decoder_p3(inputs=context, reuse=True,
sequence_length=np.repeat(self.num_sentence_characters, self.batch_size, axis=-1),
max_time=self.num_sentence_characters)
return predictions
# Example usage
# batch_len = np.random.randint(low=0,high=30,size=[10])
# arg_dict = {'global_lat_dim':10,'word_lens':batch_len,'batch_size':10,'max_num_words':30,'decoder_units':40,'encodings' : np.random.randn(10,30,40),'sentence_lens':np.random.randint(low=0,high=30,size=10),'num_sentence_characters':200,'dict_length':26}
# decoder = Decoder(**arg_dict)
# word_encoding_placeholder=tf.placeholder(dtype=tf.float32,shape=[decoder.batch_size,decoder.max_num_words,np.shape(decoder.encodings)[-1]])
# out_o, global_latent_o,global_logsig_o,global_mu_o = decoder.run_decoder(units_lstm_decoder=40,lat_words=word_encoding_placeholder,units_dense_global=40,sequence_length=batch_len)
# true_mat =np.zeros(shape=[decoder.batch_size,decoder.num_sentence_characters],dtype=np.float32)
# for k,i in enumerate(batch_len):
# true_mat[k,0:i] = np.random.randint(low=0,high=decoder.dict_length,size=[i])
# true_inp=true_mat
# posterior_mu =np.random.randn(10,30,40)
# posterior_logsig = np.exp(np.random.randn(10,30,40))
# cost= decoder.calc_cost(prior_mu=posterior_mu,prior_logsig=posterior_logsig,global_latent_sample=global_latent_o,global_logsig=global_logsig_o,global_mu=global_mu_o,predictions=out_o,true_input=tf.one_hot(indices=true_inp,depth =decoder.dict_length),posterior_logsig=posterior_logsig,posterior_mu=posterior_mu,post_samples=decoder.encodings)
#
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# cost_o=sess.run([cost],feed_dict={word_encoding_placeholder:decoder.encodings})
|
[
"tensorflow.einsum",
"tensorflow.reduce_sum",
"tensorflow.nn.tanh",
"tensorflow.reshape",
"numpy.shape",
"tensorflow.matmul",
"tensorflow.divide",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.split",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.nn.moments",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.cast",
"tensorflow.summary.histogram",
"tensorflow.exp",
"numpy.repeat",
"tensorflow.summary.scalar",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.tile",
"tensorflow.random_normal",
"tensorflow.nn.raw_rnn",
"tensorflow.nn.dynamic_rnn",
"tensorflow.argmax",
"tensorflow.layers.dense",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.TensorArray",
"tensorflow.contrib.rnn.LSTMCell"
] |
[((831, 862), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['values'], {'axis': '(-1)'}), '(values, axis=-1)\n', (845, 862), True, 'import tensorflow as tf\n'), ((883, 958), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'mean_pool', 'activation': 'tf.nn.relu', 'units': 'units_dense'}), '(inputs=mean_pool, activation=tf.nn.relu, units=units_dense)\n', (898, 958), True, 'import tensorflow as tf\n'), ((979, 1052), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pre_dist1', 'activation': 'None', 'units': '(units_dense * 2)'}), '(inputs=pre_dist1, activation=None, units=units_dense * 2)\n', (994, 1052), True, 'import tensorflow as tf\n'), ((1751, 1771), 'tensorflow.concat', 'tf.concat', (['values', '(2)'], {}), '(values, 2)\n', (1760, 1771), True, 'import tensorflow as tf\n'), ((2405, 2425), 'tensorflow.concat', 'tf.concat', (['values', '(2)'], {}), '(values, 2)\n', (2414, 2425), True, 'import tensorflow as tf\n'), ((4402, 4449), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time'}), '(dtype=tf.float32, size=max_time)\n', (4416, 4449), True, 'import tensorflow as tf\n'), ((4466, 4509), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['self.decoder_units'], {}), '(self.decoder_units)\n', (4489, 4509), True, 'import tensorflow as tf\n'), ((6727, 6798), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time', 'name': '"""pred_char_array"""'}), "(dtype=tf.float32, size=max_time, name='pred_char_array')\n", (6741, 6798), True, 'import tensorflow as tf\n'), ((6815, 6861), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['self.decoder_p3_units'], {}), '(self.decoder_p3_units)\n', (6838, 6861), True, 'import tensorflow as tf\n'), ((10669, 10698), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (10682, 10698), True, 'import tensorflow as tf\n'), ((13272, 13310), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(kl_p3 + reconstruction)'], {}), '(kl_p3 + reconstruction)\n', (13286, 13310), True, 'import tensorflow as tf\n'), ((13577, 13606), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (13590, 13606), True, 'import tensorflow as tf\n'), ((14733, 14757), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kl_p1', '(-1)'], {}), '(kl_p1, -1)\n', (14746, 14757), True, 'import tensorflow as tf\n'), ((17073, 17109), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""full_cost"""', 'cost'], {}), "('full_cost', cost)\n", (17090, 17109), True, 'import tensorflow as tf\n'), ((17128, 17165), 'tensorflow.nn.moments', 'tf.nn.moments', ([], {'x': 'posterior_mu', 'axes': '(0)'}), '(x=posterior_mu, axes=0)\n', (17141, 17165), True, 'import tensorflow as tf\n'), ((17209, 17230), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_p3'], {}), '(kl_p3)\n', (17223, 17230), True, 'import tensorflow as tf\n'), ((17262, 17294), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""full_kl"""', 'kl'], {}), "('full_kl', kl)\n", (17279, 17294), True, 'import tensorflow as tf\n'), ((17329, 17377), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""active_lats_all"""', 'var_all'], {}), "('active_lats_all', var_all)\n", (17349, 17377), True, 'import tensorflow as tf\n'), ((17394, 17428), 'tensorflow.nn.moments', 'tf.nn.moments', ([], {'x': 'global_mu', 'axes': '(0)'}), '(x=global_mu, axes=0)\n', (17407, 17428), True, 'import tensorflow as tf\n'), ((17492, 17541), 'tensorflow.summary.histogram', 
'tf.summary.histogram', (['"""active_lats_global"""', 'var_g'], {}), "('active_lats_global', var_g)\n", (17512, 17541), True, 'import tensorflow as tf\n'), ((18555, 18612), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'self.max_num_words'}), '(dtype=tf.float32, size=self.max_num_words)\n', (18569, 18612), True, 'import tensorflow as tf\n'), ((18628, 18671), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['self.decoder_units'], {}), '(self.decoder_units)\n', (18651, 18671), True, 'import tensorflow as tf\n'), ((1084, 1120), 'tensorflow.cast', 'tf.cast', (['pre_dist2'], {'dtype': 'tf.float32'}), '(pre_dist2, dtype=tf.float32)\n', (1091, 1120), True, 'import tensorflow as tf\n'), ((1328, 1372), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p1"""'], {'reuse': 'reuse'}), "('decoder_p1', reuse=reuse)\n", (1345, 1372), True, 'import tensorflow as tf\n'), ((1394, 1441), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (1417, 1441), True, 'import tensorflow as tf\n'), ((1462, 1509), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (1485, 1509), True, 'import tensorflow as tf\n'), ((1539, 1677), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'inputs': 'encodings', 'dtype': 'tf.float32', 'cell_bw': 'cell1', 'cell_fw': 'cell2', 'sequence_length': 'self.sentence_lens'}), '(inputs=encodings, dtype=tf.float32, cell_bw\n =cell1, cell_fw=cell2, sequence_length=self.sentence_lens)\n', (1570, 1677), True, 'import tensorflow as tf\n'), ((1977, 2021), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p1"""'], {'reuse': 'reuse'}), "('decoder_p1', reuse=reuse)\n", (1994, 2021), True, 'import tensorflow as tf\n'), ((2043, 2090), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (2066, 2090), True, 'import tensorflow as tf\n'), ((2111, 2158), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (2134, 2158), True, 'import tensorflow as tf\n'), ((2516, 2565), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_layer"""'], {'reuse': 'reuse'}), "('attention_layer', reuse=reuse)\n", (2533, 2565), True, 'import tensorflow as tf\n'), ((2584, 2662), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""query_w"""', 'shape': '[self.decoder_units, self.lat_word_dim]'}), "(name='query_w', shape=[self.decoder_units, self.lat_word_dim])\n", (2599, 2662), True, 'import tensorflow as tf\n'), ((2680, 2757), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""value_w"""', 'shape': '[self.lat_word_dim, self.lat_word_dim]'}), "(name='value_w', shape=[self.lat_word_dim, self.lat_word_dim])\n", (2695, 2757), True, 'import tensorflow as tf\n'), ((2774, 2826), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""v"""', 'shape': '[self.lat_word_dim]'}), "(name='v', shape=[self.lat_word_dim])\n", (2789, 2826), True, 'import tensorflow as tf\n'), ((3251, 3287), 'tensorflow.einsum', 'tf.einsum', (['"""ijk,kl->ijl"""', 'values', 'w2'], {}), "('ijk,kl->ijl', values, w2)\n", (3260, 3287), True, 'import tensorflow as tf\n'), ((3519, 3545), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['out'], {'dim': '(-1)'}), '(out, dim=-1)\n', (3532, 3545), True, 'import tensorflow as 
tf\n'), ((4113, 4151), 'tensorflow.concat', 'tf.concat', (['[context, queries]'], {'axis': '(-1)'}), '([context, queries], axis=-1)\n', (4122, 4151), True, 'import tensorflow as tf\n'), ((4169, 4242), 'tensorflow.reshape', 'tf.reshape', (['l1', '[self.batch_size, self.lat_word_dim + self.decoder_units]'], {}), '(l1, [self.batch_size, self.lat_word_dim + self.decoder_units])\n', (4179, 4242), True, 'import tensorflow as tf\n'), ((6216, 6260), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p2"""'], {'reuse': 'reuse'}), "('decoder_p2', reuse=reuse)\n", (6233, 6260), True, 'import tensorflow as tf\n'), ((6296, 6324), 'tensorflow.nn.raw_rnn', 'tf.nn.raw_rnn', (['cell', 'loop_fn'], {}), '(cell, loop_fn)\n', (6309, 6324), True, 'import tensorflow as tf\n'), ((7888, 7932), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p3"""'], {'reuse': 'reuse'}), "('decoder_p3', reuse=reuse)\n", (7905, 7932), True, 'import tensorflow as tf\n'), ((7962, 7990), 'tensorflow.nn.raw_rnn', 'tf.nn.raw_rnn', (['cell', 'loop_fn'], {}), '(cell, loop_fn)\n', (7975, 7990), True, 'import tensorflow as tf\n'), ((8339, 8418), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[self.batch_size, units_dense_global]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, units_dense_global], dtype=tf.float32)\n', (8355, 8418), True, 'import tensorflow as tf\n'), ((9591, 9630), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior"""'], {'reuse': 'reuse'}), "('prior', reuse=reuse)\n", (9608, 9630), True, 'import tensorflow as tf\n'), ((9651, 9685), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['num_units'], {}), '(num_units)\n', (9674, 9685), True, 'import tensorflow as tf\n'), ((9710, 9802), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'values', 'sequence_length': 'word_lens', 'dtype': 'tf.float32'}), '(cell=cell, inputs=values, sequence_length=word_lens,\n dtype=tf.float32)\n', (9727, 9802), True, 'import tensorflow as tf\n'), ((9812, 9855), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior/rnn"""'], {'reuse': 'reuse'}), "('prior/rnn', reuse=reuse)\n", (9829, 9855), True, 'import tensorflow as tf\n'), ((9873, 9983), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_w"""', 'shape': '[self.lat_word_dim, self.lat_word_dim * 2]', 'dtype': 'tf.float32'}), "(name='prior_dense_w', shape=[self.lat_word_dim, self.\n lat_word_dim * 2], dtype=tf.float32)\n", (9888, 9983), True, 'import tensorflow as tf\n'), ((10027, 10116), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_b"""', 'shape': '(self.lat_word_dim * 2)', 'dtype': 'tf.float32'}), "(name='prior_dense_b', shape=self.lat_word_dim * 2, dtype=tf\n .float32)\n", (10042, 10116), True, 'import tensorflow as tf\n'), ((10322, 10386), 'tensorflow.split', 'tf.split', (['out'], {'axis': '(-1)', 'num_or_size_splits': '(2)', 'name': '"""prior_dense"""'}), "(out, axis=-1, num_or_size_splits=2, name='prior_dense')\n", (10330, 10386), True, 'import tensorflow as tf\n'), ((13170, 13202), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (13181, 13202), True, 'import tensorflow as tf\n'), ((16846, 16870), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_p1', '(0)'], {}), '(kl_p1, 0)\n', (16860, 16870), True, 'import tensorflow as tf\n'), ((16936, 16961), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_global'], {}), '(kl_global)\n', (16950, 16961), True, 'import tensorflow as 
tf\n'), ((17014, 17044), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reconstruction'], {}), '(reconstruction)\n', (17028, 17044), True, 'import tensorflow as tf\n'), ((20152, 20190), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior"""'], {'reuse': '(True)'}), "('prior', reuse=True)\n", (20169, 20190), True, 'import tensorflow as tf\n'), ((20226, 20254), 'tensorflow.nn.raw_rnn', 'tf.nn.raw_rnn', (['cell', 'loop_fn'], {}), '(cell, loop_fn)\n', (20239, 20254), True, 'import tensorflow as tf\n'), ((2885, 2920), 'tensorflow.einsum', 'tf.einsum', (['"""ij,jk->ik"""', 'queries', 'w1'], {}), "('ij,jk->ik', queries, w1)\n", (2894, 2920), True, 'import tensorflow as tf\n'), ((3007, 3050), 'tensorflow.tile', 'tf.tile', (['conv_q', '[1, 1, self.max_num_words]'], {}), '(conv_q, [1, 1, self.max_num_words])\n', (3014, 3050), True, 'import tensorflow as tf\n'), ((3401, 3442), 'tensorflow.nn.tanh', 'tf.nn.tanh', ([], {'name': '"""combine"""', 'x': '(a_p1 + a_p2)'}), "(name='combine', x=a_p1 + a_p2)\n", (3411, 3442), True, 'import tensorflow as tf\n'), ((5230, 5331), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, self.lat_word_dim * 2 + self.global_lat_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.lat_word_dim * 2 + self.\n global_lat_dim], dtype=tf.float32)\n', (5238, 5331), True, 'import tensorflow as tf\n'), ((5930, 5974), 'tensorflow.concat', 'tf.concat', (['[context, global_latent]'], {'axis': '(-1)'}), '([context, global_latent], axis=-1)\n', (5939, 5974), True, 'import tensorflow as tf\n'), ((7482, 7558), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'cell_output', 'activation': 'None', 'units': 'self.dict_length'}), '(inputs=cell_output, activation=None, units=self.dict_length)\n', (7497, 7558), True, 'import tensorflow as tf\n'), ((9302, 9375), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, 1, self.lat_word_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, 1, self.lat_word_dim], dtype=tf.float32)\n', (9310, 9375), True, 'import tensorflow as tf\n'), ((9458, 9491), 'tensorflow.cast', 'tf.cast', (['values'], {'dtype': 'tf.float32'}), '(values, dtype=tf.float32)\n', (9465, 9491), True, 'import tensorflow as tf\n'), ((12745, 12790), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(global_mu * global_mu)'], {'axis': '(-1)'}), '(global_mu * global_mu, axis=-1)\n', (12758, 12790), True, 'import tensorflow as tf\n'), ((14274, 14329), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prior_logsig - posterior_logsig)'], {'axis': '(-1)'}), '(prior_logsig - posterior_logsig, axis=-1)\n', (14287, 14329), True, 'import tensorflow as tf\n'), ((14634, 14662), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['global_logsig'], {}), '(global_logsig)\n', (14647, 14662), True, 'import tensorflow as tf\n'), ((14835, 14868), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reconstruction', '(-1)'], {}), '(reconstruction, -1)\n', (14848, 14868), True, 'import tensorflow as tf\n'), ((18712, 18729), 'numpy.shape', 'np.shape', (['samples'], {}), '(samples)\n', (18720, 18729), True, 'import numpy as np\n'), ((19341, 19378), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_w"""'}), "(name='prior_dense_w')\n", (19356, 19378), True, 'import tensorflow as tf\n'), ((19399, 19436), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_b"""'}), "(name='prior_dense_b')\n", (19414, 19436), True, 'import tensorflow as tf\n'), ((19616, 19668), 'tensorflow.split', 'tf.split', (['cell_output'], {'axis': '(-1)', 
'num_or_size_splits': '(2)'}), '(cell_output, axis=-1, num_or_size_splits=2)\n', (19624, 19668), True, 'import tensorflow as tf\n'), ((19691, 19769), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[self.batch_size, self.lat_word_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)\n', (19707, 19769), True, 'import tensorflow as tf\n'), ((19857, 19900), 'tensorflow.concat', 'tf.concat', (['[samples, samples_word]'], {'axis': '(-1)'}), '([samples, samples_word], axis=-1)\n', (19866, 19900), True, 'import tensorflow as tf\n'), ((20477, 20542), 'numpy.repeat', 'np.repeat', (['self.num_sentence_characters', 'self.batch_size'], {'axis': '(-1)'}), '(self.num_sentence_characters, self.batch_size, axis=-1)\n', (20486, 20542), True, 'import numpy as np\n'), ((20838, 20903), 'numpy.repeat', 'np.repeat', (['self.num_sentence_characters', 'self.batch_size'], {'axis': '(-1)'}), '(self.num_sentence_characters, self.batch_size, axis=-1)\n', (20847, 20903), True, 'import numpy as np\n'), ((2359, 2386), 'tensorflow.cast', 'tf.cast', (['hap_lens', 'tf.int32'], {}), '(hap_lens, tf.int32)\n', (2366, 2386), True, 'import tensorflow as tf\n'), ((8453, 8474), 'tensorflow.exp', 'tf.exp', (['global_logsig'], {}), '(global_logsig)\n', (8459, 8474), True, 'import tensorflow as tf\n'), ((4886, 4960), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, num_hidden_word_units]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, num_hidden_word_units], dtype=tf.float32)\n', (4894, 4960), True, 'import tensorflow as tf\n'), ((7220, 7289), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, self.dict_length]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.dict_length], dtype=tf.float32)\n', (7228, 7289), True, 'import tensorflow as tf\n'), ((10151, 10194), 'tensorflow.reshape', 'tf.reshape', (['values', '[-1, self.lat_word_dim]'], {}), '(values, [-1, self.lat_word_dim])\n', (10161, 10194), True, 'import tensorflow as tf\n'), ((10910, 10935), 'tensorflow.argmax', 'tf.argmax', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (10919, 10935), True, 'import tensorflow as tf\n'), ((12711, 12732), 'tensorflow.exp', 'tf.exp', (['global_logsig'], {}), '(global_logsig)\n', (12717, 12732), True, 'import tensorflow as tf\n'), ((12970, 13013), 'tensorflow.divide', 'tf.divide', (['(global_step - shift)', 'total_steps'], {}), '(global_step - shift, total_steps)\n', (12979, 13013), True, 'import tensorflow as tf\n'), ((13713, 13738), 'tensorflow.argmax', 'tf.argmax', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (13722, 13738), True, 'import tensorflow as tf\n'), ((14425, 14470), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(global_mu * global_mu)'], {'axis': '(-1)'}), '(global_mu * global_mu, axis=-1)\n', (14438, 14470), True, 'import tensorflow as tf\n'), ((19175, 19245), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, self.lat_word_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)\n', (19183, 19245), True, 'import tensorflow as tf\n'), ((19513, 19538), 'tensorflow.matmul', 'tf.matmul', (['cell_output', 'w'], {}), '(cell_output, w)\n', (19522, 19538), True, 'import tensorflow as tf\n'), ((19807, 19821), 'tensorflow.exp', 'tf.exp', (['logsig'], {}), '(logsig)\n', (19813, 19821), True, 'import tensorflow as tf\n'), ((11518, 11553), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prior_logsig'], {'axis': '(1)'}), '(prior_logsig, axis=1)\n', (11531, 11553), True, 'import 
tensorflow as tf\n'), ((11556, 11595), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['posterior_logsig'], {'axis': '(1)'}), '(posterior_logsig, axis=1)\n', (11569, 11595), True, 'import tensorflow as tf\n'), ((12511, 12548), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['global_logsig'], {'axis': '(-1)'}), '(global_logsig, axis=-1)\n', (12524, 12548), True, 'import tensorflow as tf\n'), ((14024, 14063), 'tensorflow.exp', 'tf.exp', (['(posterior_logsig - prior_logsig)'], {}), '(posterior_logsig - prior_logsig)\n', (14030, 14063), True, 'import tensorflow as tf\n'), ((14226, 14248), 'tensorflow.shape', 'tf.shape', (['posterior_mu'], {}), '(posterior_mu)\n', (14234, 14248), True, 'import tensorflow as tf\n'), ((14391, 14412), 'tensorflow.exp', 'tf.exp', (['global_logsig'], {}), '(global_logsig)\n', (14397, 14412), True, 'import tensorflow as tf\n'), ((14589, 14608), 'tensorflow.shape', 'tf.shape', (['global_mu'], {}), '(global_mu)\n', (14597, 14608), True, 'import tensorflow as tf\n'), ((11676, 11698), 'tensorflow.shape', 'tf.shape', (['posterior_mu'], {}), '(posterior_mu)\n', (11684, 11698), True, 'import tensorflow as tf\n'), ((11732, 11750), 'tensorflow.shape', 'tf.shape', (['prior_mu'], {}), '(prior_mu)\n', (11740, 11750), True, 'import tensorflow as tf\n'), ((11950, 11974), 'tensorflow.exp', 'tf.exp', (['posterior_logsig'], {}), '(posterior_logsig)\n', (11956, 11974), True, 'import tensorflow as tf\n'), ((12559, 12578), 'tensorflow.shape', 'tf.shape', (['global_mu'], {}), '(global_mu)\n', (12567, 12578), True, 'import tensorflow as tf\n'), ((11926, 11946), 'tensorflow.exp', 'tf.exp', (['prior_logsig'], {}), '(prior_logsig)\n', (11932, 11946), True, 'import tensorflow as tf\n'), ((12121, 12141), 'tensorflow.exp', 'tf.exp', (['prior_logsig'], {}), '(prior_logsig)\n', (12127, 12141), True, 'import tensorflow as tf\n'), ((14144, 14164), 'tensorflow.exp', 'tf.exp', (['prior_logsig'], {}), '(prior_logsig)\n', (14150, 14164), True, 'import tensorflow as tf\n')]
|
import numpy as np
### 1
def fib_matrix(n):
for i in range(n):
res = pow((np.matrix([[1, 1], [1, 0]], dtype='int64')), i) * np.matrix([[1], [0]])
print(int(res[0][0]))
# Call
fib_matrix(100)
### 2
# Compute the Fibonacci sequence with matrix exponentiation
def Fibonacci_Matrix_tool(n):
Matrix = np.matrix("1 1;1 0", dtype='int64')
    # the return value is a numpy matrix
return np.linalg.matrix_power(Matrix, n)
def Fibonacci_Matrix(n):
result_list = []
for i in range(0, n):
result_list.append(np.array(Fibonacci_Matrix_tool(i))[0][0])
return result_list
# Call
Fibonacci_Matrix(100)
### pow is faster than the ** operator; np.linalg.matrix_power is another option
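# Minimal timing sketch for the note above (illustrative only; absolute numbers depend
# on the machine and the NumPy build):
import timeit
_m = np.matrix([[1, 1], [1, 0]], dtype='int64')
print('pow(m, 50):           ', timeit.timeit(lambda: pow(_m, 50), number=2000))
print('m ** 50:              ', timeit.timeit(lambda: _m ** 50, number=2000))
print('np.linalg.matrix_power:', timeit.timeit(lambda: np.linalg.matrix_power(_m, 50), number=2000))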
|
[
"numpy.matrix",
"numpy.linalg.matrix_power"
] |
[((278, 313), 'numpy.matrix', 'np.matrix', (['"""1 1;1 0"""'], {'dtype': '"""int64"""'}), "('1 1;1 0', dtype='int64')\n", (287, 313), True, 'import numpy as np\n'), ((343, 376), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['Matrix', 'n'], {}), '(Matrix, n)\n', (365, 376), True, 'import numpy as np\n'), ((138, 159), 'numpy.matrix', 'np.matrix', (['[[1], [0]]'], {}), '([[1], [0]])\n', (147, 159), True, 'import numpy as np\n'), ((88, 130), 'numpy.matrix', 'np.matrix', (['[[1, 1], [1, 0]]'], {'dtype': '"""int64"""'}), "([[1, 1], [1, 0]], dtype='int64')\n", (97, 130), True, 'import numpy as np\n')]
|
import argparse
import json
from multiprocessing.util import Finalize
from typing import Dict, List, Tuple
from multiprocessing import Pool as ProcessPool
import itertools
import pickle
import numpy as np
import os
from os.path import join
from tqdm import tqdm
from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, \
IterativeQuestionAndParagraphs
from hotpot.data_handling.dataset import QuestionAndParagraphsSpec
from hotpot.encoding.paragraph_encoder import SentenceEncoderSingleContext, SentenceEncoderIterativeModel
from hotpot.tfidf_retriever.doc_db import DocDB
from hotpot.tokenizers import CoreNLPTokenizer
from hotpot.utils import ResourceLoader
PROCESS_TOK = None
PROCESS_DB = None
def init():
global PROCESS_TOK, PROCESS_DB
PROCESS_TOK = CoreNLPTokenizer()
Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)
PROCESS_DB = DocDB()
Finalize(PROCESS_DB, PROCESS_DB.close, exitpriority=100)
def fetch_sentences(doc_title):
global PROCESS_DB
return PROCESS_DB.get_doc_sentences(doc_title)
def tokenize(text):
global PROCESS_TOK
return PROCESS_TOK.tokenize(text)
def tokenize_document(doc: Tuple[str, List[str]]) -> Dict[str, List[List[str]]]:
return {doc[0]: [tokenize(x).words() for x in doc[1]]}
def tokenize_from_db(title: str) -> Dict[str, List[List[str]]]:
return {title: [tokenize(' '.join(fetch_sentences(title))).words()]}
# class DocumentsEncodingSaver(object):
# def __init__(self, encodings_path: str):
# self.encodings_path = encodings_path
# self.encodings = None
# self.title2idx2par_name = None
#
# def _load_encodings(self):
# self.encodings = np.load(self.encodings_path)
#
# def get_document_encodings(self, title: str):
# if self.encodings is None:
# self._load_encodings()
# return self.encodings[title]
#
# def build_document_encodings_from_paragraphs(self, par_name2enc: Dict[str, np.ndarray]):
# par_names = list(par_name2enc.keys())
# title2par_names = {title: list(par_names)
# for title, par_names in
# itertools.groupby(sorted(par_names, key=par_name_to_title), key=par_name_to_title)}
# title2encs = {}
# self.title2idx2par_name = {}
# for title, p_names in tqdm(title2par_names.items()):
# par2ids = {}
# reps = []
# total_sentences = 0
# for p_name in p_names:
# rep = par_name2enc[p_name]
# par2ids[p_name] = list(range(total_sentences, total_sentences + len(rep)))
# reps.append(rep)
# total_sentences += len(rep)
# id2par = {i: p for p, ids in par2ids.items() for i in ids}
# reps = np.concatenate(reps, axis=0)
# title2encs[title] = reps
# self.title2idx2par_name[title] = id2par
# np.savez_compressed(self.encodings_path, **title2encs)
class DocumentEncodingHandler(object):
def __init__(self, encodings_dir: str):
self.encodings_dir = os.path.abspath(encodings_dir)
self.titles2filenames = self._get_titles_to_filenames()
def _title_to_filename_json(self):
return join(self.encodings_dir, "title_to_filenames.json")
def _get_titles_to_filenames(self):
titles2files = {}
if not os.path.exists(self._title_to_filename_json()):
with open(self._title_to_filename_json(), 'w') as f:
pass
return {}
with open(self._title_to_filename_json(), 'r') as f:
for line in f:
titles2files.update(json.loads(line))
return titles2files
def _title_to_npy(self, title: str):
return join(self.encodings_dir, f"{self.titles2filenames[title]}.npy")
def _title_to_idx2parname(self, title: str):
return join(self.encodings_dir, f"{self.titles2filenames[title]}_idx2pname.pkl")
def get_document_encodings(self, title: str) -> np.ndarray:
return np.load(self._title_to_npy(title))
def get_document_idx2pname(self, title: str) -> Dict[int, str]:
with open(self._title_to_idx2parname(title), 'rb') as f:
return pickle.load(f)
def save_document_encoding(self, par_name2enc: Dict[str, np.ndarray], overwrite=False):
title = par_name_to_title(next(iter(par_name2enc)))
if title in self.titles2filenames and not overwrite:
            raise ValueError(f"{title} encodings already exist; pass overwrite=True to replace them")
par2ids = {}
reps = []
total_sentences = 0
for p_name in par_name2enc:
if par_name_to_title(p_name) != title:
raise ValueError("All paragraphs must belong to the same title")
rep = par_name2enc[p_name]
par2ids[p_name] = list(range(total_sentences, total_sentences + len(rep)))
reps.append(rep)
total_sentences += len(rep)
id2par = {i: p for p, ids in par2ids.items() for i in ids}
reps = np.concatenate(reps, axis=0)
if title not in self.titles2filenames:
self.titles2filenames[title] = str(len(self.titles2filenames))
with open(self._title_to_filename_json(), 'a') as f:
json.dump({title: self.titles2filenames[title]}, f)
f.write(os.linesep)
with open(self._title_to_idx2parname(title), 'wb') as f:
pickle.dump(id2par, f)
np.save(self._title_to_npy(title), reps)
def save_multiple_documents(self, par_name2enc: Dict[str, np.ndarray], overwrite=False):
par_names = list(par_name2enc.keys())
title2par_names = {title: list(par_names)
for title, par_names in
itertools.groupby(sorted(par_names, key=par_name_to_title), key=par_name_to_title)}
for title, p_names in tqdm(title2par_names.items()):
self.save_document_encoding({p_name: par_name2enc[p_name] for p_name in p_names}, overwrite=overwrite)
# def convert_single_file_to_current_format(self, old_saver: DocumentsEncodingSaver):
# for title in tqdm(old_saver.title2idx2par_name.keys()):
# encs = old_saver.get_document_encodings(title)
# idx2par_names = old_saver.title2idx2par_name[title]
# self.titles2filenames[title] = str(len(self.titles2filenames))
# with open(self._title_to_filename_json(), 'a') as f:
# json.dump({title: self.titles2filenames[title]}, f)
# f.write(os.linesep)
# with open(self._title_to_idx2parname(title), 'wb') as f:
# pickle.dump(idx2par_names, f)
# np.save(self._title_to_npy(title), encs)
def par_name_to_title(par_name):
return '_'.join(par_name.split('_')[:-1])
def encode_from_file(docs_file, questions_file, encodings_dir, encoder_model, num_workers, hotpot: bool,
long_batch: int, short_batch: int, use_chars: bool, use_ema: bool, checkpoint: str,
document_chunk_size=1000, samples=None, encode_all_db=False):
"""
    :param encodings_dir: directory in which to dump the per-document encodings
    :param docs_file: path to a json file mapping each title to its list of paragraphs
:return:
"""
doc_encs_handler = DocumentEncodingHandler(encodings_dir)
# Setup worker pool
workers = ProcessPool(
num_workers,
initializer=init,
initargs=[]
)
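    # Illustrative shape of the docs_file json handled below (titles and text are
    # made-up placeholders, not taken from the original dataset):
    #   {
    #       "Some Article": ["First paragraph ...", "Second paragraph ..."],
    #       "Another Article": ["Single paragraph ..."]
    #   }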
if docs_file is not None:
with open(docs_file, 'r') as f:
documents = json.load(f)
documents = {k: v for k, v in documents.items() if k not in doc_encs_handler.titles2filenames}
tokenized_documents = {}
tupled_doc_list = [(title, pars) for title, pars in documents.items()]
if samples is not None:
print(f"sampling {samples} samples")
tupled_doc_list = tupled_doc_list[:samples]
print("Tokenizing from file...")
with tqdm(total=len(tupled_doc_list), ncols=80) as pbar:
for tok_doc in tqdm(workers.imap_unordered(tokenize_document, tupled_doc_list)):
tokenized_documents.update(tok_doc)
pbar.update()
else:
if questions_file is not None:
with open(questions_file, 'r') as f:
questions = json.load(f)
all_titles = list(set([title for q in questions for title in q['top_titles']]))
else:
print("encoding all DB!")
all_titles = DocDB().get_doc_titles()
if samples is not None:
print(f"sampling {samples} samples")
all_titles = all_titles[:samples]
all_titles = [t for t in all_titles if t not in doc_encs_handler.titles2filenames]
tokenized_documents = {}
print("Tokenizing from DB...")
with tqdm(total=len(all_titles), ncols=80) as pbar:
for tok_doc in tqdm(workers.imap_unordered(tokenize_from_db, all_titles)):
tokenized_documents.update(tok_doc)
pbar.update()
workers.close()
workers.join()
voc = set()
for paragraphs in tokenized_documents.values():
for par in paragraphs:
voc.update(par)
if not hotpot:
spec = QuestionAndParagraphsSpec(batch_size=None, max_num_contexts=1,
max_num_question_words=None, max_num_context_words=None)
encoder = SentenceEncoderSingleContext(model_dir_path=encoder_model, vocabulary=voc, spec=spec,
loader=ResourceLoader(), use_char_inputs=use_chars,
use_ema=use_ema, checkpoint=checkpoint)
else:
spec = QuestionAndParagraphsSpec(batch_size=None, max_num_contexts=2,
max_num_question_words=None, max_num_context_words=None)
encoder = SentenceEncoderIterativeModel(model_dir_path=encoder_model, vocabulary=voc, spec=spec,
loader=ResourceLoader(), use_char_inputs=use_chars,
use_ema=use_ema, checkpoint=checkpoint)
tokenized_documents_items = list(tokenized_documents.items())
for tokenized_doc_chunk in tqdm([tokenized_documents_items[i:i + document_chunk_size]
for i in range(0, len(tokenized_documents_items), document_chunk_size)],
ncols=80):
flattened_pars_with_names = [(f"{title}_{i}", par)
for title, pars in tokenized_doc_chunk for i, par in enumerate(pars)]
# filtering out empty paragraphs (probably had some short string the tokenization removed)
# important to notice that the filtered paragraphs will have no representation,
# but they still exist in the numbering of paragraphs for consistency with the docs.
flattened_pars_with_names = [(name, par) for name, par in flattened_pars_with_names if len(par) > 0]
# sort such that longer paragraphs are first to identify OOMs early on
flattened_pars_with_names = sorted(flattened_pars_with_names, key=lambda x: len(x[1]), reverse=True)
long_paragraphs_ids = [i for i, name_par in enumerate(flattened_pars_with_names) if len(name_par[1]) >= 900]
short_paragraphs_ids = [i for i, name_par in enumerate(flattened_pars_with_names) if len(name_par[1]) < 900]
# print(f"Encoding {len(flattened_pars_with_names)} paragraphs...")
name2enc = {}
dummy_question = "Hello Hello".split()
if not hotpot:
model_paragraphs = [BinaryQuestionAndParagraphs(question=dummy_question,
paragraphs=[x], label=1,
num_distractors=0, question_id='dummy')
for _, x in flattened_pars_with_names]
else:
# todo allow precomputed sentence segments
model_paragraphs = [IterativeQuestionAndParagraphs(question=dummy_question,
paragraphs=[x, dummy_question],
first_label=1, second_label=1,
question_id='dummy', sentence_segments=None)
for _, x in flattened_pars_with_names]
# print("Encoding long paragraphs...")
long_pars = [model_paragraphs[i] for i in long_paragraphs_ids]
name2enc.update({flattened_pars_with_names[long_paragraphs_ids[i]][0]: enc
for i, enc in
enumerate(encoder.encode_paragraphs(long_pars, batch_size=long_batch, show_progress=True)
if not hotpot
else encoder.encode_first_paragraphs(long_pars, batch_size=long_batch,
show_progress=True))})
# print("Encoding short paragraphs...")
short_pars = [model_paragraphs[i] for i in short_paragraphs_ids]
name2enc.update({flattened_pars_with_names[short_paragraphs_ids[i]][0]: enc
for i, enc in enumerate(encoder.encode_paragraphs(short_pars, batch_size=short_batch,
show_progress=True)
if not hotpot
else encoder.encode_first_paragraphs(short_pars,
batch_size=short_batch,
show_progress=True)
)})
doc_encs_handler.save_multiple_documents(name2enc)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Encode a dataset')
parser.add_argument('encodings_dir', help="directory to dump the encodings")
parser.add_argument('encoder_model', help="model to encode with")
parser.add_argument('--docs_file', default=None, help="a document json filename from which to load the top-k dataset")
parser.add_argument('--questions_file', default=None,
help="a questions json filename from which to load the top-k dataset."
" For hotpot, loads docs from DB")
parser.add_argument('--encode-all-db', action='store_true')
parser.add_argument('--checkpoint', type=str, default='best', choices=['best', 'latest'])
parser.add_argument('--ema', action='store_true')
parser.add_argument('--num-workers', type=int, default=16)
parser.add_argument('--hotpot', action='store_true')
parser.add_argument('--long-batch', type=int, default=8)
parser.add_argument('--short-batch', type=int, default=128)
parser.add_argument('--use-chars', action='store_true')
parser.add_argument('--doc-chunk', type=int, default=1000)
parser.add_argument('--samples', type=int, default=None)
args = parser.parse_args()
if (args.docs_file and args.questions_file) or (not args.docs_file and not args.questions_file):
if not args.encode_all_db or (args.encode_all_db and (args.docs_file or args.questions_file)):
            raise ValueError("provide exactly one of --questions_file or --docs_file (or --encode-all-db with neither)")
if not args.hotpot and not args.docs_file:
raise ValueError("only hotpot supports retrieving from db")
encode_from_file(args.docs_file, args.questions_file, args.encodings_dir,
args.encoder_model, args.num_workers, hotpot=args.hotpot,
long_batch=args.long_batch, short_batch=args.short_batch, use_chars=args.use_chars,
document_chunk_size=args.doc_chunk, use_ema=args.ema, checkpoint=args.checkpoint,
samples=args.samples, encode_all_db=args.encode_all_db)
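# Illustrative invocation (the script name and paths are placeholders, not taken from the
# original project's documentation; the flags mirror the argparse definitions above):
#   python encode_documents.py /path/to/encodings_dir /path/to/encoder_model \
#       --questions_file questions.json --hotpot --num-workers 16 \
#       --long-batch 8 --short-batch 128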
|
[
"json.dump",
"os.path.abspath",
"multiprocessing.util.Finalize",
"pickle.dump",
"argparse.ArgumentParser",
"json.load",
"hotpot.data_handling.dataset.QuestionAndParagraphsSpec",
"json.loads",
"hotpot.tokenizers.CoreNLPTokenizer",
"pickle.load",
"hotpot.utils.ResourceLoader",
"hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs",
"hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs",
"multiprocessing.Pool",
"hotpot.tfidf_retriever.doc_db.DocDB",
"os.path.join",
"numpy.concatenate"
] |
[((800, 818), 'hotpot.tokenizers.CoreNLPTokenizer', 'CoreNLPTokenizer', ([], {}), '()\n', (816, 818), False, 'from hotpot.tokenizers import CoreNLPTokenizer\n'), ((823, 884), 'multiprocessing.util.Finalize', 'Finalize', (['PROCESS_TOK', 'PROCESS_TOK.shutdown'], {'exitpriority': '(100)'}), '(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)\n', (831, 884), False, 'from multiprocessing.util import Finalize\n'), ((902, 909), 'hotpot.tfidf_retriever.doc_db.DocDB', 'DocDB', ([], {}), '()\n', (907, 909), False, 'from hotpot.tfidf_retriever.doc_db import DocDB\n'), ((914, 970), 'multiprocessing.util.Finalize', 'Finalize', (['PROCESS_DB', 'PROCESS_DB.close'], {'exitpriority': '(100)'}), '(PROCESS_DB, PROCESS_DB.close, exitpriority=100)\n', (922, 970), False, 'from multiprocessing.util import Finalize\n'), ((7459, 7514), 'multiprocessing.Pool', 'ProcessPool', (['num_workers'], {'initializer': 'init', 'initargs': '[]'}), '(num_workers, initializer=init, initargs=[])\n', (7470, 7514), True, 'from multiprocessing import Pool as ProcessPool\n'), ((14155, 14210), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Encode a dataset"""'}), "(description='Encode a dataset')\n", (14178, 14210), False, 'import argparse\n'), ((3136, 3166), 'os.path.abspath', 'os.path.abspath', (['encodings_dir'], {}), '(encodings_dir)\n', (3151, 3166), False, 'import os\n'), ((3286, 3337), 'os.path.join', 'join', (['self.encodings_dir', '"""title_to_filenames.json"""'], {}), "(self.encodings_dir, 'title_to_filenames.json')\n", (3290, 3337), False, 'from os.path import join\n'), ((3803, 3866), 'os.path.join', 'join', (['self.encodings_dir', 'f"""{self.titles2filenames[title]}.npy"""'], {}), "(self.encodings_dir, f'{self.titles2filenames[title]}.npy')\n", (3807, 3866), False, 'from os.path import join\n'), ((3932, 4005), 'os.path.join', 'join', (['self.encodings_dir', 'f"""{self.titles2filenames[title]}_idx2pname.pkl"""'], {}), "(self.encodings_dir, f'{self.titles2filenames[title]}_idx2pname.pkl')\n", (3936, 4005), False, 'from os.path import join\n'), ((5099, 5127), 'numpy.concatenate', 'np.concatenate', (['reps'], {'axis': '(0)'}), '(reps, axis=0)\n', (5113, 5127), True, 'import numpy as np\n'), ((9347, 9470), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'None', 'max_num_contexts': '(1)', 'max_num_question_words': 'None', 'max_num_context_words': 'None'}), '(batch_size=None, max_num_contexts=1,\n max_num_question_words=None, max_num_context_words=None)\n', (9372, 9470), False, 'from hotpot.data_handling.dataset import QuestionAndParagraphsSpec\n'), ((9823, 9946), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'None', 'max_num_contexts': '(2)', 'max_num_question_words': 'None', 'max_num_context_words': 'None'}), '(batch_size=None, max_num_contexts=2,\n max_num_question_words=None, max_num_context_words=None)\n', (9848, 9946), False, 'from hotpot.data_handling.dataset import QuestionAndParagraphsSpec\n'), ((4274, 4288), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4285, 4288), False, 'import pickle\n'), ((5496, 5518), 'pickle.dump', 'pickle.dump', (['id2par', 'f'], {}), '(id2par, f)\n', (5507, 5518), False, 'import pickle\n'), ((7640, 7652), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7649, 7652), False, 'import json\n'), ((5331, 5382), 'json.dump', 'json.dump', (['{title: self.titles2filenames[title]}', 'f'], {}), '({title: self.titles2filenames[title]}, f)\n', 
(5340, 5382), False, 'import json\n'), ((8415, 8427), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8424, 8427), False, 'import json\n'), ((9666, 9682), 'hotpot.utils.ResourceLoader', 'ResourceLoader', ([], {}), '()\n', (9680, 9682), False, 'from hotpot.utils import ResourceLoader\n'), ((10144, 10160), 'hotpot.utils.ResourceLoader', 'ResourceLoader', ([], {}), '()\n', (10158, 10160), False, 'from hotpot.utils import ResourceLoader\n'), ((11771, 11893), 'hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs', 'BinaryQuestionAndParagraphs', ([], {'question': 'dummy_question', 'paragraphs': '[x]', 'label': '(1)', 'num_distractors': '(0)', 'question_id': '"""dummy"""'}), "(question=dummy_question, paragraphs=[x], label=\n 1, num_distractors=0, question_id='dummy')\n", (11798, 11893), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((12181, 12352), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 'IterativeQuestionAndParagraphs', ([], {'question': 'dummy_question', 'paragraphs': '[x, dummy_question]', 'first_label': '(1)', 'second_label': '(1)', 'question_id': '"""dummy"""', 'sentence_segments': 'None'}), "(question=dummy_question, paragraphs=[x,\n dummy_question], first_label=1, second_label=1, question_id='dummy',\n sentence_segments=None)\n", (12211, 12352), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((3700, 3716), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3710, 3716), False, 'import json\n'), ((8597, 8604), 'hotpot.tfidf_retriever.doc_db.DocDB', 'DocDB', ([], {}), '()\n', (8602, 8604), False, 'from hotpot.tfidf_retriever.doc_db import DocDB\n')]
|
import pytest
from numpy import allclose, array, asarray, add, ndarray, generic
from lightning import series, image
pytestmark = pytest.mark.usefixtures("eng")
def test_first(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
assert allclose(data.first(), [1, 2, 3])
data = image.fromlist([array([[1, 2], [3, 4]]), array([[5, 6], [7, 8]])], engine=eng)
assert allclose(data.first(), [[1, 2], [3, 4]])
def test_asarray(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
converted = asarray(data)
assert allclose(converted, [[1, 2, 3], [4, 5, 6]])
def test_casting(eng):
data = series.fromlist([array([1, 2, 3], 'int16')], engine=eng)
assert data.astype('int64').toarray().dtype == 'int64'
assert data.astype('float32').toarray().dtype == 'float32'
assert data.astype('float64').toarray().dtype == 'float64'
assert data.astype('float16', casting='unsafe').toarray().dtype == 'float16'
def test_slicing(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
assert data.toarray().shape == (2, 3)
assert data[:, :].shape == (2, 3)
assert data[:, :].toarray().shape == (2, 3)
assert data[0, :].shape == (1, 3)
assert data[0, :].toarray().shape == (3,)
def test_toarray(eng):
original = [array([1, 2, 3]), array([4, 5, 6])]
data = series.fromlist(original, engine=eng)
assert allclose(data.toarray(), original)
original = [array([[1, 2], [3, 4]]), array([[5, 6], [7, 8]])]
data = image.fromlist(original, engine=eng)
assert allclose(data.toarray(), original)
def test_elementwise(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2raw = asarray([[7, 8, 9], [10, 11, 12]])
mat1 = series.fromlist(mat1raw, engine=eng)
mat2 = series.fromlist(mat2raw, engine=eng)
result = mat1.element_wise(mat2, add)
truth = mat1raw + mat2raw
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(3))
def test_elementwise_scalar(eng):
matraw = asarray([[1, 2, 3], [4, 5, 6]])
mat = series.fromlist(matraw, engine=eng)
result = mat.element_wise(2, add)
truth = matraw + 2
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(3))
def test_elementwise_plus(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2raw = asarray([[7, 8, 9], [10, 11, 12]])
mat1 = series.fromlist(mat1raw, engine=eng)
mat2 = series.fromlist(mat2raw, engine=eng)
result = mat1.plus(mat2)
truth = mat1raw + mat2raw
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(3))
def test_reduce(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
reduced = data.reduce(lambda x, y: x + y)
assert allclose(reduced.shape, [1, 3])
assert allclose(reduced.toarray(), [5, 7, 9])
def test_map(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
mapped = data.map(lambda x: x.sum())
assert allclose(mapped.shape, [2, 1])
assert allclose(mapped.toarray(), [6, 15])
mapped = data.map(lambda x: x + 1)
assert allclose(mapped.shape, [2, 3])
assert allclose(mapped.toarray(), [[2, 3, 4], [5, 6, 7]])
def test_map_with_keys(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
mapped = data.map(lambda kv: kv[0] + kv[1], with_keys=True)
assert allclose(mapped.shape, [2, 3])
assert allclose(mapped.toarray(), [[1, 2, 3], [5, 6, 7]])
data = image.fromlist([array([[1, 1], [1, 1]]), array([[2, 2], [2, 2]])], engine=eng)
mapped = data.map(lambda kv: kv[0] + kv[1], with_keys=True)
assert allclose(mapped.shape, [2, 2, 2])
assert allclose(mapped.toarray(), [[[1, 1], [1, 1]], [[3, 3], [3, 3]]])
def test_repartition(eng):
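    # Repartitioning should leave the first element and its type unchanged for
    # both image and series data.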
if eng is not None:
data = image.fromlist([array([1, 1]), array([2, 2]), array([3, 3]), array([4, 4]),
array([5, 5]), array([6, 6]), array([7, 7]), array([8, 8]),
array([9, 9]), array([10, 10]), array([11, 11]), array([12, 12])],
engine=eng, npartitions=10)
assert allclose(data.first(), array([1, 1]))
assert isinstance(data.first(), (ndarray, generic))
data = data.repartition(3)
assert allclose(data.first(), array([1, 1]))
data = series.fromlist([array([1, 1]), array([2, 2]), array([3, 3]), array([4, 4]),
array([5, 5]), array([6, 6]), array([7, 7]), array([8, 8]),
array([9, 9]), array([10, 10]), array([11, 11]), array([12, 12])],
engine=eng, npartitions=10)
assert allclose(data.first(), array([1, 1]))
data = data.repartition(3)
assert allclose(data.first(), array([1, 1]))
assert isinstance(data.first(), (ndarray, generic))
|
[
"numpy.asarray",
"numpy.allclose",
"numpy.array",
"lightning.image.fromlist",
"lightning.series.fromlist",
"pytest.mark.usefixtures"
] |
[((131, 161), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""eng"""'], {}), "('eng')\n", (154, 161), False, 'import pytest\n'), ((569, 582), 'numpy.asarray', 'asarray', (['data'], {}), '(data)\n', (576, 582), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((595, 638), 'numpy.allclose', 'allclose', (['converted', '[[1, 2, 3], [4, 5, 6]]'], {}), '(converted, [[1, 2, 3], [4, 5, 6]])\n', (603, 638), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1400, 1437), 'lightning.series.fromlist', 'series.fromlist', (['original'], {'engine': 'eng'}), '(original, engine=eng)\n', (1415, 1437), False, 'from lightning import series, image\n'), ((1561, 1597), 'lightning.image.fromlist', 'image.fromlist', (['original'], {'engine': 'eng'}), '(original, engine=eng)\n', (1575, 1597), False, 'from lightning import series, image\n'), ((1687, 1718), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (1694, 1718), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1733, 1767), 'numpy.asarray', 'asarray', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (1740, 1767), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1779, 1815), 'lightning.series.fromlist', 'series.fromlist', (['mat1raw'], {'engine': 'eng'}), '(mat1raw, engine=eng)\n', (1794, 1815), False, 'from lightning import series, image\n'), ((1827, 1863), 'lightning.series.fromlist', 'series.fromlist', (['mat2raw'], {'engine': 'eng'}), '(mat2raw, engine=eng)\n', (1842, 1863), False, 'from lightning import series, image\n'), ((2074, 2105), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (2081, 2105), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2116, 2151), 'lightning.series.fromlist', 'series.fromlist', (['matraw'], {'engine': 'eng'}), '(matraw, engine=eng)\n', (2131, 2151), False, 'from lightning import series, image\n'), ((2350, 2381), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (2357, 2381), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2396, 2430), 'numpy.asarray', 'asarray', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (2403, 2430), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2442, 2478), 'lightning.series.fromlist', 'series.fromlist', (['mat1raw'], {'engine': 'eng'}), '(mat1raw, engine=eng)\n', (2457, 2478), False, 'from lightning import series, image\n'), ((2490, 2526), 'lightning.series.fromlist', 'series.fromlist', (['mat2raw'], {'engine': 'eng'}), '(mat2raw, engine=eng)\n', (2505, 2526), False, 'from lightning import series, image\n'), ((2833, 2864), 'numpy.allclose', 'allclose', (['reduced.shape', '[1, 3]'], {}), '(reduced.shape, [1, 3])\n', (2841, 2864), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3065, 3095), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 1]'], {}), '(mapped.shape, [2, 1])\n', (3073, 3095), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3193, 3223), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 3]'], {}), '(mapped.shape, [2, 3])\n', (3201, 3223), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3469, 3499), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 3]'], {}), '(mapped.shape, [2, 3])\n', 
(3477, 3499), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3727, 3760), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 2, 2]'], {}), '(mapped.shape, [2, 2, 2])\n', (3735, 3760), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1353, 1369), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1358, 1369), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1371, 1387), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (1376, 1387), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1500, 1523), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1505, 1523), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1525, 1548), 'numpy.array', 'array', (['[[5, 6], [7, 8]]'], {}), '([[5, 6], [7, 8]])\n', (1530, 1548), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((213, 229), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (218, 229), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((231, 247), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (236, 247), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((334, 357), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (339, 357), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((359, 382), 'numpy.array', 'array', (['[[5, 6], [7, 8]]'], {}), '([[5, 6], [7, 8]])\n', (364, 382), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((503, 519), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (508, 519), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((521, 537), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (526, 537), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((692, 717), 'numpy.array', 'array', (['[1, 2, 3]', '"""int16"""'], {}), "([1, 2, 3], 'int16')\n", (697, 717), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1051, 1067), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1056, 1067), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1069, 1085), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (1074, 1085), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2727, 2743), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2732, 2743), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2745, 2761), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (2750, 2761), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2964, 2980), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2969, 2980), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2982, 2998), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (2987, 2998), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3345, 3361), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (3350, 3361), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3363, 3379), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (3368, 3379), False, 'from numpy import allclose, array, asarray, 
add, ndarray, generic\n'), ((3589, 3612), 'numpy.array', 'array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (3594, 3612), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3614, 3637), 'numpy.array', 'array', (['[[2, 2], [2, 2]]'], {}), '([[2, 2], [2, 2]])\n', (3619, 3637), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4269, 4282), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4274, 4282), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4417, 4430), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4422, 4430), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4813, 4826), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4818, 4826), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4901, 4914), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4906, 4914), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3921, 3934), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (3926, 3934), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3936, 3949), 'numpy.array', 'array', (['[2, 2]'], {}), '([2, 2])\n', (3941, 3949), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3951, 3964), 'numpy.array', 'array', (['[3, 3]'], {}), '([3, 3])\n', (3956, 3964), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3966, 3979), 'numpy.array', 'array', (['[4, 4]'], {}), '([4, 4])\n', (3971, 3979), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4013, 4026), 'numpy.array', 'array', (['[5, 5]'], {}), '([5, 5])\n', (4018, 4026), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4028, 4041), 'numpy.array', 'array', (['[6, 6]'], {}), '([6, 6])\n', (4033, 4041), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4043, 4056), 'numpy.array', 'array', (['[7, 7]'], {}), '([7, 7])\n', (4048, 4056), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4058, 4071), 'numpy.array', 'array', (['[8, 8]'], {}), '([8, 8])\n', (4063, 4071), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4105, 4118), 'numpy.array', 'array', (['[9, 9]'], {}), '([9, 9])\n', (4110, 4118), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4120, 4135), 'numpy.array', 'array', (['[10, 10]'], {}), '([10, 10])\n', (4125, 4135), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4137, 4152), 'numpy.array', 'array', (['[11, 11]'], {}), '([11, 11])\n', (4142, 4152), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4154, 4169), 'numpy.array', 'array', (['[12, 12]'], {}), '([12, 12])\n', (4159, 4169), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4465, 4478), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4470, 4478), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4480, 4493), 'numpy.array', 'array', (['[2, 2]'], {}), '([2, 2])\n', (4485, 4493), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4495, 4508), 'numpy.array', 'array', (['[3, 3]'], {}), '([3, 3])\n', (4500, 4508), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4510, 4523), 'numpy.array', 
'array', (['[4, 4]'], {}), '([4, 4])\n', (4515, 4523), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4557, 4570), 'numpy.array', 'array', (['[5, 5]'], {}), '([5, 5])\n', (4562, 4570), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4572, 4585), 'numpy.array', 'array', (['[6, 6]'], {}), '([6, 6])\n', (4577, 4585), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4587, 4600), 'numpy.array', 'array', (['[7, 7]'], {}), '([7, 7])\n', (4592, 4600), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4602, 4615), 'numpy.array', 'array', (['[8, 8]'], {}), '([8, 8])\n', (4607, 4615), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4649, 4662), 'numpy.array', 'array', (['[9, 9]'], {}), '([9, 9])\n', (4654, 4662), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4664, 4679), 'numpy.array', 'array', (['[10, 10]'], {}), '([10, 10])\n', (4669, 4679), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4681, 4696), 'numpy.array', 'array', (['[11, 11]'], {}), '([11, 11])\n', (4686, 4696), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4698, 4713), 'numpy.array', 'array', (['[12, 12]'], {}), '([12, 12])\n', (4703, 4713), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 5 08:18:05 2022
https://thatascience.com/learn-machine-learning/pipeline-in-scikit-learn/
@author: qian.cao
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import os
import sys
sys.path.append("../bonebox/metrics/")
from FeaturesRadiomics import *
import matplotlib.pyplot as plt
if __name__ == "__main__":
outDir = "/gpfs_projects/qian.cao/BoneBox-out/test_20220422_bin_cross_parallel_biomarker_C/"
os.makedirs(outDir,exist_ok = True)
featuresDir = "/gpfs_projects/qian.cao/BoneBox-out/test_20220422_bin_cross_parallel/"
# Copied from bin_cross_parallel
nScales = np.linspace(1.2, 0.2, 60) # change noise only # sweeps across noise and resolution settings
rScales = np.linspace(1, 0.3, 40)
# Size of the test split
num_bones_test = 7 # number of bones reserved for testing
test_split_size = num_bones_test/16
num_test = int(num_bones_test*13)
featureNames = getRadiomicFeatureNames() # TODO: save and read from file
features = np.load(featuresDir+"featuresArray.npy")
fem_dir = "../data/"
roi_vm_mean = np.load(fem_dir+"roi_vm_mean.npy")
# Training and testing scores
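    # Result arrays are indexed over (noise, resolution, instance); y_preds
    # additionally keeps the prediction for every held-out test sample.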
y_preds = np.zeros((num_test,features.shape[2],features.shape[3],features.shape[4]))
r2Test = np.zeros((features.shape[2],features.shape[3],features.shape[4]))
importances = np.zeros((features.shape[1],features.shape[2],features.shape[3],features.shape[4]))
# remember to save y_test as well, this is constant throughout the script
# for cind in range(features.shape[2]): # imaging condition
#%% Reference Configuration
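    # Fit a single random-forest pipeline on the training split of this
    # reference condition (noise index 30, resolution index 15, instance 1)
    # and reuse it for every noise/resolution setting in the sweep below.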
ref_config = (30,15,1)
indNoise, indResolution, sind = ref_config # TODO: think about just having random sind
feat = features[:,:,indNoise,indResolution,sind]
X = feat
y = roi_vm_mean
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split_size, random_state = 3, shuffle=False)
rf_pipe = Pipeline([('scl', StandardScaler()),
('reg',RandomForestRegressor(n_estimators=100, min_samples_split=10, random_state=0, n_jobs=-1))])
rf_pipe.fit(X_train, y_train)
#%% Run through all scenarios
for indNoise, nscale in enumerate(nScales):
for indResolution, rscale in enumerate(rScales):
print(f"noise: {indNoise}, resolution: {indResolution}")
for sind in range(features.shape[4]): # seed, instance
# # feature
feat = features[:,:,indNoise,indResolution,sind]
# # data and target
X = feat
y = roi_vm_mean
# # Splitting data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split_size, random_state = 3, shuffle=False)
# # Random forest Tree Regression Pipeline
# rf_pipe = Pipeline([('scl', StandardScaler()),
# ('reg',RandomForestRegressor(n_estimators=100, min_samples_split=10, random_state=0, n_jobs=-1))])
# rf_pipe.fit(X_train, y_train)
y_pred = rf_pipe.predict(X_test)
# scoreTest[cind,sind] = rf_pipe.score(y_pred, y_test)
# Save output
y_preds[:,indNoise,indResolution,sind] = y_pred
r2Test[indNoise,indResolution,sind] = np.corrcoef(y_pred, y_test)[0,1]**2
importances[:,indNoise,indResolution,sind] = rf_pipe['reg'].feature_importances_
# Correlation plot
plt.ioff()
plt.figure()
plt.plot(y_test, y_pred,'b.')
plt.plot(*(np.linspace(0,np.max(y)),)*2,'k--')
plt.xlabel("True")
plt.ylabel("Predicted")
plt.xlim([0,np.max(y)])
plt.ylim([0,np.max(y)])
plt.title(f"r2: {r2Test[indNoise,indResolution,sind]} Noise: {indNoise}, Resolution: {indResolution}, Instance: {sind}")
plt.savefig(outDir+f"correlation_{indNoise}_{indResolution}_{sind}.png")
plt.close("all")
np.save(outDir+"y_preds",y_preds)
np.save(outDir+"y_test",y_test)
np.save(outDir+"r2Test",r2Test)
np.save(outDir+"importances",importances)
#%% Figures and Analysis
y_preds = np.load(outDir+"y_preds.npy")
y_test = np.load(outDir+"y_test.npy")
r2Test = np.load(outDir+"r2Test.npy")
importances = np.load(outDir+"importances.npy")
plt.ion()
fig = plt.figure(figsize=(7,8))
cax = fig.axes
im = plt.imshow(np.mean(r2Test,axis=2))
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title("r2 mean")
plt.colorbar()
plt.savefig(outDir+"fig-r2-mean.png")
plt.figure(figsize=(7,8))
plt.imshow(np.std(r2Test,axis=2),cmap="inferno")
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title("r2 std")
plt.colorbar()
plt.savefig(outDir+"fig-r2-std.png")
#%% feature importances
for ind in range(importances.shape[0]):
print(ind)
img = importances[ind,:,:,:]
fn = featureNames[ind]
plt.ioff()
fig = plt.figure(figsize=(7,8))
cax = fig.axes
im = plt.imshow(np.mean(img,axis=2),cmap="YlGn")
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title(f"Importance Mean: {fn}")
plt.colorbar()
plt.savefig(outDir+f"fig-imp-{fn}-mean.png")
plt.figure(figsize=(7,8))
plt.imshow(np.std(img,axis=2),cmap="BuPu")
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title(f"Importance Std: {fn}")
plt.colorbar()
plt.savefig(outDir+f"fig-imp-{fn}-std.png")
plt.close("all")
#%%
|
[
"matplotlib.pyplot.title",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.figure",
"numpy.mean",
"sys.path.append",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.linspace",
"numpy.save",
"numpy.corrcoef",
"sklearn.ensemble.RandomForestRegressor",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.ylabel",
"os.makedirs",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((528, 566), 'sys.path.append', 'sys.path.append', (['"""../bonebox/metrics/"""'], {}), "('../bonebox/metrics/')\n", (543, 566), False, 'import sys\n'), ((762, 796), 'os.makedirs', 'os.makedirs', (['outDir'], {'exist_ok': '(True)'}), '(outDir, exist_ok=True)\n', (773, 796), False, 'import os\n'), ((949, 974), 'numpy.linspace', 'np.linspace', (['(1.2)', '(0.2)', '(60)'], {}), '(1.2, 0.2, 60)\n', (960, 974), True, 'import numpy as np\n'), ((1055, 1078), 'numpy.linspace', 'np.linspace', (['(1)', '(0.3)', '(40)'], {}), '(1, 0.3, 40)\n', (1066, 1078), True, 'import numpy as np\n'), ((1350, 1392), 'numpy.load', 'np.load', (["(featuresDir + 'featuresArray.npy')"], {}), "(featuresDir + 'featuresArray.npy')\n", (1357, 1392), True, 'import numpy as np\n'), ((1439, 1475), 'numpy.load', 'np.load', (["(fem_dir + 'roi_vm_mean.npy')"], {}), "(fem_dir + 'roi_vm_mean.npy')\n", (1446, 1475), True, 'import numpy as np\n'), ((1527, 1604), 'numpy.zeros', 'np.zeros', (['(num_test, features.shape[2], features.shape[3], features.shape[4])'], {}), '((num_test, features.shape[2], features.shape[3], features.shape[4]))\n', (1535, 1604), True, 'import numpy as np\n'), ((1615, 1682), 'numpy.zeros', 'np.zeros', (['(features.shape[2], features.shape[3], features.shape[4])'], {}), '((features.shape[2], features.shape[3], features.shape[4]))\n', (1623, 1682), True, 'import numpy as np\n'), ((1699, 1790), 'numpy.zeros', 'np.zeros', (['(features.shape[1], features.shape[2], features.shape[3], features.shape[4])'], {}), '((features.shape[1], features.shape[2], features.shape[3], features\n .shape[4]))\n', (1707, 1790), True, 'import numpy as np\n'), ((2220, 2305), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_split_size', 'random_state': '(3)', 'shuffle': '(False)'}), '(X, y, test_size=test_split_size, random_state=3, shuffle=False\n )\n', (2236, 2305), False, 'from sklearn.model_selection import train_test_split\n'), ((4943, 4974), 'numpy.load', 'np.load', (["(outDir + 'y_preds.npy')"], {}), "(outDir + 'y_preds.npy')\n", (4950, 4974), True, 'import numpy as np\n'), ((4986, 5016), 'numpy.load', 'np.load', (["(outDir + 'y_test.npy')"], {}), "(outDir + 'y_test.npy')\n", (4993, 5016), True, 'import numpy as np\n'), ((5028, 5058), 'numpy.load', 'np.load', (["(outDir + 'r2Test.npy')"], {}), "(outDir + 'r2Test.npy')\n", (5035, 5058), True, 'import numpy as np\n'), ((5075, 5110), 'numpy.load', 'np.load', (["(outDir + 'importances.npy')"], {}), "(outDir + 'importances.npy')\n", (5082, 5110), True, 'import numpy as np\n'), ((5118, 5127), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5125, 5127), True, 'import matplotlib.pyplot as plt\n'), ((5143, 5169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (5153, 5169), True, 'import matplotlib.pyplot as plt\n'), ((5236, 5260), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5246, 5260), True, 'import matplotlib.pyplot as plt\n'), ((5265, 5290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (5275, 5290), True, 'import matplotlib.pyplot as plt\n'), ((5295, 5315), 'matplotlib.pyplot.title', 'plt.title', (['"""r2 mean"""'], {}), "('r2 mean')\n", (5304, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5334), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5332, 5334), True, 'import matplotlib.pyplot as plt\n'), ((5339, 5378), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(["(outDir + 'fig-r2-mean.png')"], {}), "(outDir + 'fig-r2-mean.png')\n", (5350, 5378), True, 'import matplotlib.pyplot as plt\n'), ((5386, 5412), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (5396, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5469, 5493), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5479, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (5508, 5523), True, 'import matplotlib.pyplot as plt\n'), ((5528, 5547), 'matplotlib.pyplot.title', 'plt.title', (['"""r2 std"""'], {}), "('r2 std')\n", (5537, 5547), True, 'import matplotlib.pyplot as plt\n'), ((5552, 5566), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5564, 5566), True, 'import matplotlib.pyplot as plt\n'), ((5571, 5609), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + 'fig-r2-std.png')"], {}), "(outDir + 'fig-r2-std.png')\n", (5582, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5231), 'numpy.mean', 'np.mean', (['r2Test'], {'axis': '(2)'}), '(r2Test, axis=2)\n', (5215, 5231), True, 'import numpy as np\n'), ((5427, 5449), 'numpy.std', 'np.std', (['r2Test'], {'axis': '(2)'}), '(r2Test, axis=2)\n', (5433, 5449), True, 'import numpy as np\n'), ((5803, 5813), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (5811, 5813), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5863), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (5847, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5951, 5975), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5961, 5975), True, 'import matplotlib.pyplot as plt\n'), ((5984, 6009), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (5994, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6018, 6053), 'matplotlib.pyplot.title', 'plt.title', (['f"""Importance Mean: {fn}"""'], {}), "(f'Importance Mean: {fn}')\n", (6027, 6053), True, 'import matplotlib.pyplot as plt\n'), ((6062, 6076), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6074, 6076), True, 'import matplotlib.pyplot as plt\n'), ((6085, 6131), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + f'fig-imp-{fn}-mean.png')"], {}), "(outDir + f'fig-imp-{fn}-mean.png')\n", (6096, 6131), True, 'import matplotlib.pyplot as plt\n'), ((6147, 6173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (6157, 6173), True, 'import matplotlib.pyplot as plt\n'), ((6232, 6256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (6242, 6256), True, 'import matplotlib.pyplot as plt\n'), ((6265, 6290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (6275, 6290), True, 'import matplotlib.pyplot as plt\n'), ((6299, 6333), 'matplotlib.pyplot.title', 'plt.title', (['f"""Importance Std: {fn}"""'], {}), "(f'Importance Std: {fn}')\n", (6308, 6333), True, 'import matplotlib.pyplot as plt\n'), ((6342, 6356), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6354, 6356), True, 'import matplotlib.pyplot as plt\n'), ((6365, 6410), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + f'fig-imp-{fn}-std.png')"], {}), "(outDir + f'fig-imp-{fn}-std.png')\n", (6376, 6410), True, 'import matplotlib.pyplot as plt\n'), ((6426, 
6442), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6435, 6442), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4742), 'numpy.save', 'np.save', (["(outDir + 'y_preds')", 'y_preds'], {}), "(outDir + 'y_preds', y_preds)\n", (4713, 4742), True, 'import numpy as np\n'), ((4752, 4786), 'numpy.save', 'np.save', (["(outDir + 'y_test')", 'y_test'], {}), "(outDir + 'y_test', y_test)\n", (4759, 4786), True, 'import numpy as np\n'), ((4796, 4830), 'numpy.save', 'np.save', (["(outDir + 'r2Test')", 'r2Test'], {}), "(outDir + 'r2Test', r2Test)\n", (4803, 4830), True, 'import numpy as np\n'), ((4840, 4884), 'numpy.save', 'np.save', (["(outDir + 'importances')", 'importances'], {}), "(outDir + 'importances', importances)\n", (4847, 4884), True, 'import numpy as np\n'), ((5910, 5930), 'numpy.mean', 'np.mean', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (5917, 5930), True, 'import numpy as np\n'), ((6192, 6211), 'numpy.std', 'np.std', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (6198, 6211), True, 'import numpy as np\n'), ((2335, 2351), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2349, 2351), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2385, 2478), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'min_samples_split': '(10)', 'random_state': '(0)', 'n_jobs': '(-1)'}), '(n_estimators=100, min_samples_split=10, random_state=\n 0, n_jobs=-1)\n', (2406, 2478), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((3168, 3253), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_split_size', 'random_state': '(3)', 'shuffle': '(False)'}), '(X, y, test_size=test_split_size, random_state=3, shuffle=False\n )\n', (3184, 3253), False, 'from sklearn.model_selection import train_test_split\n'), ((4114, 4124), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (4122, 4124), True, 'import matplotlib.pyplot as plt\n'), ((4141, 4153), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4151, 4153), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4200), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test', 'y_pred', '"""b."""'], {}), "(y_test, y_pred, 'b.')\n", (4178, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4279, 4297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""True"""'], {}), "('True')\n", (4289, 4297), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4337), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predicted"""'], {}), "('Predicted')\n", (4324, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4434, 4566), 'matplotlib.pyplot.title', 'plt.title', (['f"""r2: {r2Test[indNoise, indResolution, sind]} Noise: {indNoise}, Resolution: {indResolution}, Instance: {sind}"""'], {}), "(\n f'r2: {r2Test[indNoise, indResolution, sind]} Noise: {indNoise}, Resolution: {indResolution}, Instance: {sind}'\n )\n", (4443, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4645), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + f'correlation_{indNoise}_{indResolution}_{sind}.png')"], {}), "(outDir + f'correlation_{indNoise}_{indResolution}_{sind}.png')\n", (4582, 4645), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4676), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4669, 4676), True, 'import matplotlib.pyplot as plt\n'), ((3913, 3940), 'numpy.corrcoef', 'np.corrcoef', (['y_pred', 'y_test'], {}), '(y_pred, y_test)\n', (3924, 3940), True, 'import numpy as 
np\n'), ((4366, 4375), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4372, 4375), True, 'import numpy as np\n'), ((4406, 4415), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4412, 4415), True, 'import numpy as np\n'), ((4241, 4250), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4247, 4250), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Python Standard Library
pass
# Third-Party Libraries
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb
# Local Library
import mivp
# ------------------------------------------------------------------------------
grey_4 = to_rgb("#ced4da")
# ------------------------------------------------------------------------------
def Q(f, xs, ys):
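    # Evaluate the planar vector field f on the grid spanned by xs and ys;
    # returns X, Y and the two component arrays (as used by plt.streamplot).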
X, Y = np.meshgrid(xs, ys)
v = np.vectorize
fx = v(lambda x, y: f([x, y])[0])
fy = v(lambda x, y: f([x, y])[1])
return X, Y, fx(X, Y), fy(X, Y)
# ------------------------------------------------------------------------------
# Vector field
def fun(t, xy):
x, y = xy
dx = - y + 0.5*np.cos(0.5*t)
dy = x - np.sin(0.5*t)
return [dx, dy]
# Time span & frame rate
t_span = (0.0, 20.0)
df = 60.0
dt = 1.0 / df
t = np.arange(t_span[0], t_span[1], dt)
t = np.r_[t, t_span[1]]
# Initial set boundary
y0 = [0.0, 0.0]
radius = 0.5
n = 10
xc, yc = y0
def vectorize(fun):
return np.vectorize(fun, signature="()->(n)")
@vectorize
def boundary(s):
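    # Map s in [0, 1) to a point on the boundary of the square [-0.5, 0.5]^2,
    # traversing one side per quarter of the parameter range.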
if 0 <= s < 0.25:
return np.array([-0.5 + 4 * s, 0.5])
elif 0.25 <= s < 0.5:
return np.array([0.5, 0.5 - 4 * (s - 0.25)])
elif 0.5 <= s < 0.75:
return np.array([0.5 - 4 * (s - 0.5), -0.5])
else:
return np.array([-0.5, -0.5 + 4 * (s - 0.75)])
# Precision
rtol = 1e-9 # default: 1e-3
atol = 1e-12 # default: 1e-6
# ------------------------------------------------------------------------------
fig = plt.figure()
x = y = np.linspace(-1.0, 1.0, 1000)
#plt.streamplot(*Q(lambda xy: fun(0, xy), x, y), color=grey_4, zorder=-100)
c = cx, cy = np.array([0.0, 0.0])
plt.plot([cx], [cy], lw=3.0, marker="o", ms=10.0, markevery=[-1],
markeredgecolor="white", color="black")
plt.axis("square")
plt.axis("off")
data = mivp.solve_alt(
fun=fun,
t_eval=t,
boundary=lambda s: 0.5*boundary(s),
boundary_rtol=0.0,
boundary_atol=0.05,
rtol=rtol,
atol=atol,
method="LSODA",
)
circle = None
def display_radius(i, axes):
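    # Redraw the dashed circle centered at c whose radius is the largest
    # distance from c to the boundary points of frame i.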
global circle
if circle:
circle.remove()
x, y = data[i]
r = max(np.sqrt((x - cx)**2 + (y - cy)**2))
theta = np.linspace(0, 2*np.pi, 1000)
circle = axes.plot(
cx+r*np.cos(theta), cy+r*np.sin(theta),
linestyle='dashed', color="k", linewidth=1.0,
)[0]
plt.axis([-4/3, 4/3, -1, 1])
mivp.generate_movie(data, filename="hausdorff.mp4", axes=fig.axes[0], fps=df, hook=display_radius)
|
[
"numpy.meshgrid",
"numpy.vectorize",
"matplotlib.pyplot.plot",
"mivp.generate_movie",
"matplotlib.pyplot.axis",
"matplotlib.colors.to_rgb",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"numpy.sqrt"
] |
[((289, 306), 'matplotlib.colors.to_rgb', 'to_rgb', (['"""#ced4da"""'], {}), "('#ced4da')\n", (295, 306), False, 'from matplotlib.colors import to_rgb\n'), ((858, 893), 'numpy.arange', 'np.arange', (['t_span[0]', 't_span[1]', 'dt'], {}), '(t_span[0], t_span[1], dt)\n', (867, 893), True, 'import numpy as np\n'), ((1544, 1556), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1554, 1556), True, 'import matplotlib.pyplot as plt\n'), ((1565, 1593), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(1000)'], {}), '(-1.0, 1.0, 1000)\n', (1576, 1593), True, 'import numpy as np\n'), ((1683, 1703), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1691, 1703), True, 'import numpy as np\n'), ((1704, 1813), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx]', '[cy]'], {'lw': '(3.0)', 'marker': '"""o"""', 'ms': '(10.0)', 'markevery': '[-1]', 'markeredgecolor': '"""white"""', 'color': '"""black"""'}), "([cx], [cy], lw=3.0, marker='o', ms=10.0, markevery=[-1],\n markeredgecolor='white', color='black')\n", (1712, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1836), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (1826, 1836), True, 'import matplotlib.pyplot as plt\n'), ((1837, 1852), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1845, 1852), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2533), 'mivp.generate_movie', 'mivp.generate_movie', (['data'], {'filename': '"""hausdorff.mp4"""', 'axes': 'fig.axes[0]', 'fps': 'df', 'hook': 'display_radius'}), "(data, filename='hausdorff.mp4', axes=fig.axes[0], fps=\n df, hook=display_radius)\n", (2449, 2533), False, 'import mivp\n'), ((419, 438), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (430, 438), True, 'import numpy as np\n'), ((1023, 1061), 'numpy.vectorize', 'np.vectorize', (['fun'], {'signature': '"""()->(n)"""'}), "(fun, signature='()->(n)')\n", (1035, 1061), True, 'import numpy as np\n'), ((2224, 2255), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (2235, 2255), True, 'import numpy as np\n'), ((2398, 2430), 'matplotlib.pyplot.axis', 'plt.axis', (['[-4 / 3, 4 / 3, -1, 1]'], {}), '([-4 / 3, 4 / 3, -1, 1])\n', (2406, 2430), True, 'import matplotlib.pyplot as plt\n'), ((746, 761), 'numpy.sin', 'np.sin', (['(0.5 * t)'], {}), '(0.5 * t)\n', (752, 761), True, 'import numpy as np\n'), ((1129, 1158), 'numpy.array', 'np.array', (['[-0.5 + 4 * s, 0.5]'], {}), '([-0.5 + 4 * s, 0.5])\n', (1137, 1158), True, 'import numpy as np\n'), ((2176, 2214), 'numpy.sqrt', 'np.sqrt', (['((x - cx) ** 2 + (y - cy) ** 2)'], {}), '((x - cx) ** 2 + (y - cy) ** 2)\n', (2183, 2214), True, 'import numpy as np\n'), ((719, 734), 'numpy.cos', 'np.cos', (['(0.5 * t)'], {}), '(0.5 * t)\n', (725, 734), True, 'import numpy as np\n'), ((1200, 1237), 'numpy.array', 'np.array', (['[0.5, 0.5 - 4 * (s - 0.25)]'], {}), '([0.5, 0.5 - 4 * (s - 0.25)])\n', (1208, 1237), True, 'import numpy as np\n'), ((1279, 1316), 'numpy.array', 'np.array', (['[0.5 - 4 * (s - 0.5), -0.5]'], {}), '([0.5 - 4 * (s - 0.5), -0.5])\n', (1287, 1316), True, 'import numpy as np\n'), ((1342, 1381), 'numpy.array', 'np.array', (['[-0.5, -0.5 + 4 * (s - 0.75)]'], {}), '([-0.5, -0.5 + 4 * (s - 0.75)])\n', (1350, 1381), True, 'import numpy as np\n'), ((2291, 2304), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2297, 2304), True, 'import numpy as np\n'), ((2311, 2324), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2317, 2324), True, 'import 
numpy as np\n')]
|
"""
Unit tests for the density class
"""
from unittest import TestCase
import sys
sys.path.append('../src')
import numpy as np
import unittest
import suftware as sw
import os
class Density1d(TestCase):
def setUp(self):
self.N = 5
self.data = sw.simulate_density_data(distribution_type='uniform', N=self.N,seed=1)
def tearDown(self):
pass
# method that checks the main calculation of deft_1d by calling _run and ensuring that we get the correct Q_star
def test_density(self):
        actual_Q_star = sw.DensityEstimator(self.data)
expected_Q_star = np.array([.56458204, 1.66943372, 1.56915093, 1.29922676, 0.94761056, 0.60883489, 0.34458301])
self.assertEqual(actual_Q_star.Q_star.evaluate(actual_Q_star.grid).all(),expected_Q_star.all())
# helper method for test_get_data_file_hand()
def raiseFileNotFoundError(self):
return FileNotFoundError
suite = unittest.TestLoader().loadTestsFromTestCase(Density1d)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"sys.path.append",
"unittest.TextTestRunner",
"suftware.simulate_density_data",
"numpy.array",
"unittest.TestLoader",
"suftware.DensityEstimator"
] |
[((84, 109), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (99, 109), False, 'import sys\n'), ((268, 339), 'suftware.simulate_density_data', 'sw.simulate_density_data', ([], {'distribution_type': '"""uniform"""', 'N': 'self.N', 'seed': '(1)'}), "(distribution_type='uniform', N=self.N, seed=1)\n", (292, 339), True, 'import suftware as sw\n'), ((551, 581), 'suftware.DensityEstimator', 'sw.DensityEstimator', (['self.data'], {}), '(self.data)\n', (570, 581), True, 'import suftware as sw\n'), ((608, 707), 'numpy.array', 'np.array', (['[0.56458204, 1.66943372, 1.56915093, 1.29922676, 0.94761056, 0.60883489, \n 0.34458301]'], {}), '([0.56458204, 1.66943372, 1.56915093, 1.29922676, 0.94761056, \n 0.60883489, 0.34458301])\n', (616, 707), True, 'import numpy as np\n'), ((942, 963), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (961, 963), False, 'import unittest\n'), ((997, 1033), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1020, 1033), False, 'import unittest\n')]
|
from typing import Dict, Iterator, List
import matplotlib.pyplot as plt
import numpy as np
from mushroom_rl.algorithms.value.td.q_learning import QLearning
from mushroom_rl.core import Core, Agent, Environment
from mushroom_rl.policy import EpsGreedy
from mushroom_rl.utils.dataset import compute_J
from mushroom_rl.utils.parameters import Parameter
from mdp.algo.model_free.env.deep_sea import DeepSea
from mdp.algo.model_free.g_learning import GLearning
from mdp.algo.model_free.mirl import MIRL
from mdp.algo.model_free.psi_learning import PsiLearning
from mdp.experiment.model_free import Experiment
def experiment_deepsea(agent: Agent, env: Environment, n_episodes: int, k: int) -> List[np.ndarray]:
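    # Train the agent on env for n_episodes under k different seeds and
    # return one average evaluation return per seed.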
reward_k = list()
for seed in range(k):
# Set the seed
np.random.seed(seed)
# Reinforcement learning experiment
core = Core(agent, env)
# Train
core.learn(n_episodes=n_episodes, n_steps_per_fit=1, render=False, quiet=True)
# Evaluate results for n_episodes
dataset_q = core.evaluate(n_episodes=1, render=False, quiet=True)
# Compute the average objective value
r = np.mean(compute_J(dataset_q, 1))
reward_k.append(r)
return reward_k
def run():
max_steps = 7
steps = list()
k = 25
n_episodes = 100
agents = dict(
q=QLearning,
psi=PsiLearning,
g=GLearning,
mirl=MIRL
)
q = [10, 50, 90]
    labels: Iterator[List[str]] = map(lambda l: [f'{l}_median', f'{l}_10:90'], agents.keys())
markers = ['o', '^', '>', '<']
alphas = [.3, .25, .2, .15]
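    # rewards[agent] holds one list per percentile in q (10th, 50th, 90th),
    # each accumulating one value per gridworld size.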
rewards: Dict[str, List[List[np.ndarray]]] = dict()
for key in agents.keys():
l_q = list()
for _ in q:
l_q.append(list())
rewards[key] = l_q
best_reward = list()
for exponent in range(1, max_steps + 1):
size = np.power(2, exponent)
steps.append(size)
print('Step: {}, size: {}'.format(exponent, size))
# Create the grid environment
env = DeepSea(size, start=(0, 0), goal=(size - 1, size - 1))
# Use an epsilon-greedy policy
epsilon = .1
pi = EpsGreedy(epsilon=epsilon)
learning_rate = Parameter(.1 / 10)
for key, value in agents.items():
agent = value(env.info, pi, learning_rate=learning_rate)
reward_k = experiment_deepsea(agent, env, n_episodes, k)
# q_p10, q_p50, q_p90
q_p = np.percentile(reward_k, q)
reward_list = rewards[key]
for r_i, q_pi in zip(reward_list, q_p):
r_i.append(q_pi)
sum_reward = 0
for j in range(size - 2):
sum_reward -= 1 ** j * (0.01 / size)
best_reward.append(1 + (0.01 / size) + sum_reward)
steps = np.array(steps)
for label, marker, alpha, key in zip(labels, markers, alphas, agents.keys()):
q_p10, q_p50, q_p90 = rewards[key]
plt.plot(steps, np.array(q_p50), marker=marker, label=label[0])
plt.fill_between(steps, q_p10, q_p90, alpha=alpha)
plt.plot(steps, best_reward, label='Best reward')
plt.xlabel('Size of gridworld')
plt.ylabel('Cumulative average reward after 100 episodes')
plt.title('Deep Sea Experiment')
plt.legend()
plt.tight_layout()
plt.grid(True)
plt.show()
if __name__ == '__main__':
result, time = Experiment.benchmark(run)
print(time)
|
[
"matplotlib.pyplot.title",
"mdp.algo.model_free.env.deep_sea.DeepSea",
"numpy.random.seed",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.tight_layout",
"mushroom_rl.utils.dataset.compute_J",
"numpy.power",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.percentile",
"mushroom_rl.core.Core",
"matplotlib.pyplot.ylabel",
"mushroom_rl.policy.EpsGreedy",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"mushroom_rl.utils.parameters.Parameter",
"numpy.array",
"mdp.experiment.model_free.Experiment.benchmark",
"matplotlib.pyplot.xlabel"
] |
[((2804, 2819), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (2812, 2819), True, 'import numpy as np\n'), ((3081, 3130), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'best_reward'], {'label': '"""Best reward"""'}), "(steps, best_reward, label='Best reward')\n", (3089, 3130), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3166), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Size of gridworld"""'], {}), "('Size of gridworld')\n", (3145, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3229), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative average reward after 100 episodes"""'], {}), "('Cumulative average reward after 100 episodes')\n", (3181, 3229), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3266), 'matplotlib.pyplot.title', 'plt.title', (['"""Deep Sea Experiment"""'], {}), "('Deep Sea Experiment')\n", (3243, 3266), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3281, 3283), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3306), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3304, 3306), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3325), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3319, 3325), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3338, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3414), 'mdp.experiment.model_free.Experiment.benchmark', 'Experiment.benchmark', (['run'], {}), '(run)\n', (3409, 3414), False, 'from mdp.experiment.model_free import Experiment\n'), ((778, 798), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (792, 798), True, 'import numpy as np\n'), ((859, 875), 'mushroom_rl.core.Core', 'Core', (['agent', 'env'], {}), '(agent, env)\n', (863, 875), False, 'from mushroom_rl.core import Core, Agent, Environment\n'), ((1879, 1900), 'numpy.power', 'np.power', (['(2)', 'exponent'], {}), '(2, exponent)\n', (1887, 1900), True, 'import numpy as np\n'), ((2040, 2094), 'mdp.algo.model_free.env.deep_sea.DeepSea', 'DeepSea', (['size'], {'start': '(0, 0)', 'goal': '(size - 1, size - 1)'}), '(size, start=(0, 0), goal=(size - 1, size - 1))\n', (2047, 2094), False, 'from mdp.algo.model_free.env.deep_sea import DeepSea\n'), ((2169, 2195), 'mushroom_rl.policy.EpsGreedy', 'EpsGreedy', ([], {'epsilon': 'epsilon'}), '(epsilon=epsilon)\n', (2178, 2195), False, 'from mushroom_rl.policy import EpsGreedy\n'), ((2221, 2240), 'mushroom_rl.utils.parameters.Parameter', 'Parameter', (['(0.1 / 10)'], {}), '(0.1 / 10)\n', (2230, 2240), False, 'from mushroom_rl.utils.parameters import Parameter\n'), ((3025, 3075), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['steps', 'q_p10', 'q_p90'], {'alpha': 'alpha'}), '(steps, q_p10, q_p90, alpha=alpha)\n', (3041, 3075), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1184), 'mushroom_rl.utils.dataset.compute_J', 'compute_J', (['dataset_q', '(1)'], {}), '(dataset_q, 1)\n', (1170, 1184), False, 'from mushroom_rl.utils.dataset import compute_J\n'), ((2473, 2499), 'numpy.percentile', 'np.percentile', (['reward_k', 'q'], {}), '(reward_k, q)\n', (2486, 2499), True, 'import numpy as np\n'), ((2969, 2984), 'numpy.array', 'np.array', (['q_p50'], {}), '(q_p50)\n', (2977, 2984), True, 'import numpy as np\n')]
|
# Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import heapq
import os
import random
import sys
import time
import math
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import gym
from gym import spaces
from gym.envs.registration import register
from gym.utils import seeding
import numpy as np
from common import sender_obs
from common.utils import pcc_aurora_reward, read_json_file
from simulator.trace import Trace
import pandas as pd
MAX_CWND = 5000
MIN_CWND = 4
MAX_RATE = 20000
MIN_RATE = 5
REWARD_SCALE = 0.001
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
USE_LATENCY_NOISE = True
MAX_LATENCY_NOISE = 1.1
# DEBUG = True
DEBUG = False
MI_RTT_PROPORTION = 1.0
# PACKET_LOG_FLAG = False
PACKET_LOG_FLAG = True
def debug_print(msg):
if DEBUG:
print(msg, file=sys.stderr, flush=True)
class EmuReplay:
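    # Replays the timestamps and send rates recorded in
    # aurora_emulation_log.csv (see the commented-out replay hooks in
    # Network.run_for_dur).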
    def __init__(self):
df = pd.read_csv('aurora_emulation_log.csv')
self.ts = df['timestamp'].tolist()
self.send_rate = df['send_rate'].tolist()
self.idx = 0
def get_ts(self):
        if self.idx >= len(self.ts):
            self.idx = len(self.ts) - 1
ts = self.ts[self.idx]
self.idx += 1
return ts
def get_rate(self):
return self.send_rate[self.idx] / 8 / BYTES_PER_PACKET
def reset(self):
self.idx = 0
class Link():
def __init__(self, trace: Trace):
self.trace = trace
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.queue_size = self.trace.get_queue_size()
self.pkt_in_queue = 0
def get_cur_queue_delay(self, event_time):
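        # Drain the packets the link could have served since the last update,
        # then report the queueing delay of the remaining backlog.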
self.pkt_in_queue = max(0, self.pkt_in_queue -
(event_time - self.queue_delay_update_time) *
self.get_bandwidth(event_time))
self.queue_delay_update_time = event_time
cur_queue_delay = math.ceil(
self.pkt_in_queue) / self.get_bandwidth(event_time)
return cur_queue_delay
def get_cur_latency(self, event_time):
q_delay = self.get_cur_queue_delay(event_time)
# print('queue delay: ', q_delay)
return self.trace.get_delay(event_time) / 1000.0 + q_delay
def packet_enters_link(self, event_time):
if (random.random() < self.trace.get_loss_rate()):
return False
self.queue_delay = self.get_cur_queue_delay(event_time)
extra_delay = 1.0 / self.get_bandwidth(event_time)
if 1 + math.ceil(self.pkt_in_queue) > self.queue_size:
# print("{}\tDrop!".format(event_time))
return False
self.queue_delay += extra_delay
self.pkt_in_queue += 1
return True
def print_debug(self):
print("Link:")
# TODO: Do not use timestamp 0.
print("Bandwidth: %.3fMbps" % (self.trace.get_bandwidth(0)))
# TODO: Do not use timestamp 0.
print("Delay: %.3fms" % (self.trace.get_delay(0)))
print("Queue Delay: %.3fms" % (self.queue_delay * 1000))
print("One Packet Queue Delay: %.3fms" % (
1000.0 * 1 / (self.trace.get_bandwidth(0) * 1e6 / 8 / BYTES_PER_PACKET)))
print("Queue size: %dpackets" % self.queue_size)
print("Loss: %.4f" % self.trace.get_loss_rate())
def reset(self):
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.pkt_in_queue = 0
def get_bandwidth(self, ts):
return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET
class Network():
def __init__(self, senders, links, env):
self.event_count = 0
self.q = []
self.cur_time = 0.0
self.senders = senders
self.links = links
self.queue_initial_packets()
self.env = env
self.pkt_log = []
def queue_initial_packets(self):
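        # Each heap entry is (event_time, sender, event_type, next_hop,
        # latency, dropped, event_id, rto, queue_delay), ordered by event_time.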
for sender in self.senders:
sender.register_network(self)
sender.reset_obs()
heapq.heappush(self.q, (0, sender, EVENT_TYPE_SEND,
0, 0.0, False, self.event_count, sender.rto, 0))
self.event_count += 1
def reset(self):
self.pkt_log = []
self.cur_time = 0.0
self.q = []
[link.reset() for link in self.links]
[sender.reset() for sender in self.senders]
self.queue_initial_packets()
def get_cur_time(self):
return self.cur_time
def run_for_dur(self, dur, action=None):
# if self.cur_time > 1.75:
# pass
# else:
# self.senders[0].rate = self.env.replay.get_rate()
# dur = self.env.replay.get_ts() - self.cur_time
end_time = min(self.cur_time + dur, self.env.current_trace.timestamps[-1])
debug_print('MI from {} to {}, dur {}'.format(
self.cur_time, end_time, dur))
for sender in self.senders:
sender.reset_obs()
while True:
event_time, sender, event_type, next_hop, cur_latency, dropped, \
event_id, rto, event_queue_delay = self.q[0]
if event_time >= end_time:
self.cur_time = end_time
break
event_time, sender, event_type, next_hop, cur_latency, dropped, \
event_id, rto, event_queue_delay = heapq.heappop(self.q)
self.cur_time = event_time
new_event_time = event_time
new_event_type = event_type
new_next_hop = next_hop
new_latency = cur_latency
new_dropped = dropped
new_event_queue_delay = event_queue_delay
push_new_event = False
debug_print("Got %d event %s, to link %d, latency %f at time %f, "
"next_hop %d, dropped %s, event_q length %f, "
"sender rate %f, duration: %f, queue_size: %f, "
"rto: %f, cwnd: %f, ssthresh: %f, sender rto %f, "
"pkt in flight %d, wait time %d" % (
event_id, event_type, next_hop, cur_latency,
event_time, next_hop, dropped, len(self.q),
sender.rate, dur, self.links[0].queue_size,
rto, sender.cwnd, sender.ssthresh, sender.rto,
int(sender.bytes_in_flight/BYTES_PER_PACKET),
sender.pkt_loss_wait_time))
if event_type == EVENT_TYPE_ACK:
if next_hop == len(sender.path):
# if cur_latency > 1.0:
# sender.timeout(cur_latency)
# sender.on_packet_lost(cur_latency)
if rto >= 0 and cur_latency > rto and sender.pkt_loss_wait_time <= 0:
sender.timeout()
dropped = True
new_dropped = True
elif dropped:
sender.on_packet_lost(cur_latency)
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'lost',
BYTES_PER_PACKET])
else:
sender.on_packet_acked(cur_latency)
debug_print('Ack packet at {}'.format(self.cur_time))
# log packet acked
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'acked',
BYTES_PER_PACKET, cur_latency,
event_queue_delay])
else:
new_next_hop = next_hop + 1
new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
self.cur_time)
link_latency = sender.path[next_hop].get_cur_latency(
self.cur_time)
# link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
# if USE_LATENCY_NOISE:
# link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
new_latency += link_latency
new_event_time += link_latency
push_new_event = True
elif event_type == EVENT_TYPE_SEND:
if next_hop == 0:
if sender.can_send_packet():
sender.on_packet_sent()
# print('Send packet at {}'.format(self.cur_time))
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'sent',
BYTES_PER_PACKET])
push_new_event = True
heapq.heappush(self.q, (self.cur_time + (1.0 / sender.rate),
sender, EVENT_TYPE_SEND, 0, 0.0,
False, self.event_count, sender.rto,
0))
self.event_count += 1
else:
push_new_event = True
if next_hop == sender.dest:
new_event_type = EVENT_TYPE_ACK
new_next_hop = next_hop + 1
new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
self.cur_time)
link_latency = sender.path[next_hop].get_cur_latency(
self.cur_time)
# if USE_LATENCY_NOISE:
# link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
# link_latency += self.env.current_trace.get_delay_noise(self.cur_time) / 1000
# link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
new_latency += link_latency
new_event_time += link_latency
new_dropped = not sender.path[next_hop].packet_enters_link(
self.cur_time)
if not new_dropped:
sender.queue_delay_samples.append(new_event_queue_delay)
if push_new_event:
heapq.heappush(self.q, (new_event_time, sender, new_event_type,
new_next_hop, new_latency, new_dropped,
event_id, rto, new_event_queue_delay))
for sender in self.senders:
sender.record_run()
sender_mi = self.senders[0].get_run_data()
throughput = sender_mi.get("recv rate") # bits/sec
latency = sender_mi.get("avg latency") # second
loss = sender_mi.get("loss ratio")
debug_print("thpt %f, delay %f, loss %f, bytes sent %f, bytes acked %f" % (
throughput/1e6, latency, loss, sender_mi.bytes_sent, sender_mi.bytes_acked))
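        # Throughput and link capacity are converted from bits/sec to
        # packets/sec before being passed to pcc_aurora_reward.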
reward = pcc_aurora_reward(
throughput / 8 / BYTES_PER_PACKET, latency, loss,
np.mean(self.env.current_trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET)
if latency > 0.0:
self.env.run_dur = MI_RTT_PROPORTION * sender_mi.get("avg latency") + (1 / self.links[0].get_bandwidth(self.cur_time))
# self.env.run_dur = max(MI_RTT_PROPORTION * sender_mi.get("avg latency"), 5 * (1 / self.senders[0].rate))
# print(self.env.run_dur)
return reward * REWARD_SCALE
class Sender():
def __init__(self, rate, path, dest, features, cwnd=25, history_len=10,
delta_scale=1):
self.id = Sender._get_next_id()
self.delta_scale = delta_scale
self.starting_rate = rate
self.rate = rate
self.sent = 0
self.acked = 0
self.lost = 0
self.bytes_in_flight = 0
self.min_latency = None
self.rtt_samples = []
self.queue_delay_samples = []
self.prev_rtt_samples = self.rtt_samples
self.sample_time = []
self.net = None
self.path = path
self.dest = dest
self.history_len = history_len
self.features = features
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
self.cwnd = cwnd
self.use_cwnd = False
self.rto = -1
self.ssthresh = 0
self.pkt_loss_wait_time = -1
self.estRTT = 1000000 / 1e6 # SynInterval in emulation
self.RTTVar = self.estRTT / 2 # RTT variance
# self.got_data = False
_next_id = 1
def _get_next_id():
result = Sender._next_id
Sender._next_id += 1
return result
def apply_rate_delta(self, delta):
# if self.got_data:
delta *= self.delta_scale
#print("Applying delta %f" % delta)
if delta >= 0.0:
self.set_rate(self.rate * (1.0 + delta))
else:
self.set_rate(self.rate / (1.0 - delta))
def apply_cwnd_delta(self, delta):
delta *= self.delta_scale
#print("Applying delta %f" % delta)
if delta >= 0.0:
self.set_cwnd(self.cwnd * (1.0 + delta))
else:
self.set_cwnd(self.cwnd / (1.0 - delta))
def can_send_packet(self):
if self.use_cwnd:
return int(self.bytes_in_flight) / BYTES_PER_PACKET < self.cwnd
else:
return True
def register_network(self, net):
self.net = net
def on_packet_sent(self):
self.sent += 1
self.bytes_in_flight += BYTES_PER_PACKET
def on_packet_acked(self, rtt):
self.estRTT = (7.0 * self.estRTT + rtt) / 8.0 # RTT of emulation way
self.RTTVar = (self.RTTVar * 7.0 + abs(rtt - self.estRTT) * 1.0) / 8.0
self.acked += 1
self.rtt_samples.append(rtt)
# self.rtt_samples.append(self.estRTT)
if (self.min_latency is None) or (rtt < self.min_latency):
self.min_latency = rtt
self.bytes_in_flight -= BYTES_PER_PACKET
def on_packet_lost(self, rtt):
self.lost += 1
self.bytes_in_flight -= BYTES_PER_PACKET
def set_rate(self, new_rate):
self.rate = new_rate
# print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
if self.rate > MAX_RATE:
self.rate = MAX_RATE
if self.rate < MIN_RATE:
self.rate = MIN_RATE
def set_cwnd(self, new_cwnd):
self.cwnd = int(new_cwnd)
#print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
# if self.cwnd > MAX_CWND:
# self.cwnd = MAX_CWND
# if self.cwnd < MIN_CWND:
# self.cwnd = MIN_CWND
def record_run(self):
smi = self.get_run_data()
# if not self.got_data and smi.rtt_samples:
# self.got_data = True
# self.history.step(smi)
# else:
self.history.step(smi)
def get_obs(self):
return self.history.as_array()
def get_run_data(self):
obs_end_time = self.net.get_cur_time()
#obs_dur = obs_end_time - self.obs_start_time
#print("Got %d acks in %f seconds" % (self.acked, obs_dur))
#print("Sent %d packets in %f seconds" % (self.sent, obs_dur))
#print("self.rate = %f" % self.rate)
# print(self.acked, self.sent)
rtt_samples = self.rtt_samples if self.rtt_samples else self.prev_rtt_samples
# if not self.rtt_samples:
# print(self.obs_start_time, obs_end_time, self.rate)
# rtt_samples is empty when there is no packet acked in MI
# Solution: inherit from previous rtt_samples.
return sender_obs.SenderMonitorInterval(
self.id,
bytes_sent=self.sent * BYTES_PER_PACKET,
bytes_acked=self.acked * BYTES_PER_PACKET,
bytes_lost=self.lost * BYTES_PER_PACKET,
send_start=self.obs_start_time,
send_end=obs_end_time,
recv_start=self.obs_start_time,
recv_end=obs_end_time,
            rtt_samples=rtt_samples,  # use the fallback computed above when no packet was acked in this MI
queue_delay_samples=self.queue_delay_samples,
packet_size=BYTES_PER_PACKET
)
def reset_obs(self):
self.sent = 0
self.acked = 0
self.lost = 0
if self.rtt_samples:
self.prev_rtt_samples = self.rtt_samples
self.rtt_samples = []
self.queue_delay_samples = []
self.obs_start_time = self.net.get_cur_time()
def print_debug(self):
print("Sender:")
print("Obs: %s" % str(self.get_obs()))
print("Rate: %f" % self.rate)
print("Sent: %d" % self.sent)
print("Acked: %d" % self.acked)
print("Lost: %d" % self.lost)
print("Min Latency: %s" % str(self.min_latency))
def reset(self):
#print("Resetting sender!")
self.rate = self.starting_rate
self.bytes_in_flight = 0
self.min_latency = None
self.reset_obs()
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
self.estRTT = 1000000 / 1e6 # SynInterval in emulation
self.RTTVar = self.estRTT / 2 # RTT variance
# self.got_data = False
def timeout(self):
# placeholder
pass
class SimulatedNetworkEnv(gym.Env):
def __init__(self, traces, history_len=10,
features="sent latency inflation,latency ratio,send ratio",
congestion_control_type="aurora", train_flag=False,
delta_scale=1.0):
"""Network environment used in simulation.
        congestion_control_type: 'aurora' is pcc-rl (Aurora); 'cubic' is TCPCubic.
"""
assert congestion_control_type in {"aurora", "cubic"}, \
"Unrecognized congestion_control_type {}.".format(
congestion_control_type)
# self.replay = EmuReplay()
self.delta_scale = delta_scale
self.traces = traces
self.current_trace = np.random.choice(self.traces)
self.train_flag = train_flag
self.congestion_control_type = congestion_control_type
if self.congestion_control_type == 'aurora':
self.use_cwnd = False
elif self.congestion_control_type == 'cubic':
self.use_cwnd = True
self.history_len = history_len
# print("History length: %d" % history_len)
self.features = features.split(",")
# print("Features: %s" % str(self.features))
self.links = None
self.senders = None
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, self)
self.run_dur = None
self.run_period = 0.1
self.steps_taken = 0
self.debug_thpt_changes = False
self.last_thpt = None
self.last_rate = None
if self.use_cwnd:
self.action_space = spaces.Box(
np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32)
else:
self.action_space = spaces.Box(
np.array([-1e12]), np.array([1e12]), dtype=np.float32)
self.observation_space = None
# use_only_scale_free = True
single_obs_min_vec = sender_obs.get_min_obs_vector(self.features)
single_obs_max_vec = sender_obs.get_max_obs_vector(self.features)
self.observation_space = spaces.Box(np.tile(single_obs_min_vec, self.history_len),
np.tile(single_obs_max_vec,
self.history_len),
dtype=np.float32)
self.reward_sum = 0.0
self.reward_ewma = 0.0
self.episodes_run = -1
def seed(self, seed=None):
self.rand, seed = seeding.np_random(seed)
return [seed]
def _get_all_sender_obs(self):
sender_obs = self.senders[0].get_obs()
sender_obs = np.array(sender_obs).reshape(-1,)
return sender_obs
def step(self, actions):
#print("Actions: %s" % str(actions))
# print(actions)
for i in range(0, 1): # len(actions)):
#print("Updating rate for sender %d" % i)
action = actions
self.senders[i].apply_rate_delta(action[0])
if self.use_cwnd:
self.senders[i].apply_cwnd_delta(action[1])
# print("Running for %fs" % self.run_dur)
reward = self.net.run_for_dur(self.run_dur, action=actions[0])
self.steps_taken += 1
sender_obs = self._get_all_sender_obs()
should_stop = self.current_trace.is_finished(self.net.get_cur_time())
self.reward_sum += reward
# print('env step: {}s'.format(time.time() - t_start))
return sender_obs, reward, should_stop, {}
def print_debug(self):
print("---Link Debug---")
for link in self.links:
link.print_debug()
print("---Sender Debug---")
for sender in self.senders:
sender.print_debug()
def create_new_links_and_senders(self):
# self.replay.reset()
self.links = [Link(self.current_trace), Link(self.current_trace)]
if self.congestion_control_type == "aurora":
if not self.train_flag:
self.senders = [Sender( #self.replay.get_rate(),
# 2500000 / 8 /BYTES_PER_PACKET / 0.048,
# 12000000 / 8 /BYTES_PER_PACKET / 0.048,
# 10 / (self.current_trace.get_delay(0) *2/1000),
100,
[self.links[0], self.links[1]], 0,
self.features,
history_len=self.history_len,
delta_scale=self.delta_scale)]
else:
# self.senders = [Sender(random.uniform(0.3, 1.5) * bw,
# [self.links[0], self.links[1]], 0,
# self.features,
# history_len=self.history_len)]
# self.senders = [Sender(random.uniform(10/bw, 1.5) * bw,
# [self.links[0], self.links[1]], 0,
# self.features,
# history_len=self.history_len,
# delta_scale=self.delta_scale)]
self.senders = [Sender(100,
[self.links[0], self.links[1]], 0,
self.features,
history_len=self.history_len,
delta_scale=self.delta_scale)]
elif self.congestion_control_type == "cubic":
raise NotImplementedError
else:
raise RuntimeError("Unrecognized congestion_control_type {}".format(
self.congestion_control_type))
# self.run_dur = 3 * lat
# self.run_dur = 1 * lat
if not self.senders[0].rtt_samples:
# self.run_dur = 0.473
# self.run_dur = 5 / self.senders[0].rate
self.run_dur = 0.01
# self.run_dur = self.current_trace.get_delay(0) * 2 / 1000
# self.run_dur = self.replay.get_ts() - 0
def reset(self):
self.steps_taken = 0
self.net.reset()
self.current_trace = np.random.choice(self.traces)
self.current_trace.reset()
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, self)
self.episodes_run += 1
# self.replay.reset()
self.net.run_for_dur(self.run_dur)
self.reward_ewma *= 0.99
self.reward_ewma += 0.01 * self.reward_sum
# print("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma))
self.reward_sum = 0.0
return self._get_all_sender_obs()
register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
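# --- Hedged illustration (not part of the original module): the asymmetric rate update
# implemented by Sender.apply_rate_delta above. A positive delta multiplies the rate by
# (1 + delta); a negative delta divides it by (1 - delta), so +d and -d are exact
# inverse moves around the current rate.
def _rate_after_delta(rate, delta, delta_scale=1.0):
    delta *= delta_scale
    return rate * (1.0 + delta) if delta >= 0.0 else rate / (1.0 - delta)
# e.g. starting from 100 packets/sec, a delta of +0.25 gives 125.0 and a subsequent
# delta of -0.25 returns exactly 100.0:
#   _rate_after_delta(_rate_after_delta(100, 0.25), -0.25) == 100.0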
|
[
"warnings.simplefilter",
"heapq.heappush",
"math.ceil",
"pandas.read_csv",
"common.sender_obs.SenderMonitorInterval",
"heapq.heappop",
"random.random",
"common.sender_obs.SenderHistory",
"common.sender_obs.get_min_obs_vector",
"numpy.array",
"numpy.tile",
"numpy.mean",
"numpy.random.choice",
"common.sender_obs.get_max_obs_vector",
"gym.envs.registration.register",
"gym.utils.seeding.np_random"
] |
[((680, 740), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'UserWarning'}), "(action='ignore', category=UserWarning)\n", (701, 740), False, 'import warnings\n'), ((24695, 24771), 'gym.envs.registration.register', 'register', ([], {'id': '"""PccNs-v0"""', 'entry_point': '"""simulator.network:SimulatedNetworkEnv"""'}), "(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')\n", (24703, 24771), False, 'from gym.envs.registration import register\n'), ((1506, 1545), 'pandas.read_csv', 'pd.read_csv', (['"""aurora_emulation_log.csv"""'], {}), "('aurora_emulation_log.csv')\n", (1517, 1545), True, 'import pandas as pd\n'), ((12785, 12851), 'common.sender_obs.SenderHistory', 'sender_obs.SenderHistory', (['self.history_len', 'self.features', 'self.id'], {}), '(self.history_len, self.features, self.id)\n', (12809, 12851), False, 'from common import sender_obs\n'), ((16345, 16750), 'common.sender_obs.SenderMonitorInterval', 'sender_obs.SenderMonitorInterval', (['self.id'], {'bytes_sent': '(self.sent * BYTES_PER_PACKET)', 'bytes_acked': '(self.acked * BYTES_PER_PACKET)', 'bytes_lost': '(self.lost * BYTES_PER_PACKET)', 'send_start': 'self.obs_start_time', 'send_end': 'obs_end_time', 'recv_start': 'self.obs_start_time', 'recv_end': 'obs_end_time', 'rtt_samples': 'self.rtt_samples', 'queue_delay_samples': 'self.queue_delay_samples', 'packet_size': 'BYTES_PER_PACKET'}), '(self.id, bytes_sent=self.sent *\n BYTES_PER_PACKET, bytes_acked=self.acked * BYTES_PER_PACKET, bytes_lost\n =self.lost * BYTES_PER_PACKET, send_start=self.obs_start_time, send_end\n =obs_end_time, recv_start=self.obs_start_time, recv_end=obs_end_time,\n rtt_samples=self.rtt_samples, queue_delay_samples=self.\n queue_delay_samples, packet_size=BYTES_PER_PACKET)\n', (16377, 16750), False, 'from common import sender_obs\n'), ((17688, 17754), 'common.sender_obs.SenderHistory', 'sender_obs.SenderHistory', (['self.history_len', 'self.features', 'self.id'], {}), '(self.history_len, self.features, self.id)\n', (17712, 17754), False, 'from common import sender_obs\n'), ((18715, 18744), 'numpy.random.choice', 'np.random.choice', (['self.traces'], {}), '(self.traces)\n', (18731, 18744), True, 'import numpy as np\n'), ((19942, 19986), 'common.sender_obs.get_min_obs_vector', 'sender_obs.get_min_obs_vector', (['self.features'], {}), '(self.features)\n', (19971, 19986), False, 'from common import sender_obs\n'), ((20016, 20060), 'common.sender_obs.get_max_obs_vector', 'sender_obs.get_max_obs_vector', (['self.features'], {}), '(self.features)\n', (20045, 20060), False, 'from common import sender_obs\n'), ((20509, 20532), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (20526, 20532), False, 'from gym.utils import seeding\n'), ((24174, 24203), 'numpy.random.choice', 'np.random.choice', (['self.traces'], {}), '(self.traces)\n', (24190, 24203), True, 'import numpy as np\n'), ((2522, 2550), 'math.ceil', 'math.ceil', (['self.pkt_in_queue'], {}), '(self.pkt_in_queue)\n', (2531, 2550), False, 'import math\n'), ((2895, 2910), 'random.random', 'random.random', ([], {}), '()\n', (2908, 2910), False, 'import random\n'), ((4575, 4680), 'heapq.heappush', 'heapq.heappush', (['self.q', '(0, sender, EVENT_TYPE_SEND, 0, 0.0, False, self.event_count, sender.rto, 0)'], {}), '(self.q, (0, sender, EVENT_TYPE_SEND, 0, 0.0, False, self.\n event_count, sender.rto, 0))\n', (4589, 4680), False, 'import heapq\n'), ((5914, 5935), 'heapq.heappop', 'heapq.heappop', (['self.q'], {}), 
'(self.q)\n', (5927, 5935), False, 'import heapq\n'), ((20105, 20150), 'numpy.tile', 'np.tile', (['single_obs_min_vec', 'self.history_len'], {}), '(single_obs_min_vec, self.history_len)\n', (20112, 20150), True, 'import numpy as np\n'), ((20196, 20241), 'numpy.tile', 'np.tile', (['single_obs_max_vec', 'self.history_len'], {}), '(single_obs_max_vec, self.history_len)\n', (20203, 20241), True, 'import numpy as np\n'), ((3105, 3133), 'math.ceil', 'math.ceil', (['self.pkt_in_queue'], {}), '(self.pkt_in_queue)\n', (3114, 3133), False, 'import math\n'), ((10865, 11015), 'heapq.heappush', 'heapq.heappush', (['self.q', '(new_event_time, sender, new_event_type, new_next_hop, new_latency,\n new_dropped, event_id, rto, new_event_queue_delay)'], {}), '(self.q, (new_event_time, sender, new_event_type,\n new_next_hop, new_latency, new_dropped, event_id, rto,\n new_event_queue_delay))\n', (10879, 11015), False, 'import heapq\n'), ((19640, 19686), 'numpy.array', 'np.array', (['[-1000000000000.0, -1000000000000.0]'], {}), '([-1000000000000.0, -1000000000000.0])\n', (19648, 19686), True, 'import numpy as np\n'), ((19666, 19710), 'numpy.array', 'np.array', (['[1000000000000.0, 1000000000000.0]'], {}), '([1000000000000.0, 1000000000000.0])\n', (19674, 19710), True, 'import numpy as np\n'), ((19782, 19810), 'numpy.array', 'np.array', (['[-1000000000000.0]'], {}), '([-1000000000000.0])\n', (19790, 19810), True, 'import numpy as np\n'), ((19801, 19828), 'numpy.array', 'np.array', (['[1000000000000.0]'], {}), '([1000000000000.0])\n', (19809, 19828), True, 'import numpy as np\n'), ((20659, 20679), 'numpy.array', 'np.array', (['sender_obs'], {}), '(sender_obs)\n', (20667, 20679), True, 'import numpy as np\n'), ((9454, 9590), 'heapq.heappush', 'heapq.heappush', (['self.q', '(self.cur_time + 1.0 / sender.rate, sender, EVENT_TYPE_SEND, 0, 0.0, False,\n self.event_count, sender.rto, 0)'], {}), '(self.q, (self.cur_time + 1.0 / sender.rate, sender,\n EVENT_TYPE_SEND, 0, 0.0, False, self.event_count, sender.rto, 0))\n', (9468, 9590), False, 'import heapq\n'), ((11650, 11692), 'numpy.mean', 'np.mean', (['self.env.current_trace.bandwidths'], {}), '(self.env.current_trace.bandwidths)\n', (11657, 11692), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
# https://www.kaggle.com/marketneutral/purged-time-series-cv-xgboost-optuna/data
# modified code for group gaps; source
# https://github.com/getgaurav2/scikit-learn/blob/d4a3af5cc9da3a76f0266932644b884c99724c57/sklearn/model_selection/_split.py#L2243
class PurgedGroupTimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
    group_gap : int, default=None
        Number of groups purged between the end of the training groups and
        the start of the test groups.
    max_test_group_size : int, default=Inf
        Maximum group size for a single test set.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate((train_array,
train_array_tmp)),
axis=None), axis=None)
# train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate((test_array,
test_array_tmp)),
axis=None), axis=None)
# test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in test_array]
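# --- Hedged usage sketch (not part of the original source): drives the purged,
# group-aware splitter above on a tiny made-up dataset. The array sizes, n_splits and
# group_gap values are illustrative assumptions only.
if __name__ == "__main__":
    X = np.arange(20).reshape(20, 1)           # 20 samples, 1 feature
    y = np.zeros(20)
    groups = np.repeat(np.arange(10), 2)       # 10 ordered groups, 2 samples each
    cv = PurgedGroupTimeSeriesSplit(n_splits=3, group_gap=1)
    for fold, (train_idx, test_idx) in enumerate(cv.split(X, y, groups=groups)):
        print(f"fold {fold}: train groups {np.unique(groups[train_idx])}, "
              f"test groups {np.unique(groups[test_idx])}")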
|
[
"sklearn.model_selection._split.indexable",
"numpy.concatenate",
"sklearn.model_selection._split._num_samples",
"numpy.argsort",
"numpy.arange",
"numpy.unique"
] |
[((3291, 3314), 'sklearn.model_selection._split.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (3300, 3314), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3335, 3350), 'sklearn.model_selection._split._num_samples', '_num_samples', (['X'], {}), '(X)\n', (3347, 3350), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3603, 3639), 'numpy.unique', 'np.unique', (['groups'], {'return_index': '(True)'}), '(groups, return_index=True)\n', (3612, 3639), True, 'import numpy as np\n'), ((3703, 3718), 'sklearn.model_selection._split._num_samples', '_num_samples', (['X'], {}), '(X)\n', (3715, 3718), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3738, 3765), 'sklearn.model_selection._split._num_samples', '_num_samples', (['unique_groups'], {}), '(unique_groups)\n', (3750, 3765), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3785, 3805), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3794, 3805), True, 'import numpy as np\n'), ((3666, 3681), 'numpy.argsort', 'np.argsort', (['ind'], {}), '(ind)\n', (3676, 3681), True, 'import numpy as np\n'), ((4860, 4906), 'numpy.concatenate', 'np.concatenate', (['(train_array, train_array_tmp)'], {}), '((train_array, train_array_tmp))\n', (4874, 4906), True, 'import numpy as np\n'), ((5421, 5465), 'numpy.concatenate', 'np.concatenate', (['(test_array, test_array_tmp)'], {}), '((test_array, test_array_tmp))\n', (5435, 5465), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 08:54:07 2018
@author: bwhe
"""
import ast
import numpy as np
import pandas as pd
import gc
import lightgbm as lgb
import pickle
import time
import w2v
from itertools import repeat
def remove_iteral(sentence):
return ast.literal_eval(sentence)
readfile = './cf2xgb_pred1_v1.csv.gz'
savefile = 'cf2xgb_1_pred1_v2.csv.gz'
df_test = pd.read_csv(readfile, usecols=['pid', 'pred', 'scores'], nrows=10)
df_test = df_test.rename(columns={'scores': 'cf_prob'})
w2v_test_pids = df_test.pid.unique()
# save for later prediction
final_df_test = df_test[['pid']]
df_test['pred'] = df_test['pred'].apply(remove_iteral)
df_test['cf_prob'] = df_test['cf_prob'].apply(remove_iteral)
''' explode the per-playlist lists into one row per (pid, track) '''
result_test = pd.DataFrame([(tup.pid, pred, cf_prob) for tup in df_test.itertuples()
for pred, cf_prob in zip(tup.pred, tup.cf_prob)])
result_test = result_test.fillna(0)
result_test.columns = ['pid', 'track_uri', 'cf_prob']
del df_test
gc.collect()
''' add relative cf probability score '''
def add_avg_cfscore(data):
tmp = data.groupby('pid')['cf_prob'].mean().reset_index().rename(columns={'cf_prob':'cf_avg_prob'})
data = data.merge(tmp, left_on=['pid'], right_on=['pid'], how='left')
data['cf_rlt_prob'] = data['cf_prob'] / data['cf_avg_prob']
data = data.drop(['cf_avg_prob'], axis=1)
return data
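# --- Hedged illustration (not part of the original pipeline, safe to remove): what the
# relative-score transform above does on a tiny made-up frame -- each row's cf_prob is
# divided by the mean cf_prob of its playlist (pid).
_demo = pd.DataFrame({'pid': [1, 1, 2, 2], 'cf_prob': [0.2, 0.6, 0.1, 0.1]})
print(add_avg_cfscore(_demo))
# pid 1 rows get cf_rlt_prob = 0.5 and 1.5 (0.2/0.4 and 0.6/0.4); pid 2 rows get 1.0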
start_time = time.time()
result_test = add_avg_cfscore(result_test)
print("average score --- %s seconds ---" % (time.time() - start_time))
''' add song frequency '''
start_time = time.time()
songfreq = pd.read_csv('../data/songfreq.csv.gz')
result_test = result_test.merge(songfreq, left_on=['track_uri'], right_on=['track_uri'], how='left')
print("songfreq merge --- %s seconds ---" % (time.time() - start_time))
''' add album uri '''
start_time = time.time()
with open('../data/song2album.pkl', 'rb') as f:
song2album = pickle.load(f)
tracks_test = result_test['track_uri'].values
album_uri = [song2album[track] for track in tracks_test]
result_test['album_uri'] = album_uri
print("add album uri --- %s seconds ---" % (time.time() - start_time))
del album_uri, song2album
gc.collect()
''' add artist uri '''
start_time = time.time()
with open('../data/song2artist.pkl', 'rb') as f:
song2artist = pickle.load(f)
artist_uri_test = [song2artist[track] for track in tracks_test]
result_test['artist_uri'] = artist_uri_test
print("add artist uri --- %s seconds ---" % (time.time() - start_time))
del artist_uri_test, song2artist
gc.collect()
pids_test = result_test['pid']
''' add similarity between playlist name and track name '''
with open('../data/song2names.pkl', 'rb') as f:
song2names = pickle.load(f)
song_names_test = [song2names[track] for track in tracks_test]
del song2names
gc.collect()
with open('../data/test_pid2more_clean_name.pkl', 'rb') as f:
pid2names = pickle.load(f)
pid_names_test = [pid2names[pid] for pid in pids_test]
del pid2names
gc.collect()
from difflib import SequenceMatcher
def similar(var):
a = var[0]
b = var[1]
a = str(a).lower()
b = str(b).lower()
return SequenceMatcher(None, a, b).ratio()
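# --- Hedged sanity check (not part of the original pipeline, safe to remove): the
# made-up pairs below illustrate that `similar` lower-cases both names before taking
# SequenceMatcher's ratio in [0, 1].
print(similar(("Workout Hits", "workout hits")))   # 1.0 -- identical after lowercasing
print(similar(("Workout Hits", "Road Trip")))      # strictly below 1.0 for different names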
#name_sim = [similar(str(a).lower(), str(b)) for a, b in zip(song_names, pid_names)]
start_time = time.time()
name_sim_test = list(map(similar, zip(song_names_test, pid_names_test)))
result_test['name_sim'] = name_sim_test
print("calculate track name similarity --- %s seconds ---" % (time.time() - start_time))
''' add similarity between playlist name and album name '''
with open('../data/song2album_name.pkl', 'rb') as f:
song2album_names = pickle.load(f)
album_names_test = [song2album_names[track] for track in tracks_test]
start_time = time.time()
album_sim_test = list(map(similar, zip(album_names_test, pid_names_test)))
result_test['album_sim'] = album_sim_test
print("calculate album similarity --- %s seconds ---" % (time.time() - start_time))
del song2album_names, album_names_test
gc.collect()
''' add similarity between playlist name and artist name '''
start_time = time.time()
with open('../data/song2artist_name.pkl', 'rb') as f:
song2artist_names = pickle.load(f)
artist_names_test = [song2artist_names[track] for track in tracks_test]
artist_sim_test = list(map(similar, zip(artist_names_test, pid_names_test)))
result_test['artist_sim'] = artist_sim_test
print("calculate artist name similarity --- %s seconds ---" % (time.time() - start_time))
del song2artist_names, artist_names_test
gc.collect()
''' add similarity '''
from gensim.models import Word2Vec
w2v.build_track_w2v(w2v_test_pids)
w2v.build_album_w2v(w2v_test_pids)
w2v.build_artist_w2v(w2v_test_pids)
model1 = Word2Vec.load('../data/w2v_model1.bin')
model2 = Word2Vec.load('../data/w2v_model2.bin')
model3 = Word2Vec.load('../data/w2v_model3.bin')
with open('../data/song2album.pkl', 'rb') as f:
song2album = pickle.load(f)
with open('../data/song2artist.pkl', 'rb') as f:
song2artist = pickle.load(f)
def remove_iteral(sentence):
return ast.literal_eval(sentence)
df = pd.read_csv(readfile, usecols=['pid','pos_songs'], nrows=None)
df['pos_songs'] = df['pos_songs'].apply(remove_iteral)
result_test = result_test.merge(df, left_on=['pid'], right_on=['pid'], how='left')
def track_sim(var):
pos_song = var[0]
track = var[1]
try:
return model1.wv.similarity(str(pos_song), str(track))
except:
return 0
def album_sim(var):
pos_song = var[0]
track = var[1]
try:
return model2.wv.similarity(str(song2album[pos_song]), str(song2album[track]))
except:
return 0
def artist_sim(var):
pos_song = var[0]
track = var[1]
try:
return model3.wv.similarity(str(song2artist[pos_song]), str(song2artist[track]))
except:
return 0
def w2v_sim(tup):
pos_songs = tup.pos_songs
track = tup.track_uri
track_scores = list(map(track_sim, zip(pos_songs, repeat(track))))
album_scores = list(map(album_sim, zip(pos_songs, repeat(track))))
artist_scores = list(map(artist_sim, zip(pos_songs, repeat(track))))
return np.mean(track_scores), np.mean(album_scores), np.mean(artist_scores)
def add_w2v_sim(data):
track_w2v_sim_arr = []
album_w2v_sim_arr = []
artist_w2v_sim_arr = []
for tup in data.itertuples():
track_score, album_score, artist_score = w2v_sim(tup)
track_w2v_sim_arr.append(track_score)
album_w2v_sim_arr.append(album_score)
artist_w2v_sim_arr.append(artist_score)
data['w2v_track_sim'] = track_w2v_sim_arr
data['w2v_album_sim'] = album_w2v_sim_arr
data['w2v_artist_sim'] = artist_w2v_sim_arr
return data
start_time = time.time()
result_test = add_w2v_sim(result_test)
print("track w2v similarity --- %s seconds ---" % (time.time() - start_time))
result_test.to_csv(savefile, index=False, compression='gzip')
|
[
"pandas.read_csv",
"time.time",
"w2v.build_artist_w2v",
"gc.collect",
"difflib.SequenceMatcher",
"pickle.load",
"numpy.mean",
"w2v.build_album_w2v",
"ast.literal_eval",
"gensim.models.Word2Vec.load",
"w2v.build_track_w2v",
"itertools.repeat"
] |
[((419, 485), 'pandas.read_csv', 'pd.read_csv', (['readfile'], {'usecols': "['pid', 'pred', 'scores']", 'nrows': '(10)'}), "(readfile, usecols=['pid', 'pred', 'scores'], nrows=10)\n", (430, 485), True, 'import pandas as pd\n'), ((1086, 1098), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1096, 1098), False, 'import gc\n'), ((1495, 1506), 'time.time', 'time.time', ([], {}), '()\n', (1504, 1506), False, 'import time\n'), ((1667, 1678), 'time.time', 'time.time', ([], {}), '()\n', (1676, 1678), False, 'import time\n'), ((1691, 1729), 'pandas.read_csv', 'pd.read_csv', (['"""../data/songfreq.csv.gz"""'], {}), "('../data/songfreq.csv.gz')\n", (1702, 1729), True, 'import pandas as pd\n'), ((1946, 1957), 'time.time', 'time.time', ([], {}), '()\n', (1955, 1957), False, 'import time\n'), ((2285, 2297), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2295, 2297), False, 'import gc\n'), ((2337, 2348), 'time.time', 'time.time', ([], {}), '()\n', (2346, 2348), False, 'import time\n'), ((2653, 2665), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2663, 2665), False, 'import gc\n'), ((2926, 2938), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2936, 2938), False, 'import gc\n'), ((3110, 3122), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3120, 3122), False, 'import gc\n'), ((3411, 3422), 'time.time', 'time.time', ([], {}), '()\n', (3420, 3422), False, 'import time\n'), ((3873, 3884), 'time.time', 'time.time', ([], {}), '()\n', (3882, 3884), False, 'import time\n'), ((4132, 4144), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4142, 4144), False, 'import gc\n'), ((4223, 4234), 'time.time', 'time.time', ([], {}), '()\n', (4232, 4234), False, 'import time\n'), ((4662, 4674), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4672, 4674), False, 'import gc\n'), ((4740, 4774), 'w2v.build_track_w2v', 'w2v.build_track_w2v', (['w2v_test_pids'], {}), '(w2v_test_pids)\n', (4759, 4774), False, 'import w2v\n'), ((4776, 4810), 'w2v.build_album_w2v', 'w2v.build_album_w2v', (['w2v_test_pids'], {}), '(w2v_test_pids)\n', (4795, 4810), False, 'import w2v\n'), ((4812, 4847), 'w2v.build_artist_w2v', 'w2v.build_artist_w2v', (['w2v_test_pids'], {}), '(w2v_test_pids)\n', (4832, 4847), False, 'import w2v\n'), ((4860, 4899), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""../data/w2v_model1.bin"""'], {}), "('../data/w2v_model1.bin')\n", (4873, 4899), False, 'from gensim.models import Word2Vec\n'), ((4910, 4949), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""../data/w2v_model2.bin"""'], {}), "('../data/w2v_model2.bin')\n", (4923, 4949), False, 'from gensim.models import Word2Vec\n'), ((4960, 4999), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""../data/w2v_model3.bin"""'], {}), "('../data/w2v_model3.bin')\n", (4973, 4999), False, 'from gensim.models import Word2Vec\n'), ((5262, 5325), 'pandas.read_csv', 'pd.read_csv', (['readfile'], {'usecols': "['pid', 'pos_songs']", 'nrows': 'None'}), "(readfile, usecols=['pid', 'pos_songs'], nrows=None)\n", (5273, 5325), True, 'import pandas as pd\n'), ((6938, 6949), 'time.time', 'time.time', ([], {}), '()\n', (6947, 6949), False, 'import time\n'), ((298, 324), 'ast.literal_eval', 'ast.literal_eval', (['sentence'], {}), '(sentence)\n', (314, 324), False, 'import ast\n'), ((2025, 2039), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2036, 2039), False, 'import pickle\n'), ((2418, 2432), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2429, 2432), False, 'import pickle\n'), ((2830, 2844), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2841, 2844), False, 'import 
pickle\n'), ((3023, 3037), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3034, 3037), False, 'import pickle\n'), ((3771, 3785), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3782, 3785), False, 'import pickle\n'), ((4315, 4329), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4326, 4329), False, 'import pickle\n'), ((5075, 5089), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5086, 5089), False, 'import pickle\n'), ((5165, 5179), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5176, 5179), False, 'import pickle\n'), ((5224, 5250), 'ast.literal_eval', 'ast.literal_eval', (['sentence'], {}), '(sentence)\n', (5240, 5250), False, 'import ast\n'), ((6341, 6362), 'numpy.mean', 'np.mean', (['track_scores'], {}), '(track_scores)\n', (6348, 6362), True, 'import numpy as np\n'), ((6364, 6385), 'numpy.mean', 'np.mean', (['album_scores'], {}), '(album_scores)\n', (6371, 6385), True, 'import numpy as np\n'), ((6387, 6409), 'numpy.mean', 'np.mean', (['artist_scores'], {}), '(artist_scores)\n', (6394, 6409), True, 'import numpy as np\n'), ((1596, 1607), 'time.time', 'time.time', ([], {}), '()\n', (1605, 1607), False, 'import time\n'), ((1878, 1889), 'time.time', 'time.time', ([], {}), '()\n', (1887, 1889), False, 'import time\n'), ((2228, 2239), 'time.time', 'time.time', ([], {}), '()\n', (2237, 2239), False, 'import time\n'), ((2589, 2600), 'time.time', 'time.time', ([], {}), '()\n', (2598, 2600), False, 'import time\n'), ((3275, 3302), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'a', 'b'], {}), '(None, a, b)\n', (3290, 3302), False, 'from difflib import SequenceMatcher\n'), ((3601, 3612), 'time.time', 'time.time', ([], {}), '()\n', (3610, 3612), False, 'import time\n'), ((4062, 4073), 'time.time', 'time.time', ([], {}), '()\n', (4071, 4073), False, 'import time\n'), ((4590, 4601), 'time.time', 'time.time', ([], {}), '()\n', (4599, 4601), False, 'import time\n'), ((7042, 7053), 'time.time', 'time.time', ([], {}), '()\n', (7051, 7053), False, 'import time\n'), ((6166, 6179), 'itertools.repeat', 'repeat', (['track'], {}), '(track)\n', (6172, 6179), False, 'from itertools import repeat\n'), ((6238, 6251), 'itertools.repeat', 'repeat', (['track'], {}), '(track)\n', (6244, 6251), False, 'from itertools import repeat\n'), ((6312, 6325), 'itertools.repeat', 'repeat', (['track'], {}), '(track)\n', (6318, 6325), False, 'from itertools import repeat\n')]
|
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
import torch
import numpy as np
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from ...registry import BACKBONES
from ..utils.resnet_r3d_utils import *
class BasicBlock(nn.Module):
def __init__(self,
input_filters,
num_filters,
base_filters,
down_sampling=False,
down_sampling_temporal=None,
block_type='3d',
is_real_3d=True,
group=1,
with_bn=True):
super(BasicBlock, self).__init__()
self.num_filters = num_filters
self.base_filters = base_filters
self.input_filters = input_filters
self.with_bn = with_bn
if self.with_bn:
conv3d = conv3d_wobias
else:
conv3d = conv3d_wbias
if block_type == '2.5d':
assert is_real_3d
if down_sampling_temporal is None:
down_sampling_temporal = down_sampling
if down_sampling:
if is_real_3d and down_sampling_temporal:
self.down_sampling_stride = [2, 2, 2]
else:
self.down_sampling_stride = [1, 2, 2]
else:
self.down_sampling_stride = [1, 1, 1]
self.down_sampling = down_sampling
self.relu = nn.ReLU()
self.conv1 = add_conv3d(input_filters, num_filters,
kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],
stride=self.down_sampling_stride,
pad=[1, 1, 1] if is_real_3d else [0, 1, 1],
block_type=block_type, with_bn=self.with_bn)
if self.with_bn:
self.bn1 = add_bn(num_filters)
self.conv2 = add_conv3d(num_filters, num_filters,
kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],
stride=[1, 1, 1],
pad=[1, 1, 1] if is_real_3d else [0, 1, 1],
block_type=block_type, with_bn=self.with_bn)
if self.with_bn:
self.bn2 = add_bn(num_filters)
if num_filters != input_filters or down_sampling:
self.conv3 = conv3d(input_filters, num_filters, kernel=[1, 1, 1],
stride=self.down_sampling_stride, pad=[0, 0, 0])
if self.with_bn:
self.bn3 = nn.BatchNorm3d(num_filters, eps=1e-3)
def forward(self, x):
identity = x
out = self.conv1(x)
if self.with_bn:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.with_bn:
out = self.bn2(out)
if self.down_sampling or self.num_filters != self.input_filters:
identity = self.conv3(identity)
if self.with_bn:
identity = self.bn3(identity)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self,
input_filters,
num_filters,
base_filters,
down_sampling=False,
down_sampling_temporal=None,
block_type='3d',
is_real_3d=True,
group=1,
with_bn=True):
super(Bottleneck, self).__init__()
self.num_filters = num_filters
self.base_filters = base_filters
self.input_filters = input_filters
self.with_bn = with_bn
if self.with_bn:
conv3d = conv3d_wobias
else:
conv3d = conv3d_wbias
if block_type == '2.5d':
assert is_real_3d
if down_sampling_temporal is None:
down_sampling_temporal = down_sampling
if down_sampling:
if is_real_3d and down_sampling_temporal:
self.down_sampling_stride = [2, 2, 2]
else:
self.down_sampling_stride = [1, 2, 2]
else:
self.down_sampling_stride = [1, 1, 1]
self.down_sampling = down_sampling
self.relu = nn.ReLU()
self.conv0 = add_conv3d(input_filters, base_filters, kernel=[
1, 1, 1], stride=[1, 1, 1], pad=[0, 0, 0], with_bn=self.with_bn)
if self.with_bn:
self.bn0 = add_bn(base_filters)
self.conv1 = add_conv3d(base_filters, base_filters,
kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],
stride=self.down_sampling_stride,
pad=[1, 1, 1] if is_real_3d else [0, 1, 1],
block_type=block_type, with_bn=self.with_bn)
if self.with_bn:
self.bn1 = add_bn(base_filters)
self.conv2 = add_conv3d(base_filters, num_filters, kernel=[
1, 1, 1], pad=[0, 0, 0], stride=[1, 1, 1], with_bn=self.with_bn)
if self.with_bn:
self.bn2 = add_bn(num_filters)
if num_filters != input_filters or down_sampling:
self.conv3 = conv3d(input_filters, num_filters, kernel=[1, 1, 1],
stride=self.down_sampling_stride, pad=[0, 0, 0])
if self.with_bn:
self.bn3 = nn.BatchNorm3d(num_filters, eps=1e-3)
def forward(self, x):
identity = x
if self.with_bn:
out = self.relu(self.bn0(self.conv0(x)))
out = self.relu(self.bn1(self.conv1(out)))
out = self.bn2(self.conv2(out))
else:
out = self.relu(self.conv0(x))
out = self.relu(self.conv1(out))
out = self.conv2(out)
if self.down_sampling or self.num_filters != self.input_filters:
identity = self.conv3(identity)
if self.with_bn:
identity = self.bn3(identity)
out += identity
out = self.relu(out)
return out
def make_plain_res_layer(block, num_blocks, in_filters, num_filters, base_filters,
block_type='3d', down_sampling=False, down_sampling_temporal=None,
is_real_3d=True, with_bn=True):
layers = []
layers.append(block(in_filters, num_filters, base_filters, down_sampling=down_sampling,
down_sampling_temporal=down_sampling_temporal, block_type=block_type,
is_real_3d=is_real_3d, with_bn=with_bn))
for i in range(num_blocks - 1):
layers.append(block(num_filters, num_filters, base_filters,
block_type=block_type, is_real_3d=is_real_3d, with_bn=with_bn))
return module_list(layers)
BLOCK_CONFIG = {
10: (1, 1, 1, 1),
16: (2, 2, 2, 1),
18: (2, 2, 2, 2),
26: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
SHALLOW_FILTER_CONFIG = [
[64, 64],
[128, 128],
[256, 256],
[512, 512]
]
DEEP_FILTER_CONFIG = [
[256, 64],
[512, 128],
[1024, 256],
[2048, 512]
]
@BACKBONES.register_module
class ResNet_R3D(nn.Module):
def __init__(self,
pretrained=None,
num_input_channels=3,
depth=34,
block_type='2.5d',
channel_multiplier=1.0,
bottleneck_multiplier=1.0,
conv1_kernel_t=3,
conv1_stride_t=1,
use_pool1=False,
bn_eval=True,
bn_frozen=True,
with_bn=True):
# parameter initialization
super(ResNet_R3D, self).__init__()
self.pretrained = pretrained
self.num_input_channels = num_input_channels
self.depth = depth
self.block_type = block_type
self.channel_multiplier = channel_multiplier
self.bottleneck_multiplier = bottleneck_multiplier
self.conv1_kernel_t = conv1_kernel_t
self.conv1_stride_t = conv1_stride_t
self.use_pool1 = use_pool1
self.relu = nn.ReLU()
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.with_bn = with_bn
global comp_count, comp_idx
comp_idx = 0
comp_count = 0
if self.with_bn:
conv3d = conv3d_wobias
else:
conv3d = conv3d_wbias
# stem block
if self.block_type in ['2.5d', '2.5d-sep']:
self.conv1_s = conv3d(self.num_input_channels, 45, [
1, 7, 7], [1, 2, 2], [0, 3, 3])
if self.with_bn:
self.bn1_s = nn.BatchNorm3d(45, eps=1e-3)
self.conv1_t = conv3d(45, 64, [self.conv1_kernel_t, 1, 1], [self.conv1_stride_t, 1, 1],
[(self.conv1_kernel_t - 1) // 2, 0, 0])
if self.with_bn:
self.bn1_t = nn.BatchNorm3d(64, eps=1e-3)
else:
self.conv1 = conv3d(self.num_input_channels, 64, [self.conv1_kernel_t, 7, 7],
[self.conv1_stride_t, 2, 2], [(self.conv1_kernel_t - 1) // 2, 3, 3])
if self.with_bn:
self.bn1 = nn.BatchNorm3d(64, eps=1e-3)
if self.use_pool1:
self.pool1 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[
1, 2, 2], padding=[0, 1, 1])
self.stage_blocks = BLOCK_CONFIG[self.depth]
if self.depth <= 18 or self.depth == 34:
self.block = BasicBlock
else:
self.block = Bottleneck
if self.depth <= 34:
self.filter_config = SHALLOW_FILTER_CONFIG
else:
self.filter_config = DEEP_FILTER_CONFIG
        self.filter_config = np.multiply(
            self.filter_config, self.channel_multiplier).astype(int)  # np.int was removed in NumPy 1.24
layer1 = make_plain_res_layer(self.block, self.stage_blocks[0],
64, self.filter_config[0][0],
int(self.filter_config[0][1]
* self.bottleneck_multiplier),
block_type=self.block_type,
with_bn=self.with_bn)
self.add_module('layer1', layer1)
layer2 = make_plain_res_layer(self.block, self.stage_blocks[1],
self.filter_config[0][0], self.filter_config[1][0],
int(self.filter_config[1][1]
* self.bottleneck_multiplier),
block_type=self.block_type, down_sampling=True,
with_bn=self.with_bn)
self.add_module('layer2', layer2)
layer3 = make_plain_res_layer(self.block, self.stage_blocks[2],
self.filter_config[1][0], self.filter_config[2][0],
int(self.filter_config[2][1]
* self.bottleneck_multiplier),
block_type=self.block_type, down_sampling=True,
with_bn=self.with_bn)
self.add_module('layer3', layer3)
layer4 = make_plain_res_layer(self.block, self.stage_blocks[3],
self.filter_config[2][0], self.filter_config[3][0],
int(self.filter_config[3][1]
* self.bottleneck_multiplier),
block_type=self.block_type, down_sampling=True,
with_bn=self.with_bn)
self.add_module('layer4', layer4)
self.res_layers = ['layer1', 'layer2', 'layer3', 'layer4']
def forward(self, x):
if self.block_type in ['2.5d', '2.5d-sep']:
if self.with_bn:
x = self.relu(self.bn1_s(self.conv1_s(x)))
x = self.relu(self.bn1_t(self.conv1_t(x)))
else:
x = self.relu(self.conv1_s(x))
x = self.relu(self.conv1_t(x))
else:
if self.with_bn:
x = self.relu(self.bn1(self.conv1(x)))
else:
x = self.relu(self.conv1(x))
if self.use_pool1:
x = self.pool1(x)
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
return x
def init_weights(self):
if isinstance(self.pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def train(self, mode=True):
super(ResNet_R3D, self).train(mode)
if self.bn_eval and self.with_bn:
for m in self.modules():
if isinstance(m, nn.BatchNorm3d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
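# --- Hedged usage sketch (not part of the original module): build the backbone defined
# above and push a dummy clip through it. The clip length and resolution are
# illustrative assumptions, and because of the relative imports above this would be
# called from a script that imports ResNet_R3D rather than run directly.
def _demo_forward():
    backbone = ResNet_R3D(depth=34, block_type='2.5d', use_pool1=True)
    backbone.init_weights()                # no pretrained checkpoint -> Kaiming/constant init
    backbone.eval()
    clip = torch.randn(1, 3, 8, 112, 112)  # (batch, channels, frames, height, width)
    with torch.no_grad():
        feat = backbone(clip)
    print(feat.shape)                      # expected: torch.Size([1, 512, 1, 4, 4]) for this config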
|
[
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"numpy.multiply",
"mmcv.cnn.constant_init",
"mmcv.cnn.kaiming_init",
"mmcv.runner.load_checkpoint",
"torch.nn.MaxPool3d",
"logging.getLogger"
] |
[((1424, 1433), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1431, 1433), True, 'import torch.nn as nn\n'), ((4251, 4260), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4258, 4260), True, 'import torch.nn as nn\n'), ((8206, 8215), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8213, 8215), True, 'import torch.nn as nn\n'), ((9398, 9470), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '[1, 3, 3]', 'stride': '[1, 2, 2]', 'padding': '[0, 1, 1]'}), '(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1])\n', (9410, 9470), True, 'import torch.nn as nn\n'), ((12756, 12775), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (12773, 12775), False, 'import logging\n'), ((12788, 12855), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['self', 'self.pretrained'], {'strict': '(False)', 'logger': 'logger'}), '(self, self.pretrained, strict=False, logger=logger)\n', (12803, 12855), False, 'from mmcv.runner import load_checkpoint\n'), ((2541, 2579), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['num_filters'], {'eps': '(0.001)'}), '(num_filters, eps=0.001)\n', (2555, 2579), True, 'import torch.nn as nn\n'), ((5435, 5473), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['num_filters'], {'eps': '(0.001)'}), '(num_filters, eps=0.001)\n', (5449, 5473), True, 'import torch.nn as nn\n'), ((8765, 8794), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(45)'], {'eps': '(0.001)'}), '(45, eps=0.001)\n', (8779, 8794), True, 'import torch.nn as nn\n'), ((9026, 9055), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {'eps': '(0.001)'}), '(64, eps=0.001)\n', (9040, 9055), True, 'import torch.nn as nn\n'), ((9316, 9345), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {'eps': '(0.001)'}), '(64, eps=0.001)\n', (9330, 9345), True, 'import torch.nn as nn\n'), ((9878, 9934), 'numpy.multiply', 'np.multiply', (['self.filter_config', 'self.channel_multiplier'], {}), '(self.filter_config, self.channel_multiplier)\n', (9889, 9934), True, 'import numpy as np\n'), ((12996, 13011), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['m'], {}), '(m)\n', (13008, 13011), False, 'from mmcv.cnn import constant_init, kaiming_init\n'), ((13084, 13103), 'mmcv.cnn.constant_init', 'constant_init', (['m', '(1)'], {}), '(m, 1)\n', (13097, 13103), False, 'from mmcv.cnn import constant_init, kaiming_init\n')]
|
from abc import ABC, abstractmethod
from hyperopt import STATUS_OK
import numpy as np
import logging
import pandas as pd
import shap
import matplotlib.pyplot as plt
import seaborn as sns
from crosspredict.iterator import Iterator
class CrossModelFabric(ABC):
def __init__(self,
iterator: Iterator,
params,
feature_name,
col_target,
cols_cat='auto',
num_boost_round=99999,
early_stopping_rounds=50,
valid=True,
random_state=0,
cross_target_encoder=None
):
self.params = params
self.feature_name = feature_name
self.cols_cat = cols_cat
self.num_boost_round = num_boost_round
self.early_stopping_rounds = early_stopping_rounds
self.valid = valid
self.col_target = col_target
self.random_state = random_state
self.iterator = iterator
self.cross_target_encoder = cross_target_encoder
self.models = {}
self.scores = None
self.score_max = None
self.num_boost_optimal = None
self.std = None
@abstractmethod
def get_hyperopt_space(self, params, random_state):
pass
@abstractmethod
def get_dataset(self, data, label, categorical_feature, **kwargs):
pass
@abstractmethod
def train(
self,
params,
train_set,
train_name,
valid_sets,
valid_name,
num_boost_round,
evals_result,
categorical_feature,
early_stopping_rounds,
verbose_eval):
pass
def fit(self, df):
log = logging.getLogger(__name__)
scores = {}
scores_avg = []
log.info(self.params)
self.iterator.fit(df=df)
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
dtrain = self.get_dataset(
data=X_train.astype(float),
label=y_train,
categorical_feature=self.cols_cat)
dvalid = self.get_dataset(data=X_val.astype(float), label=y_val,
categorical_feature=self.cols_cat)
if fold % self.iterator.n_splits == 0:
log.info(f'REPEAT FOLDS {fold//self.iterator.n_splits} START')
            # Training
evals_result = {}
if self.valid:
model = self.train(
params=self.params,
train_set=dtrain,
train_name='train',
valid_set=dvalid,
valid_name='eval',
num_boost_round=self.num_boost_round,
evals_result=evals_result,
categorical_feature=self.cols_cat,
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=False)
else:
model = self.train(params=self.params,
train_set=dtrain,
num_boost_round=self.num_boost_round,
categorical_feature=self.cols_cat,
verbose_eval=False)
self.models[fold] = model
if self.valid:
                # Building predictions for the different interaction types
scores[fold] = evals_result['eval'][self.params['metric']]
best_auc = np.max(evals_result['eval'][self.params['metric']])
scores_avg.append(best_auc)
log.info(f'\tCROSSVALIDATION FOLD {fold%self.iterator.n_splits} ENDS with best `{self.params["metric"]}` = {best_auc}')
if self.valid:
self.scores = pd.DataFrame(
dict([(k, pd.Series(v)) for k, v in scores.items()]))
mask = self.scores.isnull().sum(axis=1) == 0
self.num_boost_optimal = np.argmax(
self.scores[mask].mean(axis=1).values)
self.score_max = self.scores[mask].mean(
axis=1)[self.num_boost_optimal]
# self.score_max = np.mean(scores_avg)
self.std = self.scores[mask].std(axis=1)[self.num_boost_optimal]
# self.std = np.std(scores_avg)
result = {'loss': -self.score_max,
'status': STATUS_OK,
'std': self.std,
'score_max': self.score_max,
'scores_all': scores_avg,
'num_boost': int(self.num_boost_optimal),
}
log.info(result)
return result
return self
def transform(self, df):
x = df[self.feature_name]
y = df[self.col_target]
predict = pd.Series(index=df.index, data=np.zeros(df.shape[0]))
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
            # Prepare the data in the required format
model = self.models[fold]
predict.loc[X_val.index] += \
model.predict(X_val[model.feature_name()].astype(float),
num_iteration=self.num_boost_optimal) / self.iterator.n_repeats
return predict
def predict(self, test):
predict = pd.Series(index=test.index, data=np.zeros(test.shape[0]))
models_len = len(self.models.keys())
if self.cross_target_encoder is not None:
encoded_test = self.cross_target_encoder.predict(test)
test = pd.concat([test, encoded_test], axis=1)
for fold in self.models.keys():
model = self.models[fold]
predict += model.predict(test[model.feature_name()].astype(
float), num_iteration=self.num_boost_optimal) / models_len
return predict
def shap(self, df: pd.DataFrame, n_samples=500):
'''
:param df:
        :param n_samples: number of records sampled from each test fold for the SHAP value analysis
:return:
'''
fig = plt.figure(figsize=(10, 10))
log = logging.getLogger(__name__)
shap_df_fin = pd.DataFrame(columns=['feature'])
x = df[self.feature_name]
y = df[self.col_target]
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
model = self.models[fold]
explainer = shap.TreeExplainer(model)
df_sample = X_val[model.feature_name()].sample(
n=n_samples, random_state=0, replace=True).astype(float)
if self.params['metric']=='auc':
shap_values = explainer.shap_values(df_sample)[1]
else:
shap_values = explainer.shap_values(df_sample)
shap_df = pd.DataFrame(zip(model.feature_name(), np.mean(
np.abs(shap_values), axis=0)), columns=['feature', 'shap_' + str(fold)])
shap_df_fin = pd.merge(shap_df_fin, shap_df,
how='outer', on='feature')
shap_feature_stats = shap_df_fin.set_index('feature').agg(
['mean', 'std'], axis=1).sort_values('mean', ascending=False)
cols_best = shap_feature_stats[:30].index
best_features = shap_df_fin.loc[shap_df_fin['feature'].isin(cols_best)]
best_features_melt = pd.melt(
best_features, id_vars=['feature'], value_vars=[
feature for feature in best_features.columns.values.tolist() if feature not in ['feature']])
sns.barplot(x='value', y='feature', data=best_features_melt,
estimator=np.mean, order=cols_best)
return fig, shap_feature_stats.reset_index()
def shap_summary_plot(self, test: pd.DataFrame, n_samples=500):
fig = plt.figure()
log = logging.getLogger(__name__)
shap_df_fin = pd.DataFrame(columns=['feature'])
if self.cross_target_encoder is not None:
encoded_test = self.cross_target_encoder.predict(test=test)
test = pd.concat([test, encoded_test], axis=1)
        # Prepare the data in the required format
model = self.models[0]
explainer = shap.TreeExplainer(model)
df_sample = test[model.feature_name()].sample(
n=n_samples, random_state=0, replace=True).astype(float)
if self.params['metric']=='auc':
shap_values = explainer.shap_values(df_sample)[1]
else:
shap_values = explainer.shap_values(df_sample)
shap_df = pd.DataFrame(zip(model.feature_name(), np.mean(
np.abs(shap_values), axis=0)), columns=['feature', 'shap_'])
shap_df_fin = pd.merge(shap_df_fin, shap_df, how='outer', on='feature')
shap.summary_plot(shap_values, df_sample, show=False, )
return fig
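# --- Hedged sketch (not part of the library): a minimal concrete subclass wired to
# LightGBM, matching the way `fit` above calls `get_dataset` and `train`. It assumes an
# older LightGBM API (<4.0) in which `early_stopping_rounds`, `verbose_eval` and
# `evals_result` are still accepted by lgb.train; newer versions use callbacks instead.
import lightgbm as lgb
from hyperopt import hp


class CrossLightgbmSketch(CrossModelFabric):
    def get_hyperopt_space(self, params, random_state):
        # illustrative search space only
        return {'num_leaves': hp.quniform('num_leaves', 16, 256, 16)}

    def get_dataset(self, data, label, categorical_feature, **kwargs):
        return lgb.Dataset(data, label=label,
                           categorical_feature=categorical_feature, **kwargs)

    def train(self, params, train_set, train_name=None, valid_set=None,
              valid_name=None, num_boost_round=100, evals_result=None,
              categorical_feature='auto', early_stopping_rounds=None,
              verbose_eval=False):
        valid_sets = [valid_set] if valid_set is not None else None
        valid_names = [valid_name] if valid_name is not None else None
        return lgb.train(params, train_set,
                         num_boost_round=num_boost_round,
                         valid_sets=valid_sets, valid_names=valid_names,
                         evals_result=evals_result,
                         categorical_feature=categorical_feature,
                         early_stopping_rounds=early_stopping_rounds,
                         verbose_eval=verbose_eval)
# It could then be driven like any other CrossModelFabric subclass, e.g.
#   model = CrossLightgbmSketch(iterator=some_iterator, params={'metric': 'auc', ...},
#                               feature_name=cols, col_target='target')
#   model.fit(df)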
|
[
"pandas.DataFrame",
"numpy.abs",
"pandas.merge",
"seaborn.barplot",
"numpy.zeros",
"shap.TreeExplainer",
"matplotlib.pyplot.figure",
"numpy.max",
"pandas.Series",
"shap.summary_plot",
"pandas.concat",
"logging.getLogger"
] |
[((1763, 1790), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1780, 1790), False, 'import logging\n'), ((7084, 7112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7094, 7112), True, 'import matplotlib.pyplot as plt\n'), ((7127, 7154), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7144, 7154), False, 'import logging\n'), ((7177, 7210), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['feature']"}), "(columns=['feature'])\n", (7189, 7210), True, 'import pandas as pd\n'), ((9002, 9103), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""value"""', 'y': '"""feature"""', 'data': 'best_features_melt', 'estimator': 'np.mean', 'order': 'cols_best'}), "(x='value', y='feature', data=best_features_melt, estimator=np.\n mean, order=cols_best)\n", (9013, 9103), True, 'import seaborn as sns\n'), ((9255, 9267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9265, 9267), True, 'import matplotlib.pyplot as plt\n'), ((9282, 9309), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9299, 9309), False, 'import logging\n'), ((9332, 9365), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['feature']"}), "(columns=['feature'])\n", (9344, 9365), True, 'import pandas as pd\n'), ((9644, 9669), 'shap.TreeExplainer', 'shap.TreeExplainer', (['model'], {}), '(model)\n', (9662, 9669), False, 'import shap\n'), ((10131, 10188), 'pandas.merge', 'pd.merge', (['shap_df_fin', 'shap_df'], {'how': '"""outer"""', 'on': '"""feature"""'}), "(shap_df_fin, shap_df, how='outer', on='feature')\n", (10139, 10188), True, 'import pandas as pd\n'), ((10198, 10251), 'shap.summary_plot', 'shap.summary_plot', (['shap_values', 'df_sample'], {'show': '(False)'}), '(shap_values, df_sample, show=False)\n', (10215, 10251), False, 'import shap\n'), ((6542, 6581), 'pandas.concat', 'pd.concat', (['[test, encoded_test]'], {'axis': '(1)'}), '([test, encoded_test], axis=1)\n', (6551, 6581), True, 'import pandas as pd\n'), ((7883, 7908), 'shap.TreeExplainer', 'shap.TreeExplainer', (['model'], {}), '(model)\n', (7901, 7908), False, 'import shap\n'), ((8419, 8476), 'pandas.merge', 'pd.merge', (['shap_df_fin', 'shap_df'], {'how': '"""outer"""', 'on': '"""feature"""'}), "(shap_df_fin, shap_df, how='outer', on='feature')\n", (8427, 8476), True, 'import pandas as pd\n'), ((9507, 9546), 'pandas.concat', 'pd.concat', (['[test, encoded_test]'], {'axis': '(1)'}), '([test, encoded_test], axis=1)\n', (9516, 9546), True, 'import pandas as pd\n'), ((2185, 2226), 'pandas.concat', 'pd.concat', (['[train, encoded_train]'], {'axis': '(1)'}), '([train, encoded_train], axis=1)\n', (2194, 2226), True, 'import pandas as pd\n'), ((2249, 2287), 'pandas.concat', 'pd.concat', (['[val, encoded_test]'], {'axis': '(1)'}), '([val, encoded_test], axis=1)\n', (2258, 2287), True, 'import pandas as pd\n'), ((4032, 4083), 'numpy.max', 'np.max', (["evals_result['eval'][self.params['metric']]"], {}), "(evals_result['eval'][self.params['metric']])\n", (4038, 4083), True, 'import numpy as np\n'), ((5369, 5390), 'numpy.zeros', 'np.zeros', (['df.shape[0]'], {}), '(df.shape[0])\n', (5377, 5390), True, 'import numpy as np\n'), ((5678, 5719), 'pandas.concat', 'pd.concat', (['[train, encoded_train]'], {'axis': '(1)'}), '([train, encoded_train], axis=1)\n', (5687, 5719), True, 'import pandas as pd\n'), ((5742, 5780), 'pandas.concat', 'pd.concat', (['[val, encoded_test]'], {'axis': '(1)'}), '([val, encoded_test], axis=1)\n', (5751, 
5780), True, 'import pandas as pd\n'), ((6336, 6359), 'numpy.zeros', 'np.zeros', (['test.shape[0]'], {}), '(test.shape[0])\n', (6344, 6359), True, 'import numpy as np\n'), ((7564, 7605), 'pandas.concat', 'pd.concat', (['[train, encoded_train]'], {'axis': '(1)'}), '([train, encoded_train], axis=1)\n', (7573, 7605), True, 'import pandas as pd\n'), ((7628, 7666), 'pandas.concat', 'pd.concat', (['[val, encoded_test]'], {'axis': '(1)'}), '([val, encoded_test], axis=1)\n', (7637, 7666), True, 'import pandas as pd\n'), ((10048, 10067), 'numpy.abs', 'np.abs', (['shap_values'], {}), '(shap_values)\n', (10054, 10067), True, 'import numpy as np\n'), ((8320, 8339), 'numpy.abs', 'np.abs', (['shap_values'], {}), '(shap_values)\n', (8326, 8339), True, 'import numpy as np\n'), ((4355, 4367), 'pandas.Series', 'pd.Series', (['v'], {}), '(v)\n', (4364, 4367), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
An example of setting the number of OMP threads in FCI calculations. In old pyscf
versions, a different number of OpenMP threads could lead to slightly different
answers. This issue has been fixed; see github issue #249.
'''
from functools import reduce
import numpy
from pyscf import gto, lo, fci, ao2mo, scf, lib
mol = gto.M(atom=[('H', 0, 0, i*1.8) for i in range(10)],
basis = 'sto6g', unit='B')
s = mol.intor('cint1e_ovlp_sph')
orb = lo.lowdin(s)
#mf = scf.RHF(mol).run()
#orb = mf.mo_coeff
h1 = mol.intor('cint1e_nuc_sph')
h1+= mol.intor('cint1e_kin_sph')
h1 = reduce(numpy.dot, (orb.T, h1, orb))
h2 = ao2mo.kernel(mol, orb)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(),
max_cycle=500, max_space=100, verbose=5)
print(e)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(), ci0=ci,
max_cycle=500, max_space=100, verbose=5)
print(e)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(), ci0=ci,
max_cycle=500, max_space=100, verbose=5)
print(e)
#
# Reducing OMP threads can improve the numerical stability
#
# Set OMP_NUM_THREADS to 1
lib.num_threads(1)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(),
max_cycle=500, max_space=100, verbose=5)
print(e)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(), ci0=ci,
max_cycle=500, max_space=100, verbose=5)
print(e)
#
# Another Example.
#
import h5py
with h5py.File('spin_op_hamiltonian.h5', 'r') as f:
h1 = lib.unpack_tril(f['h1'].value)
h2 = f['h2'].value
norb = 10
nelec = (5,5)
na = fci.cistring.num_strings(norb, nelec[0])
c0 = numpy.zeros((na,na))
c0[0,0] = 1
solver = fci.addons.fix_spin_(fci.direct_spin0.FCI())
# Smooth convergence was found with single thread.
solver.threads = 1
solver.kernel(h1, h2, norb, nelec, ci0=c0, verbose=5)
# When switching to multi-threads, numerical fluctuation leads to convergence
# problem
solver.threads = 4
solver.kernel(h1, h2, norb, nelec, ci0=c0, verbose=5)
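#
# A hedged aside (not part of the original example): lib.num_threads() called with
# no argument is assumed to return the OpenMP thread count currently in use, which
# is convenient for checking or restoring the setting after the runs above.
#
print('OpenMP threads currently in use:', lib.num_threads())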
|
[
"h5py.File",
"pyscf.lib.num_threads",
"pyscf.fci.direct_spin0.FCI",
"pyscf.ao2mo.kernel",
"numpy.zeros",
"pyscf.fci.cistring.num_strings",
"functools.reduce",
"pyscf.lo.lowdin",
"pyscf.lib.unpack_tril"
] |
[((485, 497), 'pyscf.lo.lowdin', 'lo.lowdin', (['s'], {}), '(s)\n', (494, 497), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((614, 649), 'functools.reduce', 'reduce', (['numpy.dot', '(orb.T, h1, orb)'], {}), '(numpy.dot, (orb.T, h1, orb))\n', (620, 649), False, 'from functools import reduce\n'), ((655, 677), 'pyscf.ao2mo.kernel', 'ao2mo.kernel', (['mol', 'orb'], {}), '(mol, orb)\n', (667, 677), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1252, 1270), 'pyscf.lib.num_threads', 'lib.num_threads', (['(1)'], {}), '(1)\n', (1267, 1270), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1770, 1810), 'pyscf.fci.cistring.num_strings', 'fci.cistring.num_strings', (['norb', 'nelec[0]'], {}), '(norb, nelec[0])\n', (1794, 1810), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1816, 1837), 'numpy.zeros', 'numpy.zeros', (['(na, na)'], {}), '((na, na))\n', (1827, 1837), False, 'import numpy\n'), ((1630, 1670), 'h5py.File', 'h5py.File', (['"""spin_op_hamiltonian.h5"""', '"""r"""'], {}), "('spin_op_hamiltonian.h5', 'r')\n", (1639, 1670), False, 'import h5py\n'), ((1686, 1716), 'pyscf.lib.unpack_tril', 'lib.unpack_tril', (["f['h1'].value"], {}), "(f['h1'].value)\n", (1701, 1716), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1879, 1901), 'pyscf.fci.direct_spin0.FCI', 'fci.direct_spin0.FCI', ([], {}), '()\n', (1899, 1901), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n')]
|
#!/usr/bin/env python3
import sys
import numpy as np
from PySide6.QtCore import Qt, Slot
from PySide6.QtGui import QAction, QKeySequence
from PySide6.QtWidgets import (
QApplication, QHBoxLayout, QLabel,
QMainWindow, QPushButton, QSizePolicy,
QVBoxLayout, QWidget
)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.figure import Figure
from skimage import data
from skimage.color import rgb2hed
from skimage.exposure import rescale_intensity
class ApplicationWindow(QMainWindow):
"""Example base on the example by 'scikit-image' gallery"""
def __init__(self, root, parent=None):
super(ApplicationWindow, self).__init__(parent)
self._main = QWidget()
self.setCentralWidget(self._main)
# Main menu bar
self.menu = self.menuBar()
self.menu_file = self.menu.addMenu("File")
exit = QAction("Exit", self, triggered=root.quit)
self.menu_file.addAction(exit)
self.menu_about = self.menu.addMenu("&About")
about = QAction(
"About Qt", self,
shortcut=QKeySequence(QKeySequence.HelpContents),
triggered=root.aboutQt
)
self.menu_about.addAction(about)
# Create an artificial color close to the original one
self.ihc_rgb = data.immunohistochemistry()
self.ihc_hed = rgb2hed(self.ihc_rgb)
main_layout = QVBoxLayout(self._main)
plot_layout = QHBoxLayout()
button_layout = QHBoxLayout()
label_layout = QHBoxLayout()
self.canvas1 = FigureCanvas(Figure(figsize=(5, 5)))
self.canvas2 = FigureCanvas(Figure(figsize=(5, 5)))
self._ax1 = self.canvas1.figure.subplots()
self._ax2 = self.canvas2.figure.subplots()
self._ax1.imshow(self.ihc_rgb)
plot_layout.addWidget(self.canvas1)
plot_layout.addWidget(self.canvas2)
self.button1 = QPushButton("Hematoxylin")
self.button2 = QPushButton("Eosin")
self.button3 = QPushButton("DAB")
        self.button4 = QPushButton("Fluorescence")
self.button1.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button2.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button3.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button4.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button1.clicked.connect(self.plot_hematoxylin)
self.button2.clicked.connect(self.plot_eosin)
self.button3.clicked.connect(self.plot_dab)
self.button4.clicked.connect(self.plot_final)
self.label1 = QLabel("Original", alignment=Qt.AlignCenter)
self.label2 = QLabel("", alignment=Qt.AlignCenter)
font = self.label1.font()
font.setPointSize(16)
self.label1.setFont(font)
self.label2.setFont(font)
label_layout.addWidget(self.label1)
label_layout.addWidget(self.label2)
button_layout.addWidget(self.button1)
button_layout.addWidget(self.button2)
button_layout.addWidget(self.button3)
button_layout.addWidget(self.button4)
main_layout.addLayout(label_layout, 2)
main_layout.addLayout(plot_layout, 88)
main_layout.addLayout(button_layout, 10)
# Default image
self.plot_hematoxylin()
def set_buttons_state(self, states):
self.button1.setEnabled(states[0])
self.button2.setEnabled(states[1])
self.button3.setEnabled(states[2])
self.button4.setEnabled(states[3])
@Slot()
def plot_hematoxylin(self):
cmap_hema = LinearSegmentedColormap.from_list(
"mycmap", ["white", "navy"]
)
self._ax2.imshow(self.ihc_hed[:, :, 0], cmap=cmap_hema)
self.canvas2.draw()
self.label2.setText("Hematoxylin")
self.set_buttons_state((False, True, True, True))
@Slot()
def plot_eosin(self):
cmap_eosin = LinearSegmentedColormap.from_list(
"mycmap", ["darkviolet", "white"]
)
self._ax2.imshow(self.ihc_hed[:, :, 1], cmap=cmap_eosin)
self.canvas2.draw()
self.label2.setText("Eosin")
self.set_buttons_state((True, False, True, True))
@Slot()
def plot_dab(self):
cmap_dab = LinearSegmentedColormap.from_list(
"mycmap", ["white", "saddlebrown"]
)
self._ax2.imshow(self.ihc_hed[:, :, 2], cmap=cmap_dab)
self.canvas2.draw()
self.label2.setText("DAB")
self.set_buttons_state((True, True, False, True))
@Slot()
def plot_final(self):
h = rescale_intensity(self.ihc_hed[:, :, 0], out_range=(0, 1))
d = rescale_intensity(self.ihc_hed[:, :, 2], out_range=(0, 1))
zdh = np.dstack((np.zeros_like(h), d, h))
self._ax2.imshow(zdh)
self.canvas2.draw()
self.label2.setText("Stain separated image")
self.set_buttons_state((True, True, True, False))
def main():
root = QApplication(sys.argv)
app = ApplicationWindow(root)
app.show()
sys.exit(root.exec())
if __name__ == "__main__":
main()
|
[
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.zeros_like",
"PySide6.QtGui.QAction",
"skimage.data.immunohistochemistry",
"PySide6.QtGui.QKeySequence",
"skimage.exposure.rescale_intensity",
"PySide6.QtWidgets.QVBoxLayout",
"PySide6.QtWidgets.QWidget",
"PySide6.QtWidgets.QPushButton",
"PySide6.QtWidgets.QLabel",
"skimage.color.rgb2hed",
"matplotlib.figure.Figure",
"PySide6.QtWidgets.QApplication",
"PySide6.QtCore.Slot",
"PySide6.QtWidgets.QHBoxLayout"
] |
[((3659, 3665), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (3663, 3665), False, 'from PySide6.QtCore import Qt, Slot\n'), ((4002, 4008), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4006, 4008), False, 'from PySide6.QtCore import Qt, Slot\n'), ((4341, 4347), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4345, 4347), False, 'from PySide6.QtCore import Qt, Slot\n'), ((4673, 4679), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4677, 4679), False, 'from PySide6.QtCore import Qt, Slot\n'), ((5092, 5114), 'PySide6.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5104, 5114), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((782, 791), 'PySide6.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (789, 791), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((960, 1002), 'PySide6.QtGui.QAction', 'QAction', (['"""Exit"""', 'self'], {'triggered': 'root.quit'}), "('Exit', self, triggered=root.quit)\n", (967, 1002), False, 'from PySide6.QtGui import QAction, QKeySequence\n'), ((1387, 1414), 'skimage.data.immunohistochemistry', 'data.immunohistochemistry', ([], {}), '()\n', (1412, 1414), False, 'from skimage import data\n'), ((1438, 1459), 'skimage.color.rgb2hed', 'rgb2hed', (['self.ihc_rgb'], {}), '(self.ihc_rgb)\n', (1445, 1459), False, 'from skimage.color import rgb2hed\n'), ((1483, 1506), 'PySide6.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self._main'], {}), '(self._main)\n', (1494, 1506), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1529, 1542), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1540, 1542), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1567, 1580), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1578, 1580), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1604, 1617), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1615, 1617), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1995, 2021), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""Hematoxylin"""'], {}), "('Hematoxylin')\n", (2006, 2021), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2045, 2065), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""Eosin"""'], {}), "('Eosin')\n", (2056, 2065), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2089, 2107), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""DAB"""'], {}), "('DAB')\n", (2100, 2107), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2131, 2157), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""Fluorescene"""'], {}), "('Fluorescene')\n", (2142, 2157), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2727, 2771), 'PySide6.QtWidgets.QLabel', 'QLabel', 
(['"""Original"""'], {'alignment': 'Qt.AlignCenter'}), "('Original', alignment=Qt.AlignCenter)\n", (2733, 2771), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2794, 2830), 'PySide6.QtWidgets.QLabel', 'QLabel', (['""""""'], {'alignment': 'Qt.AlignCenter'}), "('', alignment=Qt.AlignCenter)\n", (2800, 2830), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((3718, 3780), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['white', 'navy']"], {}), "('mycmap', ['white', 'navy'])\n", (3751, 3780), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((4056, 4124), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['darkviolet', 'white']"], {}), "('mycmap', ['darkviolet', 'white'])\n", (4089, 4124), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((4391, 4460), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['white', 'saddlebrown']"], {}), "('mycmap', ['white', 'saddlebrown'])\n", (4424, 4460), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((4718, 4776), 'skimage.exposure.rescale_intensity', 'rescale_intensity', (['self.ihc_hed[:, :, 0]'], {'out_range': '(0, 1)'}), '(self.ihc_hed[:, :, 0], out_range=(0, 1))\n', (4735, 4776), False, 'from skimage.exposure import rescale_intensity\n'), ((4789, 4847), 'skimage.exposure.rescale_intensity', 'rescale_intensity', (['self.ihc_hed[:, :, 2]'], {'out_range': '(0, 1)'}), '(self.ihc_hed[:, :, 2], out_range=(0, 1))\n', (4806, 4847), False, 'from skimage.exposure import rescale_intensity\n'), ((1655, 1677), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1661, 1677), False, 'from matplotlib.figure import Figure\n'), ((1715, 1737), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1721, 1737), False, 'from matplotlib.figure import Figure\n'), ((1173, 1212), 'PySide6.QtGui.QKeySequence', 'QKeySequence', (['QKeySequence.HelpContents'], {}), '(QKeySequence.HelpContents)\n', (1185, 1212), False, 'from PySide6.QtGui import QAction, QKeySequence\n'), ((4873, 4889), 'numpy.zeros_like', 'np.zeros_like', (['h'], {}), '(h)\n', (4886, 4889), True, 'import numpy as np\n')]
|
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.pyplot as plt
# Milne-Simpson PC method
def milnePC(def_fn, xa, xb, ya, N):
    f = def_fn # right-hand side function f(t, y) to approximate
h = (xb - xa) / N # creates step size based on input values of a, b, N
t = np.arange(xa, xb + h, h) # array initialized to hold mesh points t
    y = np.zeros((N + 1,)) # array to hold the Milne-Simpson approximated y values
y[0] = ya # initial condition
# using RK4 to obtain the first 3 points
for i in range(0, N):
if i in range(0, 3):
k1 = h * f(t[i], y[i])
k2 = h * f(t[i] + (h / 2.0), y[i] + (k1 / 2.0))
k3 = h * f(t[i] + (h / 2.0), y[i] + (k2 / 2.0))
k4 = h * f(t[i] + h, y[i] + k3)
y[i + 1] = y[i] + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0
else:
y[i + 1] = y[i-3] + (4*h/3)*(2*f(t[i], y[i]) - f(t[i-1], y[i-1])
+ 2*f(t[i-2], y[i-2]))
y[i + 1] = y[i-1] + (h/3)*(f(t[i+1], y[i+1]) + 4*f(t[i], y[i])
+ f(t[i-1], y[i-1]))
return t, y
# Adams Fourth Order PC
def adamsPC(def_fn, xa, xb, ya, h):
    f = def_fn # right-hand side function f(t, y) to approximate
    N = int((xb - xa) / h) # number of steps based on input values of a, b and step size h
    t = np.arange(xa, xb + h, h) # array initialized to hold mesh points t
    y = np.zeros((N + 1,)) # array to hold the Adams fourth-order approximated y values
y[0] = ya # initial condition
# using RK4 to obtain the first 3 points
for i in range(0, N):
if i in range(0, 3):
k1 = h * f(t[i], y[i])
k2 = h * f(t[i] + (h / 2.0), y[i] + (k1 / 2.0))
k3 = h * f(t[i] + (h / 2.0), y[i] + (k2 / 2.0))
k4 = h * f(t[i] + h, y[i] + k3)
y[i + 1] = y[i] + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0
else:
y[i + 1] = y[i] + (h/24.0) * (55.0 * f(t[i], y[i]) - 59.0 * f(t[i - 1], y[i - 1])
+ 37.0 * f(t[i - 2], y[i - 2]) - 9.0 * f(t[i - 3], y[i - 3]))
y[i + 1] = y[i] + (h/24.0) * (9.0 * f(t[i + 1], y[i + 1])
+ 19.0 * f(t[i],y[i]) - 5.0 * f(t[i - 1], y[i - 1]) + f(t[i - 2], y[i - 2]))
return t, y
if __name__ == "__main__":
d_f = lambda x, y: (2 - 2*x*y)/(x**2 + 1)
f = lambda x: (2*x + 1)/(x**2 + 1)
x_1 = np.arange(0, 1.1, 0.1)
x_2 = np.arange(0, 1.05, 0.05)
x_milne_1, result_milne_1 = milnePC(d_f, 0, 1, 1, 10)
x_milne_2, result_milne_2 = milnePC(d_f, 0, 1, 1, 20)
x_adam_1, result_adam_1 = adamsPC(d_f, 0, 1, 1, 0.1)
x_adam_2, result_adam_2 = adamsPC(d_f, 0, 1, 1, 0.05)
y_exact_1 = f(x_1)
y_exact_2 = f(x_2)
print(result_adam_1)
err_milne_1 = np.abs(y_exact_1 - result_milne_1)
err_adam_1 = np.abs(y_exact_1 - result_adam_1)
err_milne_2 = np.abs(y_exact_2 - result_milne_2)
err_adam_2 = np.abs(y_exact_2 - result_adam_2)
print(err_adam_1)
print(err_adam_2)
for i in range(len(err_adam_1)):
print(err_adam_1[i] / err_adam_2[i*2])
print(err_milne_1)
print(err_milne_2)
for i in range(len(err_milne_1)):
print(err_milne_1[i] / err_milne_2[i*2])
plt.figure(1)
plt.plot(x_1, err_adam_1, label='ABM4')
plt.plot(x_1, err_milne_1, label='Milne-Simpson')
#plt.plot(x_2, err_adam_2, label='h=0.05')
plt.xlabel('t')
plt.ylabel('Absolute Error')
plt.title('Stability Comparison when h = 0.1')
plt.legend()
plt.figure(2)
plt.plot(x_1, err_milne_1, label='h=0.1')
plt.plot(x_2, err_milne_2, label='h=0.05')
plt.xlabel('t')
plt.ylabel('Absolute Error')
plt.title('Milne-Simpson Predictor-Corrector')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((301, 325), 'numpy.arange', 'np.arange', (['xa', '(xb + h)', 'h'], {}), '(xa, xb + h, h)\n', (310, 325), True, 'import numpy as np\n'), ((378, 396), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (386, 396), True, 'import numpy as np\n'), ((1381, 1405), 'numpy.arange', 'np.arange', (['xa', '(xb + h)', 'h'], {}), '(xa, xb + h, h)\n', (1390, 1405), True, 'import numpy as np\n'), ((1457, 1475), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (1465, 1475), True, 'import numpy as np\n'), ((2468, 2490), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (2477, 2490), True, 'import numpy as np\n'), ((2501, 2525), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {}), '(0, 1.05, 0.05)\n', (2510, 2525), True, 'import numpy as np\n'), ((2851, 2885), 'numpy.abs', 'np.abs', (['(y_exact_1 - result_milne_1)'], {}), '(y_exact_1 - result_milne_1)\n', (2857, 2885), True, 'import numpy as np\n'), ((2903, 2936), 'numpy.abs', 'np.abs', (['(y_exact_1 - result_adam_1)'], {}), '(y_exact_1 - result_adam_1)\n', (2909, 2936), True, 'import numpy as np\n'), ((2955, 2989), 'numpy.abs', 'np.abs', (['(y_exact_2 - result_milne_2)'], {}), '(y_exact_2 - result_milne_2)\n', (2961, 2989), True, 'import numpy as np\n'), ((3007, 3040), 'numpy.abs', 'np.abs', (['(y_exact_2 - result_adam_2)'], {}), '(y_exact_2 - result_adam_2)\n', (3013, 3040), True, 'import numpy as np\n'), ((3312, 3325), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3322, 3325), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3370), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'err_adam_1'], {'label': '"""ABM4"""'}), "(x_1, err_adam_1, label='ABM4')\n", (3339, 3370), True, 'import matplotlib.pyplot as plt\n'), ((3375, 3424), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'err_milne_1'], {'label': '"""Milne-Simpson"""'}), "(x_1, err_milne_1, label='Milne-Simpson')\n", (3383, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3492), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (3487, 3492), True, 'import matplotlib.pyplot as plt\n'), ((3497, 3525), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute Error"""'], {}), "('Absolute Error')\n", (3507, 3525), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3576), 'matplotlib.pyplot.title', 'plt.title', (['"""Stability Comparison when h = 0.1"""'], {}), "('Stability Comparison when h = 0.1')\n", (3539, 3576), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3591, 3593), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3612), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (3609, 3612), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3659), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'err_milne_1'], {'label': '"""h=0.1"""'}), "(x_1, err_milne_1, label='h=0.1')\n", (3626, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3706), 'matplotlib.pyplot.plot', 'plt.plot', (['x_2', 'err_milne_2'], {'label': '"""h=0.05"""'}), "(x_2, err_milne_2, label='h=0.05')\n", (3672, 3706), True, 'import matplotlib.pyplot as plt\n'), ((3711, 3726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (3721, 3726), True, 'import matplotlib.pyplot as plt\n'), ((3731, 3759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute Error"""'], {}), "('Absolute Error')\n", (3741, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3810), 'matplotlib.pyplot.title', 
'plt.title', (['"""Milne-Simpson Predictor-Corrector"""'], {}), "('Milne-Simpson Predictor-Corrector')\n", (3773, 3810), True, 'import matplotlib.pyplot as plt\n'), ((3815, 3827), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3825, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3841, 3843), True, 'import matplotlib.pyplot as plt\n')]
|
#Load packages
import numpy as np
import pandas as pd
import torch
from tqdm.auto import tqdm
import pytorch_lightning as pl
from transformers import AutoTokenizer, AutoModel
#Import package for aspect sentiment prediction
import aspect_based_sentiment_analysis as absa
#Load the ABSA sentiment model
nlp = absa.load()
#List containing the different aspect categories
ASPECTS = ['price','speed','reliability','coverage', 'customer service']
#Load the BERTweet tokenizer
TOKENIZER = AutoTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)
#Load the BERTweet model
BERTWEET_MODEL = AutoModel.from_pretrained("vinai/bertweet-base", from_tf = True, return_dict = True)
class ISP_TweetAspectClassifier(pl.LightningModule):
#Set the aspect classifier
def __init__(self, n_classes=5, n_training_steps=None, n_warmup_steps=None, lr=2e-5):
super().__init__()
self.lr = lr
self.n_warmup_steps = n_warmup_steps
self.n_training_steps = n_training_steps
self.bert = BERTWEET_MODEL
self.classifier = torch.nn.Linear(self.bert.config.hidden_size, n_classes)
self.criterion = torch.nn.BCELoss()
def forward(self, input_ids, attention_mask, labels = None):
output = self.bert(input_ids, attention_mask=attention_mask)
output = self.classifier(output.pooler_output)
output = torch.sigmoid(output)
loss = 0
if labels is not None:
loss = self.criterion(output, labels)
return loss, output
#Load the best model from training
mlc_model = ISP_TweetAspectClassifier.load_from_checkpoint(
"../models/absa-aspect-extraction/bertweet/ae-epoch=19-val_loss=0.33.ckpt",
n_classes=len(ASPECTS)
)
def run(df, col_name, optimal_threshold = 0.3):
"""
Function to perform ABSA on tweets using the multi-label bertweet classifier.
    ABSA is a two-part task of aspect extraction and aspect sentiment prediction.
    Inputs:
        - df (pd DataFrame): A pandas dataframe to perform annotation on
        - col_name (str): The specific column in the dataframe containing the tweets to run ABSA on
        - optimal_threshold (float): Decision threshold above which an aspect is considered present
Output:
- absa_df (pd DataFrame): DataFrame containing the tweets and the ABSA results
"""
#List to store detected aspects and their sentiments
df_list = []
#Iterate through all the tweets
for tweet in df[col_name]:
#List to store the aspects detected
aspects_detected = []
#List to store the sentiment values (Positive, Negative or Neutral) for the aspects
detected_sentiments = []
#Encode the tweet
encoding = TOKENIZER.encode_plus(
tweet,
add_special_tokens=True,
max_length=TOKENIZER.model_max_length,
return_token_type_ids=False,
padding="max_length",
return_attention_mask=True,
return_tensors='pt'
)
#Get the model's prediction
_, model_prediction = mlc_model(encoding["input_ids"], encoding["attention_mask"])
model_prediction = model_prediction.detach().numpy()
#Determine the aspects detected using the optimal threshold found during fine-tuning
model_prediction = np.where(model_prediction > optimal_threshold, 1, 0)
#Iterate through the model's predictions for each aspect
for pred_idx in range(len(model_prediction[0])):
#If the aspect was detected
if model_prediction[0][pred_idx] == 1:
#Note it down
aspects_detected.append(ASPECTS[pred_idx])
if aspects_detected:
#Next, carry out sentiment prediction on the aspects detected
sentiment = nlp(tweet,aspects = aspects_detected)
#Iterate through each aspect sentiment predicted results
for senti_result in sentiment.examples:
#Get the sentiment scores
scores = np.array(senti_result.scores)
#Find the max sentiment score (i.e. the predicted sentiment value)
max_score = np.argmax(scores)
#Record the sentiment (string) category for the aspect
if max_score == 2:
detected_sentiments.append("Positive")
elif max_score == 1:
detected_sentiments.append("Negative")
else:
detected_sentiments.append("Neutral")
#Add the detected aspects and sentiments from the sentence to the list
df_list.append([tweet,aspects_detected,detected_sentiments])
else:
df_list.append([tweet,[None],[None]])
absa_df = pd.DataFrame(df_list,
columns=[col_name,'Detected aspects','Predicted sentiment'])
return absa_df
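#Hedged usage sketch (not part of the original script): the DataFrame, column name
#and example tweet below are hypothetical placeholders for illustration only
if __name__ == "__main__":
    example_df = pd.DataFrame(
        {"tweet_text": ["The internet speed is awful, but customer service was quick to help."]}
    )
    print(run(example_df, "tweet_text"))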
|
[
"pandas.DataFrame",
"torch.nn.BCELoss",
"numpy.argmax",
"transformers.AutoModel.from_pretrained",
"transformers.AutoTokenizer.from_pretrained",
"torch.sigmoid",
"numpy.where",
"aspect_based_sentiment_analysis.load",
"numpy.array",
"torch.nn.Linear"
] |
[((309, 320), 'aspect_based_sentiment_analysis.load', 'absa.load', ([], {}), '()\n', (318, 320), True, 'import aspect_based_sentiment_analysis as absa\n'), ((482, 554), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""vinai/bertweet-base"""'], {'normalization': '(True)'}), "('vinai/bertweet-base', normalization=True)\n", (511, 554), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((598, 683), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['"""vinai/bertweet-base"""'], {'from_tf': '(True)', 'return_dict': '(True)'}), "('vinai/bertweet-base', from_tf=True, return_dict=True\n )\n", (623, 683), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((4910, 4998), 'pandas.DataFrame', 'pd.DataFrame', (['df_list'], {'columns': "[col_name, 'Detected aspects', 'Predicted sentiment']"}), "(df_list, columns=[col_name, 'Detected aspects',\n 'Predicted sentiment'])\n", (4922, 4998), True, 'import pandas as pd\n'), ((1075, 1131), 'torch.nn.Linear', 'torch.nn.Linear', (['self.bert.config.hidden_size', 'n_classes'], {}), '(self.bert.config.hidden_size, n_classes)\n', (1090, 1131), False, 'import torch\n'), ((1157, 1175), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (1173, 1175), False, 'import torch\n'), ((1391, 1412), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (1404, 1412), False, 'import torch\n'), ((3353, 3405), 'numpy.where', 'np.where', (['(model_prediction > optimal_threshold)', '(1)', '(0)'], {}), '(model_prediction > optimal_threshold, 1, 0)\n', (3361, 3405), True, 'import numpy as np\n'), ((4144, 4173), 'numpy.array', 'np.array', (['senti_result.scores'], {}), '(senti_result.scores)\n', (4152, 4173), True, 'import numpy as np\n'), ((4286, 4303), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (4295, 4303), True, 'import numpy as np\n')]
|
# encoding: utf-8
import os
import numpy as np
from histolab.slide import Slide
from histolab.tiler import GridTiler, RandomTiler, ScoreTiler
from histolab.scorer import NucleiScorer
from ..fixtures import SVS
from ..util import load_expectation
class DescribeRandomTiler:
def it_locates_tiles_on_the_slide(self, tmpdir):
slide = Slide(SVS.CMU_1_SMALL_REGION, os.path.join(tmpdir, "processed"))
slide.save_scaled_image(10)
random_tiles_extractor = RandomTiler(
tile_size=(512, 512), n_tiles=2, level=0, seed=42, check_tissue=False
)
expectation = load_expectation(
"tiles-location-images/cmu-1-small-region-tiles-location-random",
type_="png",
)
tiles_location_img = random_tiles_extractor.locate_tiles(slide, scale_factor=10)
np.testing.assert_array_almost_equal(
np.asarray(tiles_location_img), expectation
)
class DescribeGridTiler:
def it_locates_tiles_on_the_slide(self, tmpdir):
slide = Slide(SVS.CMU_1_SMALL_REGION, os.path.join(tmpdir, "processed"))
grid_tiles_extractor = GridTiler(
tile_size=(512, 512),
level=0,
check_tissue=False,
)
expectation = load_expectation(
"tiles-location-images/cmu-1-small-region-tiles-location-grid", type_="png"
)
tiles_location_img = grid_tiles_extractor.locate_tiles(slide, scale_factor=10)
np.testing.assert_array_almost_equal(
np.asarray(tiles_location_img), expectation
)
class DescribeScoreTiler:
def it_locates_tiles_on_the_slide(self, tmpdir):
slide = Slide(SVS.CMU_1_SMALL_REGION, os.path.join(tmpdir, "processed"))
scored_tiles_extractor = ScoreTiler(
scorer=NucleiScorer(),
tile_size=(512, 512),
n_tiles=100,
level=0,
check_tissue=False,
)
expectation = load_expectation(
"tiles-location-images/cmu-1-small-region-tiles-location-scored",
type_="png",
)
scored_location_img = scored_tiles_extractor.locate_tiles(
slide, scale_factor=10
)
np.testing.assert_array_almost_equal(
np.asarray(scored_location_img), expectation
)
|
[
"histolab.tiler.RandomTiler",
"numpy.asarray",
"histolab.tiler.GridTiler",
"os.path.join",
"histolab.scorer.NucleiScorer"
] |
[((482, 569), 'histolab.tiler.RandomTiler', 'RandomTiler', ([], {'tile_size': '(512, 512)', 'n_tiles': '(2)', 'level': '(0)', 'seed': '(42)', 'check_tissue': '(False)'}), '(tile_size=(512, 512), n_tiles=2, level=0, seed=42, check_tissue\n =False)\n', (493, 569), False, 'from histolab.tiler import GridTiler, RandomTiler, ScoreTiler\n'), ((1134, 1194), 'histolab.tiler.GridTiler', 'GridTiler', ([], {'tile_size': '(512, 512)', 'level': '(0)', 'check_tissue': '(False)'}), '(tile_size=(512, 512), level=0, check_tissue=False)\n', (1143, 1194), False, 'from histolab.tiler import GridTiler, RandomTiler, ScoreTiler\n'), ((378, 411), 'os.path.join', 'os.path.join', (['tmpdir', '"""processed"""'], {}), "(tmpdir, 'processed')\n", (390, 411), False, 'import os\n'), ((888, 918), 'numpy.asarray', 'np.asarray', (['tiles_location_img'], {}), '(tiles_location_img)\n', (898, 918), True, 'import numpy as np\n'), ((1068, 1101), 'os.path.join', 'os.path.join', (['tmpdir', '"""processed"""'], {}), "(tmpdir, 'processed')\n", (1080, 1101), False, 'import os\n'), ((1526, 1556), 'numpy.asarray', 'np.asarray', (['tiles_location_img'], {}), '(tiles_location_img)\n', (1536, 1556), True, 'import numpy as np\n'), ((1707, 1740), 'os.path.join', 'os.path.join', (['tmpdir', '"""processed"""'], {}), "(tmpdir, 'processed')\n", (1719, 1740), False, 'import os\n'), ((2268, 2299), 'numpy.asarray', 'np.asarray', (['scored_location_img'], {}), '(scored_location_img)\n', (2278, 2299), True, 'import numpy as np\n'), ((1806, 1820), 'histolab.scorer.NucleiScorer', 'NucleiScorer', ([], {}), '()\n', (1818, 1820), False, 'from histolab.scorer import NucleiScorer\n')]
|
import numpy
import cupy
from cupy import core
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
This function uses the first N elements of `vals`, where N is the number
of true values in `mask`.
Args:
arr (cupy.ndarray): Array to put data into.
        mask (array-like): Boolean mask array. Must have the same size as `arr`.
        vals (array-like): Values to put into `arr`. Only the first
            N elements are used, where N is the number of True values in
            `mask`. If `vals` is smaller than N, it will be repeated, and if
            elements of `arr` are to be masked, this sequence must be non-empty.
Examples
--------
    >>> arr = cupy.arange(6).reshape(2, 3)
    >>> cupy.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.place`
"""
# TODO(niboshi): Avoid nonzero which may synchronize the device.
mask = cupy.asarray(mask)
if arr.size != mask.size:
raise ValueError('Mask and data must be the same size.')
vals = cupy.asarray(vals)
mask_indices = mask.ravel().nonzero()[0] # may synchronize
if mask_indices.size == 0:
return
if vals.size == 0:
raise ValueError('Cannot insert from an empty array.')
arr.put(mask_indices, vals, mode='wrap')
def put(a, ind, v, mode='wrap'):
"""Replaces specified elements of an array with given values.
Args:
a (cupy.ndarray): Target array.
ind (array-like): Target indices, interpreted as integers.
v (array-like): Values to place in `a` at target indices.
If `v` is shorter than `ind` it will be repeated as necessary.
mode (str): How out-of-bounds indices will behave. Its value must be
either `'raise'`, `'wrap'` or `'clip'`. Otherwise,
:class:`TypeError` is raised.
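    Examples
    --------
    The indices below are in range, so the default ``'wrap'`` mode gives the same
    result as ``'raise'`` would:
    >>> a = cupy.arange(5)
    >>> cupy.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])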
.. note::
Default `mode` is set to `'wrap'` to avoid unintended performance drop.
If you need NumPy's behavior, please pass `mode='raise'` manually.
.. seealso:: :func:`numpy.put`
"""
a.put(ind, v, mode=mode)
_putmask_kernel = core.ElementwiseKernel(
'Q mask, raw S values, uint64 len_vals', 'T out',
'''
if (mask) out = (T) values[i % len_vals];
''',
'putmask_kernel'
)
def putmask(a, mask, values):
"""
Changes elements of an array inplace, based on a conditional mask and
input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
Args:
a (cupy.ndarray): Target array.
mask (cupy.ndarray): Boolean mask array. It has to be
the same shape as `a`.
values (cupy.ndarray or scalar): Values to put into `a` where `mask`
is True. If `values` is smaller than `a`, then it will be
repeated.
Examples
--------
>>> x = cupy.arange(6).reshape(2, 3)
>>> cupy.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = cupy.arange(6)
>>> cupy.putmask(x, x>2, cupy.array([-33, -44]))
>>> x
array([ 0, 1, 2, -44, -33, -44])
.. seealso:: :func:`numpy.putmask`
"""
if not isinstance(a, cupy.ndarray):
raise TypeError('`a` should be of type cupy.ndarray')
if not isinstance(mask, cupy.ndarray):
raise TypeError('`mask` should be of type cupy.ndarray')
if not (cupy.isscalar(values) or isinstance(values, cupy.ndarray)):
raise TypeError('`values` should be of type cupy.ndarray')
if not a.shape == mask.shape:
raise ValueError('mask and data must be the same size')
mask = mask.astype(numpy.bool_)
if cupy.isscalar(values):
a[mask] = values
elif not numpy.can_cast(values.dtype, a.dtype):
raise TypeError('Cannot cast array data from'
' {} to {} according to the rule \'safe\''
.format(values.dtype, a.dtype))
elif a.shape == values.shape:
a[mask] = values[mask]
else:
values = values.ravel()
_putmask_kernel(mask, values, len(values), a)
def fill_diagonal(a, val, wrap=False):
"""Fills the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Args:
a (cupy.ndarray): The array, at least 2-D.
val (scalar): The value to be written on the diagonal.
Its type must be compatible with that of the array a.
wrap (bool): If specified, the diagonal is "wrapped" after N columns.
This affects only tall matrices.
Examples
--------
>>> a = cupy.zeros((3, 3), int)
>>> cupy.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
.. seealso:: :func:`numpy.fill_diagonal`
"""
# The followings are imported from the original numpy
if a.ndim < 2:
raise ValueError('array must be at least 2-d')
end = None
if a.ndim == 2:
step = a.shape[1] + 1
if not wrap:
end = a.shape[1] * a.shape[1]
else:
if not numpy.alltrue(numpy.diff(a.shape) == 0):
raise ValueError('All dimensions of input must be of equal length')
step = 1 + numpy.cumprod(a.shape[:-1]).sum()
a.flat[:end:step] = val
def diag_indices(n, ndim=2):
"""Return the indices to access the main diagonal of an array.
Returns a tuple of indices that can be used to access the main
diagonal of an array with ``ndim >= 2`` dimensions and shape
(n, n, ..., n).
Args:
n (int): The size, along each dimension of the arrays for which
the indices are to be returned.
ndim (int): The number of dimensions. default `2`.
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = cupy.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = cupy.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Create indices to manipulate a 3-D array:
>>> d3 = cupy.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = cupy.zeros((2, 2, 2), dtype=int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
<BLANKLINE>
[[0, 0],
[0, 1]]])
.. seealso:: :func:`numpy.diag_indices`
"""
idx = cupy.arange(n)
return (idx,) * ndim
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Args:
arr (cupy.ndarray): At least 2-D.
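    Examples
    --------
    Get the indices of the main diagonal of a (4, 4) array:
    >>> a = cupy.arange(16).reshape(4, 4)
    >>> cupy.diag_indices_from(a)
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))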
.. seealso:: :func:`numpy.diag_indices_from`
"""
if not isinstance(arr, cupy.ndarray):
raise TypeError("Argument must be cupy.ndarray")
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not cupy.all(cupy.diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
|
[
"numpy.cumprod",
"cupy.isscalar",
"cupy.asarray",
"cupy.core.ElementwiseKernel",
"numpy.can_cast",
"numpy.diff",
"cupy.arange",
"cupy.diff"
] |
[((2253, 2411), 'cupy.core.ElementwiseKernel', 'core.ElementwiseKernel', (['"""Q mask, raw S values, uint64 len_vals"""', '"""T out"""', '"""\n if (mask) out = (T) values[i % len_vals];\n """', '"""putmask_kernel"""'], {}), '(\'Q mask, raw S values, uint64 len_vals\', \'T out\',\n """\n if (mask) out = (T) values[i % len_vals];\n """, \'putmask_kernel\'\n )\n', (2275, 2411), False, 'from cupy import core\n'), ((1062, 1080), 'cupy.asarray', 'cupy.asarray', (['mask'], {}), '(mask)\n', (1074, 1080), False, 'import cupy\n'), ((1187, 1205), 'cupy.asarray', 'cupy.asarray', (['vals'], {}), '(vals)\n', (1199, 1205), False, 'import cupy\n'), ((3912, 3933), 'cupy.isscalar', 'cupy.isscalar', (['values'], {}), '(values)\n', (3925, 3933), False, 'import cupy\n'), ((7115, 7129), 'cupy.arange', 'cupy.arange', (['n'], {}), '(n)\n', (7126, 7129), False, 'import cupy\n'), ((3641, 3662), 'cupy.isscalar', 'cupy.isscalar', (['values'], {}), '(values)\n', (3654, 3662), False, 'import cupy\n'), ((3974, 4011), 'numpy.can_cast', 'numpy.can_cast', (['values.dtype', 'a.dtype'], {}), '(values.dtype, a.dtype)\n', (3988, 4011), False, 'import numpy\n'), ((7753, 7773), 'cupy.diff', 'cupy.diff', (['arr.shape'], {}), '(arr.shape)\n', (7762, 7773), False, 'import cupy\n'), ((5521, 5540), 'numpy.diff', 'numpy.diff', (['a.shape'], {}), '(a.shape)\n', (5531, 5540), False, 'import numpy\n'), ((5647, 5674), 'numpy.cumprod', 'numpy.cumprod', (['a.shape[:-1]'], {}), '(a.shape[:-1])\n', (5660, 5674), False, 'import numpy\n')]
|
from skimage.segmentation import slic
from skimage.util import img_as_float
from skimage import io
import datetime
from PIL import Image
import numpy as np
imgname="taili"
image = img_as_float(io.imread(imgname+".png"))
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+" Start.")
# Sweep SLIC parameters: number of superpixels, compactness and smoothing sigma
for numSegments in [8000]:#1000,2000,3000,4000,5000,6000,7000,9000,10000
    for cp in [5]:#3,4,6,2
        for sig in [6]:#2,4,6,
            segments = slic(image, n_segments = numSegments, sigma = sig,compactness=cp)
            # Save the SLIC label image; note that casting to uint8 wraps label values above 255,
            # so with 8000 segments the saved labels are not unique.
            img=Image.fromarray(np.array(segments, np.uint8))
            img.save(imgname+"_%d seg_" % (numSegments)+str(cp)+"_comp"+"_%d_sigma.png" % (sig) , "png")
print(imgname+"_%d bodr" % (numSegments)+str(cp)+"_comp"+"_%d_sigma.png " % (sig)+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S ')+" Output over.")
|
[
"skimage.segmentation.slic",
"datetime.datetime.now",
"numpy.array",
"skimage.io.imread"
] |
[((202, 229), 'skimage.io.imread', 'io.imread', (["(imgname + '.png')"], {}), "(imgname + '.png')\n", (211, 229), False, 'from skimage import io\n'), ((461, 523), 'skimage.segmentation.slic', 'slic', (['image'], {'n_segments': 'numSegments', 'sigma': 'sig', 'compactness': 'cp'}), '(image, n_segments=numSegments, sigma=sig, compactness=cp)\n', (465, 523), False, 'from skimage.segmentation import slic\n'), ((236, 259), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (257, 259), False, 'import datetime\n'), ((562, 590), 'numpy.array', 'np.array', (['segments', 'np.uint8'], {}), '(segments, np.uint8)\n', (570, 590), True, 'import numpy as np\n'), ((795, 818), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (816, 818), False, 'import datetime\n')]
|
# coding: utf-8
import copy
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from flearn.common.strategy import AVG
from flearn.common.trainer import Trainer
class AVGTrainer(Trainer):
def __init__(self, model, optimizer, criterion, device, display=True):
super().__init__(model, optimizer, criterion, device, display)
        # In the original code, are the gradients the values minus the pre-trained weights?
self.model_o = copy.deepcopy(self.model.state_dict())
self.mse_criterion = nn.MSELoss()
self.kl_criterion = nn.KLDivLoss()
self.temp = 1
def forward(self, data, target):
data, target = data.to(self.device), target.to(self.device)
# _, _, output = self.model(data)
# (th, sh), (tx, sx), (ty, sy) = self.model(data)
(th_lst, sh_lst), (ty, sy) = self.model(data)
loss_ce = self.criterion(ty, target) + self.criterion(sy, target)
# loss_mse = 0.0
loss_mse = (
self.mse_criterion(th_lst[-1], sh_lst[-1]) / loss_ce
+ self.mse_criterion(th_lst[-2], sh_lst[-2]) / loss_ce
)
def ts_kl_f(a, b):
a_log_soft = F.log_softmax(a / self.temp, dim=1)
b_soft = F.softmax(b / self.temp, dim=1)
return self.kl_criterion(a_log_soft, b_soft)
loss_kl = ts_kl_f(ty, sy) / loss_ce + ts_kl_f(sy, ty) / loss_ce
loss = loss_ce + loss_kl + loss_mse
        # accuracy is computed on the teacher output
output = ty
iter_acc = self.metrics(output, target)
return output, loss, iter_acc
# def train(self, data_loader, epochs=1):
# self.model_o = copy.deepcopy(self.model.state_dict())
# return super().train(data_loader, epochs)
class FedKD(AVG):
"""
客户端两个模型的 Loss
仅上传学生模型(小模型)的参数,且用SVD后的参数进行传输
[1]
学生模型和教师模型分别在model中实现
"""
# def client(self, trainer, agg_weight=1):
# w_shared = {"agg_weight": agg_weight}
# w_local = trainer.weight
# w_shared["params"] = {
# k: v.cpu() for k, v in w_local.items() if "teacher" not in k
# }
# return w_shared
# https://github.com/wuch15/FedKD/blob/main/run.py
def client(self, trainer, agg_weight=1):
        # The SVD rank k / energy threshold changes with the communication round, e.g.:
# energy = 0.95+((1+comm_round)/10)*(0.98-0.95)
self.energy = 1 # init_value
w_shared = {"agg_weight": agg_weight}
# w_local = trainer.weight
w_local = trainer.grads
w_shared["params"] = {}
for key, value in w_local.items():
conv_flag = False
params_mat = value.cpu().numpy()
w_shared["params"][key] = params_mat
if "bias" not in key and len(params_mat.shape) > 1:
                # convolutional layer
if len(params_mat.shape) == 4:
conv_flag = True
c, k, h, w = params_mat.shape
params_mat = params_mat.reshape(c * k, h * w)
U, Sigma, VT = np.linalg.svd(params_mat, full_matrices=False)
threshold = 0
sigma_square_sum = np.sum(np.square(Sigma))
if sigma_square_sum != 0:
for singular_value_num in range(len(Sigma)):
if (
np.sum(np.square(Sigma[:singular_value_num]))
> self.energy * sigma_square_sum
):
threshold = singular_value_num
break
U = U[:, :threshold]
Sigma = Sigma[:threshold]
VT = VT[:threshold, :]
                # The original code does this dot product on the server, but that increases the
                # communication cost (U, Sigma and V would all have to be transmitted), so it is done locally here instead.
# con_restruct1 = np.dot(np.dot(U, np.diag(Sigma)), VT)
w_shared["params"][key] = np.dot(np.dot(U, np.diag(Sigma)), VT)
if conv_flag:
w_shared["params"][key] = w_shared["params"][key].reshape(
c, k, h, w
)
return w_shared
def server_ensemble(self, agg_weight_lst, w_local_lst, key_lst=None):
        if key_lst is None:
all_local_key_lst = [set(w_local.keys()) for w_local in w_local_lst]
key_lst = reduce(lambda x, y: x & y, all_local_key_lst)
# sum up weights
w_glob = {k: agg_weight_lst[0] * w_local_lst[0][k] for k in key_lst}
for agg_weight, w_local in zip(agg_weight_lst[1:], w_local_lst[1:]):
for k in key_lst:
w_glob[k] += agg_weight * w_local[k]
molecular = np.sum(agg_weight_lst)
for k in w_glob.keys():
w_glob[k] = np.divide(w_glob[k], molecular)
return w_glob
def client_revice(self, trainer, data_glob_d):
w_local = trainer.weight_o
w_glob = data_glob_d["w_glob"]
for key, value in w_glob.items():
real_params_value = value
conv_flag = False
            # Similarly, the server-side version would also need the dot product plus another SVD after averaging; this is done locally here instead.
if "bias" not in key and len(value.shape) > 1:
                # convolutional layer
if len(value.shape) == 4:
conv_flag = True
c, k, h, w = value.shape
params_mat = value.reshape(c * k, h * w)
else:
params_mat = value
U, Sigma, VT = np.linalg.svd(params_mat, full_matrices=False)
sigma_square_sum = np.sum(np.square(Sigma))
if sigma_square_sum != 0:
threshold = 0
for singular_value_num in range(len(Sigma)):
if np.sum(
np.square(Sigma[:singular_value_num])
) >= self.energy * np.sum(np.square(Sigma)):
threshold = singular_value_num
break
U = U[:, :threshold]
Sigma = Sigma[:threshold]
VT = VT[:threshold, :]
# t_lst = [u, sigma, v]
real_params_value = np.dot(np.dot(U, np.diag(Sigma)), VT)
if conv_flag:
real_params_value = real_params_value.reshape(c, k, h, w)
w_local[key] = w_local[key] + torch.FloatTensor(real_params_value)
return w_local
if __name__ == "__main__":
from model import ModelFedCon
model_base = ModelFedCon("simple-cnn", out_dim=256, n_classes=10)
d = model_base.state_dict()
conv_m = d["features.conv1.weight"].numpy()
fc_m = d["l1.weight"].numpy()
u, s, v = np.linalg.svd(fc_m, full_matrices=False)
t1_r = np.dot(np.dot(u, np.diag(s)), v)
t1_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t1_r))
print(t1_dist)
t2_r = np.dot(u, np.diag(s), v)
t2_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t2_r))
print(t2_dist)
t3_r = np.matmul(u, np.diag(s), v)
t3_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t3_r))
print(t3_dist)
# u, s, v = np.linalg.svd(conv_m, full_matrices=False)
U, Sigma, VT = np.linalg.svd(
np.reshape(
conv_m,
(
conv_m.shape[0] * conv_m.shape[1],
conv_m.shape[2] * conv_m.shape[3],
),
),
full_matrices=False,
)
con_restruct1 = np.dot(np.dot(U, np.diag(Sigma)), VT)
t4_r = np.reshape(
con_restruct1,
(conv_m.shape[0], conv_m.shape[1], conv_m.shape[2], conv_m.shape[3]),
)
# t4_r = np.dot(np.dot(u, s[:, None, :]), v)
t4_dist = torch.dist(torch.tensor(conv_m), torch.tensor(t4_r))
print(t4_dist)
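    # A hedged standalone sketch (not part of the original file) of the energy-based
    # rank selection that FedKD.client performs before transmitting parameters:
    # keep the smallest k whose leading singular values carry at least `energy`
    # of the squared spectrum (the exact cut-off rule in client() differs slightly)
    def svd_energy_truncate(mat, energy=0.95):
        U, Sigma, VT = np.linalg.svd(mat, full_matrices=False)
        cum_energy = np.cumsum(np.square(Sigma))
        k = int(np.searchsorted(cum_energy, energy * cum_energy[-1])) + 1
        return np.dot(U[:, :k] * Sigma[:k], VT[:k, :])
    t5_r = svd_energy_truncate(fc_m, energy=0.95)
    t5_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t5_r))
    print(t5_dist)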
|
[
"numpy.diag",
"numpy.divide",
"torch.nn.MSELoss",
"numpy.sum",
"torch.nn.KLDivLoss",
"numpy.square",
"torch.FloatTensor",
"torch.nn.functional.softmax",
"numpy.linalg.svd",
"numpy.reshape",
"torch.nn.functional.log_softmax",
"functools.reduce",
"model.ModelFedCon",
"torch.tensor"
] |
[((6475, 6527), 'model.ModelFedCon', 'ModelFedCon', (['"""simple-cnn"""'], {'out_dim': '(256)', 'n_classes': '(10)'}), "('simple-cnn', out_dim=256, n_classes=10)\n", (6486, 6527), False, 'from model import ModelFedCon\n'), ((6657, 6697), 'numpy.linalg.svd', 'np.linalg.svd', (['fc_m'], {'full_matrices': '(False)'}), '(fc_m, full_matrices=False)\n', (6670, 6697), True, 'import numpy as np\n'), ((7452, 7552), 'numpy.reshape', 'np.reshape', (['con_restruct1', '(conv_m.shape[0], conv_m.shape[1], conv_m.shape[2], conv_m.shape[3])'], {}), '(con_restruct1, (conv_m.shape[0], conv_m.shape[1], conv_m.shape[2\n ], conv_m.shape[3]))\n', (7462, 7552), True, 'import numpy as np\n'), ((523, 535), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (533, 535), True, 'import torch.nn as nn\n'), ((564, 578), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {}), '()\n', (576, 578), True, 'import torch.nn as nn\n'), ((4624, 4646), 'numpy.sum', 'np.sum', (['agg_weight_lst'], {}), '(agg_weight_lst)\n', (4630, 4646), True, 'import numpy as np\n'), ((6768, 6786), 'torch.tensor', 'torch.tensor', (['fc_m'], {}), '(fc_m)\n', (6780, 6786), False, 'import torch\n'), ((6788, 6806), 'torch.tensor', 'torch.tensor', (['t1_r'], {}), '(t1_r)\n', (6800, 6806), False, 'import torch\n'), ((6849, 6859), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6856, 6859), True, 'import numpy as np\n'), ((6889, 6907), 'torch.tensor', 'torch.tensor', (['fc_m'], {}), '(fc_m)\n', (6901, 6907), False, 'import torch\n'), ((6909, 6927), 'torch.tensor', 'torch.tensor', (['t2_r'], {}), '(t2_r)\n', (6921, 6927), False, 'import torch\n'), ((6973, 6983), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6980, 6983), True, 'import numpy as np\n'), ((7013, 7031), 'torch.tensor', 'torch.tensor', (['fc_m'], {}), '(fc_m)\n', (7025, 7031), False, 'import torch\n'), ((7033, 7051), 'torch.tensor', 'torch.tensor', (['t3_r'], {}), '(t3_r)\n', (7045, 7051), False, 'import torch\n'), ((7174, 7268), 'numpy.reshape', 'np.reshape', (['conv_m', '(conv_m.shape[0] * conv_m.shape[1], conv_m.shape[2] * conv_m.shape[3])'], {}), '(conv_m, (conv_m.shape[0] * conv_m.shape[1], conv_m.shape[2] *\n conv_m.shape[3]))\n', (7184, 7268), True, 'import numpy as np\n'), ((7645, 7665), 'torch.tensor', 'torch.tensor', (['conv_m'], {}), '(conv_m)\n', (7657, 7665), False, 'import torch\n'), ((7667, 7685), 'torch.tensor', 'torch.tensor', (['t4_r'], {}), '(t4_r)\n', (7679, 7685), False, 'import torch\n'), ((1178, 1213), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['(a / self.temp)'], {'dim': '(1)'}), '(a / self.temp, dim=1)\n', (1191, 1213), True, 'import torch.nn.functional as F\n'), ((1235, 1266), 'torch.nn.functional.softmax', 'F.softmax', (['(b / self.temp)'], {'dim': '(1)'}), '(b / self.temp, dim=1)\n', (1244, 1266), True, 'import torch.nn.functional as F\n'), ((4296, 4341), 'functools.reduce', 'reduce', (['(lambda x, y: x & y)', 'all_local_key_lst'], {}), '(lambda x, y: x & y, all_local_key_lst)\n', (4302, 4341), False, 'from functools import reduce\n'), ((4703, 4734), 'numpy.divide', 'np.divide', (['w_glob[k]', 'molecular'], {}), '(w_glob[k], molecular)\n', (4712, 4734), True, 'import numpy as np\n'), ((6727, 6737), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6734, 6737), True, 'import numpy as np\n'), ((7420, 7434), 'numpy.diag', 'np.diag', (['Sigma'], {}), '(Sigma)\n', (7427, 7434), True, 'import numpy as np\n'), ((2990, 3036), 'numpy.linalg.svd', 'np.linalg.svd', (['params_mat'], {'full_matrices': '(False)'}), '(params_mat, full_matrices=False)\n', (3003, 3036), True, 
'import numpy as np\n'), ((5411, 5457), 'numpy.linalg.svd', 'np.linalg.svd', (['params_mat'], {'full_matrices': '(False)'}), '(params_mat, full_matrices=False)\n', (5424, 5457), True, 'import numpy as np\n'), ((6334, 6370), 'torch.FloatTensor', 'torch.FloatTensor', (['real_params_value'], {}), '(real_params_value)\n', (6351, 6370), False, 'import torch\n'), ((3110, 3126), 'numpy.square', 'np.square', (['Sigma'], {}), '(Sigma)\n', (3119, 3126), True, 'import numpy as np\n'), ((5500, 5516), 'numpy.square', 'np.square', (['Sigma'], {}), '(Sigma)\n', (5509, 5516), True, 'import numpy as np\n'), ((3863, 3877), 'numpy.diag', 'np.diag', (['Sigma'], {}), '(Sigma)\n', (3870, 3877), True, 'import numpy as np\n'), ((6154, 6168), 'numpy.diag', 'np.diag', (['Sigma'], {}), '(Sigma)\n', (6161, 6168), True, 'import numpy as np\n'), ((3299, 3336), 'numpy.square', 'np.square', (['Sigma[:singular_value_num]'], {}), '(Sigma[:singular_value_num])\n', (3308, 3336), True, 'import numpy as np\n'), ((5723, 5760), 'numpy.square', 'np.square', (['Sigma[:singular_value_num]'], {}), '(Sigma[:singular_value_num])\n', (5732, 5760), True, 'import numpy as np\n'), ((5811, 5827), 'numpy.square', 'np.square', (['Sigma'], {}), '(Sigma)\n', (5820, 5827), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#%% Load NumPy
import numpy as np
# Load SciPy's stats module
import scipy.stats as st
# Load pandas
import pandas as pd
# Load PyMC
import pymc3 as pm
# Load Matplotlib's Pyplot module
import matplotlib.pyplot as plt
# Load the progress-bar function from tqdm
from tqdm import trange
# Set up a Japanese font
from matplotlib.font_manager import FontProperties
import sys
if sys.platform.startswith('win'):
FontPath = 'C:\\Windows\\Fonts\\meiryo.ttc'
elif sys.platform.startswith('darwin'):
FontPath = '/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc'
elif sys.platform.startswith('linux'):
FontPath = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'
else:
print('このPythonコードが対応していないOSを使用しています.')
sys.exit()
jpfont = FontProperties(fname=FontPath)
#%% Bayesian inference on the mean and variance of a normal distribution with a Gibbs sampler
# Gibbs sampler for the mean and variance of a normal distribution
def gibbs_gaussian(data, iterations, mu0, tau0, nu0, lam0):
    """
    Inputs
    data: data
    iterations: number of iterations
    mu0: mean of the prior (normal) distribution on the mean
    tau0: standard deviation of the prior (normal) distribution on the mean
    nu0: shape parameter of the prior (inverse gamma) distribution on the variance
    lam0: scale parameter of the prior (inverse gamma) distribution on the variance
    Output
    runs: Monte Carlo sample
    """
n = data.size
sum_data = data.sum()
mean_data = sum_data / n
variance_data = data.var()
inv_tau02 = 1.0 / tau0**2
mu0_tau02 = mu0 * inv_tau02
a = 0.5 * (n + nu0)
c = n * variance_data + lam0
sigma2 = variance_data
runs = np.empty((iterations, 2))
for idx in trange(iterations):
variance_mu = 1.0 / (n / sigma2 + inv_tau02)
mean_mu = variance_mu * (sum_data / sigma2 + mu0_tau02)
mu = st.norm.rvs(loc=mean_mu, scale=np.sqrt(variance_mu))
b = 0.5 * (n * (mu - mean_data)**2 + c)
sigma2 = st.invgamma.rvs(a, scale=b)
runs[idx, 0] = mu
runs[idx, 1] = sigma2
return runs
# Compute posterior statistics from the Monte Carlo sample
def mcmc_stats(runs, burnin, prob, batch):
    """
    Inputs
    runs: Monte Carlo sample
    burnin: number of burn-in draws
    prob: interval probability (0 < prob < 1)
    batch: number of batches the chain is split into
    Output
    data frame of posterior statistics
    """
traces = runs[burnin:, :]
n = traces.shape[0] // batch
k = traces.shape[1]
alpha = 100 * (1.0 - prob)
post_mean = np.mean(traces, axis=0)
post_median = np.median(traces, axis=0)
post_sd = np.std(traces, axis=0)
mc_err = [pm.mcse(traces[:, i].reshape((n, batch), order='F')).item(0) \
for i in range(k)]
ci_lower = np.percentile(traces, 0.5 * alpha, axis=0)
ci_upper = np.percentile(traces, 100 - 0.5 * alpha, axis=0)
hpdi = pm.hpd(traces, 1.0 - prob)
rhat = [pm.rhat(traces[:, i].reshape((n, batch), order='F')).item(0) \
for i in range(k)]
stats = np.vstack((post_mean, post_median, post_sd, mc_err,
ci_lower, ci_upper, hpdi.T, rhat)).T
stats_string = ['平均', '中央値', '標準偏差', '近似誤差',
'信用区間(下限)', '信用区間(上限)',
'HPDI(下限)', 'HPDI(上限)', '$\\hat R$']
param_string = ['平均 $\\mu$', '分散 $\\sigma^2$']
return pd.DataFrame(stats, index=param_string, columns=stats_string)
#%% Generate data from a normal distribution
mu = 1.0
sigma = 2.0
n = 50
np.random.seed(99)
data = st.norm.rvs(loc=mu, scale=sigma, size=n)
#%% Run the Gibbs sampler
mu0 = 0.0
tau0 = 1.0
nu0 = 5.0
lam0 = 7.0
prob = 0.95
burnin = 2000
samplesize = 20000
iterations = burnin + samplesize
np.random.seed(123)
runs = gibbs_gaussian(data, iterations, mu0, tau0, nu0, lam0)
#%% Compute posterior statistics
batch = 4
results = mcmc_stats(runs, burnin, prob, batch)
print(results.to_string(float_format='{:,.4f}'.format))
#%% Plot the posterior distributions
fig, ax = plt.subplots(2, 2, num=1, figsize=(8, 3), facecolor='w')
labels = ['$\\mu$', '$\\sigma^2$']
for index in range(2):
mc_trace = runs[burnin:, index]
if index == 0:
x_min = mc_trace.min() - 0.2 * np.abs(mc_trace.min())
x_max = mc_trace.max() + 0.2 * np.abs(mc_trace.max())
x = np.linspace(x_min, x_max, 250)
prior = st.norm.pdf(x, loc=mu0, scale=tau0)
else:
x_min = 0.0
x_max = mc_trace.max() + 0.2 * np.abs(mc_trace.max())
x = np.linspace(x_min, x_max, 250)
prior = st.invgamma.pdf(x, 0.5*nu0, scale=0.5*lam0)
ax[index, 0].set_xlabel('乱数系列', fontproperties=jpfont)
ax[index, 1].set_xlabel('周辺事後分布', fontproperties=jpfont)
posterior = st.gaussian_kde(mc_trace).evaluate(x)
ax[index, 0].plot(mc_trace, 'k-', linewidth=0.1)
ax[index, 0].set_xlim(1, samplesize)
ax[index, 0].set_ylabel(labels[index], fontproperties=jpfont)
ax[index, 1].plot(x, posterior, 'k-', label='事後分布')
ax[index, 1].plot(x, prior, 'k:', label='事前分布')
ax[index, 1].set_xlim(x_min, x_max)
ax[index, 1].set_ylim(0, 1.1*posterior.max())
ax[index, 1].set_ylabel('確率密度', fontproperties=jpfont)
ax[index, 1].legend(loc='best', frameon=False, prop=jpfont)
plt.tight_layout()
plt.savefig('pybayes_fig_gibbs_gaussian.png', dpi=300)
plt.show()
|
[
"sys.platform.startswith",
"numpy.random.seed",
"scipy.stats.norm.rvs",
"numpy.empty",
"scipy.stats.invgamma.rvs",
"numpy.mean",
"scipy.stats.invgamma.pdf",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.font_manager.FontProperties",
"numpy.std",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"tqdm.trange",
"numpy.median",
"scipy.stats.gaussian_kde",
"numpy.percentile",
"pymc3.hpd",
"numpy.vstack",
"sys.exit",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((372, 402), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (395, 402), False, 'import sys\n'), ((734, 764), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'fname': 'FontPath'}), '(fname=FontPath)\n', (748, 764), False, 'from matplotlib.font_manager import FontProperties\n'), ((3132, 3150), 'numpy.random.seed', 'np.random.seed', (['(99)'], {}), '(99)\n', (3146, 3150), True, 'import numpy as np\n'), ((3158, 3198), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'loc': 'mu', 'scale': 'sigma', 'size': 'n'}), '(loc=mu, scale=sigma, size=n)\n', (3169, 3198), True, 'import scipy.stats as st\n'), ((3336, 3355), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3350, 3355), True, 'import numpy as np\n'), ((3571, 3627), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'num': '(1)', 'figsize': '(8, 3)', 'facecolor': '"""w"""'}), "(2, 2, num=1, figsize=(8, 3), facecolor='w')\n", (3583, 3627), True, 'import matplotlib.pyplot as plt\n'), ((4818, 4836), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4834, 4836), True, 'import matplotlib.pyplot as plt\n'), ((4837, 4891), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pybayes_fig_gibbs_gaussian.png"""'], {'dpi': '(300)'}), "('pybayes_fig_gibbs_gaussian.png', dpi=300)\n", (4848, 4891), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4900, 4902), True, 'import matplotlib.pyplot as plt\n'), ((457, 490), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (480, 490), False, 'import sys\n'), ((1427, 1452), 'numpy.empty', 'np.empty', (['(iterations, 2)'], {}), '((iterations, 2))\n', (1435, 1452), True, 'import numpy as np\n'), ((1468, 1486), 'tqdm.trange', 'trange', (['iterations'], {}), '(iterations)\n', (1474, 1486), False, 'from tqdm import trange\n'), ((2208, 2231), 'numpy.mean', 'np.mean', (['traces'], {'axis': '(0)'}), '(traces, axis=0)\n', (2215, 2231), True, 'import numpy as np\n'), ((2250, 2275), 'numpy.median', 'np.median', (['traces'], {'axis': '(0)'}), '(traces, axis=0)\n', (2259, 2275), True, 'import numpy as np\n'), ((2290, 2312), 'numpy.std', 'np.std', (['traces'], {'axis': '(0)'}), '(traces, axis=0)\n', (2296, 2312), True, 'import numpy as np\n'), ((2438, 2480), 'numpy.percentile', 'np.percentile', (['traces', '(0.5 * alpha)'], {'axis': '(0)'}), '(traces, 0.5 * alpha, axis=0)\n', (2451, 2480), True, 'import numpy as np\n'), ((2496, 2544), 'numpy.percentile', 'np.percentile', (['traces', '(100 - 0.5 * alpha)'], {'axis': '(0)'}), '(traces, 100 - 0.5 * alpha, axis=0)\n', (2509, 2544), True, 'import numpy as np\n'), ((2556, 2582), 'pymc3.hpd', 'pm.hpd', (['traces', '(1.0 - prob)'], {}), '(traces, 1.0 - prob)\n', (2562, 2582), True, 'import pymc3 as pm\n'), ((3025, 3086), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {'index': 'param_string', 'columns': 'stats_string'}), '(stats, index=param_string, columns=stats_string)\n', (3037, 3086), True, 'import pandas as pd\n'), ((553, 585), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (576, 585), False, 'import sys\n'), ((1736, 1763), 'scipy.stats.invgamma.rvs', 'st.invgamma.rvs', (['a'], {'scale': 'b'}), '(a, scale=b)\n', (1751, 1763), True, 'import scipy.stats as st\n'), ((2701, 2791), 'numpy.vstack', 'np.vstack', (['(post_mean, post_median, post_sd, mc_err, ci_lower, ci_upper, hpdi.T, rhat)'], {}), '((post_mean, post_median, 
post_sd, mc_err, ci_lower, ci_upper,\n hpdi.T, rhat))\n', (2710, 2791), True, 'import numpy as np\n'), ((3877, 3907), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(250)'], {}), '(x_min, x_max, 250)\n', (3888, 3907), True, 'import numpy as np\n'), ((3924, 3959), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['x'], {'loc': 'mu0', 'scale': 'tau0'}), '(x, loc=mu0, scale=tau0)\n', (3935, 3959), True, 'import scipy.stats as st\n'), ((4064, 4094), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(250)'], {}), '(x_min, x_max, 250)\n', (4075, 4094), True, 'import numpy as np\n'), ((4111, 4158), 'scipy.stats.invgamma.pdf', 'st.invgamma.pdf', (['x', '(0.5 * nu0)'], {'scale': '(0.5 * lam0)'}), '(x, 0.5 * nu0, scale=0.5 * lam0)\n', (4126, 4158), True, 'import scipy.stats as st\n'), ((714, 724), 'sys.exit', 'sys.exit', ([], {}), '()\n', (722, 724), False, 'import sys\n'), ((4299, 4324), 'scipy.stats.gaussian_kde', 'st.gaussian_kde', (['mc_trace'], {}), '(mc_trace)\n', (4314, 4324), True, 'import scipy.stats as st\n'), ((1649, 1669), 'numpy.sqrt', 'np.sqrt', (['variance_mu'], {}), '(variance_mu)\n', (1656, 1669), True, 'import numpy as np\n')]
|
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
from scipy.stats import rv_discrete
import numpy as np
__all__ = ['nonuniform', 'gibbs']
# noinspection PyMethodOverriding,PyPep8Naming
class nonuniform_gen(rv_discrete):
"""A nonuniform discrete random variable.
%(before_notes)s
%(example)s
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) <NAME> and <NAME>
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def _argcheck(self, a):
self.a = a
return abs(a.sum() - 1) <= np.finfo(np.float16).eps
def _pmf(self, x, a):
# port discreteLogprob
raise NotImplementedError
def _ppf(self, q, a):
raise NotImplementedError
def _stats(self, a):
raise NotImplementedError
# noinspection PyArgumentList
def _rvs(self, a):
r = np.random.rand()
if self._size is not None:
r = np.random.rand(self._size)
s = np.zeros(self._size, dtype=np.int32)
cum_prob = np.cumsum(a.ravel())
if self._size is None:
cum_prob2 = cum_prob[0:-1]
s = np.sum(r > cum_prob2)
else:
n = a.size
if n < self._size:
for i in range(n - 1):
s += r > cum_prob[i]
else:
cum_prob2 = cum_prob[0:-1]
for i in range(self._size):
# noinspection PyTypeChecker
s[i] = np.sum(r[i] > cum_prob2)
return s
nonuniform = nonuniform_gen(name='nonuniform', longname='A discrete non-uniform '
'(random integer)')
# noinspection PyMethodOverriding,PyPep8Naming
class gibbs_gen(rv_discrete):
"""A Gibbs distributed discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `gibbs` is::
                        exp(a/t)
        gibbs.pmf(x) = -------------
                       sum(exp(a/t))
%(example)s
"""
def _argcheck(self, t):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct
and 0's where they are not.
"""
return t >= 0
def _nonzero(self, k, t):
return k == k
def _pmf(self, a, t):
values = np.exp(a / t)
# noinspection PyTypeChecker
if np.any(t <= np.finfo(np.float16).eps):
max_value = max(a)
values = np.asarray([val if val == max_value else 0. for val in a])
return values / np.sum(values)
def _ppf(self, a, t):
raise NotImplementedError
def _stats(self, t):
raise NotImplementedError
gibbs = gibbs_gen(name='gibbs', longname='Gibbs distribution '
'(random integer)')
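if __name__ == '__main__':
    # Minimal sketch (not part of the original module): evaluate the pmf documented for
    # `gibbs` above, exp(a/t) / sum(exp(a/t)), directly with numpy. The energies `a` and
    # the temperature `t` below are made-up example values.
    a = np.array([1.0, 2.0, 3.0])
    t = 0.5
    probs = np.exp(a / t) / np.sum(np.exp(a / t))
    print(probs)  # larger entries of `a` get exponentially more mass; smaller `t` sharpens it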
|
[
"numpy.sum",
"six.moves.range",
"numpy.asarray",
"numpy.zeros",
"numpy.finfo",
"numpy.exp",
"numpy.random.rand"
] |
[((1104, 1120), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1118, 1120), True, 'import numpy as np\n'), ((1211, 1247), 'numpy.zeros', 'np.zeros', (['self._size'], {'dtype': 'np.int32'}), '(self._size, dtype=np.int32)\n', (1219, 1247), True, 'import numpy as np\n'), ((2589, 2602), 'numpy.exp', 'np.exp', (['(a / t)'], {}), '(a / t)\n', (2595, 2602), True, 'import numpy as np\n'), ((1172, 1198), 'numpy.random.rand', 'np.random.rand', (['self._size'], {}), '(self._size)\n', (1186, 1198), True, 'import numpy as np\n'), ((1376, 1397), 'numpy.sum', 'np.sum', (['(r > cum_prob2)'], {}), '(r > cum_prob2)\n', (1382, 1397), True, 'import numpy as np\n'), ((2743, 2804), 'numpy.asarray', 'np.asarray', (['[(val if val == max_value else 0.0) for val in a]'], {}), '([(val if val == max_value else 0.0) for val in a])\n', (2753, 2804), True, 'import numpy as np\n'), ((2826, 2840), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2832, 2840), True, 'import numpy as np\n'), ((796, 816), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (804, 816), True, 'import numpy as np\n'), ((1491, 1503), 'six.moves.range', 'range', (['(n - 1)'], {}), '(n - 1)\n', (1496, 1503), False, 'from six.moves import range\n'), ((1632, 1649), 'six.moves.range', 'range', (['self._size'], {}), '(self._size)\n', (1637, 1649), False, 'from six.moves import range\n'), ((1727, 1751), 'numpy.sum', 'np.sum', (['(r[i] > cum_prob2)'], {}), '(r[i] > cum_prob2)\n', (1733, 1751), True, 'import numpy as np\n'), ((2664, 2684), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (2672, 2684), True, 'import numpy as np\n')]
|
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
def calc_mean_std(x):
return (np.mean(x), np.std(x, ddof=1) / np.sqrt(len(x)))
def color_func(p):
if p > 0.2:
return 'dodgerblue'
elif p < 0.05:
return 'orange'
else:
return 'seagreen'
def match_i_Huskies_passing_table(filename, match_i):
'''
    Passing table for Huskies players in the i-th match
    Return: {playername: [origin_count, destination_count]}
'''
passing = pd.read_csv(filename)
player_dic = {}
for i in range(len(passing)):
if passing['MatchID'][i] == match_i:
if passing['TeamID'][i] == 'Huskies':
if passing['OriginPlayerID'][i] not in player_dic:
player_dic[passing['OriginPlayerID'][i]] = [1, 0]
else:
player_dic[passing['OriginPlayerID'][i]][0] += 1
if passing['DestinationPlayerID'][i] not in player_dic:
player_dic[passing['DestinationPlayerID'][i]] = [0, 1]
else:
player_dic[passing['DestinationPlayerID'][i]][1] += 1
return player_dic
def match_i_passing_table(filename, team_id, match_i):
'''
    Passing table for players of the given team in the i-th match (or all matches if match_i == 'all')
    Return: {playername: [origin_count, destination_count]}
'''
passing = pd.read_csv(filename)
player_dic = {}
if match_i == 'all':
for i in range(len(passing)):
if passing['TeamID'][i] == team_id:
if passing['OriginPlayerID'][i] not in player_dic:
player_dic[passing['OriginPlayerID'][i]] = [1, 0]
else:
player_dic[passing['OriginPlayerID'][i]][0] += 1
if passing['DestinationPlayerID'][i] not in player_dic:
player_dic[passing['DestinationPlayerID'][i]] = [0, 1]
else:
player_dic[passing['DestinationPlayerID'][i]][1] += 1
else:
for i in range(len(passing)):
if passing['MatchID'][i] == match_i:
if passing['TeamID'][i] == team_id:
if passing['OriginPlayerID'][i] not in player_dic:
player_dic[passing['OriginPlayerID'][i]] = [1, 0]
else:
player_dic[passing['OriginPlayerID'][i]][0] += 1
if passing['DestinationPlayerID'][i] not in player_dic:
player_dic[passing['DestinationPlayerID'][i]] = [0, 1]
else:
player_dic[passing['DestinationPlayerID'][i]][1] += 1
return player_dic
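if __name__ == '__main__':
    # Minimal usage sketch (not in the original script). The CSV path and the team id
    # 'Opponent1' are hypothetical; the file is assumed to contain MatchID, TeamID,
    # OriginPlayerID and DestinationPlayerID columns, as the functions above expect.
    passing_csv = 'passingevents.csv'  # hypothetical path
    huskies_match1 = match_i_Huskies_passing_table(passing_csv, 1)
    opponent_all = match_i_passing_table(passing_csv, 'Opponent1', 'all')
    print(len(huskies_match1), 'Huskies players;', len(opponent_all), 'Opponent1 players')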
|
[
"pandas.read_csv",
"numpy.mean",
"numpy.std",
"warnings.filterwarnings"
] |
[((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((553, 574), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (564, 574), True, 'import pandas as pd\n'), ((1402, 1423), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1413, 1423), True, 'import pandas as pd\n'), ((178, 188), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (185, 188), True, 'import numpy as np\n'), ((190, 207), 'numpy.std', 'np.std', (['x'], {'ddof': '(1)'}), '(x, ddof=1)\n', (196, 207), True, 'import numpy as np\n')]
|
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.analysis.model.interface import (
TxnColumns as TC,
MarketDataColumns as MC,
PlColumns as PL,
TxnPlColumns as TPL,
)
from tia.analysis.perf import periods_in_year, guess_freq
from tia.util.decorator import lazy_property
from tia.util.fmt import new_dynamic_formatter
__all__ = ["ProfitAndLoss"]
def _dly_to_ltd(frame, dly_cols):
frame = frame.copy()
ilocs = [frame.columns.get_loc(_) for _ in dly_cols]
sums = frame[dly_cols].cumsum()
# BUG when copying a single row, oddly
if len(frame.index) == 1:
frame.iloc[0, ilocs] = sums.iloc[0, list(range(len(dly_cols)))]
else:
frame.iloc[:, ilocs] = sums.iloc[:, list(range(len(dly_cols)))]
return frame
def _ltd_to_dly(frame, ltd_cols):
pl = frame.copy()
ilocs = [frame.columns.get_loc(_) for _ in ltd_cols]
diff = frame[ltd_cols].diff()
# not sure why this is failing
# pl.iloc[1:, ilocs] = diff.iloc[1:]
for i, cidx in enumerate(ilocs):
pl.iloc[1:, cidx] = diff.iloc[1:, i]
return pl
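# Illustrative round trip (not part of the library): with a toy frame the two helpers
# above invert each other, e.g.
#   >>> f = pd.DataFrame({'pl': [1.0, -2.0, 3.0]})
#   >>> _dly_to_ltd(f, ['pl'])['pl'].tolist()
#   [1.0, -1.0, 2.0]
#   >>> _ltd_to_dly(_dly_to_ltd(f, ['pl']), ['pl'])['pl'].tolist()
#   [1.0, -2.0, 3.0]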
class OpenAverageProfitAndLossCalculator(object):
def compute(self, txns):
"""Compute the long/short live-to-date transaction level profit and loss. Uses an open average calculation"""
txndata = txns.frame
mktdata = txns.pricer.get_eod_frame()
if not isinstance(mktdata.index, pd.DatetimeIndex):
mktdata.to_timestamp(freq="B")
# get the set of all txn dts and mkt data dts
pl = pd.merge(txndata, mktdata.reset_index(), how="outer", on=TPL.DT)
if pl[TC.PID].isnull().all():
ltd_frame = pd.DataFrame(index=pl.index)
ltd_frame[TPL.DT] = pl[PL.DT]
ltd_frame[TPL.POS] = 0
ltd_frame[TPL.PID] = 0
ltd_frame[TPL.TID] = 0
ltd_frame[TPL.TXN_QTY] = np.nan
ltd_frame[TPL.TXN_PX] = np.nan
ltd_frame[TPL.TXN_FEES] = 0
ltd_frame[TPL.TXN_PREMIUM] = 0
ltd_frame[TPL.TXN_INTENT] = 0
ltd_frame[TPL.TXN_ACTION] = 0
ltd_frame[TPL.CLOSE_PX] = pl[TPL.CLOSE_PX]
ltd_frame[TPL.OPEN_VAL] = 0
ltd_frame[TPL.MKT_VAL] = 0
ltd_frame[TPL.TOT_VAL] = 0
ltd_frame[TPL.DVDS] = 0
ltd_frame[TPL.FEES] = 0
ltd_frame[TPL.RPL_GROSS] = 0
ltd_frame[TPL.RPL] = 0
ltd_frame[TPL.UPL] = 0
ltd_frame[TPL.PL] = 0
return ltd_frame
else:
pl.sort([TC.DT, TC.PID, TC.TID], inplace=1)
pl.reset_index(inplace=1, drop=1)
# check that all days can be priced
has_position = pl[TC.PID] > 0
missing_pxs = pl[MC.CLOSE].isnull()
missing = pl[TC.DT][has_position & missing_pxs]
if len(missing) > 0:
msg = "insufficient price data: {0} prices missing for dates {1}"
mdates = ",".join([_.strftime("%Y-%m-%d") for _ in set(missing[:5])])
mdates += len(missing) > 5 and "..." or ""
raise Exception(msg.format(len(missing), mdates))
# Now there is a row for every timestamp. Now compute the pl and fill in where missing data should be
cols = [
TC.DT,
TC.POS,
TC.PID,
TC.TID,
TC.INTENT,
TC.ACTION,
TC.FEES,
TC.QTY,
TC.PX,
TC.PREMIUM,
TC.OPEN_VAL,
]
(
dts,
pos_qtys,
pids,
tids,
intents,
sides,
txn_fees,
txn_qtys,
txn_pxs,
premiums,
open_vals,
) = [pl[c] for c in cols]
dvds, closing_pxs, mkt_vals = [
pl[c] for c in [MC.DVDS, MC.CLOSE, MC.MKT_VAL]
]
            # Ensure only end of day is kept for dividends (the join will match a dividend to any transaction during the day)
dvds = dvds.where(dts != dts.shift(-1), 0)
# fill in pl dates
open_vals.ffill(inplace=1)
open_vals.fillna(0, inplace=1)
pos_qtys.ffill(inplace=1)
pos_qtys.fillna(0, inplace=1)
# pid is the only tricky one, copy only while position is open
inpos = intents.notnull() | (pos_qtys != 0)
pids = np.where(inpos, pids.ffill(), 0)
pl["pid"] = pids.astype(int)
# Zero fill missing
dvds.fillna(0, inplace=1)
tids.fillna(0, inplace=1)
tids = tids.astype(int)
intents.fillna(0, inplace=1)
intents = intents.astype(int)
sides.fillna(0, inplace=1)
sides = sides.astype(int)
txn_fees.fillna(0, inplace=1)
premiums.fillna(0, inplace=1)
# LTD p/l calculation
fees = txn_fees.cumsum()
total_vals = premiums.cumsum()
mkt_vals *= pos_qtys
dvds = (dvds * pos_qtys).cumsum()
rpl_gross = total_vals - open_vals
rpl = rpl_gross + fees + dvds
upl = mkt_vals + open_vals
tpl = upl + rpl
# build the result
data = OrderedDict()
data[TPL.DT] = dts
data[TPL.POS] = pos_qtys
data[TPL.PID] = pids
data[TPL.TID] = tids
data[TPL.TXN_QTY] = txn_qtys
data[TPL.TXN_PX] = txn_pxs
data[TPL.TXN_FEES] = txn_fees
data[TPL.TXN_PREMIUM] = premiums
data[TPL.TXN_INTENT] = intents
data[TPL.TXN_ACTION] = sides
data[TPL.CLOSE_PX] = closing_pxs
data[TPL.OPEN_VAL] = open_vals
data[TPL.MKT_VAL] = mkt_vals
data[TPL.TOT_VAL] = total_vals
data[TPL.DVDS] = dvds
data[TPL.FEES] = fees
data[TPL.RPL_GROSS] = rpl_gross
data[TPL.RPL] = rpl
data[TPL.UPL] = upl
data[TPL.PL] = tpl
ltd_frame = pd.DataFrame(data, columns=list(data.keys()))
return ltd_frame
class TxnProfitAndLossDetails(object):
def __init__(self, txns=None, frame=None, ltd_frame=None):
"""
:param txns: Txns object
"""
if txns is None and frame is None and ltd_frame is None:
raise ValueError("Either {txns, frame, ltd_frame} must be defined")
self.txns = txns
self._frame = frame
self._ltd_frame = ltd_frame
self.ltd_cols = [
TPL.FEES,
TPL.TOT_VAL,
TPL.RPL_GROSS,
TPL.DVDS,
TPL.RPL,
TPL.RPL,
TPL.UPL,
TPL.PL,
]
@property
def ltd_frame(self):
if self._ltd_frame is None:
if self._frame is not None:
self._ltd_frame = _dly_to_ltd(self._frame, self.ltd_cols)
elif self.txns is not None:
self._ltd_frame = OpenAverageProfitAndLossCalculator().compute(
self.txns
)
else:
raise Exception("either txns or pl frame must be defined")
return self._ltd_frame
@property
def frame(self):
if self._frame is None:
ltd = self.ltd_frame
self._frame = _ltd_to_dly(ltd, self.ltd_cols)
return self._frame
def asfreq(self, freq):
frame = self.frame
pl = frame[PL.ALL].set_index(PL.DT)
if freq == "B":
resampled = pl.groupby(pl.index.date).apply(lambda f: f.sum())
resampled.index = pd.DatetimeIndex([i for i in resampled.index])
return ProfitAndLossDetails(resampled)
else:
resampled = pl.resample(freq, how="sum")
return ProfitAndLossDetails(resampled)
# -----------------------------------------------------------
# Resampled data
dly = lazy_property(lambda self: self.asfreq("B"), "dly")
weekly = lazy_property(lambda self: self.asfreq("W"), "weekly")
monthly = lazy_property(lambda self: self.asfreq("M"), "monthly")
quarterly = lazy_property(lambda self: self.asfreq("Q"), "quarterly")
annual = lazy_property(lambda self: self.asfreq("A"), "annual")
def get_pid_mask(self, pid):
return self.frame[TPL.PID] == pid
def truncate(self, before=None, after=None, pid=None):
if before is None and after is None and pid is None:
return self
elif before or after:
sub = self.frame.truncate(before, after)
return TxnProfitAndLossDetails(frame=sub)
else:
mask = self.get_pid_mask(pid)
frame = self.frame
sub = frame.ix[mask.values]
return TxnProfitAndLossDetails(frame=sub)
def iter_by_year(self):
for key, grp in self.frame.groupby(self.frame[TPL.DT].dt.year):
yield key, TxnProfitAndLossDetails(frame=grp)
def subset(self, txns):
"""To perform a subset it is not possible to reuse the frame since it is LTD, so we convert to daily then
compute ltd from daily
:param txns: the update Txns object
:return:
"""
result = TxnProfitAndLossDetails(txns)
# TODO - add reusing calcs. Issue is when removing PIDs, then could be multiple entries per dt
# use daily txn, clear all values where != pid
# determine which Timestamp columns can be removed as an old position may have multiple txns on same day
# recreate ltd from dly
# Need to take care if a dvd occurs at end of day
return result
class ProfitAndLossDetails(object):
def __init__(self, frame=None, ltd_frame=None):
self._frame = frame
self._ltd_frame = ltd_frame
@property
def ltd_frame(self):
ltd = self._ltd_frame
if ltd is None:
if self._frame is None:
raise Exception(
"Both frame and ltd frame are None. At least one must be defined."
)
self._ltd_frame = ltd = _dly_to_ltd(self._frame, PL.LTDS)
return ltd
@property
def frame(self):
obs = self._frame
if obs is None:
if self._ltd_frame is None:
raise Exception(
"Both frame and ltd frames are None. At least one must be defined."
)
self._frame = obs = _ltd_to_dly(self._ltd_frame, PL.LTDS)
return obs
def rolling_frame(self, n):
return pd.rolling_sum(self.frame, n)
def asfreq(self, freq):
"""Resample the p&l at the specified frequency
:param freq:
:return: Pl object
"""
frame = self.frame
if freq == "B":
resampled = frame.groupby(frame.index.date).apply(lambda f: f.sum())
resampled.index = pd.DatetimeIndex([i for i in resampled.index])
return ProfitAndLossDetails(resampled)
else:
resampled = frame.resample(freq, how="sum")
return ProfitAndLossDetails(resampled)
@lazy_property
def drawdown_info(self):
dd = self.drawdowns.to_frame()
last = dd.index[-1]
dd.columns = ["vals"]
dd["nonzero"] = (dd.vals != 0).astype(int)
dd["gid"] = (dd.nonzero.shift(1) != dd.nonzero).astype(int).cumsum()
ixs = (
dd.reset_index()
.groupby(["nonzero", "gid"])[dd.index.name or "index"]
.apply(lambda x: np.array(x))
)
rows = []
if 1 in ixs:
for ix in ixs[1]:
sub = dd.ix[ix]
# need to get t+1 since actually draw down ends on the 0 value
end = dd.index[
dd.index.get_loc(sub.index[-1]) + (last != sub.index[-1] and 1 or 0)
]
rows.append([sub.index[0], end, sub.vals.min(), sub.vals.idxmin()])
f = pd.DataFrame.from_records(
rows, columns=["dd start", "dd end", "maxdd", "maxdd dt"]
)
f["days"] = (f["dd end"] - f["dd start"]).astype("timedelta64[D]")
return f
@lazy_property
def drawdowns(self):
ltd = self.ltd_frame.pl
maxpl = pd.expanding_max(ltd)
maxpl[maxpl < 0] = 0
dd = ltd - maxpl
return dd
# scalar data
cnt = property(lambda self: self.frame.pl.notnull().astype(int).sum())
mean = lazy_property(lambda self: self.frame.pl.mean(), "mean")
avg = mean
std = lazy_property(lambda self: self.frame.pl.std(), "std")
std_ann = lazy_property(
lambda self: np.sqrt(periods_in_year(self.frame.pl)) * self.std, "std_ann"
)
maxdd = lazy_property(lambda self: self.drawdown_info["maxdd"].min(), "maxdd")
dd_avg = lazy_property(lambda self: self.drawdown_info["maxdd"].mean(), "dd_avg")
min = property(lambda self: self.frame.pl.min())
max = property(lambda self: self.frame.pl.max())
@lazy_property
def maxdd_dt(self):
if self.drawdown_info.empty:
return None
else:
return self.drawdown_info["maxdd dt"].ix[
self.drawdown_info["maxdd"].idxmin()
]
@lazy_property
def summary(self):
d = OrderedDict()
d["avg"] = self.avg
d["std"] = self.std
d["maxdd"] = self.maxdd
d["maxdd dt"] = self.maxdd_dt
d["dd avg"] = self.dd_avg
d["cnt"] = self.cnt
return pd.Series(d, name=self.frame.index.freq or guess_freq(self.frame.index))
def _repr_html_(self):
from tia.util.fmt import new_dynamic_formatter
fmt = new_dynamic_formatter(
method="row", precision=2, pcts=1, trunc_dot_zeros=1, parens=1
)
return fmt(self.summary.to_frame())._repr_html_()
def plot_ltd(
self, ax=None, style="k", label="ltd", show_dd=1, guess_xlabel=1, title=True
):
ltd = self.ltd_frame.pl
ax = ltd.plot(ax=ax, style=style, label=label)
if show_dd:
dd = self.drawdowns
dd.plot(style="r", label="drawdowns", alpha=0.5)
ax.fill_between(dd.index, 0, dd.values, facecolor="red", alpha=0.25)
fmt = lambda x: x
# guess the formatter
if guess_xlabel:
from tia.util.fmt import guess_formatter
from tia.util.mplot import AxesFormat
fmt = guess_formatter(ltd.abs().max(), precision=1)
AxesFormat().Y.apply_format(fmt).apply(ax)
ax.legend(loc="upper left", prop={"size": 12})
            # show the actual date and value
mdt, mdd = self.maxdd_dt, self.maxdd
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.25)
try:
dtstr = "{0}".format(mdt.to_period())
except:
# assume daily
dtstr = "{0}".format(hasattr(mdt, "date") and mdt.date() or mdt)
ax.text(
mdt,
dd[mdt],
"{1} \n {0}".format(fmt(mdd), dtstr).strip(),
ha="center",
va="top",
size=8,
bbox=bbox_props,
)
if title is True:
df = new_dynamic_formatter(precision=1, parens=False, trunc_dot_zeros=True)
total = df(ltd.iloc[-1])
vol = df(self.std)
mdd = df(self.maxdd)
title = "pnl %s vol %s maxdd %s" % (total, vol, mdd)
title and ax.set_title(title, fontdict=dict(fontsize=10, fontweight="bold"))
return ax
def truncate(self, before=None, after=None):
if before is None and after is None:
return self
else:
sub = self.frame.truncate(before, after)
return ProfitAndLossDetails(frame=sub)
class ProfitAndLoss(object):
def __init__(self, dly_details):
self._dly_details = dly_details
dly_details = property(lambda self: self._dly_details)
dly_frame = property(lambda self: self.dly_details.frame)
ltd_dly_frame = property(lambda self: self.dly_details.ltd_frame)
dly = property(lambda self: self.dly_frame.pl)
ltd_dly = property(lambda self: self.ltd_dly_frame.pl)
weekly_details = lazy_property(
lambda self: self.txn_details.weekly, "weekly_details"
)
weekly_frame = property(lambda self: self.weekly_details.frame)
ltd_weekly_frame = property(lambda self: self.weekly_details.ltd_frame)
weekly = property(lambda self: self.weekly_frame.pl)
ltd_weekly = property(lambda self: self.ltd_weekly_frame.pl)
monthly_details = lazy_property(
lambda self: self.txn_details.monthly, "monthly_details"
)
monthly_frame = property(lambda self: self.monthly_details.frame)
ltd_monthly_frame = property(lambda self: self.monthly_details.ltd_frame)
monthly = property(lambda self: self.monthly_frame.pl)
ltd_monthly = property(lambda self: self.ltd_monthly_frame.pl)
quarterly_details = lazy_property(
lambda self: self.txn_details.quarterly, "quarterly_details"
)
quarterly_frame = property(lambda self: self.quarterly_details.frame)
ltd_quarterly_frame = property(lambda self: self.quarterly_details.ltd_frame)
quarterly = property(lambda self: self.quarterly_frame.pl)
ltd_quarterly = property(lambda self: self.ltd_quarterly_frame.pl)
annual_details = lazy_property(
lambda self: self.txn_details.annual, "annual_details"
)
annual_frame = property(lambda self: self.annual_details.frame)
ltd_annual_frame = property(lambda self: self.annual_details.ltd_frame)
annual = property(lambda self: self.annual_frame.pl)
ltd_annual = property(lambda self: self.ltd_annual_frame.pl)
def iter_by_year(self):
for yr, details in self.dly_details.iter_by_year():
yield yr, ProfitAndLoss(details)
def truncate(self, before=None, after=None, pid=None):
if before is None and after is None and pid is None:
return self
else:
details = self.dly_details.truncate(before, after)
return ProfitAndLoss(details)
def report_by_year(
self,
summary_fct=None,
years=None,
ltd=1,
prior_n_yrs=None,
first_n_yrs=None,
ranges=None,
bm_rets=None,
):
"""Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
        :param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
        :param first_n_yrs: integer or list. Include summary for N years of return data after start date
        :param ranges: list of ranges. Each range consists of a start year and an end year
        :param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(pl):
monthly = pl.monthly_details
dly = pl.dly_details
data = OrderedDict()
data["mpl avg"] = monthly.mean
data["mpl std ann"] = monthly.std_ann
data["maxdd"] = dly.maxdd
data["maxdd dt"] = dly.maxdd_dt
data["avg dd"] = dly.dd_avg
data["best month"] = monthly.max
data["worst month"] = monthly.min
data["best day"] = dly.max
data["worst day"] = dly.min
data["nmonths"] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, pandl in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(pandl)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = "12/31/%s" % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results["first {0}yrs".format(first)] = summary_fct(firstN)
# Ranges
if ranges:
for range in ranges:
yr_start, yr_end = range
rng_rets = self.truncate("1/1/%s" % yr_start, "12/31/%s" % yr_end)
results["{0}-{1}".format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = "1/1/%s" % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results["past {0}yrs".format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results["ltd"] = summary_fct(self)
return pd.DataFrame(results, index=list(results.values())[0].keys()).T
class TxnProfitAndLoss(ProfitAndLoss):
def __init__(self, txns=None, txnpl_details=None):
if txns is None and txnpl_details is None:
raise ValueError("txns or txn_details must be specified")
self.txns = txns
self._txn_details = txnpl_details
        # Don't set the attribute; we want the lazy property to be called
# ProfitAndLoss.__init__(self, None)
@property
def txn_details(self):
if self._txn_details is None:
self._txn_details = TxnProfitAndLossDetails(self.txns)
return self._txn_details
txn_frame = property(lambda self: self.txn_details.frame)
ltd_txn_frame = property(lambda self: self.txn_details.ltd_frame)
txn = property(lambda self: self.txn_frame.set_index(PL.DT).pl)
ltd_txn = property(lambda self: self.ltd_txn_frame.set_index(PL.DT).pl)
dly_details = lazy_property(lambda self: self.txn_details.dly, "dly_details")
def truncate(self, before=None, after=None, pid=None):
if before is None and after is None and pid is None:
return self
else:
details = self.txn_details.truncate(before, after, pid)
return TxnProfitAndLoss(txnpl_details=details)
def get_pid_mask(self, pid):
return self.txn_details.get_pid_mask(pid)
|
[
"pandas.DataFrame",
"tia.analysis.perf.guess_freq",
"tia.util.decorator.lazy_property",
"numpy.isscalar",
"pandas.expanding_max",
"pandas.rolling_sum",
"pandas.DatetimeIndex",
"tia.util.fmt.new_dynamic_formatter",
"tia.util.mplot.AxesFormat",
"numpy.array",
"pandas.DataFrame.from_records",
"collections.OrderedDict",
"tia.analysis.perf.periods_in_year"
] |
[((16498, 16567), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.weekly)', '"""weekly_details"""'], {}), "(lambda self: self.txn_details.weekly, 'weekly_details')\n", (16511, 16567), False, 'from tia.util.decorator import lazy_property\n'), ((16871, 16942), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.monthly)', '"""monthly_details"""'], {}), "(lambda self: self.txn_details.monthly, 'monthly_details')\n", (16884, 16942), False, 'from tia.util.decorator import lazy_property\n'), ((17256, 17331), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.quarterly)', '"""quarterly_details"""'], {}), "(lambda self: self.txn_details.quarterly, 'quarterly_details')\n", (17269, 17331), False, 'from tia.util.decorator import lazy_property\n'), ((17658, 17727), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.annual)', '"""annual_details"""'], {}), "(lambda self: self.txn_details.annual, 'annual_details')\n", (17671, 17727), False, 'from tia.util.decorator import lazy_property\n'), ((22414, 22477), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.dly)', '"""dly_details"""'], {}), "(lambda self: self.txn_details.dly, 'dly_details')\n", (22427, 22477), False, 'from tia.util.decorator import lazy_property\n'), ((10738, 10767), 'pandas.rolling_sum', 'pd.rolling_sum', (['self.frame', 'n'], {}), '(self.frame, n)\n', (10752, 10767), True, 'import pandas as pd\n'), ((12147, 12235), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['rows'], {'columns': "['dd start', 'dd end', 'maxdd', 'maxdd dt']"}), "(rows, columns=['dd start', 'dd end', 'maxdd',\n 'maxdd dt'])\n", (12172, 12235), True, 'import pandas as pd\n'), ((12439, 12460), 'pandas.expanding_max', 'pd.expanding_max', (['ltd'], {}), '(ltd)\n', (12455, 12460), True, 'import pandas as pd\n'), ((13463, 13476), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13474, 13476), False, 'from collections import OrderedDict\n'), ((13851, 13940), 'tia.util.fmt.new_dynamic_formatter', 'new_dynamic_formatter', ([], {'method': '"""row"""', 'precision': '(2)', 'pcts': '(1)', 'trunc_dot_zeros': '(1)', 'parens': '(1)'}), "(method='row', precision=2, pcts=1, trunc_dot_zeros=1,\n parens=1)\n", (13872, 13940), False, 'from tia.util.fmt import new_dynamic_formatter\n'), ((20166, 20179), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20177, 20179), False, 'from collections import OrderedDict\n'), ((1690, 1718), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'pl.index'}), '(index=pl.index)\n', (1702, 1718), True, 'import pandas as pd\n'), ((5419, 5432), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5430, 5432), False, 'from collections import OrderedDict\n'), ((7804, 7850), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[i for i in resampled.index]'], {}), '([i for i in resampled.index])\n', (7820, 7850), True, 'import pandas as pd\n'), ((11075, 11121), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[i for i in resampled.index]'], {}), '([i for i in resampled.index])\n', (11091, 11121), True, 'import pandas as pd\n'), ((15480, 15550), 'tia.util.fmt.new_dynamic_formatter', 'new_dynamic_formatter', ([], {'precision': '(1)', 'parens': '(False)', 'trunc_dot_zeros': '(True)'}), '(precision=1, parens=False, trunc_dot_zeros=True)\n', (15501, 15550), False, 'from tia.util.fmt import new_dynamic_formatter\n'), ((19418, 19436), 
'numpy.isscalar', 'np.isscalar', (['years'], {}), '(years)\n', (19429, 19436), True, 'import numpy as np\n'), ((11709, 11720), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (11717, 11720), True, 'import numpy as np\n'), ((19638, 19651), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19649, 19651), False, 'from collections import OrderedDict\n'), ((12833, 12863), 'tia.analysis.perf.periods_in_year', 'periods_in_year', (['self.frame.pl'], {}), '(self.frame.pl)\n', (12848, 12863), False, 'from tia.analysis.perf import periods_in_year, guess_freq\n'), ((13723, 13751), 'tia.analysis.perf.guess_freq', 'guess_freq', (['self.frame.index'], {}), '(self.frame.index)\n', (13733, 13751), False, 'from tia.analysis.perf import periods_in_year, guess_freq\n'), ((20458, 20482), 'numpy.isscalar', 'np.isscalar', (['first_n_yrs'], {}), '(first_n_yrs)\n', (20469, 20482), True, 'import numpy as np\n'), ((21110, 21134), 'numpy.isscalar', 'np.isscalar', (['prior_n_yrs'], {}), '(prior_n_yrs)\n', (21121, 21134), True, 'import numpy as np\n'), ((14698, 14710), 'tia.util.mplot.AxesFormat', 'AxesFormat', ([], {}), '()\n', (14708, 14710), False, 'from tia.util.mplot import AxesFormat\n')]
|
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding
sys.stderr = stderr
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def build_sample_model(vocab_size,emb_size=100,num_layers=1,hidden_size=100,dropout=0.2):
model = Sequential()
model.add(Embedding(vocab_size,emb_size,batch_input_shape=(1,1)))
for i in range(num_layers):
model.add(LSTM(hidden_size,return_sequences=(i<num_layers-1),stateful=True))
model.add(Dropout(dropout))
model.add(Dense(vocab_size))
model.add(Activation('softmax'))
return model
def sample(header, num_chars):
model = build_sample_model(vocab_size)
model.load_weights('generator/weights/weights.h5')
sampled = [char_to_ix[c] for c in header]
for c in header[:-1]:
batch = np.zeros((1, 1))
batch[0, 0] = char_to_ix[c]
model.predict_on_batch(batch)
for i in range(num_chars):
batch = np.zeros((1, 1))
if sampled:
batch[0, 0] = sampled[-1]
else:
batch[0, 0] = np.random.randint(vocab_size)
result = model.predict_on_batch(batch).ravel()
sample = np.random.choice(range(vocab_size), p=result)
if ix_to_char[sample] == "\n":
break
sampled.append(sample)
return ''.join(ix_to_char[c] for c in sampled)
text = open("generator/names.txt").read()
char_to_ix = {ch:i for (i,ch) in enumerate(sorted(list(set(text))))}
ix_to_char = {i:ch for (ch,i) in char_to_ix.items()}
vocab_size = len(char_to_ix)
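if __name__ == '__main__':
    # Minimal usage sketch (not in the original file): generate one name starting with the
    # given header. Assumes 'generator/weights/weights.h5' (referenced in sample()) exists
    # and that the header characters appear in generator/names.txt.
    print(sample('A', 30))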
|
[
"keras.layers.Activation",
"warnings.filterwarnings",
"keras.layers.LSTM",
"keras.layers.Dropout",
"numpy.zeros",
"keras.layers.Dense",
"numpy.random.randint",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((45, 102), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (68, 102), False, 'import warnings\n'), ((464, 476), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (474, 476), False, 'from keras.models import Sequential, load_model\n'), ((488, 545), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'emb_size'], {'batch_input_shape': '(1, 1)'}), '(vocab_size, emb_size, batch_input_shape=(1, 1))\n', (497, 545), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((694, 711), 'keras.layers.Dense', 'Dense', (['vocab_size'], {}), '(vocab_size)\n', (699, 711), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((724, 745), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (734, 745), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((962, 978), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (970, 978), True, 'import numpy as np\n'), ((1080, 1096), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1088, 1096), True, 'import numpy as np\n'), ((585, 654), 'keras.layers.LSTM', 'LSTM', (['hidden_size'], {'return_sequences': '(i < num_layers - 1)', 'stateful': '(True)'}), '(hidden_size, return_sequences=i < num_layers - 1, stateful=True)\n', (589, 654), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((664, 680), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (671, 680), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((1165, 1194), 'numpy.random.randint', 'np.random.randint', (['vocab_size'], {}), '(vocab_size)\n', (1182, 1194), True, 'import numpy as np\n')]
|
import os
import random
import numpy as np
import cv2
from keras.utils import Sequence
# This vvvvv is for example_preprocess function and augs
# from albumentations import (
# HorizontalFlip, VerticalFlip, Flip, Transpose, Rotate, ShiftScaleRotate, RandomScale,
# RandomBrightness, RandomContrast, RandomBrightnessContrast, JpegCompression, Blur,
# MedianBlur, Compose, OneOf
# )
class SegDataGenerator(Sequence):
''' Data generator class for segmentation
Note:
        Used as a data generator in fit_generator from keras.
        Includes support for augmentations via a preprocessing function passed
        as the preprocessing_function parameter. For an example of the
        preprocessing-function interface, see the example_preprocess function.
Args:
input_directory (str): path to the folder where the input images are stored
mask_directory (str): path to the folder where the masks are stored
        input_extention (str): extension of the input image files
        mask_extention (str): extension of the mask files
input_shape (tuple/list): target shape of the input images
mask_shape (tuple/list): target shape of the masks
batch_size (int): batch size
        preload_dataset (bool): if True input images and masks will be loaded to RAM (should be set to False if the dataset is larger than available RAM)
prob_aug (float): probability of getting augmented image
preprocessing_function (func): function that performs preprocessing and augmentation (if needed) (see example_preprocess function)
Attributes:
no public attributes
'''
def __init__(self,
input_directory, mask_directory,
input_extention='.jpg', mask_extention='.png',
input_shape=(256, 256, 3), mask_shape=(256, 256, 1),
batch_size=4, preload_dataset=False, prob_aug=0.5,
preprocessing_function=None):
self._dir = input_directory
self._mask_dir = mask_directory
self._in_shape = input_shape
self._mask_shape = mask_shape
self._fext = input_extention
self._mext = mask_extention
self._batch_size = batch_size
in_files = list(filter(lambda x: x.endswith(self._fext), os.listdir(self._dir)))
in_files.sort()
mask_files = list(filter(lambda x: x.endswith(self._mext), os.listdir(self._mask_dir)))
mask_files.sort()
self._files = list()
for i, name in enumerate(in_files):
self._files.append((name, mask_files[i]))
random.shuffle(self._files)
self._preload = preload_dataset
self._prob_aug = prob_aug
self._data = None
self._masks = None
if (preprocessing_function is not None) and callable(preprocessing_function):
self._preprocess = preprocessing_function
else:
self._preprocess = self._def_preprocess
if self._preload:
self._data = list()
for i, names in enumerate(self._files):
img = cv2.imread(os.path.join(self._dir, names[0]), cv2.IMREAD_UNCHANGED)
mask = cv2.imread(os.path.join(self._mask_dir, names[1]), cv2.IMREAD_UNCHANGED)
self._data.append((img, mask))
def __len__(self):
        return int(np.ceil(len(self._files) / float(self._batch_size)))
def __getitem__(self, idx):
h = 0
w = 1
c = 2
batch_x = np.empty((self._batch_size, self._in_shape[h], self._in_shape[w], self._in_shape[c]), dtype='float32')
batch_y = np.empty((self._batch_size, self._mask_shape[h], self._mask_shape[w], self._mask_shape[c]), dtype='float32')
inter = cv2.INTER_AREA
if self._preload:
for i, imgs in enumerate(self._data[idx*self._batch_size:(idx+1)*self._batch_size]):
if (imgs[0].shape[w] < self._in_shape[w]) or (imgs[0].shape[h] < self._in_shape[h]):
inter = cv2.INTER_CUBIC
batch_img = cv2.resize(imgs[0], dsize=(self._in_shape[w], self._in_shape[h]), interpolation=inter)
batch_mask = cv2.resize(imgs[1], dsize=(self._mask_shape[w], self._mask_shape[h]), interpolation=inter)
batch_img, batch_mask = self._preprocess(batch_img, batch_mask, self._prob_aug)
batch_x[i] = batch_img.astype('float32')
batch_y[i] = batch_mask.astype('float32')
else:
for i, names in enumerate(self._files[idx*self._batch_size:(idx+1)*self._batch_size]):
img = cv2.imread(os.path.join(self._dir, names[0]), cv2.IMREAD_UNCHANGED)
mask = cv2.imread(os.path.join(self._mask_dir, names[1]), cv2.IMREAD_UNCHANGED)
if (img.shape[w] < self._in_shape[w]) or (img.shape[h] < self._in_shape[h]):
inter = cv2.INTER_CUBIC
batch_img = cv2.resize(img, dsize=(self._in_shape[w], self._in_shape[h]), interpolation=inter)
batch_mask = cv2.resize(mask, dsize=(self._mask_shape[w], self._mask_shape[h]), interpolation=inter)
batch_img, batch_mask = self._preprocess(batch_img, batch_mask, self._prob_aug)
batch_x[i] = batch_img.astype('float32')
batch_y[i] = batch_mask.astype('float32')
return batch_x, batch_y
@staticmethod
def _def_preprocess(img, mask, prob_aug):
''' Default preprocessing and augmentation function for SegDataGenerator class
Args:
img (numpy.ndarray): input image as numpy array (loaded using opencv, skimage or other compatible modules)
mask (numpy.ndarray): mask as numpy array (loaded using opencv, skimage or other compatible modules)
prob_aug (float): probability of getting augmented image (if used)
Returns:
tuple: tuple of preprocessed (image, mask)
'''
return img, mask
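# Minimal usage sketch (not part of the original module); the directory names and the
# Keras `model` below are hypothetical placeholders.
#
#     train_gen = SegDataGenerator('data/train/images', 'data/train/masks',
#                                  input_shape=(256, 256, 3), mask_shape=(256, 256, 1),
#                                  batch_size=4, preload_dataset=False, prob_aug=0.5)
#     model.fit_generator(train_gen, epochs=10)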
# vvvvv Example augmentation and preprocessing function vvvvv Albumentation module must be installed
# def example_augs(p=0.5):
# return Compose([
# OneOf([
# Flip(p=0.5),
# Transpose(p=0.2),
# Rotate(limit=90, interpolation=cv2.INTER_CUBIC, p=0.2),
# ShiftScaleRotate(shift_limit=0.125,
# scale_limit=0.25,
# rotate_limit=90,
# interpolation=cv2.INTER_CUBIC, p=0.5),
# RandomScale(scale_limit=0.2, interpolation=cv2.INTER_CUBIC, p=0.2)
# ], p=0.75),
# OneOf([
# RandomBrightness(limit=0.1, p=0.5),
# RandomContrast(limit=0.1, p=0.2),
# RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.1)
# ], p=0.25),
# JpegCompression(quality_lower=90, p=0.1),
# OneOf([
# Blur(blur_limit=3, p=0.1),
# MedianBlur(blur_limit=5, p=0.1)
# ], p=0.1)
# ], p=p)
# def example_preprocess(img, mask, prob_aug):
# ''' Example preprocessing and augmentation function for SegDataGenerator class
# Args:
# img (numpy.ndarray): input image as numpy array (loaded using opencv, skimage or other compatible modules)
# mask (numpy.ndarray): mask as numpy array (loaded using opencv, skimage or other compatible modules)
# prob_aug (float): probability of getting augmented image (if used)
# Returns:
# tuple: tuple of preprocessed (image, mask)
# '''
# augs = example_augs(p=prob_aug)
# data = {'image': img, 'mask': mask}
# augmented = augs(**data)
# aimg = augmented['image']
# amask = augmented['mask']
# aimg_yuv = cv2.cvtColor(aimg, cv2.COLOR_BGR2YUV)
# aimg_hls = cv2.cvtColor(aimg, cv2.COLOR_BGR2HLS)
# clahe = cv2.createCLAHE(clipLimit=2., tileGridSize=(5,5))
# yuv_split = cv2.split(aimg_yuv)
# hls_split = cv2.split(aimg_hls)
# yuv_split[0] = clahe.apply(yuv_split[0])
# aimg = cv2.merge((yuv_split[0], hls_split[2], yuv_split[2]))
# return aimg, amask
|
[
"random.shuffle",
"numpy.empty",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((2673, 2700), 'random.shuffle', 'random.shuffle', (['self._files'], {}), '(self._files)\n', (2687, 2700), False, 'import random\n'), ((3604, 3711), 'numpy.empty', 'np.empty', (['(self._batch_size, self._in_shape[h], self._in_shape[w], self._in_shape[c])'], {'dtype': '"""float32"""'}), "((self._batch_size, self._in_shape[h], self._in_shape[w], self.\n _in_shape[c]), dtype='float32')\n", (3612, 3711), True, 'import numpy as np\n'), ((3726, 3839), 'numpy.empty', 'np.empty', (['(self._batch_size, self._mask_shape[h], self._mask_shape[w], self.\n _mask_shape[c])'], {'dtype': '"""float32"""'}), "((self._batch_size, self._mask_shape[h], self._mask_shape[w], self.\n _mask_shape[c]), dtype='float32')\n", (3734, 3839), True, 'import numpy as np\n'), ((2357, 2378), 'os.listdir', 'os.listdir', (['self._dir'], {}), '(self._dir)\n', (2367, 2378), False, 'import os\n'), ((2474, 2500), 'os.listdir', 'os.listdir', (['self._mask_dir'], {}), '(self._mask_dir)\n', (2484, 2500), False, 'import os\n'), ((4190, 4280), 'cv2.resize', 'cv2.resize', (['imgs[0]'], {'dsize': '(self._in_shape[w], self._in_shape[h])', 'interpolation': 'inter'}), '(imgs[0], dsize=(self._in_shape[w], self._in_shape[h]),\n interpolation=inter)\n', (4200, 4280), False, 'import cv2\n'), ((4307, 4401), 'cv2.resize', 'cv2.resize', (['imgs[1]'], {'dsize': '(self._mask_shape[w], self._mask_shape[h])', 'interpolation': 'inter'}), '(imgs[1], dsize=(self._mask_shape[w], self._mask_shape[h]),\n interpolation=inter)\n', (4317, 4401), False, 'import cv2\n'), ((5111, 5198), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(self._in_shape[w], self._in_shape[h])', 'interpolation': 'inter'}), '(img, dsize=(self._in_shape[w], self._in_shape[h]), interpolation\n =inter)\n', (5121, 5198), False, 'import cv2\n'), ((5224, 5315), 'cv2.resize', 'cv2.resize', (['mask'], {'dsize': '(self._mask_shape[w], self._mask_shape[h])', 'interpolation': 'inter'}), '(mask, dsize=(self._mask_shape[w], self._mask_shape[h]),\n interpolation=inter)\n', (5234, 5315), False, 'import cv2\n'), ((3195, 3228), 'os.path.join', 'os.path.join', (['self._dir', 'names[0]'], {}), '(self._dir, names[0])\n', (3207, 3228), False, 'import os\n'), ((3287, 3325), 'os.path.join', 'os.path.join', (['self._mask_dir', 'names[1]'], {}), '(self._mask_dir, names[1])\n', (3299, 3325), False, 'import os\n'), ((4769, 4802), 'os.path.join', 'os.path.join', (['self._dir', 'names[0]'], {}), '(self._dir, names[0])\n', (4781, 4802), False, 'import os\n'), ((4861, 4899), 'os.path.join', 'os.path.join', (['self._mask_dir', 'names[1]'], {}), '(self._mask_dir, names[1])\n', (4873, 4899), False, 'import os\n')]
|
"""https://github.com/kujason/scene_vis"""
import os
import numpy as np
import vtk
class VtkImage:
"""Image
"""
def __init__(self):
self.vtk_actor = vtk.vtkImageActor()
# Need to keep reference to the image
self.image = None
self.vtk_image_data = None
def _save_image_data(self, vtk_image_data):
self.vtk_image_data = vtk_image_data
self.vtk_actor.SetInputData(vtk_image_data)
def set_image(self, image):
"""Setup image actor from image data
Args:
image: RGB image array
"""
# Flip vertically and change BGR->RGB
image = np.copy(image)[::-1, :, ::-1]
# Save reference to image
self.image = np.ascontiguousarray(image, dtype=np.uint8)
# Setup vtkImageImport
height, width = image.shape[0:2]
vtk_image_import = vtk.vtkImageImport()
vtk_image_import.SetDataSpacing(1, 1, 1)
vtk_image_import.SetDataOrigin(0, 0, 0)
vtk_image_import.SetWholeExtent(0, width - 1, 0, height - 1, 0, 0)
vtk_image_import.SetDataExtentToWholeExtent()
vtk_image_import.SetDataScalarTypeToUnsignedChar()
vtk_image_import.SetNumberOfScalarComponents(3)
vtk_image_import.SetImportVoidPointer(self.image)
vtk_image_import.Update()
# Get vtkImageData
vtk_image_data = vtk_image_import.GetOutput()
self._save_image_data(vtk_image_data)
def set_image_path(self, image_path):
"""Setup image actor from image at given path
Args:
image_path: path to image
"""
# Check extension
extension = os.path.splitext(image_path)[1]
if extension == '.png':
# Setup vtk image data
vtk_png_reader = vtk.vtkPNGReader()
vtk_png_reader.SetFileName(image_path)
vtk_png_reader.Update()
vtk_image_data = vtk_png_reader.GetOutput()
else:
raise NotImplementedError('Only .png images are supported, file was', extension)
self._save_image_data(vtk_image_data)
@staticmethod
def center_camera(vtk_renderer, vtk_image_data):
"""Sets camera to fill render window with the image
Args:
vtk_renderer: vtkRenderer
vtk_image_data: vtkImageData to calculate extents for centering
"""
origin = vtk_image_data.GetOrigin()
spacing = vtk_image_data.GetSpacing()
extent = vtk_image_data.GetExtent()
camera = vtk_renderer.GetActiveCamera()
camera.ParallelProjectionOn()
xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]
yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]
# xd = (extent[1] - extent[0] + 1) * spacing[0]
yd = (extent[3] - extent[2] + 1) * spacing[1]
d = camera.GetDistance()
camera.SetParallelScale(0.5 * yd)
camera.SetFocalPoint(xc, yc, 0.0)
camera.SetPosition(xc, yc, d)
|
[
"vtk.vtkPNGReader",
"numpy.copy",
"vtk.vtkImageActor",
"os.path.splitext",
"vtk.vtkImageImport",
"numpy.ascontiguousarray"
] |
[((175, 194), 'vtk.vtkImageActor', 'vtk.vtkImageActor', ([], {}), '()\n', (192, 194), False, 'import vtk\n'), ((738, 781), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (758, 781), True, 'import numpy as np\n'), ((882, 902), 'vtk.vtkImageImport', 'vtk.vtkImageImport', ([], {}), '()\n', (900, 902), False, 'import vtk\n'), ((652, 666), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (659, 666), True, 'import numpy as np\n'), ((1673, 1701), 'os.path.splitext', 'os.path.splitext', (['image_path'], {}), '(image_path)\n', (1689, 1701), False, 'import os\n'), ((1802, 1820), 'vtk.vtkPNGReader', 'vtk.vtkPNGReader', ([], {}), '()\n', (1818, 1820), False, 'import vtk\n')]
|
#!/usr/bin/env python
'''
Plot degree values for a given set of nodes in a simple circle plot.
'''
import numpy as np
import matplotlib.pyplot as plt
import mne
from jumeg import get_jumeg_path
from jumeg.connectivity import plot_degree_circle
import bct
orig_labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
con_fname = get_jumeg_path() + '/data/sample,aparc-con.npy'
con = np.load(con_fname)
con_ = con[0, :, :, 2] + con[0, :, :, 2].T
# compute the degree
degrees = mne.connectivity.degree(con_, threshold_prop=0.2)
fig, ax = plot_degree_circle(degrees, yaml_fname, orig_labels_fname)
|
[
"jumeg.get_jumeg_path",
"numpy.load",
"mne.connectivity.degree",
"jumeg.connectivity.plot_degree_circle"
] |
[((480, 498), 'numpy.load', 'np.load', (['con_fname'], {}), '(con_fname)\n', (487, 498), True, 'import numpy as np\n'), ((574, 623), 'mne.connectivity.degree', 'mne.connectivity.degree', (['con_'], {'threshold_prop': '(0.2)'}), '(con_, threshold_prop=0.2)\n', (597, 623), False, 'import mne\n'), ((635, 693), 'jumeg.connectivity.plot_degree_circle', 'plot_degree_circle', (['degrees', 'yaml_fname', 'orig_labels_fname'], {}), '(degrees, yaml_fname, orig_labels_fname)\n', (653, 693), False, 'from jumeg.connectivity import plot_degree_circle\n'), ((280, 296), 'jumeg.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (294, 296), False, 'from jumeg import get_jumeg_path\n'), ((345, 361), 'jumeg.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (359, 361), False, 'from jumeg import get_jumeg_path\n'), ((425, 441), 'jumeg.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (439, 441), False, 'from jumeg import get_jumeg_path\n')]
|
import os
import sys
import setuptools
from distutils import sysconfig
cfg_vars = sysconfig.get_config_vars()
for key, value in cfg_vars.items():
if type(value) == str:
cfg_vars[key] = cfg_vars[key].replace("-Wstrict-prototypes", "")
cfg_vars[key] = cfg_vars[key].replace("-Wall", "-w")
cfg_vars[key] = cfg_vars[key].replace("-O3", "")
cfg_vars[key] = cfg_vars[key].replace("-O2", "")
cfg_vars[key] = cfg_vars[key].replace("-DNDEBUG", "-UNDEBUG")
cfg_vars[key] = cfg_vars[key].replace(" -g ", " ")#linux-gnu gotcha
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
from Cython.Build import cythonize
from Cython.Distutils.extension import Extension
from Cython.Distutils import build_ext
## \file setup.py setup.py
# \brief The python script for building proteus
#
# Set the DISTUTILS_DEBUG environment variable to print detailed information while setup.py is running.
#
from proteus import config
from proteus.config import *
###to turn on debugging in c++
##\todo Finishing cleaning up setup.py/setup.cfg, config.py...
PROTEUS_PETSC_EXTRA_LINK_ARGS = getattr(config, 'PROTEUS_PETSC_EXTRA_LINK_ARGS', [])
PROTEUS_PETSC_EXTRA_COMPILE_ARGS = getattr(config, 'PROTEUS_PETSC_EXTRA_COMPILE_ARGS', [])
PROTEUS_CHRONO_CXX_FLAGS = getattr(config, 'PROTEUS_CHRONO_CXX_FLAGS', [])
proteus_install_path = os.path.join(sysconfig.get_python_lib(), 'proteus')
# handle non-system installations
for arg in sys.argv:
if arg.startswith('--root'):
proteus_install_path = proteus_install_path.partition(sys.prefix + '/')[-1]
break
if arg.startswith('--prefix'):
proteus_install_path = proteus_install_path.partition(sys.prefix + '/')[-1]
break
EXTENSIONS_TO_BUILD = [
# Extension("MeshAdaptPUMI.MeshAdaptPUMI",
# sources = ['proteus/MeshAdaptPUMI/MeshAdaptPUMI.pyx', 'proteus/MeshAdaptPUMI/cMeshAdaptPUMI.cpp',
# 'proteus/MeshAdaptPUMI/MeshConverter.cpp', 'proteus/MeshAdaptPUMI/ParallelMeshConverter.cpp',
# 'proteus/MeshAdaptPUMI/MeshFields.cpp', 'proteus/MeshAdaptPUMI/SizeField.cpp',
# 'proteus/MeshAdaptPUMI/DumpMesh.cpp',
# 'proteus/MeshAdaptPUMI/ErrorResidualMethod.cpp','proteus/MeshAdaptPUMI/VMS.cpp','proteus/MeshAdaptPUMI/createAnalyticGeometry.cpp'],
# depends=["proteus/partitioning.h",
# "proteus/partitioning.cpp",
# "proteus/cpartitioning.pyx",
# "proteus/cmeshTools.pxd",
# "proteus/mesh.h",
# 'proteus/mesh.cpp',
# 'proteus/meshio.cpp'],
# define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H)],
# language='c++',
# include_dirs=[numpy.get_include(),'include',
# 'proteus','proteus/MeshAdaptPUMI']+
# PROTEUS_SCOREC_INCLUDE_DIRS,
# library_dirs=PROTEUS_SCOREC_LIB_DIRS,
# libraries=PROTEUS_SCOREC_LIBS,
# extra_compile_args=PROTEUS_SCOREC_EXTRA_COMPILE_ARGS+PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
# extra_link_args=PROTEUS_SCOREC_EXTRA_LINK_ARGS),#+PROTEUS_EXTRA_LINK_ARGS),
Extension("mprans.cPres",['proteus/mprans/cPres.pyx'],
depends=['proteus/mprans/Pres.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],),
Extension("mprans.cPresInit",['proteus/mprans/cPresInit.pyx'],
depends=['proteus/mprans/PresInit.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cPresInc",['proteus/mprans/cPresInc.pyx'],
depends=['proteus/mprans/PresInc.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cAddedMass",['proteus/mprans/cAddedMass.pyx'],
depends=['proteus/mprans/AddedMass.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.SedClosure",['proteus/mprans/SedClosure.pyx'],
depends=['proteus/mprans/SedClosure.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cVOF3P",['proteus/mprans/cVOF3P.pyx'],
depends=['proteus/mprans/VOF3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cVOS3P",['proteus/mprans/cVOS3P.pyx'],
depends=['proteus/mprans/VOS3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cNCLS3P",['proteus/mprans/cNCLS3P.pyx'],
depends=['proteus/mprans/NCLS3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cRDLS3P",['proteus/mprans/cRDLS3P.pyx'],
depends=['proteus/mprans/RDLS3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cMCorr3P",
["proteus/mprans/cMCorr3P.pyx"],
depends=["proteus/mprans/MCorr3P.h", 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
define_macros=[('PROTEUS_LAPACK_H',
PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',
PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',
PROTEUS_BLAS_H)],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("richards.cRichards",['proteus/richards/cRichards.pyx'],
depends=['proteus/richards/Richards.h','proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("elastoplastic.cElastoPlastic",
['proteus/elastoplastic/cElastoPlastic.pyx'],
define_macros=[('PROTEUS_LAPACK_H',
PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',
PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',
PROTEUS_BLAS_H)],
depends=['proteus/elastoplastic/ElastoPlastic.h','proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=[numpy.get_include(),'proteus'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("mprans.cRANS3PF",['proteus/mprans/cRANS3PF.pyx'],
depends=['proteus/mprans/RANS3PF.h','proteus/mprans/RANS3PF2D.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cRANS3PSed",['proteus/mprans/cRANS3PSed.pyx'],
depends=['proteus/mprans/RANS3PSed.h','proteus/mprans/RANS3PSed2D.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("Isosurface",['proteus/Isosurface.pyx'],
language='c',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("BoundaryConditions",['proteus/BoundaryConditions.py'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.BoundaryConditions",['proteus/mprans/BoundaryConditions.py'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.MeshSmoothing",['proteus/mprans/MeshSmoothing.pyx'],
language='c++',
include_dirs=[numpy.get_include(),'proteus',PROTEUS_INCLUDE_DIR],
libraries=['stdc++','m'],
extra_compile_args=["-std=c++11","-mavx"]),
Extension("mprans.cMoveMeshMonitor",['proteus/mprans/cMoveMeshMonitor.pyx'],
language='c++',
include_dirs=[numpy.get_include(),'proteus',PROTEUS_INCLUDE_DIR],
libraries=['stdc++','m'],
extra_compile_args=["-std=c++11","-mavx"]),
Extension("mbd.CouplingFSI",
sources=['proteus/mbd/CouplingFSI.pyx',
'proteus/mbd/CouplingFSI.pxd',
'proteus/mbd/ChVariablesBodyAddedMass.cpp',
'proteus/mbd/ChBodyAddedMass.cpp',
'proteus/mbd/ChronoHeaders.pxd'],
depends=['proteus/mbd/ProtChBody.h',
'proteus/mbd/ProtChMoorings.h'],
language='c++',
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_INCLUDE_DIR,
PROTEUS_CHRONO_INCLUDE_DIR,
PROTEUS_CHRONO_INCLUDE_DIR+'/chrono',
PROTEUS_CHRONO_INCLUDE_DIR+'/chrono/collision/bullet',],
library_dirs=[PROTEUS_CHRONO_LIB_DIR],
libraries=['ChronoEngine',
'stdc++',
'm'],
extra_compile_args=PROTEUS_CHRONO_CXX_FLAGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("WaveTools",['proteus/WaveTools.py'],
depends=['proteus/WaveTools.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("fenton.Fenton",
sources=['proteus/fenton/Fenton.pyx',
'proteus/fenton/Solve.cpp',
'proteus/fenton/Dpythag.cpp',
'proteus/fenton/Dsvbksb.cpp',
'proteus/fenton/Dsvdcmp.cpp',
'proteus/fenton/Inout.cpp',
'proteus/fenton/Subroutines.cpp',
'proteus/fenton/Util.cpp',],
language='c++',
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_INCLUDE_DIR,
PROTEUS_NCURSES_INCLUDE_DIR,],
library_dirs=[PROTEUS_NCURSES_LIB_DIR,],
libraries=['ncurses','stdc++','m'],
extra_compile_args=["-std=c++11"]),
Extension("ADR",['proteus/ADR.pyx'],
depends=['proteus/ADR.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("subsurfaceTransportFunctions",['proteus/subsurfaceTransportFunctions.pyx'],
include_dirs=[numpy.get_include(),'proteus'],
extra_compile_args=PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('cfemIntegrals',
['proteus/cfemIntegrals.pyx',
'proteus/femIntegrals.c',
'proteus/postprocessing.c'],
depends=['proteus/femIntegrals.h'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
include_dirs=[numpy.get_include(),'proteus',
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("csparsity",['proteus/csparsity.pyx', 'proteus/sparsity.cpp'],
depends=['proteus/sparsity.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],),
Extension("cmeshTools",
['proteus/cmeshTools.pyx', 'proteus/mesh.cpp', 'proteus/meshio.cpp'],
language='c++',
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
library_dirs=[PROTEUS_DAETK_LIB_DIR]+PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m',PROTEUS_DAETK_LIB]+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT),
Extension('ctransportCoefficients',
['proteus/ctransportCoefficients.pyx','proteus/transportCoefficients.c'],
include_dirs=[numpy.get_include(),'proteus'],
depends=["proteus/transportCoefficients.h"],
language="c",
libraries=['m']),
Extension('csubgridError',
['proteus/csubgridError.pyx','proteus/subgridError.c'],
depends=["proteus/subgridError.h"],
language="c",
include_dirs=[numpy.get_include(),'proteus'],
libraries=['m'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('cshockCapturing',
['proteus/cshockCapturing.pyx','proteus/shockCapturing.c'],
depends=["proteus/shockCapturing.h"],
language="c",
include_dirs=[numpy.get_include(),'proteus'],
libraries=['m'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('superluWrappers',
['proteus/superluWrappers.pyx'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
language="c",
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_SUPERLU_INCLUDE_DIR],
library_dirs=[PROTEUS_SUPERLU_LIB_DIR,
PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_SUPERLU_LIB,
PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("csmoothers",["proteus/csmoothers.pyx", "proteus/smoothers.c"],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
language="c",
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR,
],
library_dirs=[PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_SUPERLU_LIB_DIR,
PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_SUPERLU_LIB,
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("canalyticalSolutions",["proteus/canalyticalSolutions.pyx", "proteus/analyticalSolutions.c"],
depends=["proteus/analyticalSolutions.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension("clapack",
["proteus/clapack.pyx"],
depends=["proteus/proteus_lapack.h","proteus/proteus_blas.h"],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language="c",
include_dirs=[numpy.get_include(), 'proteus',
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]),
Extension("cpostprocessing",
["proteus/cpostprocessing.pyx","proteus/postprocessing.c"],
depends=["proteus/postprocessing.h","proteus/postprocessing.pxd"],
define_macros=[('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language="c",
include_dirs=[numpy.get_include(), 'proteus',
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]),
Extension('cnumericalFlux',
['proteus/cnumericalFlux.pyx','proteus/numericalFlux.c'],
depends=["proteus/numericalFlux.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension('ctimeIntegration',
['proteus/ctimeIntegration.pyx','proteus/timeIntegration.c'],
depends=["proteus/timeIntegration.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension("cTwophaseDarcyCoefficients",
["proteus/cTwophaseDarcyCoefficients.pyx",
"proteus/SubsurfaceTransportCoefficients.cpp"],
depends=["proteus/SubsurfaceTransportCoefficients.h",
"proteus/pskRelations.h",
"proteus/pskRelations.pxd",
"proteus/densityRelations.h",
"proteus/twophaseDarcyCoefficients.pxd",
"proteus/twophaseDarcyCoefficients.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cSubsurfaceTransportCoefficients",
["proteus/cSubsurfaceTransportCoefficients.pyx","proteus/SubsurfaceTransportCoefficients.cpp"],
depends=["proteus/SubsurfaceTransportCoefficients.pxd",
"proteus/SubsurfaceTransportCoefficients.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cpskRelations",["proteus/cpskRelations.pyx"],
depends=["proteus/pskRelations.pxd",
"proteus/pskRelations.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cpartitioning",["proteus/cpartitioning.pyx",
"proteus/partitioning.cpp",
'proteus/mesh.cpp',
'proteus/meshio.cpp',],
depends=["proteus/partitioning.h",
"proteus/partitioning.cpp",
"proteus/cpartitioning.pyx",
"proteus/cmeshTools.pxd",
"proteus/mesh.h",
'proteus/mesh.cpp',
'proteus/meshio.cpp'],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("flcbdfWrappers",["proteus/flcbdfWrappers.pyx"],
language="c++",
depends=["proteus/flcbdfWrappers.pxd"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
library_dirs=[PROTEUS_DAETK_LIB_DIR]+PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m',PROTEUS_DAETK_LIB]+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
),
Extension("mprans.cCLSVOF",["proteus/mprans/cCLSVOF.pyx"],
depends=["proteus/mprans/CLSVOF.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cNCLS",["proteus/mprans/cNCLS.pyx"],
depends=["proteus/mprans/NCLS.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cMCorr",["proteus/mprans/cMCorr.pyx"],
depends=["proteus/mprans/MCorr.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
define_macros=[('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("mprans.cRANS2P",["proteus/mprans/cRANS2P.pyx"],
depends=["proteus/mprans/RANS2P.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"],
extra_compile_args=PROTEUS_OPT,
language="c++", include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cRANS2P2D",["proteus/mprans/cRANS2P2D.pyx"],
depends=["proteus/mprans/RANS2P2D.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cRDLS",["proteus/mprans/cRDLS.pyx"],
depends=["proteus/mprans/RDLS.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cVOF",["proteus/mprans/cVOF.pyx"],
depends=["proteus/mprans/VOF.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cMoveMesh",["proteus/mprans/cMoveMesh.pyx"],
depends=["proteus/mprans/MoveMesh.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cMoveMesh2D",["proteus/mprans/cMoveMesh2D.pyx"],
depends=["proteus/mprans/MoveMesh2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cSW2D",["proteus/mprans/cSW2D.pyx"],
depends=["proteus/mprans/SW2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+['-g']+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS+['-g']),
Extension("mprans.cSW2DCV",["proteus/mprans/cSW2DCV.pyx"],
depends=["proteus/mprans/SW2DCV.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+['-g']+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS+['-g']),
Extension("mprans.cGN_SW2DCV",["proteus/mprans/cGN_SW2DCV.pyx"],
depends=["proteus/mprans/GN_SW2DCV.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+['-g']+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS+['-g']),
Extension("mprans.cKappa",["proteus/mprans/cKappa.pyx"],
depends=["proteus/mprans/Kappa.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cKappa2D",["proteus/mprans/cKappa2D.pyx"],
depends=["proteus/mprans/Kappa2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cDissipation",["proteus/mprans/cDissipation.pyx"],
depends=["proteus/mprans/Dissipation.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cDissipation2D",["proteus/mprans/cDissipation2D.pyx"],
depends=["proteus/mprans/Dissipation2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
]
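# Illustrative sketch only (module name and paths below are placeholders, not
# part of proteus): a new Cython extension would typically be appended to
# EXTENSIONS_TO_BUILD with an entry shaped like the ones above, e.g.
#
#   Extension("mprans.cMyModel", ['proteus/mprans/cMyModel.pyx'],
#             depends=['proteus/mprans/MyModel.h', 'proteus/ModelFactory.h',
#                      'proteus/CompKernel.h'],
#             language='c++',
#             extra_compile_args=PROTEUS_OPT,
#             include_dirs=[numpy.get_include(), 'proteus']),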
def setup_given_extensions(extensions):
setup(name='proteus',
version='1.6.1.dev0',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
                         'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
],
description='Python tools for multiphysics modeling',
author='The Proteus Developers',
author_email='<EMAIL>',
url='http://proteustoolkit.org',
packages = ['proteus',
'proteus.fenton',
'proteus.mprans',
'proteus.richards',
'proteus.elastoplastic',
'proteus.mbd',
'proteus.test_utils',
'proteus.config',
'proteus.tests',
'proteus.tests.ci',
'proteus.tests.griffiths_lane_6',
'proteus.tests.levelset',
'proteus.tests.linalgebra_tests',
'proteus.tests.LS_with_edgeBased_EV',
'proteus.tests.LS_with_edgeBased_EV.VOF',
'proteus.tests.LS_with_edgeBased_EV.NCLS',
'proteus.tests.BernsteinPolynomials',
'proteus.tests.BernsteinPolynomials.poisson_eqn',
'proteus.tests.elliptic_redist',
'proteus.tests.elliptic_redist.RDLS',
'proteus.tests.elliptic_redist.RDLS3P',
'proteus.tests.surface_tension',
'proteus.tests.surface_tension.rising_bubble_rans3p',
'proteus.tests.CLSVOF',
'proteus.tests.CLSVOF.disc_ICs',
'proteus.tests.CLSVOF.with_RANS2P',
'proteus.tests.CLSVOF.with_RANS3PF',
'proteus.tests.CLSVOF.pure_level_set',
'proteus.TwoPhaseFlow',
'proteus.TwoPhaseFlow.utils',
'proteus.tests.TwoPhaseFlow',
'proteus.tests.SWEs',
'proteus.tests.SWEs.dam_over_bumps',
'proteus.tests.SWEs.oneD_dambreak_flat_bottom',
'proteus.tests.SWEs.paraboloid_with_friction',
'proteus.tests.SWEs.paraboloid_with_friction.oneD',
'proteus.tests.SWEs.paraboloid_with_friction.twoD',
'proteus.tests.SWEs.test_gauges',
'proteus.tests.SWEs.test_reflecting_BCs',
'proteus.tests.matrix_constructor',
'proteus.tests.matrix_constructor.import_modules',
'proteus.MeshAdaptPUMI',
'proteus.tests.MeshAdaptPUMI',
'proteus.tests.MeshAdaptPUMI.gauge_compare.dambreak_Colagrossi_2D',
'proteus.tests.mesh_tests',
'proteus.tests.mesh_tests.import_modules',
'proteus.tests.periodic',
'proteus.tests.periodic.petsc',
'proteus.tests.periodic.comparison_files',
'proteus.tests.poisson_2d',
'proteus.tests.post_processing',
'proteus.tests.post_processing.import_modules',
'proteus.tests.ProjScheme_with_EV',
'proteus.tests.single_phase_gw',
'proteus.tests.solver_tests',
'proteus.tests.solver_tests.import_modules',
'proteus.tests.solver_tests_slow',
'proteus.tests.solver_tests_slow.import_modules',
'proteus.tests.solver_tests_mprans',
'proteus.tests.solver_tests_mprans.import_modules',
'proteus.tests.cylinder2D',
'proteus.tests.cylinder2D.conforming_rans2p',
'proteus.tests.cylinder2D.conforming_rans3p',
'proteus.tests.cylinder2D.ibm_method',
'proteus.tests.cylinder2D.ibm_rans2p',
'proteus.tests.cylinder2D.ibm_rans2p_3D',
'proteus.tests.cylinder2D.sbm_method',
'proteus.tests.cylinder2D.sbm_3Dmesh',
'proteus.tests.HotStart_3P',
'proteus.tests.AddedMass',
'proteus.tests.MoveMeshMonitor',
'proteus.tests.wave_tests',
],
cmdclass = {'build_ext':build_ext},
ext_package='proteus',
ext_modules=extensions,
data_files=[(proteus_install_path,
['proteus/proteus_blas.h',
'proteus/proteus_lapack.h',
'proteus/proteus_superlu.h',
'proteus/ModelFactory.h',
'proteus/CompKernel.h'
]),
(os.path.join(proteus_install_path,'tests'),
['proteus/tests/hex_cube_3x3.xmf',
'proteus/tests/hex_cube_3x3.h5',
'proteus/tests/sparse_mat_ex.mtx']),
(os.path.join(proteus_install_path,'tests','linalgebra_tests'),
['proteus/tests/linalgebra_tests/sparse_mat_1.txt',
'proteus/tests/linalgebra_tests/jac.bin']),
(os.path.join(proteus_install_path,'tests','griffiths_lane_6'),
['proteus/tests/griffiths_lane_6/richards_expected.h5',
'proteus/tests/griffiths_lane_6/elastoplastic_expected.h5']),
(os.path.join(proteus_install_path,'tests','levelset'),
['proteus/tests/levelset/rotation/rotation_c0p1cg_vbdf_2_level_1_expected.h5',
'proteus/tests/levelset/vortex2D/vortex_c0p1cg_vbdf_2_level_1_expected.h5',
'proteus/tests/levelset/vortex/vortex_c0p1cg_bdf_2_level_1_expected.h5']),
(os.path.join(proteus_install_path,'tests','ci','comparison_files'),
['proteus/tests/ci/comparison_files/floating_bar.h5',
'proteus/tests/ci/comparison_files/phi_t_0.000000_000.tgz']),
(os.path.join(proteus_install_path,'tests','LS_with_edgeBased_EV','VOF','comparison_files'),
['proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_SmoothnessBased.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_TaylorGalerkin.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_stab4.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_EV1.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_SUPG.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_EV2.h5']),
(os.path.join(proteus_install_path,'tests','LS_with_edgeBased_EV','NCLS','comparison_files'),
['proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_non_saturated_ls.h5',
'proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_pureAdvection_SUPG.h5',
'proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_pureAdvection_EV1.h5',
'proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_saturated_ls.h5']),
(os.path.join(proteus_install_path,'tests','BernsteinPolynomials','poisson_eqn','comparison_files'),
['proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/2D_poisson_hex_degree2.h5',
'proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/2D_poisson_simplex_degree2.h5',
'proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/3D_poisson_hex_degree2.h5',
'proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/3D_poisson_simplex_degree2.h5']),
(os.path.join(proteus_install_path,'tests','surface_tension','rising_bubble_rans3p','comparison_files'),
['proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_2D_supg.h5',
'proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_2D_ev.h5',
'proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_3D_supg.h5',
'proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_3D_ev.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','disc_ICs','comparison_files'),
['proteus/tests/CLSVOF/disc_ICs/comparison_files/test_case_1.h5',
'proteus/tests/CLSVOF/disc_ICs/comparison_files/test_case_2.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','pure_level_set','comparison_files'),
['proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_1.h5',
'proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_2.h5',
'proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_3.h5',
'proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_4.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','with_RANS2P','comparison_files'),
['proteus/tests/CLSVOF/with_RANS2P/comparison_files/multiphase_2D_falling_bubble.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','with_RANS3PF','comparison_files'),
['proteus/tests/CLSVOF/with_RANS3PF/comparison_files/multiphase_2D_falling_bubble.h5',
'proteus/tests/CLSVOF/with_RANS3PF/comparison_files/multiphase_3D_falling_bubble.h5']),
(os.path.join(proteus_install_path,'tests','TwoPhaseFlow','comparison_files'),
['proteus/tests/TwoPhaseFlow/comparison_files/risingBubble.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/damBreak.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/TwoDimBucklingFlow.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/fillingTank.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/marin.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/moses.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','dam_over_bumps','comparison_files'),
['proteus/tests/SWEs/dam_over_bumps/comparison_files/SWEs_dam_over_bumps.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','oneD_dambreak_flat_bottom','comparison_files'),
['proteus/tests/SWEs/oneD_dambreak_flat_bottom/comparison_files/SWEs_oneD_dambreak_flat_bottom.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','paraboloid_with_friction','oneD','comparison_files'),
['proteus/tests/SWEs/paraboloid_with_friction/oneD/comparison_files/SWEs_oneD_paraboloid_with_friction.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','paraboloid_with_friction','twoD','comparison_files'),
['proteus/tests/SWEs/paraboloid_with_friction/twoD/comparison_files/SWEs_twoD_paraboloid_with_friction.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','test_gauges','comparison_files'),
['proteus/tests/SWEs/test_gauges/comparison_files/SWEs_test_gauges.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','test_reflecting_BCs','comparison_files'),
['proteus/tests/SWEs/test_reflecting_BCs/comparison_files/SWEs_test_reflecting_BCs.h5']),
(os.path.join(proteus_install_path,'tests','solver_tests','import_modules'),
['proteus/tests/solver_tests/import_modules/quad_mass_matrix.npy',
'proteus/tests/solver_tests/import_modules/sol_10.npy',
'proteus/tests/solver_tests/import_modules/sol_20_lst.npy',
'proteus/tests/solver_tests/import_modules/input_vec_tppcd.bin',
'proteus/tests/solver_tests/import_modules/tp_pcd_y_output.bin',
'proteus/tests/solver_tests/import_modules/tppcd_y_dirichlet_dof.bin',
'proteus/tests/solver_tests/import_modules/Qp_visc.bin',
'proteus/tests/solver_tests/import_modules/Qp_dens.bin',
'proteus/tests/solver_tests/import_modules/Ap_rho.bin',
'proteus/tests/solver_tests/import_modules/Np_rho.bin',
'proteus/tests/solver_tests/import_modules/saddle_point_small.bin',
'proteus/tests/solver_tests/import_modules/saddle_point_matrix.bin',
'proteus/tests/solver_tests/import_modules/rans2p_step_newton_1.bin',
'proteus/tests/solver_tests/import_modules/rans2p_step_newton_5.bin',
'proteus/tests/solver_tests/import_modules/NSE_cavity_matrix.bin',
'proteus/tests/solver_tests/import_modules/NSE_step_no_slip.bin']),
(os.path.join(proteus_install_path,'tests','mesh_tests','comparison_files'),
['proteus/tests/mesh_tests/comparison_files/poiseulle_xmf.output',
'proteus/tests/mesh_tests/comparison_files/poiseulle_global_xmf.output']),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','comparison_files'),
['proteus/tests/solver_tests_slow/comparison_files/Qp_expected.log',
'proteus/tests/solver_tests_slow/comparison_files/drivenCavityStokes_expected.h5']),
(os.path.join(proteus_install_path,'tests','matrix_constructor','comparison_files'),
['proteus/tests/matrix_constructor/comparison_files/mass_reference_c0p1_2D.txt',
'proteus/tests/matrix_constructor/comparison_files/mass_reference_TH_2D.npy']),
(os.path.join(proteus_install_path,'tests','periodic','petsc'),
['proteus/tests/periodic/petsc/petsc.options.schur.selfp_petsc.amg',
'proteus/tests/periodic/petsc/petsc.options.schur.selfp_petsc.gamg.superlu',
'proteus/tests/periodic/petsc/petsc.options.schur.selfp_petsc.superlu']),
(os.path.join(proteus_install_path,'tests','periodic','comparison_files'),
['proteus/tests/periodic/comparison_files/basic_2d_test.h5',
'proteus/tests/periodic/comparison_files/basic_3d_test.h5']),
(os.path.join(proteus_install_path,'tests','post_processing','import_modules'),
['proteus/tests/post_processing/import_modules/reference_simplex_keep.ele',
'proteus/tests/post_processing/import_modules/reference_simplex_keep.face',
'proteus/tests/post_processing/import_modules/reference_simplex_keep.node',
'proteus/tests/post_processing/import_modules/reference_simplex_keep.poly',
'proteus/tests/post_processing/import_modules/bdm2_3d_face_func_vals.data',
'proteus/tests/post_processing/import_modules/bdm2_3d_interior_func_vals.data',
'proteus/tests/post_processing/import_modules/bdm_bdy_func_values_3dmesh.data',
'proteus/tests/post_processing/import_modules/bdm_func_values_3dmesh.data']),
(os.path.join(proteus_install_path,'tests','post_processing','comparison_files'),
['proteus/tests/post_processing/comparison_files/BDM_Test_File.h5',
'proteus/tests/post_processing/comparison_files/bdm2_ref_proj_mat.txt',
'proteus/tests/post_processing/comparison_files/bdm2_reference_simplex_mat.data',
'proteus/tests/post_processing/comparison_files/bdm2_reference_simplex_rhs.data',
'proteus/tests/post_processing/comparison_files/bdm_bdy_func_values.npy',
'proteus/tests/post_processing/comparison_files/bdm_bdy_func_values_mesh_8.npy',
'proteus/tests/post_processing/comparison_files/bdm_bdy_func_values_trig.npy',
'proteus/tests/post_processing/comparison_files/bdm_func_values.npy',
'proteus/tests/post_processing/comparison_files/bdm_func_values_mesh_8.npy',
'proteus/tests/post_processing/comparison_files/bdm_func_values_trig.npy',
'proteus/tests/post_processing/comparison_files/poisson_bdm1_test.h5',
'proteus/tests/post_processing/comparison_files/test_bdm2_sshaped_region_expected.h5',
'proteus/tests/post_processing/comparison_files/test_bdm_sshaped_region_expected.h5',
'proteus/tests/post_processing/comparison_files/trig_velocity_rep.npy']),
(os.path.join(proteus_install_path,'tests','matrix_constructor','comparison_files'),
['proteus/tests/matrix_constructor/comparison_files/velocity_laplace_C0P2_mesh.npy',
'proteus/tests/matrix_constructor/comparison_files/single_phase_THQuad_4_expected.data']),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','comparison_files'),
['proteus/tests/solver_tests_slow/comparison_files/drivenCavityNSE_LSC_expected.h5',
'proteus/tests/solver_tests_slow/comparison_files/drivenCavityNSE_LSC_expected.xmf',
'proteus/tests/solver_tests_slow/comparison_files/drivenCavityNSE_LSC_expected.log']),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','import_modules'),
['proteus/tests/solver_tests_slow/import_modules/petsc.options.schur_lsc']),
(os.path.join(proteus_install_path,'tests','solver_tests_mprans','comparison_files'),
['proteus/tests/solver_tests_mprans/comparison_files/twp_navier_stokes_cavity_2d.h5',
'proteus/tests/solver_tests_mprans/comparison_files/twp_navier_stokes_cavity_2d.xmf']),
(os.path.join(proteus_install_path,'tests','MeshAdaptPUMI'),
['proteus/tests/MeshAdaptPUMI/cube0.smb',
'proteus/tests/MeshAdaptPUMI/cube.dmg',
'proteus/tests/MeshAdaptPUMI/Couette.null',
'proteus/tests/MeshAdaptPUMI/Couette.msh',
'proteus/tests/MeshAdaptPUMI/Couette2D.msh',
'proteus/tests/MeshAdaptPUMI/Rectangle0.smb',
'proteus/tests/MeshAdaptPUMI/Rectangle1.smb',
'proteus/tests/MeshAdaptPUMI/Rectangle.dmg',
'proteus/tests/MeshAdaptPUMI/TwoQuads0.smb',
'proteus/tests/MeshAdaptPUMI/TwoQuads.dmg']),
(os.path.join(proteus_install_path,'tests','MeshAdaptPUMI','gauge_compare','dambreak_Colagrossi_2D'),
['proteus/tests/MeshAdaptPUMI/gauge_compare/dambreak_Colagrossi_2D/Reconstructed.dmg',
'proteus/tests/MeshAdaptPUMI/gauge_compare/dambreak_Colagrossi_2D/Reconstructed0.smb']),
(os.path.join(proteus_install_path,'tests','poisson_2d'),
['proteus/tests/poisson_2d/square4x4.3dm',
'proteus/tests/poisson_2d/square4x4.bc']),
(os.path.join(proteus_install_path,'tests','cylinder2D','conforming_rans3p','comparison_files'),
['proteus/tests/cylinder2D/conforming_rans3p/comparison_files/T1P1.h5',
'proteus/tests/cylinder2D/conforming_rans3p/comparison_files/T4P2.h5',
'proteus/tests/cylinder2D/conforming_rans3p/comparison_files/T8P2.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_method','comparison_files'),
['proteus/tests/cylinder2D/ibm_method/comparison_files/T1_rans3p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_rans2p','comparison_files'),
['proteus/tests/cylinder2D/ibm_rans2p/comparison_files/T1_ibm_rans2p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_rans2p_3D','comparison_files'),
['proteus/tests/cylinder2D/ibm_rans2p_3D/comparison_files/T1_ibm_3D_rans2p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','sbm_method','comparison_files'),
['proteus/tests/cylinder2D/sbm_method/comparison_files/T1_sbm_rans3p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','sbm_3Dmesh','comparison_files'),
['proteus/tests/cylinder2D/sbm_3Dmesh/comparison_files/T001_P1_sbm_3Dmesh.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','conforming_rans2p','comparison_files'),
['proteus/tests/cylinder2D/conforming_rans2p/comparison_files/T1_rans2p.h5']),
(os.path.join(proteus_install_path,'tests','HotStart_3P','comparison_files'),
['proteus/tests/HotStart_3P/comparison_files/T01P1_hotstart.h5',
'proteus/tests/HotStart_3P/comparison_files/T01P2_hotstart.h5']),
(os.path.join(proteus_install_path,'tests','AddedMass'),
['proteus/tests/AddedMass/petsc.options.superlu_dist']),
(os.path.join(proteus_install_path,'tests','MoveMeshMonitor'),
['proteus/tests/MoveMeshMonitor/petsc.options.asm',
'proteus/tests/MoveMeshMonitor/nodesResult.csv']),
(os.path.join(proteus_install_path,'tests','wave_tests'),
['proteus/tests/wave_tests/data_timeSeries.dat',
'proteus/tests/wave_tests/data_timeSeries.txt',
'proteus/tests/wave_tests/data_timeSeries_err1.csv',
'proteus/tests/wave_tests/data_timeSeries_err2.txt']),
],
scripts = ['scripts/parun','scripts/gf2poly','scripts/gatherArchives.py','scripts/qtm','scripts/waves2xmf','scripts/povgen.py',
'scripts/velocity2xmf','scripts/run_script_garnet','scripts/run_script_diamond',
'scripts/run_script_lonestar','scripts/run_script_ranger','scripts/run_script_mpiexec','scripts/gatherTimes','scripts/clearh5.py',
'scripts/runSWEs.py'],
requires=['numpy']
)
def setup_extensions_in_sequential():
setup_given_extensions(EXTENSIONS_TO_BUILD)
def setup_extensions_in_parallel():
import multiprocessing, logging
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.INFO)
multiprocessing.log_to_stderr()
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
EXTENSIONS=[[e] for e in EXTENSIONS_TO_BUILD]
pool.imap(setup_given_extensions, EXTENSIONS)
pool.close()
pool.join()
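# The compile-heavy 'build_ext' command is farmed out to one setup() call per
# extension across a process pool; every other command (install, sdist, ...)
# falls through to a single sequential setup() call.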
if "build_ext" in sys.argv:
setup_extensions_in_parallel()
else:
setup_extensions_in_sequential()
|
[
"distutils.sysconfig.get_config_vars",
"distutils.sysconfig.get_python_lib",
"multiprocessing.log_to_stderr",
"numpy.get_include",
"multiprocessing.cpu_count"
] |
[((72, 99), 'distutils.sysconfig.get_config_vars', 'sysconfig.get_config_vars', ([], {}), '()\n', (97, 99), False, 'from distutils import sysconfig\n'), ((1399, 1425), 'distutils.sysconfig.get_python_lib', 'sysconfig.get_python_lib', ([], {}), '()\n', (1423, 1425), False, 'from distutils import sysconfig\n'), ((58940, 58971), 'multiprocessing.log_to_stderr', 'multiprocessing.log_to_stderr', ([], {}), '()\n', (58969, 58971), False, 'import multiprocessing, logging\n'), ((59010, 59041), 'multiprocessing.log_to_stderr', 'multiprocessing.log_to_stderr', ([], {}), '()\n', (59039, 59041), False, 'import multiprocessing, logging\n'), ((59084, 59111), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (59109, 59111), False, 'import multiprocessing, logging\n'), ((3582, 3601), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3599, 3601), False, 'import numpy\n'), ((3890, 3909), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3907, 3909), False, 'import numpy\n'), ((4194, 4213), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (4211, 4213), False, 'import numpy\n'), ((4504, 4523), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (4521, 4523), False, 'import numpy\n'), ((4815, 4834), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (4832, 4834), False, 'import numpy\n'), ((5113, 5132), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (5130, 5132), False, 'import numpy\n'), ((5411, 5430), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (5428, 5430), False, 'import numpy\n'), ((5712, 5731), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (5729, 5731), False, 'import numpy\n'), ((6013, 6032), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (6030, 6032), False, 'import numpy\n'), ((6592, 6611), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (6609, 6611), False, 'import numpy\n'), ((7229, 7248), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (7246, 7248), False, 'import numpy\n'), ((7846, 7865), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (7863, 7865), False, 'import numpy\n'), ((8503, 8522), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (8520, 8522), False, 'import numpy\n'), ((8844, 8863), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (8861, 8863), False, 'import numpy\n'), ((9034, 9053), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9051, 9053), False, 'import numpy\n'), ((9296, 9315), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9313, 9315), False, 'import numpy\n'), ((9517, 9536), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9534, 9536), False, 'import numpy\n'), ((9683, 9702), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9700, 9702), False, 'import numpy\n'), ((9972, 9991), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9989, 9991), False, 'import numpy\n'), ((10610, 10629), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (10627, 10629), False, 'import numpy\n'), ((11416, 11435), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (11433, 11435), False, 'import numpy\n'), ((11960, 11979), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (11977, 11979), False, 'import numpy\n'), ((12519, 12538), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (12536, 12538), False, 'import numpy\n'), ((12671, 12690), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (12688, 12690), False, 
'import numpy\n'), ((13326, 13345), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (13343, 13345), False, 'import numpy\n'), ((14051, 14070), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (14068, 14070), False, 'import numpy\n'), ((15634, 15653), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (15651, 15653), False, 'import numpy\n'), ((15992, 16011), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (16009, 16011), False, 'import numpy\n'), ((16399, 16418), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (16416, 16418), False, 'import numpy\n'), ((16862, 16881), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (16879, 16881), False, 'import numpy\n'), ((17860, 17879), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (17877, 17879), False, 'import numpy\n'), ((18825, 18844), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (18842, 18844), False, 'import numpy\n'), ((19184, 19203), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (19201, 19203), False, 'import numpy\n'), ((20106, 20125), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (20123, 20125), False, 'import numpy\n'), ((20686, 20705), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (20703, 20705), False, 'import numpy\n'), ((20971, 20990), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (20988, 20990), False, 'import numpy\n'), ((29188, 29207), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (29205, 29207), False, 'import numpy\n'), ((29486, 29505), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (29503, 29505), False, 'import numpy\n'), ((29939, 29958), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (29956, 29958), False, 'import numpy\n'), ((30532, 30551), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (30549, 30551), False, 'import numpy\n'), ((30847, 30866), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (30864, 30866), False, 'import numpy\n'), ((31145, 31164), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (31162, 31164), False, 'import numpy\n'), ((31440, 31459), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (31457, 31459), False, 'import numpy\n'), ((31750, 31769), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (31767, 31769), False, 'import numpy\n'), ((32066, 32085), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (32083, 32085), False, 'import numpy\n'), ((32318, 32337), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (32335, 32337), False, 'import numpy\n'), ((32718, 32737), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (32735, 32737), False, 'import numpy\n'), ((33127, 33146), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (33144, 33146), False, 'import numpy\n'), ((33524, 33543), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (33541, 33543), False, 'import numpy\n'), ((33831, 33850), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (33848, 33850), False, 'import numpy\n'), ((34150, 34169), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (34167, 34169), False, 'import numpy\n'), ((34475, 34494), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (34492, 34494), False, 'import numpy\n'), ((14711, 14730), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (14728, 14730), False, 'import numpy\n'), ((22001, 22020), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (22018, 
22020), False, 'import numpy\n'), ((23545, 23564), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (23562, 23564), False, 'import numpy\n'), ((24952, 24971), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (24969, 24971), False, 'import numpy\n'), ((26753, 26772), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (26770, 26772), False, 'import numpy\n'), ((28145, 28164), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (28162, 28164), False, 'import numpy\n')]
|
""" Line analysis tools
These are intended to be methods generic to emission and absorption
(e.g. Equivalent width)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
from astropy.modeling import models, fitting
def box_ew(spec):
""" Boxcar EW calculation
Observer frame, not rest-frame
Parameters
----------
spec : Tuple of (wave, fx, sig)
Returns
-------
EW, sigEW : EW and error in observer frame
"""
# Note: Tested in test_absline_anly
# Grab
wv,fx,sig = spec
# Cut spectrum
# dwv
dwv = wv - np.roll(wv,1)
dwv[0] = dwv[1]
# Simple boxcar
EW = np.sum( dwv * (1. - fx) )
varEW = np.sum( dwv**2 * sig**2 )
sigEW = np.sqrt(varEW)
# Return
return EW, sigEW
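# Minimal usage sketch (illustrative only, not part of the original module):
#
#   wave = np.linspace(4000., 4010., 101)                  # wavelengths
#   flux = 1. - 0.5*np.exp(-0.5*((wave - 4005.)/0.5)**2)   # absorption dip
#   sig = np.full_like(wave, 0.02)                         # 1-sigma errors
#   ew, sig_ew = box_ew((wave, flux, sig))  # EW in the same units as wave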
def gaussian_ew(spec, ltype, initial_guesses=None):
""" EW calculation using Gaussian fit
    Observer frame, not rest-frame. The input spectrum should already be
    restricted to the wavelength region of the line.
Parameters
----------
spec : Tuple of (wave, fx, sig)
ltype : string
whether this is for absorption or emission line (see SpectralLine Class)
    initial_guesses : Tuple of (amplitude, mean, stddev), optional
Initial guesses of the Gaussian fit (unitless)
Returns
-------
EW, sigEW : EW and error in observer frame
"""
# Note: Tested in test_absline_anly
# Grab
wv,fx,sig = spec
# dwv
dwv = wv - np.roll(wv,1)
dwv[0] = dwv[1]
# Initial guesses of the Gaussian fit
if initial_guesses is None:
amp_init = np.mean(fx).value/2. #half the mean flux
stddev_init = 3*np.mean(dwv).value #3 pixels
        mean_init = np.mean(wv).value  # center of the wavelength range
elif len(initial_guesses)==3:
amp_init = initial_guesses[0]
mean_init = initial_guesses[1]
stddev_init = initial_guesses[2]
#check whether these values are sensible
if (mean_init < np.min(wv.value)) or (mean_init > np.max(wv.value)):
raise ValueError('gaussian_ew: The initial guess for Gaussian mean is not sensible; check it!')
if (amp_init < 0):
raise ValueError('gaussian_ew: The initial guess for Gaussian amplitude is not sensible; check it!')
if (stddev_init < 0):
raise ValueError('gaussian_ew: The initial guess for Gaussian stddev is not sensible; check it!')
else:
raise ValueError('gaussian_ew: Format of the initial_guesses is incorrect')
# Model initialization
if ltype == 'Abs':
g_init = models.GaussianAbsorption1D(amplitude=amp_init, mean=mean_init, stddev=stddev_init) # This model does not support units
elif ltype == 'Emiss':
g_init = models.Gaussian1D(amplitude=amp_init, mean=mean_init, stddev=stddev_init) # This model does not support units
else:
raise ValueError("gaussian_ew: ltype has to be either 'Abs' or 'Emiss'")
# Fitting algorithm initialization
fit_g = fitting.LevMarLSQFitter()
# Use only good values (i.e. with meaningful errors)
cond = (sig > 0.) & (np.isfinite(sig))
# Actual fit
g = fit_g(g_init, wv[cond], fx[cond], weights=1./sig[cond])
#Check whether the final fit is sensible
fit_info = fit_g.fit_info
if fit_info['param_cov'] is None:
raise ValueError('gaussian_ew: The fit is not sensible! Check initial_guesses')
# Area under curve of Gaussian is [amplitude*stddev*sqrt(2*pi)]
EW = g.amplitude.value * g.stddev.value * np.sqrt(2 * np.pi) #unitless
EW = EW * wv.unit #add the same unit as wv
#error estimation
cov = fit_g.fit_info['param_cov'] #covariance matrix
x = g.parameters[0] # amplitude
y = g.parameters[2] # stddev
sigEW = EW * np.sqrt(cov[0,0] / x**2 + cov[2,2] / y**2 + 2 * cov[0,2] / (x*y))
return EW, sigEW
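# Note (inferred from the code above, not original documentation): unlike
# box_ew, gaussian_ew accesses wv.unit and .value, so the wavelength and flux
# arrays passed in `spec` are expected to be astropy Quantity objects; the
# returned EW then carries the wavelength unit.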
|
[
"numpy.sum",
"numpy.roll",
"astropy.modeling.models.Gaussian1D",
"astropy.modeling.models.GaussianAbsorption1D",
"numpy.isfinite",
"astropy.modeling.fitting.LevMarLSQFitter",
"numpy.min",
"numpy.mean",
"numpy.max",
"numpy.sqrt"
] |
[((697, 721), 'numpy.sum', 'np.sum', (['(dwv * (1.0 - fx))'], {}), '(dwv * (1.0 - fx))\n', (703, 721), True, 'import numpy as np\n'), ((736, 763), 'numpy.sum', 'np.sum', (['(dwv ** 2 * sig ** 2)'], {}), '(dwv ** 2 * sig ** 2)\n', (742, 763), True, 'import numpy as np\n'), ((774, 788), 'numpy.sqrt', 'np.sqrt', (['varEW'], {}), '(varEW)\n', (781, 788), True, 'import numpy as np\n'), ((2991, 3016), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (3014, 3016), False, 'from astropy.modeling import models, fitting\n'), ((632, 646), 'numpy.roll', 'np.roll', (['wv', '(1)'], {}), '(wv, 1)\n', (639, 646), True, 'import numpy as np\n'), ((1457, 1471), 'numpy.roll', 'np.roll', (['wv', '(1)'], {}), '(wv, 1)\n', (1464, 1471), True, 'import numpy as np\n'), ((2566, 2654), 'astropy.modeling.models.GaussianAbsorption1D', 'models.GaussianAbsorption1D', ([], {'amplitude': 'amp_init', 'mean': 'mean_init', 'stddev': 'stddev_init'}), '(amplitude=amp_init, mean=mean_init, stddev=\n stddev_init)\n', (2593, 2654), False, 'from astropy.modeling import models, fitting\n'), ((3099, 3115), 'numpy.isfinite', 'np.isfinite', (['sig'], {}), '(sig)\n', (3110, 3115), True, 'import numpy as np\n'), ((3515, 3533), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3522, 3533), True, 'import numpy as np\n'), ((3761, 3835), 'numpy.sqrt', 'np.sqrt', (['(cov[0, 0] / x ** 2 + cov[2, 2] / y ** 2 + 2 * cov[0, 2] / (x * y))'], {}), '(cov[0, 0] / x ** 2 + cov[2, 2] / y ** 2 + 2 * cov[0, 2] / (x * y))\n', (3768, 3835), True, 'import numpy as np\n'), ((1699, 1710), 'numpy.mean', 'np.mean', (['wv'], {}), '(wv)\n', (1706, 1710), True, 'import numpy as np\n'), ((2730, 2803), 'astropy.modeling.models.Gaussian1D', 'models.Gaussian1D', ([], {'amplitude': 'amp_init', 'mean': 'mean_init', 'stddev': 'stddev_init'}), '(amplitude=amp_init, mean=mean_init, stddev=stddev_init)\n', (2747, 2803), False, 'from astropy.modeling import models, fitting\n'), ((1585, 1596), 'numpy.mean', 'np.mean', (['fx'], {}), '(fx)\n', (1592, 1596), True, 'import numpy as np\n'), ((1650, 1662), 'numpy.mean', 'np.mean', (['dwv'], {}), '(dwv)\n', (1657, 1662), True, 'import numpy as np\n'), ((1960, 1976), 'numpy.min', 'np.min', (['wv.value'], {}), '(wv.value)\n', (1966, 1976), True, 'import numpy as np\n'), ((1994, 2010), 'numpy.max', 'np.max', (['wv.value'], {}), '(wv.value)\n', (2000, 2010), True, 'import numpy as np\n')]
|
import collections
import inspect
import typing
import numpy as np
import pandas as pd
import torch
from river import base
__all__ = ["PyTorch2RiverBase", "PyTorch2RiverRegressor", "PyTorch2RiverClassifier"]
class PyTorch2RiverBase(base.Estimator):
"""An estimator that integrates neural Networks from PyTorch."""
def __init__(
self,
build_fn,
loss_fn: typing.Type[torch.nn.modules.loss._Loss],
optimizer_fn: typing.Type[torch.optim.Optimizer] = torch.optim.Adam,
learning_rate=1e-3,
seed=42,
**net_params,
):
self.build_fn = build_fn
self.loss_fn = loss_fn
self.loss = loss_fn()
self.optimizer_fn = optimizer_fn
self.learning_rate = learning_rate
self.net_params = net_params
self.seed = seed
torch.manual_seed(seed)
np.random.seed(seed)
self.net = None
@classmethod
def _unit_test_params(cls):
def build_torch_linear_regressor(n_features):
net = torch.nn.Sequential(
torch.nn.Linear(n_features, 1), torch.nn.Sigmoid()
)
return net
return {
"build_fn": build_torch_linear_regressor,
"loss_fn": torch.nn.MSELoss,
"optimizer_fn": torch.optim.SGD,
}
@classmethod
def _unit_test_skips(self):
"""Indicates which checks to skip during unit testing.
Most estimators pass the full test suite. However, in some cases, some estimators might not
be able to pass certain checks.
"""
return {
"check_pickling",
"check_shuffle_features_no_impact",
"check_emerging_features",
"check_disappearing_features",
"check_predict_proba_one",
"check_predict_proba_one_binary",
}
def _learn_one(self, x: torch.Tensor, y: torch.Tensor):
self.net.zero_grad()
y_pred = self.net(x)
loss = self.loss(y_pred, y)
loss.backward()
self.optimizer.step()
def learn_one(self, x: dict, y: base.typing.ClfTarget):
"""Update the model with a set of features `x` and a label `y`.
Parameters
----------
x
A dictionary of features.
y
A label.
Returns
-------
self
"""
if self.net is None:
self._init_net(n_features=len(list(x.values())))
x = torch.Tensor([list(x.values())])
y = torch.Tensor([[y]])
self._learn_one(x=x, y=y)
return self
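    # Illustrative call (not part of the original docs): for a two-feature
    # sample, model.learn_one({'x1': 1.0, 'x2': 0.5}, 1.0) lazily builds the
    # network with n_features=2 and then performs a single optimizer step.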
def _filter_torch_params(self, fn, override=None):
"""Filters `torch_params` and returns those in `fn`'s arguments.
Parameters
----------
fn
arbitrary function
override
            dictionary, values to override `net_params`
Returns
-------
res
dictionary containing variables in both and fn's arguments
"""
override = override or {}
res = {}
for name, value in self.net_params.items():
args = list(inspect.signature(fn).parameters)
if name in args:
res.update({name: value})
res.update(override)
return res
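    # Illustrative example (hypothetical names): with net_params={'n_layers': 3,
    # 'momentum': 0.9} and a build_fn(n_features, n_layers), only
    # {'n_layers': 3} is returned, because 'momentum' is not an argument of fn.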
def _init_net(self, n_features):
self.net = self.build_fn(
n_features=n_features, **self._filter_torch_params(self.build_fn)
)
# Only optimizers with learning rate as parameter are supported, needs to be fixed
self.optimizer = self.optimizer_fn(self.net.parameters(), self.learning_rate)
class PyTorch2RiverClassifier(PyTorch2RiverBase, base.Classifier):
"""A river classifier that integrates neural Networks from PyTorch.
Parameters
----------
build_fn
loss_fn
optimizer_fn
learning_rate
net_params
Examples
--------
>>> from river import compat
>>> from river import datasets
>>> from river import evaluate
>>> from river import metrics
>>> from river import preprocessing
>>> from torch import nn
>>> from torch import optim
>>> from torch import manual_seed
>>> _ = manual_seed(0)
>>> def build_torch_mlp_classifier(n_features):
... net = nn.Sequential(
... nn.Linear(n_features, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 1),
... nn.Sigmoid()
... )
... return net
...
>>> model = compat.PyTorch2RiverClassifier(
... build_fn= build_torch_mlp_classifier,
... loss_fn=nn.BCELoss,
... optimizer_fn=optim.Adam,
... learning_rate=1e-3
... )
>>> dataset = datasets.Phishing()
>>> metric = metrics.Accuracy()
>>> evaluate.progressive_val_score(dataset=dataset, model=model, metric=metric)
Accuracy: 74.38%
"""
def __init__(
self,
build_fn,
loss_fn: typing.Type[torch.nn.modules.loss._Loss],
optimizer_fn: typing.Type[torch.optim.Optimizer] = torch.optim.Adam,
learning_rate=1e-3,
**net_params,
):
self.classes = collections.Counter()
self.n_classes = 1
super().__init__(
build_fn=build_fn,
loss_fn=loss_fn,
optimizer_fn=optimizer_fn,
learning_rate=learning_rate,
**net_params,
)
def _update_classes(self):
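        # Grow the network when new classes appear in the stream: locate the last layer
        # that has weights, replace it with a Linear layer sized to the current number of
        # classes, copy the old weights over, and initialise the extra output row with the
        # mean of the existing rows before rebuilding the Sequential net.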
self.n_classes = len(self.classes)
layers = list(self.net.children())
i = -1
layer_to_convert = layers[i]
while not hasattr(layer_to_convert, "weight"):
layer_to_convert = layers[i]
i -= 1
removed = list(self.net.children())[: i + 1]
new_net = removed
new_layer = torch.nn.Linear(
in_features=layer_to_convert.in_features, out_features=self.n_classes
)
# copy the original weights back
with torch.no_grad():
new_layer.weight[:-1, :] = layer_to_convert.weight
new_layer.weight[-1:, :] = torch.mean(layer_to_convert.weight, 0)
new_net.append(new_layer)
if i + 1 < -1:
for layer in layers[i + 2 :]:
new_net.append(layer)
self.net = torch.nn.Sequential(*new_net)
self.optimizer = self.optimizer_fn(self.net.parameters(), self.learning_rate)
def learn_one(self, x: dict, y: base.typing.ClfTarget, **kwargs) -> base.Classifier:
self.classes.update([y])
# check if model is initialized
if self.net is None:
self._init_net(len(list(x.values())))
# check last layer and update if needed
if len(self.classes) != self.n_classes:
self._update_classes()
# training process
proba = {c: 0.0 for c in self.classes}
proba[y] = 1.0
x = list(x.values())
y = list(proba.values())
x = torch.Tensor([x])
y = torch.Tensor([y])
self._learn_one(x=x, y=y)
return self
def predict_proba_one(self, x: dict) -> typing.Dict[base.typing.ClfTarget, float]:
if self.net is None:
self._init_net(len(list(x.values())))
x = torch.Tensor(list(x.values()))
yp = self.net(x).detach().numpy()
proba = {c: 0.0 for c in self.classes}
for idx, val in enumerate(self.classes):
proba[val] = yp[idx]
return proba
def predict_proba_many(self, X: pd.DataFrame) -> pd.DataFrame:
if self.net is None:
self._init_net(len(X.columns))
x = torch.Tensor(list(X.to_numpy()))
yp = self.net(x).detach().numpy()
proba = {c: [0.0] * len(X) for c in self.classes}
for idx, val in enumerate(self.classes):
proba[val] = yp[idx]
return pd.DataFrame(proba)
class PyTorch2RiverRegressor(PyTorch2RiverBase, base.MiniBatchRegressor):
"""Compatibility layer from PyTorch to River for regression.
Parameters
----------
build_fn
loss_fn
optimizer_fn
learning_rate
net_params
Examples
--------
>>> from river import compat
>>> from river import datasets
>>> from river import evaluate
>>> from river import metrics
>>> from river import preprocessing
>>> from torch import nn
>>> from torch import optim
>>> _ = torch.manual_seed(0)
>>> dataset = datasets.TrumpApproval()
>>> def build_torch_mlp_regressor(n_features):
... net = nn.Sequential(
... nn.Linear(n_features, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 1)
... )
... return net
...
>>> model = compat.PyTorch2RiverRegressor(
... build_fn= build_torch_mlp_regressor,
... loss_fn=nn.MSELoss,
... optimizer_fn=optim.Adam,
... )
>>> metric = metrics.MAE()
>>> metric = evaluate.progressive_val_score(dataset=dataset, model=model, metric=metric)
>>> round(metric.get(), 2)
78.98
"""
def __init__(
self,
build_fn,
loss_fn: typing.Type[torch.nn.modules.loss._Loss],
optimizer_fn: typing.Type[torch.optim.Optimizer],
learning_rate=1e-3,
**net_params,
):
super().__init__(
build_fn=build_fn,
loss_fn=loss_fn,
optimizer_fn=optimizer_fn,
learning_rate=learning_rate,
**net_params,
)
def learn_many(self, X: pd.DataFrame, y: pd.Series, **kwargs):
if self.net is None:
self._init_net(n_features=len(X.columns))
x = torch.Tensor(X.to_numpy())
y = torch.Tensor([y])
self._learn_one(x=x, y=y)
return self
def predict_one(self, x):
if self.net is None:
self._init_net(len(x))
x = torch.Tensor(list(x.values()))
return self.net(x).item()
def predict_many(self, X: pd.DataFrame) -> pd.Series:
if self.net is None:
self._init_net(len(X.columns))
x = torch.Tensor(X.to_numpy())
return pd.Series(self.net(x).item())
|
[
"pandas.DataFrame",
"torch.mean",
"numpy.random.seed",
"torch.nn.Sequential",
"torch.manual_seed",
"torch.Tensor",
"inspect.signature",
"torch.nn.Linear",
"collections.Counter",
"torch.no_grad",
"torch.nn.Sigmoid"
] |
[((832, 855), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (849, 855), False, 'import torch\n'), ((864, 884), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (878, 884), True, 'import numpy as np\n'), ((2531, 2550), 'torch.Tensor', 'torch.Tensor', (['[[y]]'], {}), '([[y]])\n', (2543, 2550), False, 'import torch\n'), ((5213, 5234), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (5232, 5234), False, 'import collections\n'), ((5849, 5940), 'torch.nn.Linear', 'torch.nn.Linear', ([], {'in_features': 'layer_to_convert.in_features', 'out_features': 'self.n_classes'}), '(in_features=layer_to_convert.in_features, out_features=self\n .n_classes)\n', (5864, 5940), False, 'import torch\n'), ((6326, 6355), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*new_net'], {}), '(*new_net)\n', (6345, 6355), False, 'import torch\n'), ((6990, 7007), 'torch.Tensor', 'torch.Tensor', (['[x]'], {}), '([x])\n', (7002, 7007), False, 'import torch\n'), ((7020, 7037), 'torch.Tensor', 'torch.Tensor', (['[y]'], {}), '([y])\n', (7032, 7037), False, 'import torch\n'), ((7876, 7895), 'pandas.DataFrame', 'pd.DataFrame', (['proba'], {}), '(proba)\n', (7888, 7895), True, 'import pandas as pd\n'), ((9771, 9788), 'torch.Tensor', 'torch.Tensor', (['[y]'], {}), '([y])\n', (9783, 9788), False, 'import torch\n'), ((6012, 6027), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6025, 6027), False, 'import torch\n'), ((6131, 6169), 'torch.mean', 'torch.mean', (['layer_to_convert.weight', '(0)'], {}), '(layer_to_convert.weight, 0)\n', (6141, 6169), False, 'import torch\n'), ((1069, 1099), 'torch.nn.Linear', 'torch.nn.Linear', (['n_features', '(1)'], {}), '(n_features, 1)\n', (1084, 1099), False, 'import torch\n'), ((1101, 1119), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (1117, 1119), False, 'import torch\n'), ((3146, 3167), 'inspect.signature', 'inspect.signature', (['fn'], {}), '(fn)\n', (3163, 3167), False, 'import inspect\n')]
|
import numpy as np
import math
import matplotlib.pyplot as plt
import pickle
from time import time
from numpy.linalg import matrix_rank
from numpy.linalg import pinv,inv
from numpy.linalg import eig as eig
from numpy.linalg import eigh,lstsq
from numpy.linalg import matrix_power
from scipy.linalg import expm,pinvh,solve
from tqdm.notebook import tqdm,trange
from support.omniglot_loaders import OmniglotNShot
from support.tools import *
from sklearn import decomposition
from scipy.spatial.distance import pdist,squareform
from sklearn.neighbors import NearestCentroid
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
from meta_cntk import MetaCNTK
import argparse
from types import SimpleNamespace
from sklearn.decomposition import PCA,IncrementalPCA,KernelPCA
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from collections import deque
import typing
import pandas as pd
import matplotlib as mpl
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import higher
def load_dataset(n_task,random=True,seed=0,load_embeddings=False):
    # Load the preprocessed Omniglot dataset for 20-way 1-shot classification
# path = f'saved_models/tasks-{n_task}.p'
# if os.path.exists(path):
# print(f"Dataset exists at {path}")
# tasks = pickle.load(open(path,'rb'))
# else:
tasks = pickle.load(open(f'saved_models/tasks-200.p', 'rb'))
# Get the subset of the size we need
n_all_tasks = len(tasks['X_qry'])
assert n_all_tasks == 200
assert n_task <= n_all_tasks
if random:
np.random.seed(seed)
idxes = np.random.choice(n_all_tasks, size=n_task, replace=False)
else:
idxes = np.arange(n_task)
tasks['X_qry'] = tasks['X_qry'][idxes]
tasks['X_spt'] = tasks['X_spt'][idxes]
tasks['Y_qry'] = tasks['Y_qry'][idxes]
tasks['Y_spt'] = tasks['Y_spt'][idxes]
tasks['idx_Xs']= tasks['idx_Xs'][idxes]
tasks['idx_Xs_'] = tasks['idx_Xs_'][idxes]
tasks['n_task'] = n_task
# if load_embeddings:
# embeddings = load_label_embeddings(200,random_cnn=random_cnn_embedding)
# tasks['Y_qry_emb'] = embeddings['Y_qry_emb'][idxes]
# tasks['Y_spt_emb'] = embeddings['Y_spt_emb'][idxes]
# tasks['test_Y_qry_emb'] = embeddings['test_Y_qry_emb']
# tasks['test_Y_spt_emb'] = embeddings['test_Y_spt_emb']
tasks['load_embeddings'] = load_embeddings
return SimpleNamespace(**tasks)
def load_precomputed_base_kernels(dataset,kernel='CNTK'):
# path = f'saved_models/CNTK-{n_task}.npy'
# if os.path.exists(path):
# print(f"Precomputed CNTK exists at {path}")
# CNTK = np.load(path)
# else:
CNTK_all = np.load(f'saved_models/CNTK-200.npy')
all_idxes = np.concatenate([dataset.idx_Xs.flatten(),
dataset.idx_Xs_.flatten(),
dataset.idx_test_Xs.flatten(),
dataset.idx_test_Xs_.flatten()])
dataset.all_idxes = all_idxes
dataset.CNTK = CNTK_all[all_idxes][:, all_idxes]
def load_label_embeddings(n_task,random_cnn=False):
# postfix = f'-{emb_method}' if emb_method != '' else ''
if not random_cnn:
path = f'saved_models/tasks-{n_task}-embeddings.p'
else:
path = f'saved_models/tasks-{n_task}-embeddings-random_cnn.p'
embedding_dict = pickle.load(open(path, 'rb'))
return embedding_dict
def get_embeddings_from_PCA(dataset, n_components=784, PCA_method='regular'):
# 784 = 28*28, which is the number of pixels in each original image.
    # It is also the maximum n_components for PCA that we can choose
X_qry = dataset.X_qry if not dataset.load_embeddings else dataset.Y_qry_emb
X_spt = dataset.X_spt if not dataset.load_embeddings else dataset.Y_spt_emb
test_X_qry = dataset.test_X_qry if not dataset.load_embeddings else dataset.test_Y_qry_emb
test_X_spt = dataset.test_X_spt if not dataset.load_embeddings else dataset.test_Y_spt_emb
if PCA_method == 'regular':
pca = PCA(n_components=n_components, svd_solver='randomized')
else:
assert PCA_method in ['linear', 'poly', 'rbf', 'sigmoid', 'cosine']
pca = KernelPCA(n_components=n_components, kernel=PCA_method, fit_inverse_transform=True)
if not dataset.load_embeddings:
# Reshape images from vectors to their original size: 32*32
new_shape = (-1, 32, 32)
X_train = np.concatenate([dataset.X_qry.reshape(new_shape), dataset.X_spt.reshape(new_shape)], axis=0)
X_train = X_train[:, 2:-2, 2:-2] # remove paddings
pca.fit(X_train.reshape(X_train.shape[0], -1))
else:
emb_dim = X_qry.shape[-1]
X_train = np.concatenate([X_qry.reshape(-1,emb_dim),
X_spt.reshape(-1,emb_dim)], axis=0)
pca.fit(X_train)
# print('X_train',X_train.shape)
Xs = [X_qry, X_spt, test_X_qry, test_X_spt]
Y_qry_emb, Y_spt_emb, test_Y_qry_emb, test_Y_spt_emb = [], [], [], []
Ys = [Y_qry_emb, Y_spt_emb, test_Y_qry_emb, test_Y_spt_emb]
for x, y in zip(Xs, Ys):
# The following 3 lines are to remove the padding in original images (28*28 pixels),
# since we pad the original images to 32*32 for convenience of CNTK computing via CUDA
if not dataset.load_embeddings:
x = x.reshape(-1, 32, 32)
x = x[:, 2:-2, 2:-2]
x = x.reshape(x.shape[0], -1)
else:
x = x.reshape(-1,emb_dim)
result = pca.transform(x)
y.append(result)
dataset.Y_qry_emb, dataset.Y_spt_emb, dataset.test_Y_qry_emb, dataset.test_Y_spt_emb = Y_qry_emb[0], Y_spt_emb[0], test_Y_qry_emb[0], \
test_Y_spt_emb[0]
# return SimpleNamespace(Y_qry_emb=Y_qry_emb, Y_spt_emb=Y_spt_emb,
# test_Y_qry_emb=test_Y_qry_emb, test_Y_spt_emb=test_Y_spt_emb)
def preprocess_label_embeddings(dataset,pred_test_Y_qry=None,test_all = False):
# Find the center of embeddings in each class, then use this center as the label for this class
n_components = dataset.Y_qry_emb.shape[-1]
Y_qry_emb = dataset.Y_qry_emb.reshape(*dataset.Y_qry.shape, n_components)
Y_spt_emb = dataset.Y_spt_emb.reshape(*dataset.Y_spt.shape, n_components)
test_Y_qry_emb = dataset.test_Y_qry_emb.reshape(*dataset.test_Y_qry.shape, n_components)
test_Y_spt_emb = dataset.test_Y_spt_emb.reshape(*dataset.test_Y_spt.shape, n_components)
# Y_qry_emb,Y_spt_emb
clf = NearestCentroid()
Y_train = np.concatenate([dataset.Y_qry, dataset.Y_spt], axis=1)
Y_train_emb = np.concatenate([Y_qry_emb, Y_spt_emb], axis=1)
N_train = len(Y_train)
n_class = len(np.unique(Y_train[0]))
Y_centroids = []
for i in range(N_train):
clf.fit(Y_train_emb[i], Y_train[i])
for j in range(n_class):
Y_train_emb[i][Y_train[i] == j] = clf.centroids_[j]
centroids = clf.centroids_
Y_centroids.append(centroids)
Y_qry_emb = Y_train_emb[:, :dataset.Y_qry.shape[1], :]
Y_spt_emb = Y_train_emb[:, dataset.Y_qry.shape[1]:, :]
Y_centroids = np.array(Y_centroids)
# Y_qry_emb,Y_spt_emb
clf = NearestCentroid()
Y_test = np.concatenate([dataset.test_Y_qry, dataset.test_Y_spt], axis=1)
Y_test_emb = np.concatenate([test_Y_qry_emb, test_Y_spt_emb], axis=1)
if pred_test_Y_qry is not None:
pred_Y_test = np.concatenate([pred_test_Y_qry,dataset.test_Y_spt],axis=1)
N_test = len(Y_test)
n_class = len(np.unique(Y_test[0]))
test_Y_centroids = []
for i in range(N_test):
if pred_test_Y_qry is None:
Y_emb = test_Y_spt_emb[i]
Y = dataset.test_Y_spt[i]
clf.fit(Y_emb, Y)
else:
clf.fit(Y_test_emb[i], pred_Y_test[i])
for j in range(n_class):
nbrs = NearestNeighbors(n_neighbors=1)
embs = Y_test_emb[i][pred_Y_test[i] == j]
nbrs.fit(embs)
_,[[emb_idx]] = nbrs.kneighbors([clf.centroids_[j]])
clf.centroids_[j]=embs[emb_idx]
for j in range(n_class):
Y_test_emb[i][Y_test[i] == j] = clf.centroids_[j]
centroids = clf.centroids_
test_Y_centroids.append(centroids)
test_Y_qry_emb = Y_test_emb[:, :dataset.test_Y_qry.shape[1], :]
test_Y_spt_emb = Y_test_emb[:, dataset.test_Y_qry.shape[1]:, :]
test_Y_centroids = np.array(test_Y_centroids)
dataset.Y_qry_emb=Y_qry_emb
dataset.Y_spt_emb=Y_spt_emb
dataset.test_Y_qry_emb=test_Y_qry_emb
dataset.test_Y_spt_emb=test_Y_spt_emb
dataset.Y_centroids=Y_centroids
dataset.test_Y_centroids=test_Y_centroids
dataset.n_components=n_components
dataset.N_train = N_train
dataset.N_test = N_test
def pred_from_emb(embeddings, dataset, n_neighbors=1):
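    # Map predicted embeddings back to class labels: for each test task, assign every
    # query embedding the label of its nearest class centroid (1-NN by default).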
nbrs = NearestNeighbors(n_neighbors=n_neighbors)
assert len(embeddings) == len(dataset.test_Y_centroids)
preds = []
for i in range(dataset.N_test):
nbrs.fit(dataset.test_Y_centroids[i])
emb = embeddings[i]
_, pred = nbrs.kneighbors(emb)
pred = pred.flatten()
preds.append(pred)
preds = np.array(preds)
return preds
def build_MetaCNTK(dataset, ridge_coef=[1e-5, 1e-5], normalize_NTK=True, normalize_metaNTK=True):
model = MetaCNTK(d_max=20, fix=False, GAP=True,
inner_lr=np.inf, train_time=np.inf,
invMetaNTK=False,
kernel_ridge=True,
ridge_coef=ridge_coef,
normalize_NTK=normalize_NTK,
normalize_metaNTK=normalize_metaNTK)
model.fit(dataset.X_qry,dataset.Y_qry_emb,dataset.X_spt,dataset.Y_spt_emb)
model.load_test_tasks(X_query=dataset.test_X_qry,X_support=dataset.test_X_spt,Y_support=dataset.test_Y_spt_emb)
model.load_precompute_NTKs(dataset.CNTK)
return model
def test_MetaCNTK(dataset,model):
t0 = time()
pred_test_Y = model.predict()
print(f"Took {round(time() - t0, 2)}")
loss = np.mean( (pred_test_Y - dataset.test_Y_qry_emb)**2)
pred_test_Y = pred_from_emb(pred_test_Y, dataset)
pred_test_Y = pred_test_Y.reshape(*dataset.test_Y_qry.shape)
test_acc = np.mean(pred_test_Y == dataset.test_Y_qry)
return test_acc, pred_test_Y, loss
def augment_train_data(dataset,enlarge_ratio=10,n_way=5,n_shot=1,seed=0):
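    # Task augmentation: pool the images of all training tasks under globally unique class
    # labels, then resample `enlarge_ratio * n_task` new n_way/n_shot tasks by drawing
    # classes without replacement and splitting each class into support and query sets.
    # Labels are finally re-shuffled to 0..n_way-1 within every new task.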
new_n_task =dataset.n_task*enlarge_ratio
X = np.concatenate([dataset.X_qry,dataset.X_spt],axis=1)
Y = np.concatenate([dataset.Y_qry,dataset.Y_spt],axis=1)
idx_X = np.concatenate([dataset.idx_Xs,dataset.idx_Xs_],axis=1)
dict_idx_x = {}
for i in range(idx_X.shape[0]):
for j in range(idx_X.shape[1]):
idx = idx_X[i][j]
x = X[i][j]
dict_idx_x[idx] = x
n_local_labels = len(np.unique(Y))
n_global_labels = 0
for i in range(Y.shape[0]):
Y[i] += n_global_labels
n_global_labels += n_local_labels
global_labels = np.unique(Y)
Y = Y.flatten()
idx_X = idx_X.flatten()
dict_label_idx = {}
dict_idx_label = {}
for label in global_labels:
idxes_for_label = idx_X[Y == label]
dict_label_idx[label] = idxes_for_label
for idx in idxes_for_label:
dict_idx_label[idx] = label
X_qry,X_spt,Y_spt,Y_qry,idx_X_qry,idx_X_spt = [],[],[],[],[],[]
np.random.seed(seed)
all_labels = np.concatenate([np.random.choice(global_labels, size=len(global_labels), replace=False) for _ in
range(enlarge_ratio)]).reshape(-1, n_way)
assert len(all_labels) == new_n_task
for i_task in range(new_n_task):
# labels = np.random.choice(global_labels,size = n_way,replace=False)
labels = all_labels[i_task]
idx_X_qry.append([]),idx_X_spt.append([])
# Y_qry.append([]),Y_spt.append([])
for label in labels:
# print(labels)
idx_spt,idx_qry = train_test_split(dict_label_idx[label],train_size = n_shot)
idx_X_qry[-1].append(idx_qry)
idx_X_spt[-1].append(idx_spt)
idx_X_qry = np.array(idx_X_qry).reshape(len(idx_X_qry),-1)
idx_X_spt = np.array(idx_X_spt).reshape(len(idx_X_spt),-1)
Y_qry_emb,Y_spt_emb,test_Y_qry_emb,test_Y_spt_emb = [],[],[],[]
for idx in idx_X_qry.flatten():
Y_qry.append(dict_idx_label[idx])
X_qry.append(dict_idx_x[idx])
Y_qry_emb.append(dataset.dict_idx_emb[idx])
for idx in idx_X_spt.flatten():
Y_spt.append(dict_idx_label[idx])
X_spt.append(dict_idx_x[idx])
Y_spt_emb.append(dataset.dict_idx_emb[idx])
x_shape = X_spt[0].shape
emb_shape = Y_spt_emb[0].shape
Y_qry,Y_spt = np.array(Y_qry),np.array(Y_spt)
Y_qry_emb,Y_spt_emb = np.array(Y_qry_emb), np.array(Y_spt_emb)
X_qry,X_spt = np.array(X_qry),np.array(X_spt)
Y_qry,Y_spt = Y_qry.reshape(idx_X_qry.shape),Y_spt.reshape(idx_X_spt.shape)
Y_qry_emb,Y_spt_emb = Y_qry_emb.reshape(idx_X_qry.shape+emb_shape), Y_spt_emb.reshape(idx_X_spt.shape+emb_shape)
X_qry,X_spt = X_qry.reshape(idx_X_qry.shape + x_shape),X_spt.reshape(idx_X_spt.shape+x_shape)
from copy import deepcopy
np.random.seed(seed)
for i in range(len(Y_qry)):
ys_qry = deepcopy(Y_qry[i])
ys_spt = deepcopy(Y_spt[i])
label_mapping = {}
labels = np.unique(ys_qry)
new_labels = np.arange(n_way)
np.random.shuffle(new_labels)
for label,new_label in zip(labels,new_labels):
Y_qry[i][ys_qry==label] = new_label
Y_spt[i][ys_spt==label] = new_label
dataset.idx_Xs = idx_X_qry
dataset.idx_Xs_ = idx_X_spt
dataset.X_qry = X_qry
dataset.X_spt = X_spt
dataset.Y_qry = Y_qry
dataset.Y_spt = Y_spt
dataset.Y_qry_emb = Y_qry_emb
dataset.Y_spt_emb = Y_spt_emb
def train_supervised(model, device, train_loader, optimizer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
def test_supervised(model, device, test_loader, verbose=False):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = correct / len(test_loader.dataset)
if verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss,test_acc
class NaiveDataset(torch.utils.data.Dataset):
def __init__(self, samples,labels):
'Initialization'
self.labels = torch.from_numpy(labels).long()
self.samples = torch.from_numpy(samples).float()
assert len(labels) == len(samples)
def __len__(self):
'Denotes the total number of samples'
return len(self.labels)
def __getitem__(self, index):
'Generates one sample of data'
X = self.samples[index]
y = self.labels[index]
return X, y
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
def build_CNN(n_way,device,n_channel=64,batch_norm = True,dropout=None):
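    # A small Omniglot-style conv net: three Conv -> (BatchNorm) -> ReLU -> MaxPool ->
    # (Dropout) blocks followed by Flatten and a Linear head with `n_way` outputs.
    # Disabled layers are inserted as None placeholders and filtered out below.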
if dropout == 0:
dropout = None
modules = [nn.Conv2d(1, n_channel, 3),
nn.BatchNorm2d(n_channel, momentum=1, affine=True) if batch_norm else None,
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(dropout) if dropout is not None else None,
nn.Conv2d(n_channel, n_channel, 3),
nn.BatchNorm2d(n_channel, momentum=1, affine=True) if batch_norm else None,
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(dropout) if dropout is not None else None,
nn.Conv2d(n_channel, n_channel, 3),
nn.BatchNorm2d(n_channel, momentum=1, affine=True) if batch_norm else None,
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(dropout) if dropout is not None else None,
Flatten(),
nn.Linear(n_channel, n_way)]
    # filter out disabled (None) layers; a comprehension avoids deleting from the list
    # while iterating over it
    modules = [module for module in modules if module is not None]
net = nn.Sequential(*modules).to(device)
net.eval()
return net
def get_train_data(dataset, n_test_per_class=0):
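    # Flatten the meta-training tasks into one plain supervised dataset: pool all images,
    # give every class a globally unique label, and optionally hold out the last
    # `n_test_per_class` images of each class as a held-out test split.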
X = np.concatenate([dataset.X_qry, dataset.X_spt], axis=1)
Y = np.concatenate([dataset.Y_qry, dataset.Y_spt], axis=1)
idx_X = np.concatenate([dataset.idx_Xs, dataset.idx_Xs_], axis=1)
dict_idx_x = {}
for i in range(idx_X.shape[0]):
for j in range(idx_X.shape[1]):
idx = idx_X[i][j]
x = X[i][j]
dict_idx_x[idx] = x
n_local_labels = len(np.unique(Y))
n_global_labels = 0
for i in range(Y.shape[0]):
Y[i] += n_global_labels
n_global_labels += n_local_labels
global_labels = np.unique(Y)
Y = Y.flatten()
idx_X = idx_X.flatten()
dict_label_idx = {}
dict_idx_label = {}
for label in global_labels:
idxes_for_label = idx_X[Y == label]
dict_label_idx[label] = idxes_for_label
for idx in idxes_for_label:
dict_idx_label[idx] = label
labels = []
samples = []
for label, idxes in dict_label_idx.items():
if n_test_per_class > 0:
idxes = idxes[:-n_test_per_class]
for idx in idxes:
labels.append(label)
samples.append(dict_idx_x[idx])
samples = np.array(samples)
labels = np.array(labels)
if samples.shape[-1] == 32: # remove useless padding
samples = samples[:, :, 2:-2, 2:-2]
train_set = {'samples': samples, 'labels': labels}
n_class = len(dict_label_idx.keys())
assert n_class == len(np.unique(train_set['labels']))
assert np.max(train_set['labels']) == n_class - 1
train_set['n_class'] = n_class
if n_test_per_class > 0:
labels = []
samples = []
for label, idxes in dict_label_idx.items():
idxes = idxes[-n_test_per_class:]
for idx in idxes:
labels.append(label)
samples.append(dict_idx_x[idx])
samples = np.array(samples)
labels = np.array(labels)
if samples.shape[-1] == 32: # remove useless padding
samples = samples[:, :, 2:-2, 2:-2]
test_set = {'samples': samples, 'labels': labels}
return train_set, test_set
else:
return train_set,None
def pretrain(net,train_set, test_set, device, batch_size=64, lr=1e-3, epochs=40, seed=0,weight_decay=0.):
if epochs == 0:
return net
kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
torch.manual_seed(seed)
np.random.seed(seed)
train_loader = torch.utils.data.DataLoader(
NaiveDataset(train_set['samples'], train_set['labels']),
batch_size=batch_size, shuffle=True, **kwargs)
if test_set is not None:
test_loader = torch.utils.data.DataLoader(
NaiveDataset(test_set['samples'], test_set['labels']),
batch_size=batch_size, shuffle=True, **kwargs)
optimizer = optim.Adam(net.parameters(), lr=lr,weight_decay=weight_decay)
test_accs = []
test_losses = []
for epoch in trange(epochs, leave=False, desc='Train Supervised'):
train_supervised(net, device, train_loader, optimizer)
if test_set is not None:
test_loss, test_acc = test_supervised(net, device, test_loader)
test_accs.append(test_acc)
test_losses.append(test_loss)
if test_set is not None:
return net,np.array(test_accs),np.array(test_losses)
else:
return net, None, None
def encode_labels(dataset,net,device):
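    # Use the pretrained CNN without its final Linear layer as a feature extractor and
    # store the resulting features as label embeddings for every train/test image,
    # keyed by the image's global index in `dict_idx_emb`.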
feature_extractor = net[:-1]
Y_qry_emb,Y_spt_emb,test_Y_qry_emb,test_Y_spt_emb = [],[],[],[]
for x,y in [(dataset.X_qry,Y_qry_emb),(dataset.X_spt,Y_spt_emb),
(dataset.test_X_qry,test_Y_qry_emb),(dataset.test_X_spt,test_Y_spt_emb)]:
x = x.reshape(-1,1,32,32)
x = x[:,:,2:-2,2:-2]
x = torch.from_numpy(x).to(device)
x = x.reshape(( -1, 5 ,)+x.shape[1:]) # reshape into batches of size = 5 for memory efficiency
result = []
for batch_x in x:
result.append(feature_extractor(batch_x).detach().cpu().numpy())
result = np.concatenate(result,axis=0)
y.append(result)
Y_qry_emb,Y_spt_emb,test_Y_qry_emb,test_Y_spt_emb = Y_qry_emb[0],Y_spt_emb[0],test_Y_qry_emb[0],test_Y_spt_emb[0]
emb_dim = Y_qry_emb.shape[-1]
dict_idx_emb = {}
for embs, idxes in [(Y_qry_emb, dataset.idx_Xs), (Y_spt_emb, dataset.idx_Xs_),
(test_Y_qry_emb, dataset.idx_test_Xs),
(test_Y_spt_emb, dataset.idx_test_Xs_)]:
idxes = idxes.flatten()
for emb, idx in zip(embs, idxes):
dict_idx_emb[idx] = emb
dataset.Y_qry_emb = Y_qry_emb.reshape(dataset.n_task,-1,emb_dim)
dataset.Y_spt_emb = Y_spt_emb.reshape(dataset.n_task,-1,emb_dim)
dataset.test_Y_qry_emb = test_Y_qry_emb.reshape(dataset.test_Y_qry.shape+(emb_dim,))
dataset.test_Y_spt_emb = test_Y_spt_emb.reshape(dataset.test_Y_spt.shape+(emb_dim,))
dataset.dict_idx_emb = dict_idx_emb
|
[
"numpy.load",
"numpy.random.seed",
"sklearn.model_selection.train_test_split",
"tqdm.notebook.trange",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"numpy.unique",
"numpy.max",
"sklearn.neighbors.NearestNeighbors",
"numpy.random.choice",
"torch.nn.Linear",
"sklearn.neighbors.NearestCentroid",
"meta_cntk.MetaCNTK",
"types.SimpleNamespace",
"numpy.random.shuffle",
"copy.deepcopy",
"torch.nn.Dropout2d",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.functional.cross_entropy",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.MaxPool2d",
"numpy.concatenate",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.Sequential",
"time.time",
"numpy.array",
"sklearn.decomposition.PCA",
"sklearn.decomposition.KernelPCA"
] |
[((2683, 2707), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**tasks)\n', (2698, 2707), False, 'from types import SimpleNamespace\n'), ((2958, 2995), 'numpy.load', 'np.load', (['f"""saved_models/CNTK-200.npy"""'], {}), "(f'saved_models/CNTK-200.npy')\n", (2965, 2995), True, 'import numpy as np\n'), ((6803, 6820), 'sklearn.neighbors.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (6818, 6820), False, 'from sklearn.neighbors import NearestCentroid\n'), ((6835, 6889), 'numpy.concatenate', 'np.concatenate', (['[dataset.Y_qry, dataset.Y_spt]'], {'axis': '(1)'}), '([dataset.Y_qry, dataset.Y_spt], axis=1)\n', (6849, 6889), True, 'import numpy as np\n'), ((6908, 6954), 'numpy.concatenate', 'np.concatenate', (['[Y_qry_emb, Y_spt_emb]'], {'axis': '(1)'}), '([Y_qry_emb, Y_spt_emb], axis=1)\n', (6922, 6954), True, 'import numpy as np\n'), ((7424, 7445), 'numpy.array', 'np.array', (['Y_centroids'], {}), '(Y_centroids)\n', (7432, 7445), True, 'import numpy as np\n'), ((7483, 7500), 'sklearn.neighbors.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (7498, 7500), False, 'from sklearn.neighbors import NearestCentroid\n'), ((7514, 7578), 'numpy.concatenate', 'np.concatenate', (['[dataset.test_Y_qry, dataset.test_Y_spt]'], {'axis': '(1)'}), '([dataset.test_Y_qry, dataset.test_Y_spt], axis=1)\n', (7528, 7578), True, 'import numpy as np\n'), ((7596, 7652), 'numpy.concatenate', 'np.concatenate', (['[test_Y_qry_emb, test_Y_spt_emb]'], {'axis': '(1)'}), '([test_Y_qry_emb, test_Y_spt_emb], axis=1)\n', (7610, 7652), True, 'import numpy as np\n'), ((8732, 8758), 'numpy.array', 'np.array', (['test_Y_centroids'], {}), '(test_Y_centroids)\n', (8740, 8758), True, 'import numpy as np\n'), ((9153, 9194), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neighbors'}), '(n_neighbors=n_neighbors)\n', (9169, 9194), False, 'from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\n'), ((9489, 9504), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (9497, 9504), True, 'import numpy as np\n'), ((9636, 9845), 'meta_cntk.MetaCNTK', 'MetaCNTK', ([], {'d_max': '(20)', 'fix': '(False)', 'GAP': '(True)', 'inner_lr': 'np.inf', 'train_time': 'np.inf', 'invMetaNTK': '(False)', 'kernel_ridge': '(True)', 'ridge_coef': 'ridge_coef', 'normalize_NTK': 'normalize_NTK', 'normalize_metaNTK': 'normalize_metaNTK'}), '(d_max=20, fix=False, GAP=True, inner_lr=np.inf, train_time=np.inf,\n invMetaNTK=False, kernel_ridge=True, ridge_coef=ridge_coef,\n normalize_NTK=normalize_NTK, normalize_metaNTK=normalize_metaNTK)\n', (9644, 9845), False, 'from meta_cntk import MetaCNTK\n'), ((10265, 10271), 'time.time', 'time', ([], {}), '()\n', (10269, 10271), False, 'from time import time\n'), ((10360, 10412), 'numpy.mean', 'np.mean', (['((pred_test_Y - dataset.test_Y_qry_emb) ** 2)'], {}), '((pred_test_Y - dataset.test_Y_qry_emb) ** 2)\n', (10367, 10412), True, 'import numpy as np\n'), ((10547, 10589), 'numpy.mean', 'np.mean', (['(pred_test_Y == dataset.test_Y_qry)'], {}), '(pred_test_Y == dataset.test_Y_qry)\n', (10554, 10589), True, 'import numpy as np\n'), ((10757, 10811), 'numpy.concatenate', 'np.concatenate', (['[dataset.X_qry, dataset.X_spt]'], {'axis': '(1)'}), '([dataset.X_qry, dataset.X_spt], axis=1)\n', (10771, 10811), True, 'import numpy as np\n'), ((10818, 10872), 'numpy.concatenate', 'np.concatenate', (['[dataset.Y_qry, dataset.Y_spt]'], {'axis': '(1)'}), '([dataset.Y_qry, dataset.Y_spt], axis=1)\n', (10832, 10872), True, 'import numpy as np\n'), ((10883, 10940), 
'numpy.concatenate', 'np.concatenate', (['[dataset.idx_Xs, dataset.idx_Xs_]'], {'axis': '(1)'}), '([dataset.idx_Xs, dataset.idx_Xs_], axis=1)\n', (10897, 10940), True, 'import numpy as np\n'), ((11313, 11325), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (11322, 11325), True, 'import numpy as np\n'), ((11697, 11717), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11711, 11717), True, 'import numpy as np\n'), ((13524, 13544), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13538, 13544), True, 'import numpy as np\n'), ((17265, 17319), 'numpy.concatenate', 'np.concatenate', (['[dataset.X_qry, dataset.X_spt]'], {'axis': '(1)'}), '([dataset.X_qry, dataset.X_spt], axis=1)\n', (17279, 17319), True, 'import numpy as np\n'), ((17328, 17382), 'numpy.concatenate', 'np.concatenate', (['[dataset.Y_qry, dataset.Y_spt]'], {'axis': '(1)'}), '([dataset.Y_qry, dataset.Y_spt], axis=1)\n', (17342, 17382), True, 'import numpy as np\n'), ((17395, 17452), 'numpy.concatenate', 'np.concatenate', (['[dataset.idx_Xs, dataset.idx_Xs_]'], {'axis': '(1)'}), '([dataset.idx_Xs, dataset.idx_Xs_], axis=1)\n', (17409, 17452), True, 'import numpy as np\n'), ((17827, 17839), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (17836, 17839), True, 'import numpy as np\n'), ((18416, 18433), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (18424, 18433), True, 'import numpy as np\n'), ((18447, 18463), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (18455, 18463), True, 'import numpy as np\n'), ((19646, 19669), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (19663, 19669), False, 'import torch\n'), ((19674, 19694), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (19688, 19694), True, 'import numpy as np\n'), ((20205, 20257), 'tqdm.notebook.trange', 'trange', (['epochs'], {'leave': '(False)', 'desc': '"""Train Supervised"""'}), "(epochs, leave=False, desc='Train Supervised')\n", (20211, 20257), False, 'from tqdm.notebook import tqdm, trange\n'), ((1830, 1850), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1844, 1850), True, 'import numpy as np\n'), ((1867, 1924), 'numpy.random.choice', 'np.random.choice', (['n_all_tasks'], {'size': 'n_task', 'replace': '(False)'}), '(n_all_tasks, size=n_task, replace=False)\n', (1883, 1924), True, 'import numpy as np\n'), ((1951, 1968), 'numpy.arange', 'np.arange', (['n_task'], {}), '(n_task)\n', (1960, 1968), True, 'import numpy as np\n'), ((4304, 4359), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components', 'svd_solver': '"""randomized"""'}), "(n_components=n_components, svd_solver='randomized')\n", (4307, 4359), False, 'from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA\n'), ((4460, 4547), 'sklearn.decomposition.KernelPCA', 'KernelPCA', ([], {'n_components': 'n_components', 'kernel': 'PCA_method', 'fit_inverse_transform': '(True)'}), '(n_components=n_components, kernel=PCA_method,\n fit_inverse_transform=True)\n', (4469, 4547), False, 'from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA\n'), ((7000, 7021), 'numpy.unique', 'np.unique', (['Y_train[0]'], {}), '(Y_train[0])\n', (7009, 7021), True, 'import numpy as np\n'), ((7712, 7773), 'numpy.concatenate', 'np.concatenate', (['[pred_test_Y_qry, dataset.test_Y_spt]'], {'axis': '(1)'}), '([pred_test_Y_qry, dataset.test_Y_spt], axis=1)\n', (7726, 7773), True, 'import numpy as np\n'), ((7816, 7836), 'numpy.unique', 'np.unique', (['Y_test[0]'], {}), '(Y_test[0])\n', (7825, 
7836), True, 'import numpy as np\n'), ((11148, 11160), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (11157, 11160), True, 'import numpy as np\n'), ((13043, 13058), 'numpy.array', 'np.array', (['Y_qry'], {}), '(Y_qry)\n', (13051, 13058), True, 'import numpy as np\n'), ((13059, 13074), 'numpy.array', 'np.array', (['Y_spt'], {}), '(Y_spt)\n', (13067, 13074), True, 'import numpy as np\n'), ((13101, 13120), 'numpy.array', 'np.array', (['Y_qry_emb'], {}), '(Y_qry_emb)\n', (13109, 13120), True, 'import numpy as np\n'), ((13122, 13141), 'numpy.array', 'np.array', (['Y_spt_emb'], {}), '(Y_spt_emb)\n', (13130, 13141), True, 'import numpy as np\n'), ((13160, 13175), 'numpy.array', 'np.array', (['X_qry'], {}), '(X_qry)\n', (13168, 13175), True, 'import numpy as np\n'), ((13176, 13191), 'numpy.array', 'np.array', (['X_spt'], {}), '(X_spt)\n', (13184, 13191), True, 'import numpy as np\n'), ((13594, 13612), 'copy.deepcopy', 'deepcopy', (['Y_qry[i]'], {}), '(Y_qry[i])\n', (13602, 13612), False, 'from copy import deepcopy\n'), ((13630, 13648), 'copy.deepcopy', 'deepcopy', (['Y_spt[i]'], {}), '(Y_spt[i])\n', (13638, 13648), False, 'from copy import deepcopy\n'), ((13693, 13710), 'numpy.unique', 'np.unique', (['ys_qry'], {}), '(ys_qry)\n', (13702, 13710), True, 'import numpy as np\n'), ((13733, 13749), 'numpy.arange', 'np.arange', (['n_way'], {}), '(n_way)\n', (13742, 13749), True, 'import numpy as np\n'), ((13759, 13788), 'numpy.random.shuffle', 'np.random.shuffle', (['new_labels'], {}), '(new_labels)\n', (13776, 13788), True, 'import numpy as np\n'), ((14456, 14487), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (14471, 14487), True, 'import torch.nn.functional as F\n'), ((14663, 14678), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14676, 14678), False, 'import torch\n'), ((16180, 16206), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'n_channel', '(3)'], {}), '(1, n_channel, 3)\n', (16189, 16206), False, 'from torch import nn\n'), ((16308, 16329), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16315, 16329), False, 'from torch import nn\n'), ((16343, 16361), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (16355, 16361), False, 'from torch import nn\n'), ((16443, 16477), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channel', 'n_channel', '(3)'], {}), '(n_channel, n_channel, 3)\n', (16452, 16477), False, 'from torch import nn\n'), ((16579, 16600), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16586, 16600), False, 'from torch import nn\n'), ((16614, 16632), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (16626, 16632), False, 'from torch import nn\n'), ((16714, 16748), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channel', 'n_channel', '(3)'], {}), '(n_channel, n_channel, 3)\n', (16723, 16748), False, 'from torch import nn\n'), ((16850, 16871), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16857, 16871), False, 'from torch import nn\n'), ((16885, 16903), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (16897, 16903), False, 'from torch import nn\n'), ((17008, 17035), 'torch.nn.Linear', 'nn.Linear', (['n_channel', 'n_way'], {}), '(n_channel, n_way)\n', (17017, 17035), False, 'from torch import nn\n'), ((17662, 17674), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (17671, 17674), True, 'import numpy as np\n'), ((18731, 18758), 'numpy.max', 'np.max', (["train_set['labels']"], {}), 
"(train_set['labels'])\n", (18737, 18758), True, 'import numpy as np\n'), ((19110, 19127), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (19118, 19127), True, 'import numpy as np\n'), ((19145, 19161), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (19153, 19161), True, 'import numpy as np\n'), ((19608, 19633), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19631, 19633), False, 'import torch\n'), ((21293, 21323), 'numpy.concatenate', 'np.concatenate', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (21307, 21323), True, 'import numpy as np\n'), ((12280, 12338), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dict_label_idx[label]'], {'train_size': 'n_shot'}), '(dict_label_idx[label], train_size=n_shot)\n', (12296, 12338), False, 'from sklearn.model_selection import train_test_split\n'), ((12441, 12460), 'numpy.array', 'np.array', (['idx_X_qry'], {}), '(idx_X_qry)\n', (12449, 12460), True, 'import numpy as np\n'), ((12504, 12523), 'numpy.array', 'np.array', (['idx_X_spt'], {}), '(idx_X_spt)\n', (12512, 12523), True, 'import numpy as np\n'), ((16220, 16270), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channel'], {'momentum': '(1)', 'affine': '(True)'}), '(n_channel, momentum=1, affine=True)\n', (16234, 16270), False, 'from torch import nn\n'), ((16375, 16396), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (16387, 16396), False, 'from torch import nn\n'), ((16491, 16541), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channel'], {'momentum': '(1)', 'affine': '(True)'}), '(n_channel, momentum=1, affine=True)\n', (16505, 16541), False, 'from torch import nn\n'), ((16646, 16667), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (16658, 16667), False, 'from torch import nn\n'), ((16762, 16812), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channel'], {'momentum': '(1)', 'affine': '(True)'}), '(n_channel, momentum=1, affine=True)\n', (16776, 16812), False, 'from torch import nn\n'), ((16917, 16938), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (16929, 16938), False, 'from torch import nn\n'), ((17141, 17164), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (17154, 17164), False, 'from torch import nn\n'), ((18688, 18718), 'numpy.unique', 'np.unique', (["train_set['labels']"], {}), "(train_set['labels'])\n", (18697, 18718), True, 'import numpy as np\n'), ((20560, 20579), 'numpy.array', 'np.array', (['test_accs'], {}), '(test_accs)\n', (20568, 20579), True, 'import numpy as np\n'), ((20580, 20601), 'numpy.array', 'np.array', (['test_losses'], {}), '(test_losses)\n', (20588, 20601), True, 'import numpy as np\n'), ((8161, 8192), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (8177, 8192), False, 'from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\n'), ((15555, 15579), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (15571, 15579), False, 'import torch\n'), ((15610, 15635), 'torch.from_numpy', 'torch.from_numpy', (['samples'], {}), '(samples)\n', (15626, 15635), False, 'import torch\n'), ((21019, 21038), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (21035, 21038), False, 'import torch\n'), ((14841, 14889), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (14856, 14889), True, 'import torch.nn.functional as 
F\n'), ((10330, 10336), 'time.time', 'time', ([], {}), '()\n', (10334, 10336), False, 'from time import time\n')]
|
from sampling import Sampler
import algos
import numpy as np
from simulation_utils import create_env, get_feedback, run_algo
import sys
def batch(task, method, N, M, b):
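    # Batch active preference-based reward learning: repeatedly sample M candidate reward
    # weights from the posterior, let `run_algo` pick a batch of b trajectory pairs, obtain
    # a preference label for each pair via `get_feedback`, and feed the results back into
    # the sampler until N comparisons have been collected.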
if N % b != 0:
        print('N must be divisible by b')
        sys.exit(1)
B = 20*b
simulation_object = create_env(task)
d = simulation_object.num_of_features
w_true = 2*np.random.rand(d)-1
w_true = w_true / np.linalg.norm(w_true)
print('If in automated mode: true w = {}'.format(w_true/np.linalg.norm(w_true)))
lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
upper_input_bound = [x[1] for x in simulation_object.feed_bounds]
w_sampler = Sampler(d)
psi_set = []
s_set = []
i = 0
while i < N:
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
w_samples = w_sampler.sample(M)
mean_w_samples = np.mean(w_samples,axis=0)
print('Samples so far: ' + str(i))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
inputA_set, inputB_set = run_algo(method, simulation_object, w_samples, b, B)
for j in range(b):
input_A = inputA_set[j]
input_B = inputB_set[j]
psi, s = get_feedback(simulation_object, input_B, input_A, w_true)
psi_set.append(psi)
s_set.append(s)
i += b
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
w_samples = w_sampler.sample(M)
mean_w_samples = np.mean(w_samples, axis=0)
print('Samples so far: ' + str(N))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
def nonbatch(task, method, N, M):
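    # Sequential (non-batch) variant: one query pair is generated, labelled, and added to
    # the sampler per iteration.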
simulation_object = create_env(task)
d = simulation_object.num_of_features
w_true = 2*np.random.rand(d)-1
w_true = w_true / np.linalg.norm(w_true)
print('If in automated mode: true w = {}'.format(w_true/np.linalg.norm(w_true)))
lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
upper_input_bound = [x[1] for x in simulation_object.feed_bounds]
w_sampler = Sampler(d)
psi_set = []
s_set = []
for i in range(N):
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
w_samples = w_sampler.sample(M)
mean_w_samples = np.mean(w_samples,axis=0)
print('Samples so far: ' + str(i))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
input_A, input_B = run_algo(method, simulation_object, w_samples)
psi, s = get_feedback(simulation_object, input_A, input_B, w_true)
psi_set.append(psi)
s_set.append(s)
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)  # refresh the estimate with the final sample set
    print('Samples so far: ' + str(N))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
|
[
"simulation_utils.get_feedback",
"numpy.mean",
"numpy.linalg.norm",
"simulation_utils.create_env",
"simulation_utils.run_algo",
"numpy.array",
"numpy.random.rand",
"sampling.Sampler"
] |
[((286, 302), 'simulation_utils.create_env', 'create_env', (['task'], {}), '(task)\n', (296, 302), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((671, 681), 'sampling.Sampler', 'Sampler', (['d'], {}), '(d)\n', (678, 681), False, 'from sampling import Sampler\n'), ((1612, 1638), 'numpy.mean', 'np.mean', (['w_samples'], {'axis': '(0)'}), '(w_samples, axis=0)\n', (1619, 1638), True, 'import numpy as np\n'), ((1916, 1932), 'simulation_utils.create_env', 'create_env', (['task'], {}), '(task)\n', (1926, 1932), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((2301, 2311), 'sampling.Sampler', 'Sampler', (['d'], {}), '(d)\n', (2308, 2311), False, 'from sampling import Sampler\n'), ((404, 426), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (418, 426), True, 'import numpy as np\n'), ((888, 914), 'numpy.mean', 'np.mean', (['w_samples'], {'axis': '(0)'}), '(w_samples, axis=0)\n', (895, 914), True, 'import numpy as np\n'), ((1175, 1227), 'simulation_utils.run_algo', 'run_algo', (['method', 'simulation_object', 'w_samples', 'b', 'B'], {}), '(method, simulation_object, w_samples, b, B)\n', (1183, 1227), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((2034, 2056), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (2048, 2056), True, 'import numpy as np\n'), ((2514, 2540), 'numpy.mean', 'np.mean', (['w_samples'], {'axis': '(0)'}), '(w_samples, axis=0)\n', (2521, 2540), True, 'import numpy as np\n'), ((2795, 2841), 'simulation_utils.run_algo', 'run_algo', (['method', 'simulation_object', 'w_samples'], {}), '(method, simulation_object, w_samples)\n', (2803, 2841), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((2859, 2916), 'simulation_utils.get_feedback', 'get_feedback', (['simulation_object', 'input_A', 'input_B', 'w_true'], {}), '(simulation_object, input_A, input_B, w_true)\n', (2871, 2916), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((362, 379), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (376, 379), True, 'import numpy as np\n'), ((1348, 1405), 'simulation_utils.get_feedback', 'get_feedback', (['simulation_object', 'input_B', 'input_A', 'w_true'], {}), '(simulation_object, input_B, input_A, w_true)\n', (1360, 1405), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((1525, 1540), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (1533, 1540), True, 'import numpy as np\n'), ((1992, 2009), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (2006, 2009), True, 'import numpy as np\n'), ((3013, 3028), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (3021, 3028), True, 'import numpy as np\n'), ((487, 509), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (501, 509), True, 'import numpy as np\n'), ((793, 808), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (801, 808), True, 'import numpy as np\n'), ((1728, 1758), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1742, 1758), True, 'import numpy as np\n'), ((1822, 1852), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1836, 1852), True, 'import numpy as np\n'), ((2117, 2139), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (2131, 2139), True, 'import numpy as np\n'), ((2419, 2434), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (2427, 2434), True, 
'import numpy as np\n'), ((3168, 3198), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (3182, 3198), True, 'import numpy as np\n'), ((3262, 3292), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (3276, 3292), True, 'import numpy as np\n'), ((1011, 1041), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1025, 1041), True, 'import numpy as np\n'), ((1109, 1139), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1123, 1139), True, 'import numpy as np\n'), ((2637, 2667), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (2651, 2667), True, 'import numpy as np\n'), ((2735, 2765), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (2749, 2765), True, 'import numpy as np\n')]
|
"""
Advances in Financial Machine Learning, <NAME>
Chapter 2: Financial Data Structures
This module contains the functions to help users create structured financial data from raw unstructured data,
in the form of time, tick, volume, and dollar bars.
These bars are used throughout the text book (Advances in Financial Machine Learning, By <NAME>, 2018,
pg 25) to build the more interesting features for predicting financial time series data.
These financial data structures have better statistical properties when compared to those based on fixed time interval
sampling. A great paper to read more about this is titled: The Volume Clock: Insights into the high frequency paradigm,
Lopez de Prado, et al.
Many of the projects going forward will require Dollar and Volume bars.
"""
# Imports
from typing import Tuple
import numpy as np
import pandas as pd
from mlfinlab.data_structures.base_bars import BaseBars
class StandardBars(BaseBars):
"""
Contains all of the logic to construct the standard bars from chapter 2. This class shouldn't be used directly.
We have added functions to the package such as get_dollar_bars which will create an instance of this
class and then construct the standard bars, to return to the user.
This is because we wanted to simplify the logic as much as possible, for the end user.
"""
def __init__(self, file_path_or_df: Tuple[str, pd.DataFrame], metric: str, threshold: int = 50000,
batch_size: int = 20000000):
BaseBars.__init__(self, file_path_or_df, metric, batch_size)
# Threshold at which to sample
self.threshold = threshold
def _reset_cache(self):
"""
Implementation of abstract method _reset_cache for standard bars
"""
self.open_price = None
self.high_price, self.low_price = -np.inf, np.inf
self.cum_statistics = {'cum_ticks': 0, 'cum_dollar_value': 0, 'cum_volume': 0, 'cum_buy_volume': 0}
def _extract_bars(self, data: pd.DataFrame) -> list:
"""
For loop which compiles the various bars: dollar, volume, or tick.
We did investigate the use of trying to solve this in a vectorised manner but found that a For loop worked well.
:param data: Contains 3 columns - date_time, price, and volume.
"""
# Iterate over rows
list_bars = []
for row in data.values:
# Set variables
date_time = row[0]
self.tick_num += 1
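            # np.float is deprecated in newer NumPy releases; the builtin float() is the
            # drop-in replacement if this line raises an AttributeError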
price = np.float(row[1])
volume = row[2]
dollar_value = price * volume
signed_tick = self._apply_tick_rule(price)
if self.open_price is None:
self.open_price = price
# Update high low prices
self.high_price, self.low_price = self._update_high_low(price)
# Calculations
self.cum_statistics['cum_ticks'] += 1
self.cum_statistics['cum_dollar_value'] += dollar_value
self.cum_statistics['cum_volume'] += volume
if signed_tick == 1:
self.cum_statistics['cum_buy_volume'] += volume
# If threshold reached then take a sample
            if self.cum_statistics[self.metric] >= self.threshold:
self._create_bars(date_time, price,
self.high_price, self.low_price, list_bars)
# Reset cache
self._reset_cache()
return list_bars
def get_dollar_bars(file_path_or_df: Tuple[str, pd.DataFrame], threshold: float = 70000000, batch_size: int = 20000000,
verbose: bool = True, to_csv: bool = False, output_path: str = None):
"""
Creates the dollar bars: date_time, open, high, low, close, volume, cum_buy_volume, cum_ticks, cum_dollar_value.
Following the paper "The Volume Clock: Insights into the high frequency paradigm" by <NAME>, et al,
it is suggested that using 1/50 of the average daily dollar value, would result in more desirable statistical
properties.
:param file_path_or_df: (str or pd.DataFrame) Path to the csv file or Pandas Data Frame containing raw tick data in the format[date_time, price, volume]
:param threshold: (float) A cumulative value above this threshold triggers a sample to be taken.
:param batch_size: (int) The number of rows per batch. Less RAM = smaller batch size.
:param verbose: (bool) Print out batch numbers (True or False)
:param to_csv: (bool) Save bars to csv after every batch run (True or False)
:param output_path: (str) Path to csv file, if to_csv is True
:return: (pd.DataFrame) Dataframe of dollar bars
"""
bars = StandardBars(file_path_or_df=file_path_or_df, metric='cum_dollar_value', threshold=threshold,
batch_size=batch_size)
dollar_bars = bars.batch_run(verbose=verbose, to_csv=to_csv, output_path=output_path)
return dollar_bars
def get_volume_bars(file_path_or_df: Tuple[str, pd.DataFrame], threshold: float = 70000000, batch_size: int = 20000000,
verbose: bool = True, to_csv: bool = False, output_path: str = None):
"""
Creates the volume bars: date_time, open, high, low, close, volume, cum_buy_volume, cum_ticks, cum_dollar_value.
Following the paper "The Volume Clock: Insights into the high frequency paradigm" by <NAME>, et al,
it is suggested that using 1/50 of the average daily volume, would result in more desirable statistical properties.
:param file_path_or_df: (str or pd.DataFrame) Path to the csv file or Pandas Data Frame containing raw tick data in the format[date_time, price, volume]
:param threshold: (float) A cumulative value above this threshold triggers a sample to be taken.
:param batch_size: (int) The number of rows per batch. Less RAM = smaller batch size.
:param verbose: (bool) Print out batch numbers (True or False)
:param to_csv: (bool) Save bars to csv after every batch run (True or False)
:param output_path: (str) Path to csv file, if to_csv is True
:return: (pd.DataFrame) Dataframe of volume bars
"""
bars = StandardBars(file_path_or_df=file_path_or_df, metric='cum_volume',
threshold=threshold, batch_size=batch_size)
volume_bars = bars.batch_run(verbose=verbose, to_csv=to_csv, output_path=output_path)
return volume_bars
def get_tick_bars(file_path_or_df: Tuple[str, pd.DataFrame], threshold: float = 70000000, batch_size: int = 20000000,
verbose: bool = True, to_csv: bool = False, output_path: str = None):
"""
Creates the tick bars: date_time, open, high, low, close, volume, cum_buy_volume, cum_ticks, cum_dollar_value.
:param file_path_or_df: (str or pd.DataFrame) Path to the csv file or Pandas Data Frame containing raw tick data in the format[date_time, price, volume]
:param threshold: (float) A cumulative value above this threshold triggers a sample to be taken.
:param batch_size: (int) The number of rows per batch. Less RAM = smaller batch size.
:param verbose: (bool) Print out batch numbers (True or False)
:param to_csv: (bool) Save bars to csv after every batch run (True or False)
:param output_path: (str) Path to csv file, if to_csv is True
:return: (pd.DataFrame) Dataframe of volume bars
"""
bars = StandardBars(file_path_or_df=file_path_or_df, metric='cum_ticks',
threshold=threshold, batch_size=batch_size)
tick_bars = bars.batch_run(verbose=verbose, to_csv=to_csv, output_path=output_path)
return tick_bars
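# A minimal usage sketch (not part of the original module): assuming a CSV of raw ticks in
# the expected [date_time, price, volume] format, dollar bars could be produced as below.
# The file path and threshold are placeholders rather than values from the source.
if __name__ == '__main__':
    example_bars = get_dollar_bars(file_path_or_df='raw_tick_data.csv', threshold=70_000_000,
                                    batch_size=1_000_000, verbose=False)
    print(example_bars.head())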
|
[
"numpy.float",
"mlfinlab.data_structures.base_bars.BaseBars.__init__"
] |
[((1508, 1568), 'mlfinlab.data_structures.base_bars.BaseBars.__init__', 'BaseBars.__init__', (['self', 'file_path_or_df', 'metric', 'batch_size'], {}), '(self, file_path_or_df, metric, batch_size)\n', (1525, 1568), False, 'from mlfinlab.data_structures.base_bars import BaseBars\n'), ((2513, 2529), 'numpy.float', 'np.float', (['row[1]'], {}), '(row[1])\n', (2521, 2529), True, 'import numpy as np\n')]
|