code (stringlengths 31 to 1.05M) | apis (list) | extract_api (stringlengths 97 to 1.91M)
---|---|---|
from math import cos, sin
import numpy as np
from ....simulator import Agent
from .quintic_polynomials_planner import quinic_polynomials_planner
class TeacherQuinticPolynomials(Agent):
def learn(self, state, action):
raise NotImplementedError()
def explore(self, state, horizon=1):
raise NotImplementedError()
def __init__(self, world, lane):
Agent.__init__(self, world)
self.lane = lane
self.navigation_plan = None
self.goal = self.lane.end_middle()
self.goal = self.goal[0], self.goal[1], 0.0 # the angle depends on the lane direction
def plan(self, horizon=10):
trajectory = quinic_polynomials_planner(sx=self.x, sy=self.y, syaw=self.theta, sv=self.v, sa=0.0,
gx=self.goal[0], gy=self.goal[1], gyaw=self.goal[2], gv=0.0, ga=0.0,
max_accel=0.0, max_jerk=0.1, dt=1)
return np.array(trajectory[3])[:horizon]
def exploit(self, state, horizon=1):
if self.navigation_plan is None:
self.navigation_plan = self.plan()
# follow the planned headings, one per executed step
for action in self.navigation_plan[:horizon]:
self.execute(action)
def execute(self, action, horizon=1):
for _ in range(horizon):
self.x = self.x + self.v * cos(action)
self.y = self.y + self.v * sin(action)
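# Usage sketch (added; assumes a simulator `world` and a `lane` exposing end_middle(),
# as implied by __init__ above):
# teacher = TeacherQuinticPolynomials(world, lane)
# teacher.exploit(state=None, horizon=5)  # plans once, then executes 5 planned headings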
|
[
"numpy.array",
"math.cos",
"math.sin"
] |
[((953, 976), 'numpy.array', 'np.array', (['trajectory[3]'], {}), '(trajectory[3])\n', (961, 976), True, 'import numpy as np\n'), ((1292, 1303), 'math.cos', 'cos', (['action'], {}), '(action)\n', (1295, 1303), False, 'from math import cos, sin\n'), ((1343, 1354), 'math.sin', 'sin', (['action'], {}), '(action)\n', (1346, 1354), False, 'from math import cos, sin\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess
"""
import os
import argparse
from functools import reduce
import numpy as np
import mindspore as ms
from mindspore import ops, Tensor, context
import src.util as util
def cal_acc(args):
"""
:return: meta-baseline eval
"""
temp = 5.
n_shots = [args.num_shots]
file_num = int(len(os.listdir(args.post_result_path)) / args.num_shots)
aves_keys = ['tl', 'ta', 'vl', 'va']
for n_shot in n_shots:
aves_keys += ['fsa-' + str(n_shot)]
aves = {k: util.Averager() for k in aves_keys}
label_list = np.load(os.path.join(args.pre_result_path, "label.npy"), allow_pickle=True)
shape_list = np.load(os.path.join(args.pre_result_path, "shape.npy"), allow_pickle=True)
x_shot_shape = shape_list[0]
x_query_shape = shape_list[1]
shot_shape = x_shot_shape[:-3]
query_shape = x_query_shape[:-3]
x_shot_len = reduce(lambda x, y: x*y, shot_shape)
x_query_len = reduce(lambda x, y: x*y, query_shape)
for i, n_shot in enumerate(n_shots):
np.random.seed(0)
label_shot = label_list[i]
for j in range(file_num):
labels = Tensor(label_shot[j])
f = os.path.join(args.post_result_path, "nshot_" + str(i) + "_" + str(j) + "_0.bin")
x_tot = Tensor(np.fromfile(f, np.float32).reshape(args.batch_size, 512))
x_shot, x_query = x_tot[:x_shot_len], x_tot[-x_query_len:]
x_shot = x_shot.view(*shot_shape, -1)
x_query = x_query.view(*query_shape, -1)
########## cross-class bias ############
bs = x_shot.shape[0]
fs = x_shot.shape[-1]
bias = x_shot.view(bs, -1, fs).mean(1) - x_query.mean(1)
x_query = x_query + ops.ExpandDims()(bias, 1)
x_shot = x_shot.mean(axis=-2)
x_shot = ops.L2Normalize(axis=-1)(x_shot)
x_query = ops.L2Normalize(axis=-1)(x_query)
logits = ops.BatchMatMul()(x_query, x_shot.transpose(0, 2, 1))
logits = logits * temp
ret = ops.Argmax()(logits) == labels.astype(ms.int32)
acc = ret.astype(ms.float32).mean()
aves['fsa-' + str(n_shot)].add(acc.asnumpy())
for k, v in aves.items():
aves[k] = v.item()
for n_shot in n_shots:
key = 'fsa-' + str(n_shot)
print("epoch {}, {}-shot, val acc {:.4f}".format(str(1), n_shot, aves[key]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device_target', type=str, default='CPU', choices=['Ascend', 'GPU', 'CPU'])
parser.add_argument('--dataset', default='mini-imagenet')
parser.add_argument('--post_result_path', default='./result_Files')
parser.add_argument('--pre_result_path', type=str, default='./preprocess_Result')
parser.add_argument('--batch_size', type=int, default=320)
parser.add_argument('--num_shots', type=int, default=1)
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False)
cal_acc(args_opt)
|
[
"os.listdir",
"mindspore.context.set_context",
"mindspore.ops.Argmax",
"numpy.random.seed",
"argparse.ArgumentParser",
"mindspore.ops.ExpandDims",
"numpy.fromfile",
"mindspore.ops.L2Normalize",
"mindspore.Tensor",
"mindspore.ops.BatchMatMul",
"functools.reduce",
"os.path.join",
"src.util.Averager"
] |
[((1547, 1585), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shot_shape'], {}), '(lambda x, y: x * y, shot_shape)\n', (1553, 1585), False, 'from functools import reduce\n'), ((1602, 1641), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'query_shape'], {}), '(lambda x, y: x * y, query_shape)\n', (1608, 1641), False, 'from functools import reduce\n'), ((3108, 3133), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3131, 3133), False, 'import argparse\n'), ((3618, 3724), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'args_opt.device_target', 'save_graphs': '(False)'}), '(mode=context.GRAPH_MODE, device_target=args_opt.\n device_target, save_graphs=False)\n', (3637, 3724), False, 'from mindspore import ops, Tensor, context\n'), ((1168, 1183), 'src.util.Averager', 'util.Averager', ([], {}), '()\n', (1181, 1183), True, 'import src.util as util\n'), ((1230, 1277), 'os.path.join', 'os.path.join', (['args.pre_result_path', '"""label.npy"""'], {}), "(args.pre_result_path, 'label.npy')\n", (1242, 1277), False, 'import os\n'), ((1323, 1370), 'os.path.join', 'os.path.join', (['args.pre_result_path', '"""shape.npy"""'], {}), "(args.pre_result_path, 'shape.npy')\n", (1335, 1370), False, 'import os\n'), ((1690, 1707), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1704, 1707), True, 'import numpy as np\n'), ((1798, 1819), 'mindspore.Tensor', 'Tensor', (['label_shot[j]'], {}), '(label_shot[j])\n', (1804, 1819), False, 'from mindspore import ops, Tensor, context\n'), ((987, 1020), 'os.listdir', 'os.listdir', (['args.post_result_path'], {}), '(args.post_result_path)\n', (997, 1020), False, 'import os\n'), ((2488, 2512), 'mindspore.ops.L2Normalize', 'ops.L2Normalize', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2503, 2512), False, 'from mindspore import ops, Tensor, context\n'), ((2543, 2567), 'mindspore.ops.L2Normalize', 'ops.L2Normalize', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2558, 2567), False, 'from mindspore import ops, Tensor, context\n'), ((2598, 2615), 'mindspore.ops.BatchMatMul', 'ops.BatchMatMul', ([], {}), '()\n', (2613, 2615), False, 'from mindspore import ops, Tensor, context\n'), ((2398, 2414), 'mindspore.ops.ExpandDims', 'ops.ExpandDims', ([], {}), '()\n', (2412, 2414), False, 'from mindspore import ops, Tensor, context\n'), ((2707, 2719), 'mindspore.ops.Argmax', 'ops.Argmax', ([], {}), '()\n', (2717, 2719), False, 'from mindspore import ops, Tensor, context\n'), ((1944, 1970), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32'], {}), '(f, np.float32)\n', (1955, 1970), True, 'import numpy as np\n')]
|
# Import modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
# Computational domain
xmin = -1.0
xmax = 1.0
xn = 101 # Number of sampling points of the function "f" on the interval "[xmin, xmax]"
ymin = xmin
ymax = xmax
yn = xn # Number of sampling points of the function "f" on the interval "[ymin, ymax]"
xngrad = 10 # Every "xngrad"-th sampling point is displayed along the "x" axis
yngrad = xngrad # Every "yngrad"-th sampling point is displayed along the "y" axis
# Grid construction
x, y = np.meshgrid(np.linspace(xmin, xmax, xn), np.linspace(ymin, ymax, yn))
# Evaluate the function
f = np.sin(2.0 * x) + np.cos(2.0 * y)
# Derivatives of "f" with respect to "x" and "y"
fx = 2.0 * np.cos(2.0 * x)
fy = -2.0 * np.sin(2.0 * y)
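# Added sanity check (not in the original script): compare the analytic derivatives
# with finite differences; with meshgrid's default 'xy' indexing, axis 0 runs along
# y and axis 1 along x.
fy_num, fx_num = np.gradient(f, np.linspace(ymin, ymax, yn), np.linspace(xmin, xmax, xn))
print("max |fx - fx_num| =", np.abs(fx - fx_num).max())
print("max |fy - fy_num| =", np.abs(fy - fy_num).max())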
# Plotting
fig, ax = plt.subplots(figsize=(12.0 / 2.54, 8.0 / 2.54))
im = ax.imshow(f, extent=(xmin, xmax, ymin, ymax), cmap="bwr",
vmin=-np.abs(f).max(), vmax=np.abs(f).max())
ax.quiver( x[::xngrad, ::xngrad], y[::yngrad, ::yngrad],
fx[::xngrad, ::xngrad], fy[::yngrad, ::yngrad])
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_xticks(np.linspace(xmin, xmax, 6))
ax.set_yticks(np.linspace(ymin, ymax, 6))
fig.colorbar(im)
plt.show()
fig.savefig("../latex/fig-f-gradf.pdf")
|
[
"matplotlib.rc",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] |
[((94, 117), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (96, 117), False, 'from matplotlib import rc\n'), ((783, 830), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12.0 / 2.54, 8.0 / 2.54)'}), '(figsize=(12.0 / 2.54, 8.0 / 2.54))\n', (795, 830), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1221, 1223), True, 'import matplotlib.pyplot as plt\n'), ((546, 573), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xn'], {}), '(xmin, xmax, xn)\n', (557, 573), True, 'import numpy as np\n'), ((575, 602), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'yn'], {}), '(ymin, ymax, yn)\n', (586, 602), True, 'import numpy as np\n'), ((627, 642), 'numpy.sin', 'np.sin', (['(2.0 * x)'], {}), '(2.0 * x)\n', (633, 642), True, 'import numpy as np\n'), ((645, 660), 'numpy.cos', 'np.cos', (['(2.0 * y)'], {}), '(2.0 * y)\n', (651, 660), True, 'import numpy as np\n'), ((714, 729), 'numpy.cos', 'np.cos', (['(2.0 * x)'], {}), '(2.0 * x)\n', (720, 729), True, 'import numpy as np\n'), ((742, 757), 'numpy.sin', 'np.sin', (['(2.0 * y)'], {}), '(2.0 * y)\n', (748, 757), True, 'import numpy as np\n'), ((1126, 1152), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(6)'], {}), '(xmin, xmax, 6)\n', (1137, 1152), True, 'import numpy as np\n'), ((1168, 1194), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(6)'], {}), '(ymin, ymax, 6)\n', (1179, 1194), True, 'import numpy as np\n'), ((937, 946), 'numpy.abs', 'np.abs', (['f'], {}), '(f)\n', (943, 946), True, 'import numpy as np\n'), ((915, 924), 'numpy.abs', 'np.abs', (['f'], {}), '(f)\n', (921, 924), True, 'import numpy as np\n')]
|
import numpy as np
import scipy
from ._hist import take_bins, hist  # hist is used by cdf_by_hist below
__all__ = ['ecdf']
__EPSILON__ = 1e-8
#--------------------------------------------------------------------
def ecdf(x,y=None):
'''
Empirical Cumulative Density Function (ECDF).
Parameters
-----------
* x,y: 1d ndarrays;
if y is None, then the ECDF is computed for x alone.
Returns
--------
* if y is not None -> (bins,out_x, out_y);
* if y is None -> (bins,out_x).
Notes
-------
* Based on scipy implementation.
* If y is not None, ECDF will be constructed on the joint x and y.
* If y is None, only bins and cdf(x) (2 argument) will be returned.
* ECDF is calculated as:
bins = sort(concatenate(x,y)),
cdf_x = (number of elements of sort(x) <= each bin)/size(x),
cdf_y = (number of elements of sort(y) <= each bin)/size(y),
where:
* bins - bins for the cdfs (if y is not None, joint bins).
'''
x = np.array(x)
x = np.sort(x)
ret2 =True
if (y is not None):
y = np.array(y)
y = np.sort(y)
else:
ret2 = False
y=np.array([])
bins = np.concatenate((x,y))
bins=np.sort(bins)
x_cdf = np.searchsorted(x,bins, 'right')
y_cdf = np.searchsorted(y,bins, 'right')
x_cdf = (x_cdf) / x.shape[0]
y_cdf = (y_cdf) / y.shape[0]
out = (bins,x_cdf)
if (ret2):
out= (bins,x_cdf,y_cdf)
return out
#--------------------------------------------------------------------
def hist2cdf(hist_x, normalize = True):
'''
The cumulative density function made by histogram.
Parameters:
* hist_x 1d histogram (ndarray).
Returns:
* cdf(hist_x) (Cumulative Density Function).
'''
hist_x = np.asarray(hist_x)
out = np.cumsum(hist_x)
if(normalize):
out = out / np.max(out)  # float division; in-place '/=' would fail on an integer histogram
# TODO: out /= hist_x.sum()  # simpler
return out
#--------------------------------------------------------------------
def cdf_by_hist(x,y=None,n_bins = None, bins = None, take_mean=False):
'''
Cumulative density function constructed by histogram.
Parameters:
* x,y: 1d ndarrays;
* n_bins: required number of uniformly distributed bins,
* works only if bins is None.
* bins: grid of prepared bins (can be non-uniform)
* take_mean: subtract the mean if True.
Returns:
* y is not None -> (out_x, out_y,bins)
* y is None -> (out_x,bins)
Notes:
* If bins is None and n_bins is None:
bins = np.sort(np.concatenate((x,y))).
This case gives the same result as ecdf!
* If bins is None and n_bins <= 0: n_bins = x.shape[0];
the case of a uniform bin grid (differs from ECDF).
* For tests: the modes n_bins = 't10' and n_bins = 't5'
give uniform bins with x.shape[0]//10 and //5 respectively.
'''
# FIXME: the results differ slightly from ecdf
# TODO: the xy case is the same as for ecdf, but uniform bins may be more valid (see tests)
if(bins is None and n_bins is None):
bins = take_bins(x,y, n_bins='xy')
elif(n_bins == 't10' and bins is None):
bins = take_bins(x,y, n_bins=x.shape[0]//10)
elif(n_bins == 't5' and bins is None):
bins = take_bins(x,y, n_bins=x.shape[0]//5)
if(y is None):
bins, out_x = hist(x,y=None,n_bins = n_bins, bins = bins, take_mean=take_mean)
out_x = hist2cdf(out_x, normalize = True)
out = (bins, out_x )
else:
bins, out_x, out_y = hist(x,y=y,n_bins = n_bins, bins = bins, take_mean=take_mean)
out_x = hist2cdf(out_x, normalize = True)
out_y = hist2cdf(out_y, normalize = True)
out = (bins,out_x, out_y)
return out
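if __name__ == '__main__':
    # Added usage sketch (not in the original module); run with the package importable,
    # e.g. via `python -m`, because of the relative import above.
    rng = np.random.default_rng(0)
    a, b = rng.normal(size=200), rng.normal(loc=1.0, size=300)
    bins_, cdf_a, cdf_b = ecdf(a, b)
    print(cdf_a[-1], cdf_b[-1])                 # both end at 1.0 by construction
    counts, _ = np.histogram(a, bins=32)
    print(hist2cdf(counts.astype(float))[-1])   # normalized histogram CDF also ends at 1.0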
|
[
"numpy.asarray",
"numpy.searchsorted",
"numpy.sort",
"numpy.cumsum",
"numpy.max",
"numpy.array",
"numpy.concatenate"
] |
[((969, 980), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (977, 980), True, 'import numpy as np\n'), ((989, 999), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (996, 999), True, 'import numpy as np\n'), ((1165, 1187), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {}), '((x, y))\n', (1179, 1187), True, 'import numpy as np\n'), ((1196, 1209), 'numpy.sort', 'np.sort', (['bins'], {}), '(bins)\n', (1203, 1209), True, 'import numpy as np\n'), ((1222, 1255), 'numpy.searchsorted', 'np.searchsorted', (['x', 'bins', '"""right"""'], {}), "(x, bins, 'right')\n", (1237, 1255), True, 'import numpy as np\n'), ((1267, 1300), 'numpy.searchsorted', 'np.searchsorted', (['y', 'bins', '"""right"""'], {}), "(y, bins, 'right')\n", (1282, 1300), True, 'import numpy as np\n'), ((1800, 1818), 'numpy.asarray', 'np.asarray', (['hist_x'], {}), '(hist_x)\n', (1810, 1818), True, 'import numpy as np\n'), ((1834, 1851), 'numpy.cumsum', 'np.cumsum', (['hist_x'], {}), '(hist_x)\n', (1843, 1851), True, 'import numpy as np\n'), ((1056, 1067), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1064, 1067), True, 'import numpy as np\n'), ((1080, 1090), 'numpy.sort', 'np.sort', (['y'], {}), '(y)\n', (1087, 1090), True, 'import numpy as np\n'), ((1132, 1144), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1140, 1144), True, 'import numpy as np\n'), ((1890, 1901), 'numpy.max', 'np.max', (['out'], {}), '(out)\n', (1896, 1901), True, 'import numpy as np\n')]
|
import operator
import math
import numpy as np
from rtlsdr import RtlSdr
import matplotlib.pyplot as plt
# Available sample rates
'''
3200000Hz
2800000Hz
2560000Hz
2400000Hz
2048000Hz
1920000Hz
1800000Hz
1400000Hz
1024000Hz
900001Hz
250000Hz
'''
# Receiver class. This needs receiving parameters and will receive data from the SDR
class Receiver:
def __init__(self, sample_rate, ppm, resolution, num_FFT, num_med):
self.sdr = RtlSdr()
# configure SDR
self.sdr.sample_rate = sample_rate
self.sdr.center_freq = 1420405000
# For some reason the SDR doesn't want to set the offset PPM to 0 so we avoid that
if ppm != 0:
self.sdr.freq_correction = ppm
self.sdr.gain = 'auto'
self.resolution = 2**resolution
self.num_FFT = num_FFT
self.num_med = num_med
# Reads data from SDR, processes and writes it
def receive(self):
print(f'Receiving {self.num_FFT} bins of {self.resolution} samples each...')
data_PSD = self.sample()
# Observed frequency range
start_freq = self.sdr.center_freq - self.sdr.sample_rate/2
stop_freq = self.sdr.center_freq + self.sdr.sample_rate/2
freqs = np.linspace(start = start_freq, stop = stop_freq, num = self.resolution)
# Samples a blank spectrum to calibrate the spectrum with.
self.sdr.center_freq = self.sdr.center_freq + 3000000
blank_PSD = self.sample()
SNR_spectrum = self.estimate_SNR(data = data_PSD, blank = blank_PSD)
SNR_median = self.median(SNR_spectrum) if self.num_med != 0 else SNR_spectrum
# Close the SDR
self.sdr.close()
return freqs, SNR_median
# Returns numpy array with PSD values averaged from "num_FFT" datasets
def sample(self):
counter = 0.0
PSD_summed = (0, )* self.resolution
while (counter < self.num_FFT):
samples = self.sdr.read_samples(self.resolution)
# Applies window to samples in time domain before performing FFT
window = np.hanning(self.resolution)
windowed_samples = samples * window
# Perform FFT and PSD-analysis
PSD = np.abs(np.fft.fft(windowed_samples)/self.sdr.sample_rate)**2
PSD_checked = self.check_for_zero(PSD)
PSD_log = 10*np.log10(PSD_checked)
PSD_summed = tuple(map(operator.add, PSD_summed, np.fft.fftshift(PSD_log)))
counter += 1.0
averaged_PSD = tuple(sample/counter for sample in PSD_summed)
return averaged_PSD
# Calculates SNR from spectrum and H-line SNR
def estimate_SNR(self, data, blank):
SNR = np.array(data)-np.array(blank)
# Crude noise floor estimate:
noise_floor = sum(SNR[0:10])/10
shifted_SNR = SNR-noise_floor
return shifted_SNR
# Smoothing filter for RFI removal (a running mean over num_med samples)
def median(self, data):
for i in range(len(data)):
data[i] = np.mean(data[i:i+self.num_med])
return data
# Checks if samples have been dropped and replaces 0.0 with next value
def check_for_zero(self, PSD):
try:
index = list(PSD).index(0.0)
print('Dropped sample was recovered!')
PSD[index] = (PSD[index+1]+PSD[index-1])/2
return PSD
except (ValueError, IndexError):
return PSD
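# Hypothetical usage sketch (added; not part of the original file). The parameter
# values are illustrative assumptions taken from the sample-rate list above, and a
# connected RTL-SDR dongle is required for this to run.
if __name__ == '__main__':
    receiver = Receiver(sample_rate=2400000, ppm=0, resolution=11, num_FFT=100, num_med=10)
    freqs, snr = receiver.receive()
    plt.plot(freqs, snr)
    plt.xlabel("Frequency [Hz]")
    plt.ylabel("SNR [dB]")
    plt.show()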
|
[
"rtlsdr.RtlSdr",
"numpy.fft.fft",
"numpy.mean",
"numpy.array",
"numpy.fft.fftshift",
"numpy.linspace",
"numpy.hanning",
"numpy.log10"
] |
[((446, 454), 'rtlsdr.RtlSdr', 'RtlSdr', ([], {}), '()\n', (452, 454), False, 'from rtlsdr import RtlSdr\n'), ((1237, 1303), 'numpy.linspace', 'np.linspace', ([], {'start': 'start_freq', 'stop': 'stop_freq', 'num': 'self.resolution'}), '(start=start_freq, stop=stop_freq, num=self.resolution)\n', (1248, 1303), True, 'import numpy as np\n'), ((2104, 2131), 'numpy.hanning', 'np.hanning', (['self.resolution'], {}), '(self.resolution)\n', (2114, 2131), True, 'import numpy as np\n'), ((2744, 2758), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2752, 2758), True, 'import numpy as np\n'), ((2759, 2774), 'numpy.array', 'np.array', (['blank'], {}), '(blank)\n', (2767, 2774), True, 'import numpy as np\n'), ((3043, 3076), 'numpy.mean', 'np.mean', (['data[i:i + self.num_med]'], {}), '(data[i:i + self.num_med])\n', (3050, 3076), True, 'import numpy as np\n'), ((2380, 2401), 'numpy.log10', 'np.log10', (['PSD_checked'], {}), '(PSD_checked)\n', (2388, 2401), True, 'import numpy as np\n'), ((2463, 2487), 'numpy.fft.fftshift', 'np.fft.fftshift', (['PSD_log'], {}), '(PSD_log)\n', (2478, 2487), True, 'import numpy as np\n'), ((2249, 2277), 'numpy.fft.fft', 'np.fft.fft', (['windowed_samples'], {}), '(windowed_samples)\n', (2259, 2277), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
def faces_with_repeated_vertices(f):
if f.shape[1] == 3:
return np.unique(np.concatenate([
np.where(f[:, 0] == f[:, 1])[0],
np.where(f[:, 0] == f[:, 2])[0],
np.where(f[:, 1] == f[:, 2])[0],
]))
else:
return np.unique(np.concatenate([
np.where(f[:, 0] == f[:, 1])[0],
np.where(f[:, 0] == f[:, 2])[0],
np.where(f[:, 0] == f[:, 3])[0],
np.where(f[:, 1] == f[:, 2])[0],
np.where(f[:, 1] == f[:, 3])[0],
np.where(f[:, 2] == f[:, 3])[0],
]))
def faces_with_out_of_range_vertices(f, v):
return np.unique(np.concatenate([
np.where(f < 0)[0],
np.where(f >= len(v))[0],
]))
def check_integrity(mesh):
errors = []
for f_index in faces_with_out_of_range_vertices(mesh.f, mesh.v):
errors.append(("f", f_index, "Vertex out of range"))
for f_index in faces_with_repeated_vertices(mesh.f):
errors.append(("f", f_index, "Repeated vertex"))
return errors
def print_integrity_errors(errors, mesh):
for attr, index, message in errors:
try:
data = getattr(mesh, attr)[index]
except (AttributeError, IndexError):
data = ''
print("{} {} {} {}".format(attr, index, message, data))
|
[
"numpy.where"
] |
[((734, 749), 'numpy.where', 'np.where', (['(f < 0)'], {}), '(f < 0)\n', (742, 749), True, 'import numpy as np\n'), ((174, 202), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 1])'], {}), '(f[:, 0] == f[:, 1])\n', (182, 202), True, 'import numpy as np\n'), ((219, 247), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 2])'], {}), '(f[:, 0] == f[:, 2])\n', (227, 247), True, 'import numpy as np\n'), ((264, 292), 'numpy.where', 'np.where', (['(f[:, 1] == f[:, 2])'], {}), '(f[:, 1] == f[:, 2])\n', (272, 292), True, 'import numpy as np\n'), ((373, 401), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 1])'], {}), '(f[:, 0] == f[:, 1])\n', (381, 401), True, 'import numpy as np\n'), ((418, 446), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 2])'], {}), '(f[:, 0] == f[:, 2])\n', (426, 446), True, 'import numpy as np\n'), ((463, 491), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 3])'], {}), '(f[:, 0] == f[:, 3])\n', (471, 491), True, 'import numpy as np\n'), ((508, 536), 'numpy.where', 'np.where', (['(f[:, 1] == f[:, 2])'], {}), '(f[:, 1] == f[:, 2])\n', (516, 536), True, 'import numpy as np\n'), ((553, 581), 'numpy.where', 'np.where', (['(f[:, 1] == f[:, 3])'], {}), '(f[:, 1] == f[:, 3])\n', (561, 581), True, 'import numpy as np\n'), ((598, 626), 'numpy.where', 'np.where', (['(f[:, 2] == f[:, 3])'], {}), '(f[:, 2] == f[:, 3])\n', (606, 626), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
from packaging import version
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
from PIL import Image
import matplotlib.pyplot as plt
import cv2
from skimage.transform import rotate
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils import data
from models.unet import UNet
from dataset.refuge import REFUGE
NUM_CLASSES = 3
NUM_STEPS = 512 # Number of images in the validation set.
RESTORE_FROM = '/home/charlietran/CADA_Tutorial/Model_Weights/Trial1/UNet1000_v18_weightedclass.pth'
SAVE_PATH = '/home/charlietran/CADA_Tutorial/result/Trial1/'
MODEL = 'Unet'
BATCH_SIZE = 1
is_polar = False # Whether to transform the image and labels to polar coordinates; the MICCAI version is False
ROI_size = 700 #ROI size
from evaluation.evaluation_segmentation import *
print(RESTORE_FROM)
palette=[
255, 255, 255, # index 0: white background
128, 128, 128, # index 1: gray
0, 0, 0, # index 2: black
0, 0, 0 # index 3: black
]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
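# Note: colorize_mask expects an integer label map, e.g. an (H, W) array of class
# indices; it returns a paletted ('P' mode) PIL image using the palette defined above.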
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse.Namespace.
"""
parser = argparse.ArgumentParser(description="Unet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice Unet.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
parser.add_argument("--is_polar", type=bool, default=False,
help="If proceed images in polar coordinate. MICCAI version is false")
parser.add_argument("--ROI_size", type=int, default=460,
help="Size of ROI.")
parser.add_argument('--t', type=int, default=3, help='t for Recurrent step of R2U_Net or R2AttU_Net')
return parser.parse_args()
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
gpu0 = args.gpu
if not os.path.exists(args.save):
os.makedirs(args.save)
model = UNet(3, n_classes=args.num_classes)
saved_state_dict = torch.load(args.restore_from)
model.load_state_dict(saved_state_dict)
model.cuda(gpu0)
model.train()
testloader = data.DataLoader(REFUGE(False, domain='REFUGE_TEST', is_transform=True),
batch_size=args.batch_size, shuffle=False, pin_memory=True)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(ROI_size, ROI_size), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(ROI_size, ROI_size), mode='bilinear')
for index, batch in enumerate(testloader):
if index % 100 == 0:
print('%d processd' % index)
image, label, _, _, name = batch
if args.model == 'Unet':
_,_,_,_, output2 = model(Variable(image, volatile=True).cuda(gpu0))
output = interp(output2).cpu().data.numpy()
for idx, one_name in enumerate(name):
pred = output[idx]
pred = pred.transpose(1,2,0)
pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8)
output_col = colorize_mask(pred)
print(output_col.size)
one_name = one_name.split('/')[-1]
output_col = output_col.convert('L')
output_col.save('%s/%s.bmp' % (args.save, one_name))
if __name__ == '__main__':
main()
results_folder = SAVE_PATH
gt_folder = '/DATA/charlie/AWC/CADA_Tutorial_Image/Target_Test/mask/'
output_path = results_folder
export_table = True
evaluate_segmentation_results(results_folder, gt_folder, output_path, export_table)
|
[
"os.makedirs",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.autograd.Variable",
"torch.load",
"dataset.refuge.REFUGE",
"os.path.exists",
"packaging.version.parse",
"torch.nn.Upsample",
"models.unet.UNet"
] |
[((1465, 1516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Unet Network"""'}), "(description='Unet Network')\n", (1488, 1516), False, 'import argparse\n'), ((2912, 2947), 'models.unet.UNet', 'UNet', (['(3)'], {'n_classes': 'args.num_classes'}), '(3, n_classes=args.num_classes)\n', (2916, 2947), False, 'from models.unet import UNet\n'), ((2972, 3001), 'torch.load', 'torch.load', (['args.restore_from'], {}), '(args.restore_from)\n', (2982, 3001), False, 'import torch\n'), ((2841, 2866), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (2855, 2866), False, 'import os\n'), ((2876, 2898), 'os.makedirs', 'os.makedirs', (['args.save'], {}), '(args.save)\n', (2887, 2898), False, 'import os\n'), ((3120, 3174), 'dataset.refuge.REFUGE', 'REFUGE', (['(False)'], {'domain': '"""REFUGE_TEST"""', 'is_transform': '(True)'}), "(False, domain='REFUGE_TEST', is_transform=True)\n", (3126, 3174), False, 'from dataset.refuge import REFUGE\n'), ((3281, 3313), 'packaging.version.parse', 'version.parse', (['torch.__version__'], {}), '(torch.__version__)\n', (3294, 3313), False, 'from packaging import version\n'), ((3317, 3339), 'packaging.version.parse', 'version.parse', (['"""0.4.0"""'], {}), "('0.4.0')\n", (3330, 3339), False, 'from packaging import version\n'), ((3358, 3433), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(ROI_size, ROI_size)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(size=(ROI_size, ROI_size), mode='bilinear', align_corners=True)\n", (3369, 3433), True, 'import torch.nn as nn\n'), ((3461, 3516), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(ROI_size, ROI_size)', 'mode': '"""bilinear"""'}), "(size=(ROI_size, ROI_size), mode='bilinear')\n", (3472, 3516), True, 'import torch.nn as nn\n'), ((3997, 4020), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(2)'}), '(pred, axis=2)\n', (4006, 4020), True, 'import numpy as np\n'), ((3747, 3777), 'torch.autograd.Variable', 'Variable', (['image'], {'volatile': '(True)'}), '(image, volatile=True)\n', (3755, 3777), False, 'from torch.autograd import Variable\n')]
|
# TODO: Explain 8 corners logic at the top and use it consistently
# Add comments of explanation
import numpy as np
import scipy.spatial
from .rotation import rotate_points_along_z
def get_size(box):
"""
Args:
box: 8x3
Returns:
size: [dx, dy, dz]
"""
distance = scipy.spatial.distance.cdist(box[0:1, :], box[1:5, :])
l = distance[0, 2]
w = distance[0, 0]
h = distance[0, 3]
return [l, w, h]
def get_heading_angle(box):
"""
Args:
box: (8, 3)
Returns:
heading_angle: float
"""
a = box[0, 0] - box[1, 0]
b = box[0, 1] - box[1, 1]
heading_angle = np.arctan2(a, b)
return heading_angle
def compute_box_3d(size, center, rotmat):
"""Compute corners of a single box from rotation matrix
Args:
size: list of float [dx, dy, dz]
center: np.array [x, y, z]
rotmat: np.array (3, 3)
Returns:
corners: (8, 3)
"""
l, h, w = [i / 2 for i in size]
center = np.reshape(center, (-1, 3))
center = center.reshape(3)
x_corners = [l, l, -l, -l, l, l, -l, -l]
y_corners = [h, -h, -h, h, h, -h, -h, h]
z_corners = [w, w, w, w, -w, -w, -w, -w]
corners_3d = np.dot(
np.transpose(rotmat), np.vstack([x_corners, y_corners, z_corners])
)
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
def corners_to_boxes(corners3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
corners: (N, 8, 3), vertex order shown in figure above
Returns:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading]
with (x, y, z) is the box center
(dx, dy, dz) as the box size
and heading as the clockwise rotation angle
"""
boxes3d = np.zeros((corners3d.shape[0], 7))
for i in range(corners3d.shape[0]):
boxes3d[i, :3] = np.mean(corners3d[i, :, :], axis=0)
boxes3d[i, 3:6] = get_size(corners3d[i, :, :])
boxes3d[i, 6] = get_heading_angle(corners3d[i, :, :])
return boxes3d
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading],
(x, y, z) is the box center
Returns:
corners: (N, 8, 3)
"""
template = np.array([[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
[-1, 1, -1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, 1]]
) / 2.
# corners3d: of shape (N, 3, 8)
corners3d = np.tile(boxes3d[:, None, 3:6], (1, 8, 1)) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.reshape(-1, 8, 3), boxes3d[:, 6]).reshape(
-1, 8, 3
)
corners3d += boxes3d[:, None, 0:3]
return corners3d
def points_in_boxes(points, boxes):
"""
Args:
points: np.array (n, 3+d)
boxes: np.array (m, 8, 3)
Returns:
mask: np.array (n, m) of type bool
"""
if len(boxes) == 0:
return np.zeros([points.shape[0], 1], dtype=bool)
points = points[:, :3] # get xyz
# u = p6 - p5
u = boxes[:, 6, :] - boxes[:, 5, :] # (m, 3)
# v = p6 - p7
v = boxes[:, 6, :] - boxes[:, 7, :] # (m, 3)
# w = p6 - p2
w = boxes[:, 6, :] - boxes[:, 2, :] # (m, 3)
# ux, vx, wx
ux = np.matmul(points, u.T) # (n, m)
vx = np.matmul(points, v.T)
wx = np.matmul(points, w.T)
# up6, up5, vp6, vp7, wp6, wp2
up6 = np.sum(u * boxes[:, 6, :], axis=1)
up5 = np.sum(u * boxes[:, 5, :], axis=1)
vp6 = np.sum(v * boxes[:, 6, :], axis=1)
vp7 = np.sum(v * boxes[:, 7, :], axis=1)
wp6 = np.sum(w * boxes[:, 6, :], axis=1)
wp2 = np.sum(w * boxes[:, 2, :], axis=1)
mask_u = np.logical_and(ux <= up6, ux >= up5) # (n, m)
mask_v = np.logical_and(vx <= vp6, vx >= vp7)
mask_w = np.logical_and(wx <= wp6, wx >= wp2)
mask = mask_u & mask_v & mask_w # (n, m)
return mask
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return (outputList)
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = scipy.spatial.ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def box3d_iou(corners1, corners2):
''' Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i,0], corners1[i,1]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,1]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[:,2].max(), corners2[:,2].max())
ymin = max(corners1[:,2].min(), corners2[:,2].min())
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou
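if __name__ == "__main__":
    # Added usage sketch, not part of the original module. Because of the relative
    # import above it is meant to be run with the package importable (python -m ...).
    # Box parameters are arbitrary illustration values.
    demo_boxes = np.array([[0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0],
                           [1.0, 1.0, 0.0, 2.0, 2.0, 2.0, 0.0]])
    demo_corners = boxes_to_corners_3d(demo_boxes)        # (2, 8, 3)
    pts = np.array([[0.0, 0.0, 0.0], [5.0, 5.0, 5.0]])
    print(points_in_boxes(pts, demo_corners))              # which point falls in which box
    print(box3d_iou(demo_corners[0], demo_corners[1]))     # 3D IoU of the two boxes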
|
[
"numpy.arctan2",
"numpy.sum",
"numpy.logical_and",
"numpy.roll",
"numpy.zeros",
"numpy.transpose",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.matmul",
"numpy.tile",
"numpy.vstack"
] |
[((646, 662), 'numpy.arctan2', 'np.arctan2', (['a', 'b'], {}), '(a, b)\n', (656, 662), True, 'import numpy as np\n'), ((1004, 1031), 'numpy.reshape', 'np.reshape', (['center', '(-1, 3)'], {}), '(center, (-1, 3))\n', (1014, 1031), True, 'import numpy as np\n'), ((1417, 1441), 'numpy.transpose', 'np.transpose', (['corners_3d'], {}), '(corners_3d)\n', (1429, 1441), True, 'import numpy as np\n'), ((1937, 1970), 'numpy.zeros', 'np.zeros', (['(corners3d.shape[0], 7)'], {}), '((corners3d.shape[0], 7))\n', (1945, 1970), True, 'import numpy as np\n'), ((3577, 3599), 'numpy.matmul', 'np.matmul', (['points', 'u.T'], {}), '(points, u.T)\n', (3586, 3599), True, 'import numpy as np\n'), ((3619, 3641), 'numpy.matmul', 'np.matmul', (['points', 'v.T'], {}), '(points, v.T)\n', (3628, 3641), True, 'import numpy as np\n'), ((3651, 3673), 'numpy.matmul', 'np.matmul', (['points', 'w.T'], {}), '(points, w.T)\n', (3660, 3673), True, 'import numpy as np\n'), ((3720, 3754), 'numpy.sum', 'np.sum', (['(u * boxes[:, 6, :])'], {'axis': '(1)'}), '(u * boxes[:, 6, :], axis=1)\n', (3726, 3754), True, 'import numpy as np\n'), ((3765, 3799), 'numpy.sum', 'np.sum', (['(u * boxes[:, 5, :])'], {'axis': '(1)'}), '(u * boxes[:, 5, :], axis=1)\n', (3771, 3799), True, 'import numpy as np\n'), ((3810, 3844), 'numpy.sum', 'np.sum', (['(v * boxes[:, 6, :])'], {'axis': '(1)'}), '(v * boxes[:, 6, :], axis=1)\n', (3816, 3844), True, 'import numpy as np\n'), ((3855, 3889), 'numpy.sum', 'np.sum', (['(v * boxes[:, 7, :])'], {'axis': '(1)'}), '(v * boxes[:, 7, :], axis=1)\n', (3861, 3889), True, 'import numpy as np\n'), ((3900, 3934), 'numpy.sum', 'np.sum', (['(w * boxes[:, 6, :])'], {'axis': '(1)'}), '(w * boxes[:, 6, :], axis=1)\n', (3906, 3934), True, 'import numpy as np\n'), ((3945, 3979), 'numpy.sum', 'np.sum', (['(w * boxes[:, 2, :])'], {'axis': '(1)'}), '(w * boxes[:, 2, :], axis=1)\n', (3951, 3979), True, 'import numpy as np\n'), ((3994, 4030), 'numpy.logical_and', 'np.logical_and', (['(ux <= up6)', '(ux >= up5)'], {}), '(ux <= up6, ux >= up5)\n', (4008, 4030), True, 'import numpy as np\n'), ((4057, 4093), 'numpy.logical_and', 'np.logical_and', (['(vx <= vp6)', '(vx >= vp7)'], {}), '(vx <= vp6, vx >= vp7)\n', (4071, 4093), True, 'import numpy as np\n'), ((4107, 4143), 'numpy.logical_and', 'np.logical_and', (['(wx <= wp6)', '(wx >= wp2)'], {}), '(wx <= wp6, wx >= wp2)\n', (4121, 4143), True, 'import numpy as np\n'), ((1231, 1251), 'numpy.transpose', 'np.transpose', (['rotmat'], {}), '(rotmat)\n', (1243, 1251), True, 'import numpy as np\n'), ((1253, 1297), 'numpy.vstack', 'np.vstack', (['[x_corners, y_corners, z_corners]'], {}), '([x_corners, y_corners, z_corners])\n', (1262, 1297), True, 'import numpy as np\n'), ((2036, 2071), 'numpy.mean', 'np.mean', (['corners3d[i, :, :]'], {'axis': '(0)'}), '(corners3d[i, :, :], axis=0)\n', (2043, 2071), True, 'import numpy as np\n'), ((2568, 2682), 'numpy.array', 'np.array', (['[[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], [1, 1, 1], [1, -1, 1],\n [-1, -1, 1], [-1, 1, 1]]'], {}), '([[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], [1, 1, 1], [1,\n -1, 1], [-1, -1, 1], [-1, 1, 1]])\n', (2576, 2682), True, 'import numpy as np\n'), ((2798, 2839), 'numpy.tile', 'np.tile', (['boxes3d[:, None, 3:6]', '(1, 8, 1)'], {}), '(boxes3d[:, None, 3:6], (1, 8, 1))\n', (2805, 2839), True, 'import numpy as np\n'), ((3262, 3307), 'numpy.zeros', 'np.zeros', (['[points.shape[0], 1]'], {'dtype': 'np.bool'}), '([points.shape[0], 1], dtype=np.bool)\n', (3270, 3307), True, 'import numpy as np\n'), ((6481, 
6525), 'numpy.sum', 'np.sum', (['((corners[0, :] - corners[1, :]) ** 2)'], {}), '((corners[0, :] - corners[1, :]) ** 2)\n', (6487, 6525), True, 'import numpy as np\n'), ((6539, 6583), 'numpy.sum', 'np.sum', (['((corners[1, :] - corners[2, :]) ** 2)'], {}), '((corners[1, :] - corners[2, :]) ** 2)\n', (6545, 6583), True, 'import numpy as np\n'), ((6597, 6641), 'numpy.sum', 'np.sum', (['((corners[0, :] - corners[4, :]) ** 2)'], {}), '((corners[0, :] - corners[4, :]) ** 2)\n', (6603, 6641), True, 'import numpy as np\n'), ((7201, 7216), 'numpy.array', 'np.array', (['rect1'], {}), '(rect1)\n', (7209, 7216), True, 'import numpy as np\n'), ((7223, 7238), 'numpy.array', 'np.array', (['rect1'], {}), '(rect1)\n', (7231, 7238), True, 'import numpy as np\n'), ((7267, 7282), 'numpy.array', 'np.array', (['rect2'], {}), '(rect2)\n', (7275, 7282), True, 'import numpy as np\n'), ((7289, 7304), 'numpy.array', 'np.array', (['rect2'], {}), '(rect2)\n', (7297, 7304), True, 'import numpy as np\n'), ((4374, 4387), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (4381, 4387), True, 'import numpy as np\n'), ((4397, 4410), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (4404, 4410), True, 'import numpy as np\n')]
|
""" Compute resonances using the cxroots library (contour integration techniques)
Authors: <NAME>, <NAME>
Karlsruhe Institute of Technology, Germany
University of California, Merced
Last modified: 20/04/2021
"""
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
from cxroots import AnnulusSector, Circle
from scipy.special import h1vp, hankel1, iv, ivp
## Entries ##
ε = float(argv[1]) # For example -1.1 + 1e-2 * 1j
η = np.sqrt(-ε)
print(f"η = {η}")
c = η + 1 / η
## Internal functions ##
def rootsAnnSec(m, rMin, rMax, aMin, aMax):
f0 = lambda k: ivp(m, η * k) * hankel1(m, k) / η + iv(m, η * k) * h1vp(m, k)
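# f1 below is df0/dk: differentiating f0 term by term produces the factor c = η + 1/η defined above.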
f1 = (
lambda k: ivp(m, η * k, 2) * hankel1(m, k)
+ c * ivp(m, η * k) * h1vp(m, k)
+ iv(m, η * k) * h1vp(m, k, 2)
)
A = AnnulusSector(center=0.0, radii=(rMin, rMax), phiRange=(aMin, aMax))
z = A.roots(f0, df=f1)
return z.roots
def writeFile(myFile, m, z):
if np.size(z, 0):
for i in range(np.size(z, 0)):
myFile.write(f"{m} {z[i].real} {z[i].imag}\n")
def calcInt():
plaTrue = ε > -1.0
if plaTrue:
Int = open(f"eps_{ε}_int", "w")
Pla = open(f"eps_{ε}_pla", "w")
else:
Int = open(f"eps_{ε}_int", "w")
for m in range(65):
print(f"m = {m}")
f0 = lambda k: ivp(m, η * k) * hankel1(m, k) / η + iv(m, η * k) * h1vp(m, k)
f1 = (
lambda k: ivp(m, η * k, 2) * hankel1(m, k)
+ c * ivp(m, η * k) * h1vp(m, k)
+ iv(m, η * k) * h1vp(m, k, 2)
)
t = np.linspace(0.2, 65.0, num=1024)
k = 1j * t
rf = np.real(f0(k))
ind = np.where(rf[1:] * rf[:-1] < 0.0)[0]
roots = np.zeros(np.shape(ind), dtype=complex)
for a, i in enumerate(ind):
C = Circle(center=1j * (t[i] + t[i + 1]) / 2.0, radius=(t[i + 1] - t[i]))
z = C.roots(f0, df=f1)
roots[a] = z.roots[0]
if plaTrue:
if m:
writeFile(Int, m, roots[1:])
writeFile(Pla, m, roots[[0]])
else:
writeFile(Int, m, roots)
else:
writeFile(Int, m, roots)
if plaTrue:
Int.close()
Pla.close()
else:
Int.close()
calcInt()
def calcResPla():
if ε < -1.0:
Pla = open(f"eps_{ε}_pla", "w")
angle = -np.pi / 4.0
for m in range(1, 65):
r = max(0.1, 0.9 * np.sqrt(1.0 - η ** (-2)) * m - 1.0)
R = max(2.0, 1.1 * np.sqrt(1.0 - η ** (-2)) * m + 1.0)
a = min(angle, -1e-3)
z = rootsAnnSec(m, r, R, a, 1e-3)
writeFile(Pla, m, z)
angle = np.angle(z[0])
Pla.close()
calcResPla()
def calcResOut():
Out = open(f"eps_{ε}_out", "w")
rMin = 0.2
rMax = 5.0
aMin = -np.pi + 0.01
aMax = 0.0
for m in range(33, 65):
print(f"m = {m}")
z = rootsAnnSec(m, rMin, rMax, aMin, aMax)
writeFile(Out, m, z)
if m > 3:
zMod = np.abs(z)
zArg = np.angle(z)
rMin = max(0.2, np.amin(zMod) * 0.75)
rMax = max(rMax, np.amax(zMod) + 3.0)
aMin = min(aMin, (-np.pi + np.amin(zArg)) / 2.0)
aMax = np.amax(zArg) / 2.0
Out.close()
calcResOut()
def calc_cx_pla():
with open(f"eps_{ε}_pla", "w") as file:
rMin, rMax = 0.1, 0.5
aMin = -np.pi / 4
for m in range(1, 65):
z = rootsAnnSec(m, rMin, rMax, aMin, 1e-3)[0]
file.write(f"{m} {z.real} {z.imag}\n")
rMin = abs(z)
rMax = abs(z) * (m + 1) / m + 1
aMin = min(2.5 * np.angle(z), -1e-3)
print(m, rMin, rMax, aMin)
calc_cx_pla()
def rewriteSave():
Int = np.loadtxt(f"eps_{ε}_int")
Pla = np.loadtxt(f"eps_{ε}_pla")
Out = np.loadtxt(f"eps_{ε}_out")
ind = np.argsort(Out[:, 1])[::-1]
out2 = Out[ind]
rep = out2[:, 1] > -1e-3
np.savez(f"eps_{ε}.npz", inner=Int, plasmon=Pla, outer=out2[rep])
rewriteSave()
def rewriteSave_pla():
Pla = np.loadtxt(f"eps_{ε}_pla")
np.savez(f"eps_{ε}.npz", plasmon=Pla)
# rewriteSave_pla()
|
[
"scipy.special.h1vp",
"numpy.size",
"numpy.abs",
"scipy.special.ivp",
"numpy.amin",
"numpy.angle",
"numpy.argsort",
"numpy.shape",
"cxroots.Circle",
"numpy.where",
"scipy.special.iv",
"numpy.loadtxt",
"numpy.linspace",
"cxroots.AnnulusSector",
"numpy.amax",
"scipy.special.hankel1",
"numpy.savez",
"numpy.sqrt"
] |
[((482, 493), 'numpy.sqrt', 'np.sqrt', (['(-ε)'], {}), '(-ε)\n', (489, 493), True, 'import numpy as np\n'), ((834, 902), 'cxroots.AnnulusSector', 'AnnulusSector', ([], {'center': '(0.0)', 'radii': '(rMin, rMax)', 'phiRange': '(aMin, aMax)'}), '(center=0.0, radii=(rMin, rMax), phiRange=(aMin, aMax))\n', (847, 902), False, 'from cxroots import AnnulusSector, Circle\n'), ((987, 1000), 'numpy.size', 'np.size', (['z', '(0)'], {}), '(z, 0)\n', (994, 1000), True, 'import numpy as np\n'), ((3819, 3845), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_int"""'], {}), "(f'eps_{ε}_int')\n", (3829, 3845), True, 'import numpy as np\n'), ((3856, 3882), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_pla"""'], {}), "(f'eps_{ε}_pla')\n", (3866, 3882), True, 'import numpy as np\n'), ((3893, 3919), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_out"""'], {}), "(f'eps_{ε}_out')\n", (3903, 3919), True, 'import numpy as np\n'), ((4013, 4078), 'numpy.savez', 'np.savez', (['f"""eps_{ε}.npz"""'], {'inner': 'Int', 'plasmon': 'Pla', 'outer': 'out2[rep]'}), "(f'eps_{ε}.npz', inner=Int, plasmon=Pla, outer=out2[rep])\n", (4021, 4078), True, 'import numpy as np\n'), ((4130, 4156), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_pla"""'], {}), "(f'eps_{ε}_pla')\n", (4140, 4156), True, 'import numpy as np\n'), ((4161, 4198), 'numpy.savez', 'np.savez', (['f"""eps_{ε}.npz"""'], {'plasmon': 'Pla'}), "(f'eps_{ε}.npz', plasmon=Pla)\n", (4169, 4198), True, 'import numpy as np\n'), ((1605, 1637), 'numpy.linspace', 'np.linspace', (['(0.2)', '(65.0)'], {'num': '(1024)'}), '(0.2, 65.0, num=1024)\n', (1616, 1637), True, 'import numpy as np\n'), ((3931, 3952), 'numpy.argsort', 'np.argsort', (['Out[:, 1]'], {}), '(Out[:, 1])\n', (3941, 3952), True, 'import numpy as np\n'), ((1025, 1038), 'numpy.size', 'np.size', (['z', '(0)'], {}), '(z, 0)\n', (1032, 1038), True, 'import numpy as np\n'), ((1700, 1732), 'numpy.where', 'np.where', (['(rf[1:] * rf[:-1] < 0.0)'], {}), '(rf[1:] * rf[:-1] < 0.0)\n', (1708, 1732), True, 'import numpy as np\n'), ((1761, 1774), 'numpy.shape', 'np.shape', (['ind'], {}), '(ind)\n', (1769, 1774), True, 'import numpy as np\n'), ((1843, 1912), 'cxroots.Circle', 'Circle', ([], {'center': '(1.0j * (t[i] + t[i + 1]) / 2.0)', 'radius': '(t[i + 1] - t[i])'}), '(center=1.0j * (t[i] + t[i + 1]) / 2.0, radius=t[i + 1] - t[i])\n', (1849, 1912), False, 'from cxroots import AnnulusSector, Circle\n'), ((2727, 2741), 'numpy.angle', 'np.angle', (['z[0]'], {}), '(z[0])\n', (2735, 2741), True, 'import numpy as np\n'), ((3079, 3088), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (3085, 3088), True, 'import numpy as np\n'), ((3108, 3119), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (3116, 3119), True, 'import numpy as np\n'), ((653, 665), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (655, 665), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((669, 679), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (673, 679), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((790, 802), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (792, 802), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((806, 819), 'scipy.special.h1vp', 'h1vp', (['m', 'k', '(2)'], {}), '(m, k, 2)\n', (810, 819), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((3300, 3313), 'numpy.amax', 'np.amax', (['zArg'], {}), '(zArg)\n', (3307, 3313), True, 'import numpy as np\n'), ((615, 628), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (618, 628), False, 
'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((632, 645), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (639, 645), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((706, 722), 'scipy.special.ivp', 'ivp', (['m', '(η * k)', '(2)'], {}), '(m, η * k, 2)\n', (709, 722), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((726, 739), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (733, 739), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((770, 780), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (774, 780), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1400, 1412), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (1402, 1412), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1416, 1426), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (1420, 1426), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1553, 1565), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (1555, 1565), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1569, 1582), 'scipy.special.h1vp', 'h1vp', (['m', 'k', '(2)'], {}), '(m, k, 2)\n', (1573, 1582), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((3148, 3161), 'numpy.amin', 'np.amin', (['zMod'], {}), '(zMod)\n', (3155, 3161), True, 'import numpy as np\n'), ((3199, 3212), 'numpy.amax', 'np.amax', (['zMod'], {}), '(zMod)\n', (3206, 3212), True, 'import numpy as np\n'), ((3713, 3724), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (3721, 3724), True, 'import numpy as np\n'), ((753, 766), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (756, 766), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1362, 1375), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (1365, 1375), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1379, 1392), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (1386, 1392), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1461, 1477), 'scipy.special.ivp', 'ivp', (['m', '(η * k)', '(2)'], {}), '(m, η * k, 2)\n', (1464, 1477), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1481, 1494), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (1488, 1494), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1529, 1539), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (1533, 1539), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((3259, 3272), 'numpy.amin', 'np.amin', (['zArg'], {}), '(zArg)\n', (3266, 3272), True, 'import numpy as np\n'), ((1512, 1525), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (1515, 1525), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((2490, 2512), 'numpy.sqrt', 'np.sqrt', (['(1.0 - η ** -2)'], {}), '(1.0 - η ** -2)\n', (2497, 2512), True, 'import numpy as np\n'), ((2557, 2579), 'numpy.sqrt', 'np.sqrt', (['(1.0 - η ** -2)'], {}), '(1.0 - η ** -2)\n', (2564, 2579), True, 'import numpy as np\n')]
|
import sys
import ReadFile
import pickle
import World
import importlib.util
import os.path as osp
import policy_generator as pg
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
import numpy as np
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def get_example_path():
return sys.argv[1]
def get_config_path(path):
config_filepath=osp.join(path,'config.txt')
return config_filepath
def get_file_paths(example_path,config_obj):
# File Names
locations_filename=None
agents_filename=osp.join(example_path,config_obj.agents_filename)
interactions_FilesList_filename=osp.join(example_path,config_obj.interactions_files_list)
events_FilesList_filename=osp.join(example_path,config_obj.events_files_list)
if config_obj.locations_filename=="":
locations_filename=None
else:
locations_filename=osp.join(example_path,config_obj.locations_filename)
return agents_filename, interactions_FilesList_filename, events_FilesList_filename, locations_filename
def get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj):
# Reading through a file (for interactions/events) that contain file names which contain interactions and event details for a time step
interactions_files_list=None
events_files_list=None
if config_obj.interactions_files_list=='':
print('No Interaction files uploaded!')
else:
interactionFiles_obj=ReadFile.ReadFilesList(interactions_FilesList_filename)
interactions_files_list=list(map(lambda x : osp.join(example_path,x) ,interactionFiles_obj.file_list))
if interactions_files_list==[]:
print('No Interactions inputted')
if config_obj.events_files_list=='':
print('No Event files uploaded!')
else:
eventFiles_obj=ReadFile.ReadFilesList(events_FilesList_filename)
events_files_list=list(map(lambda x : osp.join(example_path,x) ,eventFiles_obj.file_list))
if events_files_list==[]:
print('No Events inputted')
return interactions_files_list, events_files_list
def get_model(example_path):
UserModel = module_from_file("Generate_model", osp.join(example_path,'UserModel.py'))
model = UserModel.UserModel()
return model
def get_policy(example_path):
Generate_policy = module_from_file("Generate_policy", osp.join(example_path,'Generate_policy.py'))
policy_list, event_restriction_fn=Generate_policy.generate_policy()
return policy_list, event_restriction_fn
if __name__=="__main__":
example_path = get_example_path()
config_filename = get_config_path(example_path)
# Read Config file using ReadFile.ReadConfiguration
config_obj=ReadFile.ReadConfiguration(config_filename)
agents_filename, interactions_FilesList_filename,\
events_FilesList_filename, locations_filename = get_file_paths(example_path,config_obj)
interactions_files_list, events_files_list = get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj)
# User Model
model = get_model(example_path)
# policy_list, event_restriction_fn=get_policy(example_path)
##########################################################################################
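# Sweep a grid of pool-testing strategies: X = agents per testtube (napt), Y = testtubes
# per agent (ntpa). One full world simulation is run per combination, recording total
# infections, false positives and quarantined days.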
num_tests = 90
ntpa_max=6
napt_max=6
X=np.arange(1, napt_max+1, 1)
Y=np.arange(1, ntpa_max+1, 1)
X,Y = np.meshgrid(X,Y)
print(X)
print(Y)
data_list={'Infected':np.zeros((ntpa_max,napt_max)),'False Positives':np.zeros((ntpa_max,napt_max)),'Quarantined':np.zeros((ntpa_max,napt_max))}
for i in range(napt_max):
for j in range(ntpa_max):
policy_list, event_restriction_fn = pg.generate_group_testing_tests_policy(num_tests, i+1, j+1)
world_obj=World.World(config_obj,model,policy_list,event_restriction_fn,agents_filename,interactions_files_list,locations_filename,events_files_list)
tdict, total_infection, total_quarantined_days, wrongly_quarantined_days, total_test_cost = world_obj.simulate_worlds(plot=False)
data_list['Infected'][j][i]=total_infection
data_list['False Positives'][j][i]=world_obj.total_false_positives
data_list['Quarantined'][j][i]=total_quarantined_days
print(data_list)
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(X, Y, np.array(data_list['False Positives']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.xlabel("Number of Agents per testtube")
plt.ylabel("Number of testtubes per agent")
plt.title("Pool testing strategies vs total false positives")
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(X, Y, np.array(data_list['Infected']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.xlabel("Number of Agents per testtube")
plt.ylabel("Number of testtubes per agent")
plt.title("Pool testing strategies vs total infections")
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(X, Y, np.array(data_list['Quarantined']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.xlabel("Number of Agents per testtube")
plt.ylabel("Number of testtubes per agent")
plt.title("Pool testing strategies vs total quarantine")
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
###############################################################################################
|
[
"matplotlib.pyplot.title",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"ReadFile.ReadConfiguration",
"ReadFile.ReadFilesList",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"policy_generator.generate_group_testing_tests_policy",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"World.World"
] |
[((573, 601), 'os.path.join', 'osp.join', (['path', '"""config.txt"""'], {}), "(path, 'config.txt')\n", (581, 601), True, 'import os.path as osp\n'), ((740, 790), 'os.path.join', 'osp.join', (['example_path', 'config_obj.agents_filename'], {}), '(example_path, config_obj.agents_filename)\n', (748, 790), True, 'import os.path as osp\n'), ((826, 884), 'os.path.join', 'osp.join', (['example_path', 'config_obj.interactions_files_list'], {}), '(example_path, config_obj.interactions_files_list)\n', (834, 884), True, 'import os.path as osp\n'), ((914, 966), 'os.path.join', 'osp.join', (['example_path', 'config_obj.events_files_list'], {}), '(example_path, config_obj.events_files_list)\n', (922, 966), True, 'import os.path as osp\n'), ((2944, 2987), 'ReadFile.ReadConfiguration', 'ReadFile.ReadConfiguration', (['config_filename'], {}), '(config_filename)\n', (2970, 2987), False, 'import ReadFile\n'), ((3560, 3589), 'numpy.arange', 'np.arange', (['(1)', '(napt_max + 1)', '(1)'], {}), '(1, napt_max + 1, 1)\n', (3569, 3589), True, 'import numpy as np\n'), ((3594, 3623), 'numpy.arange', 'np.arange', (['(1)', '(ntpa_max + 1)', '(1)'], {}), '(1, ntpa_max + 1, 1)\n', (3603, 3623), True, 'import numpy as np\n'), ((3632, 3649), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (3643, 3649), True, 'import numpy as np\n'), ((4542, 4587), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (4554, 4587), True, 'import matplotlib.pyplot as plt\n'), ((4714, 4757), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Agents per testtube"""'], {}), "('Number of Agents per testtube')\n", (4724, 4757), True, 'import matplotlib.pyplot as plt\n'), ((4762, 4805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of testtubes per agent"""'], {}), "('Number of testtubes per agent')\n", (4772, 4805), True, 'import matplotlib.pyplot as plt\n'), ((4810, 4871), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool testing strategies vs total false positives"""'], {}), "('Pool testing strategies vs total false positives')\n", (4819, 4871), True, 'import matplotlib.pyplot as plt\n'), ((4921, 4931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4929, 4931), True, 'import matplotlib.pyplot as plt\n'), ((4947, 4992), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (4959, 4992), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5155), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Agents per testtube"""'], {}), "('Number of Agents per testtube')\n", (5122, 5155), True, 'import matplotlib.pyplot as plt\n'), ((5160, 5203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of testtubes per agent"""'], {}), "('Number of testtubes per agent')\n", (5170, 5203), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5264), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool testing strategies vs total infections"""'], {}), "('Pool testing strategies vs total infections')\n", (5217, 5264), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5324), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5322, 5324), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5385), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (5352, 5385), True, 'import matplotlib.pyplot as plt\n'), ((5508, 5551), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of 
Agents per testtube"""'], {}), "('Number of Agents per testtube')\n", (5518, 5551), True, 'import matplotlib.pyplot as plt\n'), ((5556, 5599), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of testtubes per agent"""'], {}), "('Number of testtubes per agent')\n", (5566, 5599), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5660), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool testing strategies vs total quarantine"""'], {}), "('Pool testing strategies vs total quarantine')\n", (5613, 5660), True, 'import matplotlib.pyplot as plt\n'), ((5710, 5720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5718, 5720), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1130), 'os.path.join', 'osp.join', (['example_path', 'config_obj.locations_filename'], {}), '(example_path, config_obj.locations_filename)\n', (1085, 1130), True, 'import os.path as osp\n'), ((1684, 1739), 'ReadFile.ReadFilesList', 'ReadFile.ReadFilesList', (['interactions_FilesList_filename'], {}), '(interactions_FilesList_filename)\n', (1706, 1739), False, 'import ReadFile\n'), ((2055, 2104), 'ReadFile.ReadFilesList', 'ReadFile.ReadFilesList', (['events_FilesList_filename'], {}), '(events_FilesList_filename)\n', (2077, 2104), False, 'import ReadFile\n'), ((2414, 2452), 'os.path.join', 'osp.join', (['example_path', '"""UserModel.py"""'], {}), "(example_path, 'UserModel.py')\n", (2422, 2452), True, 'import os.path as osp\n'), ((2593, 2637), 'os.path.join', 'osp.join', (['example_path', '"""Generate_policy.py"""'], {}), "(example_path, 'Generate_policy.py')\n", (2601, 2637), True, 'import os.path as osp\n'), ((3703, 3733), 'numpy.zeros', 'np.zeros', (['(ntpa_max, napt_max)'], {}), '((ntpa_max, napt_max))\n', (3711, 3733), True, 'import numpy as np\n'), ((3751, 3781), 'numpy.zeros', 'np.zeros', (['(ntpa_max, napt_max)'], {}), '((ntpa_max, napt_max))\n', (3759, 3781), True, 'import numpy as np\n'), ((3795, 3825), 'numpy.zeros', 'np.zeros', (['(ntpa_max, napt_max)'], {}), '((ntpa_max, napt_max))\n', (3803, 3825), True, 'import numpy as np\n'), ((4621, 4659), 'numpy.array', 'np.array', (["data_list['False Positives']"], {}), "(data_list['False Positives'])\n", (4629, 4659), True, 'import numpy as np\n'), ((5026, 5057), 'numpy.array', 'np.array', (["data_list['Infected']"], {}), "(data_list['Infected'])\n", (5034, 5057), True, 'import numpy as np\n'), ((5419, 5453), 'numpy.array', 'np.array', (["data_list['Quarantined']"], {}), "(data_list['Quarantined'])\n", (5427, 5453), True, 'import numpy as np\n'), ((3940, 4003), 'policy_generator.generate_group_testing_tests_policy', 'pg.generate_group_testing_tests_policy', (['num_tests', '(i + 1)', '(j + 1)'], {}), '(num_tests, i + 1, j + 1)\n', (3978, 4003), True, 'import policy_generator as pg\n'), ((4022, 4176), 'World.World', 'World.World', (['config_obj', 'model', 'policy_list', 'event_restriction_fn', 'agents_filename', 'interactions_files_list', 'locations_filename', 'events_files_list'], {}), '(config_obj, model, policy_list, event_restriction_fn,\n agents_filename, interactions_files_list, locations_filename,\n events_files_list)\n', (4033, 4176), False, 'import World\n'), ((1792, 1817), 'os.path.join', 'osp.join', (['example_path', 'x'], {}), '(example_path, x)\n', (1800, 1817), True, 'import os.path as osp\n'), ((2151, 2176), 'os.path.join', 'osp.join', (['example_path', 'x'], {}), '(example_path, x)\n', (2159, 2176), True, 'import os.path as osp\n')]
|
from django.db import connection
import numpy as np
def getstudentcoursewisePLO(studentID, courseID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
and co.course_id = '{}'
GROUP BY p.ploID
'''.format(studentID, courseID))
row = cursor.fetchall()
return row
def getcoursewiseavgPLO(courseID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks)
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and co.course_id = '{}'
GROUP BY p.ploID
'''.format(courseID))
row = cursor.fetchall()
return row
def getcompletedcourses(studentID):
with connection.cursor() as cursor:
cursor.execute(
'''
SELECT distinct s.course_id
FROM app_registration_t r,
app_evaluation_t e,
app_section_t s
WHERE r.registrationID = e.registration_id
and r.section_id = s.sectionID
and r.student_id = '{}'
'''.format(studentID))
row = cursor.fetchall()
return row
def getcorrespondingstudentid(userID):
with connection.cursor() as cursor:
cursor.execute(
'''
SELECT studentID
FROM app_student_t s
WHERE s.user_ptr_id = '{}'
'''.format(userID))
row = cursor.fetchall()
return row
def getstudentprogramwisePLO(studentID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_student_t s,
app_program_t pr
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
and s.studentID = r.student_id
and s.program_id = pr.programID
GROUP BY p.ploID
'''.format(studentID))
row = cursor.fetchall()
return row
def getprogramwiseavgPLO(programID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks)
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = '{}'
GROUP BY p.ploID
'''.format(programID))
row = cursor.fetchall()
return row
def getstudentprogramid(studentID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT s.program_id
FROM app_student_t s
WHERE s.studentID = '{}'
'''.format(studentID))
row = cursor.fetchall()
return row
def getstudentallcoursePLO(studentID, category):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as ploNum,co.course_id,sum(e.obtainedMarks),sum(a.totalMarks), derived.Total
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
(
SELECT p.ploNum as ploNum,sum(a.totalMarks) as Total, r.student_id as StudentID
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
GROUP BY r.student_id,p.ploID) derived
WHERE r.student_id = derived.StudentID
and e.registration_id = r.registrationID
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum = derived.ploNum
GROUP BY p.ploID,co.course_id
'''.format(studentID))
row = cursor.fetchall()
table = []
courses = []
for entry in row:
if entry[1] not in courses:
courses.append(entry[1])
courses.sort()
plo = ["PLO1", "PLO2", "PLO3", "PLO4", "PLO5", "PLO6", "PLO7", "PLO8", "PLO9", "PLO10", "PLO11", "PLO12"]
for i in courses:
temptable = []
if category == 'report':
temptable = [i]
for j in plo:
found = False
for k in row:
if j == k[0] and i == k[1]:
if category == 'report':
temptable.append(np.round(100 * k[2] / k[3], 2))
elif category == 'chart':
temptable.append(np.round(100 * k[2] / k[4], 2))
found = True
if not found:
if category == 'report':
temptable.append('N/A')
elif category == 'chart':
temptable.append(0)
table.append(temptable)
return plo, courses, table
def getfacultycoursewisePLO(courseID, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.first_name, f.last_name, f.plonum, COUNT(*) as achieved_cnt
FROM
(
SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
accounts_user u,
app_employee_t emp
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.faculty_id IN
(
SELECT DISTINCT s.faculty_id
FROM app_section_t s
WHERE s.course_id = '{}'
)
and s.semester IN ({})
and s.course_id ='{}'
and s.faculty_id = emp.employeeID
and emp.user_ptr_id = u.id
)f
WHERE f.percentage >= 40
GROUP BY f.first_name, f.plonum;
'''.format(courseID, sem, courseID))
row1 = cursor.fetchall()
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
accounts_user u,
app_employee_t emp
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.faculty_id IN
(
SELECT DISTINCT s.faculty_id
FROM app_section_t s
WHERE s.course_id = '{}'
)
and s.semester IN ({})
and s.course_id ='{}'
and s.faculty_id = emp.employeeID
and emp.user_ptr_id = u.id
)f
GROUP BY f.first_name, f.plonum;
'''.format(courseID, sem, courseID))
row2 = cursor.fetchall()
faculty = []
plonum = []
plos1 = []
plos2 = []
for record in row1:
faculty.append(record[0]+' '+record[1])
plonum.append(record[2])
plos1.append(record[3])
for record in row2:
plos2.append(record[0])
plos = 100*(np.array(plos1)/np.array(plos2))
plos = plos.tolist()
faculty = list(set(faculty))
plonum = list(set(plonum))
plonum.sort()
plonum.sort(key=len, reverse=False)
plos = np.array(plos)
plos = np.split(plos, len(plos)/len(plonum))
new_plo=[]
for plo in plos:
new_plo.append(plo.tolist())
return faculty, plonum, new_plo
def getsemestercoursewisePLO(courseID, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.semester, f.plonum, COUNT(*) as achieved_cnt
FROM
(
SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.semester IN ({})
and co.course_id ='{}'
and s.course_id = co.course_id
)f
WHERE f.percentage >= 40
GROUP BY f.semester, f.plonum;
'''.format(sem, courseID))
row1 = cursor.fetchall()
cursor.execute('''
SELECT COUNT(*) as all_cnt
FROM
(
SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.semester IN ({})
and co.course_id ='{}'
and s.course_id = co.course_id
)f
GROUP BY f.semester, f.plonum;
'''.format(sem, courseID))
row2 = cursor.fetchall()
semester = []
plonum = []
acheived = []
all_cnt = []
for record in row1:
semester.append(record[0])
plonum.append(record[1])
acheived.append(record[2])
for record in row2:
all_cnt.append(record[0])
acheived_per = 100*(np.array(acheived)/np.array(all_cnt))
semester = list(set(semester))
plonum = list(set(plonum))
failed_per = 100 - acheived_per
acheived_per = np.split(acheived_per, len(acheived_per)/len(semester))
failed_per = np.split(failed_per, len(failed_per)/len(semester))
acheived=[]
for plo in acheived_per:
acheived.append(plo.tolist())
failed=[]
for plo in failed_per:
failed.append(plo.tolist())
return semester, plonum, acheived, failed
def getplowisecoursecomparism(plos, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
    ploo = ','.join('"{}"'.format(plo) for plo in plos)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.course_id, f.ploNum, COUNT(*)
FROM
(
SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum in ({})
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage >= 40
GROUP BY f.ploNum, f.course_id;
'''.format(ploo, sem))
row1 = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum in ({})
and a.section_id = s.sectionID
and s.semester IN ({})
)f
GROUP BY f.ploNum, f.course_id;
'''.format(ploo, sem))
row2 = cursor.fetchall()
courses = []
plonum = []
acheived = []
all_cnt = []
for record in row1:
courses.append(record[0])
plonum.append(record[1])
acheived.append(record[2])
for record in row2:
all_cnt.append(record[0])
acheived_per = 100*(np.array(acheived)/np.array(all_cnt))
courses = list(set(courses))
plonum = list(set(plonum))
acheived_per = np.split(acheived_per, len(acheived_per)/len(plonum))
acheived=[]
for plo in acheived_per:
acheived.append(plo.tolist())
return courses, plonum, acheived
def getprogramsemesterwiseplocount(program, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.plonum, COUNT(*)
FROM
(
SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage>=40
GROUP BY f.plonum;
'''.format(program, sem))
row1 = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
GROUP BY f.plonum;
'''.format(program, sem))
row2 = cursor.fetchall()
plonum = []
acheived = []
attempted = []
for record in row1:
plonum.append(record[0])
acheived.append(record[1])
for record in row2:
attempted.append(record[0])
plonum = list(set(plonum))
acheived = np.array(acheived)
attempted = np.array(attempted)
new_acheived=[]
for plo in acheived:
new_acheived.append(plo.tolist())
new_attempted=[]
for plo in attempted:
new_attempted.append(plo.tolist())
plonum.sort()
plonum.sort(key=len, reverse=False)
return plonum, new_acheived, new_attempted
def getprogramwiseploandcourses(program, semesters):
    sem = ','.join('"{}"'.format(semester) for semester in semesters)
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.ploNum, f.course_id, COUNT(*)
FROM
(
SELECT p.ploNum as plonum, s.course_id, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage>=40
GROUP BY f.ploNum, f.course_id
'''.format(program, sem))
row = cursor.fetchall()
plonum = []
courses = []
counts = []
for record in row:
plonum.append(record[0])
courses.append(record[1])
plonum = list(set(plonum))
plonum.sort()
plonum.sort(key=len, reverse=False)
courses = list(set(courses))
courses.sort()
table = np.zeros((len(courses), len(plonum)))
for record in row:
table[courses.index(record[1])][plonum.index(record[0])] += record[2]
table = table.tolist()
return plonum, courses, table
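# Minimal usage sketch of the query helpers above, assuming it is called from a Django
# view; the course ID 'CSE110' and the template name below are hypothetical examples.
def example_student_plo_report(request):
    from django.shortcuts import render
    student_id = getcorrespondingstudentid(request.user.id)[0][0]
    course_rows = getstudentcoursewisePLO(student_id, 'CSE110')
    plo, courses, table = getstudentallcoursePLO(student_id, 'report')
    return render(request, 'plo_report.html',
                  {'course_rows': course_rows, 'plo': plo, 'courses': courses, 'table': table})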
|
[
"numpy.round",
"numpy.array",
"django.db.connection.cursor"
] |
[((112, 131), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (129, 131), False, 'from django.db import connection\n'), ((930, 949), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (947, 949), False, 'from django.db import connection\n'), ((1672, 1691), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1689, 1691), False, 'from django.db import connection\n'), ((2186, 2205), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (2203, 2205), False, 'from django.db import connection\n'), ((2502, 2521), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (2519, 2521), False, 'from django.db import connection\n'), ((3445, 3464), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (3462, 3464), False, 'from django.db import connection\n'), ((4189, 4208), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4206, 4208), False, 'from django.db import connection\n'), ((4508, 4527), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4525, 4527), False, 'from django.db import connection\n'), ((7192, 7211), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (7209, 7211), False, 'from django.db import connection\n'), ((10761, 10775), 'numpy.array', 'np.array', (['plos'], {}), '(plos)\n', (10769, 10775), True, 'import numpy as np\n'), ((11155, 11174), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (11172, 11174), False, 'from django.db import connection\n'), ((14719, 14738), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (14736, 14738), False, 'from django.db import connection\n'), ((15853, 15872), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (15870, 15872), False, 'from django.db import connection\n'), ((17774, 17793), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (17791, 17793), False, 'from django.db import connection\n'), ((18929, 18948), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (18946, 18948), False, 'from django.db import connection\n'), ((20318, 20336), 'numpy.array', 'np.array', (['acheived'], {}), '(acheived)\n', (20326, 20336), True, 'import numpy as np\n'), ((20357, 20376), 'numpy.array', 'np.array', (['attempted'], {}), '(attempted)\n', (20365, 20376), True, 'import numpy as np\n'), ((20896, 20915), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (20913, 20915), False, 'from django.db import connection\n'), ((10543, 10558), 'numpy.array', 'np.array', (['plos1'], {}), '(plos1)\n', (10551, 10558), True, 'import numpy as np\n'), ((10559, 10574), 'numpy.array', 'np.array', (['plos2'], {}), '(plos2)\n', (10567, 10574), True, 'import numpy as np\n'), ((13831, 13849), 'numpy.array', 'np.array', (['acheived'], {}), '(acheived)\n', (13839, 13849), True, 'import numpy as np\n'), ((13850, 13867), 'numpy.array', 'np.array', (['all_cnt'], {}), '(all_cnt)\n', (13858, 13867), True, 'import numpy as np\n'), ((17239, 17257), 'numpy.array', 'np.array', (['acheived'], {}), '(acheived)\n', (17247, 17257), True, 'import numpy as np\n'), ((17258, 17275), 'numpy.array', 'np.array', (['all_cnt'], {}), '(all_cnt)\n', (17266, 17275), True, 'import numpy as np\n'), ((6562, 6592), 'numpy.round', 'np.round', (['(100 * k[2] / k[3])', '(2)'], {}), '(100 * k[2] / k[3], 2)\n', (6570, 6592), True, 'import numpy as np\n'), ((6681, 6711), 'numpy.round', 'np.round', (['(100 * k[2] / k[4])', '(2)'], {}), '(100 * 
k[2] / k[4], 2)\n', (6689, 6711), True, 'import numpy as np\n')]
|
#Color dict
import numpy as np
# import wandb
import cv2
import torch
import os
colors = {
'0':[(128, 64, 128), (244, 35, 232), (0, 0, 230), (220, 190, 40), (70, 70, 70), (70, 130, 180), (0, 0, 0)],
'1':[(128, 64, 128), (250, 170, 160), (244, 35, 232), (230, 150, 140), (220, 20, 60), (255, 0, 0), (0, 0, 230), (255, 204, 54), (0, 0, 70), (220, 190, 40), (190, 153, 153), (174, 64, 67), (153, 153, 153), (70, 70, 70), (107, 142, 35), (70, 130, 180)],
'2':[(128, 64, 128), (250, 170, 160), (244, 35, 232), (230, 150, 140), (220, 20, 60), (255, 0, 0), (0, 0, 230), (119, 11, 32), (255, 204, 54), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (220, 190, 40), (102, 102, 156), (190, 153, 153), (180, 165, 180), (174, 64, 67), (220, 220, 0), (250, 170, 30), (153, 153, 153), (169, 187, 214), (70, 70, 70), (150, 100, 100), (107, 142, 35), (70, 130, 180)],
'3':[(128, 64, 128), (250, 170, 160), (81, 0, 81), (244, 35, 232), (230, 150, 140), (152, 251, 152), (220, 20, 60), (246, 198, 145), (255, 0, 0), (0, 0, 230), (119, 11, 32), (255, 204, 54), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (136, 143, 153), (220, 190, 40), (102, 102, 156), (190, 153, 153), (180, 165, 180), (174, 64, 67), (220, 220, 0), (250, 170, 30), (153, 153, 153), (153, 153, 153), (169, 187, 214), (70, 70, 70), (150, 100, 100), (150, 120, 90), (107, 142, 35), (70, 130, 180), (169, 187, 214), (0, 0, 142)]
}
def visualize(mask,n_classes,ignore_label,gt = None):
if(n_classes<len(colors['0'])):
id = 0
elif(n_classes<len(colors['1'])):
id = 1
elif(n_classes<len(colors['2'])):
id = 2
else:
id = 3
out_mask = np.zeros((mask.shape[0],mask.shape[1],3))
for i in range(n_classes):
out_mask[mask == i] = colors[str(id)][i]
if(gt is not None):
out_mask[gt == ignore_label] = (255,255,255)
out_mask[np.where((out_mask == [0, 0, 0]).all(axis=2))] = (255,255,255)
return out_mask
def error_map(pred,gt,cfg):
canvas = pred.copy()
canvas[canvas == gt] = 255
canvas[gt == cfg.Loss.ignore_label] = 255
return canvas
# def segmentation_validation_visualization(epoch,sample,pred,batch_size,class_labels,wandb_image,cfg):
# os.makedirs(os.path.join(cfg.train.output_dir,'Visualization',str(epoch)),exist_ok = True)
# input = sample['image'].permute(0,2,3,1).detach().cpu().numpy()
# label = sample['label'].detach().cpu().numpy().astype(np.uint8)
# pred = torch.argmax(pred[0],dim = 1).detach().cpu().numpy().astype(np.uint8)
# for i in range(batch_size):
# errormap = error_map(pred[i],label[i],cfg)
# wandb_image.append(wandb.Image(cv2.resize(cv2.cvtColor(input[i], cv2.COLOR_BGR2RGB),(cfg.dataset.width//4,cfg.dataset.height//4)), masks={
# "predictions" : {
# "mask_data" : cv2.resize(pred[i],(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# },
# "ground_truth" : {
# "mask_data" : cv2.resize(label[i],(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# }
# ,
# "error_map" : {
# "mask_data" : cv2.resize(errormap,(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# }
# }))
# if(cfg.valid.write):
# prediction = visualize(pred[i],cfg.model.n_classes,cfg.Loss.ignore_label,gt = label[i])
# mask = visualize(label[i],cfg.model.n_classes,cfg.Loss.ignore_label,gt = label[i])
# out = np.concatenate([((input[i]* np.array(cfg.dataset.mean) + np.array(cfg.dataset.std))*255).astype(int),mask,prediction,visualize(errormap,cfg.model.n_classes,cfg.Loss.ignore_label,label[i])],axis = 1)
# cv2.imwrite(os.path.join(cfg.train.output_dir,'Visualization',str(epoch),sample['img_name'][i]),out)
# return wandb_image
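# Minimal usage sketch of visualize() above, assuming pred and gt are (H, W) integer label
# maps; the class count, ignore label and output path below are hypothetical defaults.
def example_save_visualization(pred, gt, n_classes=19, ignore_label=255, out_path='vis.png'):
    colored_pred = visualize(pred, n_classes, ignore_label, gt=gt)
    cv2.imwrite(out_path, colored_pred.astype(np.uint8))
    return colored_pred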
|
[
"numpy.zeros"
] |
[((1692, 1735), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1], 3)'], {}), '((mask.shape[0], mask.shape[1], 3))\n', (1700, 1735), True, 'import numpy as np\n')]
|
"""
@author: <NAME>, Energy Information Networks & Systems @ TU Darmstadt
"""
import numpy as np
import tensorflow as tf
from models.igc import ImplicitGenerativeCopula, GMMNCopula
from models.utils import cdf_interpolator
import pyvinecopulib as pv
from models import mv_copulas
import matplotlib.pyplot as plt
class CopulaAutoEncoder(object):
def __init__(self, x, ae_model):
if isinstance(ae_model, str):
ae_model = tf.keras.models.load_model(ae_model)
self.encoder_model = ae_model.encoder
self.decoder_model = ae_model.decoder
self.z = self._encode(x)
self.margins = self._fit_margins(self.z)
self.u = self._cdf(self.z)
def _encode(self, x):
# encode images to latent space
return self.encoder_model(x).numpy()
def _decode(self, z):
# decode latent space samples to images
return self.decoder_model(z).numpy()
def _cdf(self, z):
# get pseudo obs
u = np.zeros_like(z)
for i in range(u.shape[1]):
u[:,i] = self.margins[i].cdf(z[:,i])
return u
def _ppf(self, u):
# inverse marginal cdf
z = np.zeros_like(u)
for i in range(z.shape[1]):
z[:,i] = self.margins[i].ppf(u[:,i])
return z
def _fit_margins(self, z):
# get the marginal distributions via ecdf interpolation
margins = []
for i in range(z.shape[1]):
margins.append(cdf_interpolator(z[:,i],
kind="linear",
x_min=np.min(z[:,i])-np.diff(np.sort(z[:,i])[0:2])[0],
x_max=np.max(z[:,i])+np.diff(np.sort(z[:,i])[-2:])[0]))
return margins
def _sample_u(self, n_samples=1):
# sample from copula
return self.copula.simulate(n_samples)
def _sample_z(self, n_samples=1, u=None):
# sample from latent space
if u is None:
return self._ppf(self._sample_u(n_samples))
else:
return self._ppf(u)
def sample_images(self, n_samples=1, z=None):
# sample an image
if z is None:
return self._decode(self._sample_z(n_samples))
else:
return self._decode(z)
def show_images(self, n=5, imgs=None, cmap="gray", title=None):
if imgs is None:
imgs = self.sample_images(n)
plt.figure(figsize=(16, 3))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(np.squeeze(imgs[i]*255), vmin=0, vmax=255, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.suptitle(title)
plt.tight_layout()
class IGCAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Implicit Generative Copula """
def fit(self, epochs=100, batch_size=100, n_samples_train=200, regen_noise=1000000, validation_split=0.0, validation_data=None):
if validation_data is not None:
u_test = self._cdf((self._encode(validation_data)))
else:
u_test = None
#self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2, n_layers=3, n_neurons=200)
hist = self.copula.fit(self.u, epochs=epochs, batch_size=batch_size, validation_data=u_test, regen_noise=regen_noise, validation_split=0.0)
return hist
def save_copula_model(self, path):
self.copula.save_model(path)
def load_copula_model(self, path, n_samples_train=200):
self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula.load_model(path)
print("Loaded saved copula model.")
class GMMNCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with GMMN Copula """
def fit(self, epochs=100, batch_size=100, n_samples_train=200, regen_noise=10000000, validation_split=0.0, validation_data=None):
if validation_data is not None:
u_test = self._cdf((self._encode(validation_data)))
else:
u_test = None
#self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2, n_layers=3, n_neurons=200)
hist = self.copula.fit(self.u, epochs=epochs, batch_size=batch_size, validation_data=u_test, regen_noise=regen_noise, validation_split=0.0)
return hist
def save_copula_model(self, path):
self.copula.save_model(path)
def load_copula_model(self, path, n_samples_train=200):
self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula.load_model(path)
print("Loaded saved copula model.")
class VineCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Vine Copula """
def fit(self, families="nonparametric", show_trace=False, trunc_lvl=18446744073709551615):
if families == "nonparametric":
controls = pv.FitControlsVinecop(family_set=[pv.BicopFamily.tll], trunc_lvl=trunc_lvl, show_trace=show_trace)
elif families == "parametric":
controls = pv.FitControlsVinecop(family_set=[pv.BicopFamily.indep,
pv.BicopFamily.gaussian,
pv.BicopFamily.student,
pv.BicopFamily.clayton,
pv.BicopFamily.gumbel,
pv.BicopFamily.frank,
pv.BicopFamily.joe,
pv.BicopFamily.bb1,
pv.BicopFamily.bb6,
pv.BicopFamily.bb7,
pv.BicopFamily.bb8],
trunc_lvl=trunc_lvl,
show_trace=show_trace)
else:
controls = pv.FitControlsVinecop(trunc_lvl=trunc_lvl, show_trace=show_trace)
self.copula = pv.Vinecop(data=self.u, controls=controls)
def save_model(self, path):
self.copula.to_json(path)
print(f"Saved vine copula model to {path}.")
def load_model(self, path):
self.copula = pv.Vinecop(filename=path)
print("Loaded vine copula model.")
class GaussianCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Gaussian Copula """
def fit(self):
self.copula = mv_copulas.GaussianCopula()
self.copula.fit(self.u)
class IndependenceCopulaCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Independence Copula """
def fit(self):
pass
def _sample_u(self, n_samples):
return np.random.uniform(0.0, 1.0, size=(n_samples, self.u.shape[1]))
class VariationalAutoEncoder(object):
def __init__(self, decoder_model="models/autoencoder/VAE_decoder_fashion_mnist_100epochs", latent_dim=25):
if isinstance(decoder_model, str):
self.decoder_model = tf.keras.models.load_model(decoder_model)
else:
self.decoder_model = decoder_model
self.decoder_model.compile()
        self.latent_dim = latent_dim
def _sample_z(self, n_samples):
# sample from latent space
return np.random.normal(loc=0.0, scale=1.0, size=(n_samples, self.latent_dim))
def _decode(self,z):
return self.decoder_model.predict(z)
def fit(self):
pass
def sample_images(self, n_samples):
# sample an image
return self._decode(self._sample_z(n_samples))
def show_images(self, n=5, imgs=None, cmap="gray", title=None):
if imgs is None:
imgs = self.sample_images(n)
plt.figure(figsize=(16, 3))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(np.squeeze(imgs[i]*255), vmin=0, vmax=255, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.suptitle(title)
plt.tight_layout()
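# Minimal end-to-end sketch, assuming x_train holds images compatible with a saved Keras
# autoencoder; the model path and the training settings below are hypothetical.
def example_igc_cae(x_train, ae_path='models/autoencoder/AE_fashion_mnist'):
    cae = IGCAutoEncoder(x_train, ae_path)
    cae.fit(epochs=50, batch_size=100)
    cae.show_images(n=5, title='IGC-CAE samples')
    return cae.sample_images(n_samples=16)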
|
[
"models.mv_copulas.GaussianCopula",
"numpy.random.uniform",
"matplotlib.pyplot.subplot",
"numpy.zeros_like",
"tensorflow.keras.models.load_model",
"models.igc.GMMNCopula",
"pyvinecopulib.FitControlsVinecop",
"matplotlib.pyplot.suptitle",
"models.igc.ImplicitGenerativeCopula",
"numpy.sort",
"matplotlib.pyplot.figure",
"pyvinecopulib.Vinecop",
"numpy.min",
"numpy.max",
"numpy.random.normal",
"numpy.squeeze",
"matplotlib.pyplot.tight_layout"
] |
[((1004, 1020), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (1017, 1020), True, 'import numpy as np\n'), ((1203, 1219), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (1216, 1219), True, 'import numpy as np\n'), ((2538, 2565), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 3)'}), '(figsize=(16, 3))\n', (2548, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2829), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (2822, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2838, 2856), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2854, 2856), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3552), 'models.igc.ImplicitGenerativeCopula', 'ImplicitGenerativeCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)', 'n_layers': '(3)', 'n_neurons': '(200)'}), '(dim_out=self.z.shape[1], n_samples_train=\n n_samples_train, dim_latent=self.z.shape[1] * 2, n_layers=3, n_neurons=200)\n', (3430, 3552), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((3879, 3998), 'models.igc.ImplicitGenerativeCopula', 'ImplicitGenerativeCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)'}), '(dim_out=self.z.shape[1], n_samples_train=\n n_samples_train, dim_latent=self.z.shape[1] * 2)\n', (3903, 3998), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((4603, 4734), 'models.igc.GMMNCopula', 'GMMNCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)', 'n_layers': '(3)', 'n_neurons': '(200)'}), '(dim_out=self.z.shape[1], n_samples_train=n_samples_train,\n dim_latent=self.z.shape[1] * 2, n_layers=3, n_neurons=200)\n', (4613, 4734), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((5062, 5166), 'models.igc.GMMNCopula', 'GMMNCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)'}), '(dim_out=self.z.shape[1], n_samples_train=n_samples_train,\n dim_latent=self.z.shape[1] * 2)\n', (5072, 5166), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((6804, 6846), 'pyvinecopulib.Vinecop', 'pv.Vinecop', ([], {'data': 'self.u', 'controls': 'controls'}), '(data=self.u, controls=controls)\n', (6814, 6846), True, 'import pyvinecopulib as pv\n'), ((7022, 7047), 'pyvinecopulib.Vinecop', 'pv.Vinecop', ([], {'filename': 'path'}), '(filename=path)\n', (7032, 7047), True, 'import pyvinecopulib as pv\n'), ((7241, 7268), 'models.mv_copulas.GaussianCopula', 'mv_copulas.GaussianCopula', ([], {}), '()\n', (7266, 7268), False, 'from models import mv_copulas\n'), ((7507, 7569), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '(n_samples, self.u.shape[1])'}), '(0.0, 1.0, size=(n_samples, self.u.shape[1]))\n', (7524, 7569), True, 'import numpy as np\n'), ((8062, 8133), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(n_samples, self.latent_dim)'}), '(loc=0.0, scale=1.0, size=(n_samples, self.latent_dim))\n', (8078, 8133), True, 'import numpy as np\n'), ((8516, 8543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 3)'}), '(figsize=(16, 3))\n', (8526, 8543), True, 'import matplotlib.pyplot as plt\n'), ((8788, 8807), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (8800, 
8807), True, 'import matplotlib.pyplot as plt\n'), ((8816, 8834), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8832, 8834), True, 'import matplotlib.pyplot as plt\n'), ((446, 482), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['ae_model'], {}), '(ae_model)\n', (472, 482), True, 'import tensorflow as tf\n'), ((2610, 2634), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n', '(i + 1)'], {}), '(1, n, i + 1)\n', (2621, 2634), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5606), 'pyvinecopulib.FitControlsVinecop', 'pv.FitControlsVinecop', ([], {'family_set': '[pv.BicopFamily.tll]', 'trunc_lvl': 'trunc_lvl', 'show_trace': 'show_trace'}), '(family_set=[pv.BicopFamily.tll], trunc_lvl=trunc_lvl,\n show_trace=show_trace)\n', (5525, 5606), True, 'import pyvinecopulib as pv\n'), ((7798, 7839), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['decoder_model'], {}), '(decoder_model)\n', (7824, 7839), True, 'import tensorflow as tf\n'), ((8588, 8612), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n', '(i + 1)'], {}), '(1, n, i + 1)\n', (8599, 8612), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2681), 'numpy.squeeze', 'np.squeeze', (['(imgs[i] * 255)'], {}), '(imgs[i] * 255)\n', (2666, 2681), True, 'import numpy as np\n'), ((5665, 6003), 'pyvinecopulib.FitControlsVinecop', 'pv.FitControlsVinecop', ([], {'family_set': '[pv.BicopFamily.indep, pv.BicopFamily.gaussian, pv.BicopFamily.student, pv.\n BicopFamily.clayton, pv.BicopFamily.gumbel, pv.BicopFamily.frank, pv.\n BicopFamily.joe, pv.BicopFamily.bb1, pv.BicopFamily.bb6, pv.BicopFamily\n .bb7, pv.BicopFamily.bb8]', 'trunc_lvl': 'trunc_lvl', 'show_trace': 'show_trace'}), '(family_set=[pv.BicopFamily.indep, pv.BicopFamily.\n gaussian, pv.BicopFamily.student, pv.BicopFamily.clayton, pv.\n BicopFamily.gumbel, pv.BicopFamily.frank, pv.BicopFamily.joe, pv.\n BicopFamily.bb1, pv.BicopFamily.bb6, pv.BicopFamily.bb7, pv.BicopFamily\n .bb8], trunc_lvl=trunc_lvl, show_trace=show_trace)\n', (5686, 6003), True, 'import pyvinecopulib as pv\n'), ((6715, 6780), 'pyvinecopulib.FitControlsVinecop', 'pv.FitControlsVinecop', ([], {'trunc_lvl': 'trunc_lvl', 'show_trace': 'show_trace'}), '(trunc_lvl=trunc_lvl, show_trace=show_trace)\n', (6736, 6780), True, 'import pyvinecopulib as pv\n'), ((8634, 8659), 'numpy.squeeze', 'np.squeeze', (['(imgs[i] * 255)'], {}), '(imgs[i] * 255)\n', (8644, 8659), True, 'import numpy as np\n'), ((1647, 1662), 'numpy.min', 'np.min', (['z[:, i]'], {}), '(z[:, i])\n', (1653, 1662), True, 'import numpy as np\n'), ((1747, 1762), 'numpy.max', 'np.max', (['z[:, i]'], {}), '(z[:, i])\n', (1753, 1762), True, 'import numpy as np\n'), ((1670, 1686), 'numpy.sort', 'np.sort', (['z[:, i]'], {}), '(z[:, i])\n', (1677, 1686), True, 'import numpy as np\n'), ((1770, 1786), 'numpy.sort', 'np.sort', (['z[:, i]'], {}), '(z[:, i])\n', (1777, 1786), True, 'import numpy as np\n')]
|
"""
This module contains methods/objects that facilitate
basic operations.
"""
# std pkgs
import numpy as np
import random
from typing import Dict, List, Optional, Union
from pathlib import Path
import pickle
# non-std pkgs
import matplotlib.pyplot as plt
def hamming_dist(k1, k2):
val = 0
for ind, char in enumerate(k1):
if char != k2[ind]: val += 1
return val
def clean_input_sequences(input_seq):
"""
This method cleans all input sequences to ensure they will
be compatible with the precomputed hash table.
"""
seq_list = []
for aa in input_seq:
if aa not in ["A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]:
print(aa)
if aa == "*":
amino_chosen = "G"
elif aa == "B":
amino_chosen = np.random.choice(["N", "D"], 1, p=[0.5, 0.5])[0]
elif aa == "Z":
amino_chosen = np.random.choice(["Q", "E"], 1, p=[0.5, 0.5])[0]
elif aa == "J":
amino_chosen = np.random.choice(["L", "I"], 1, p=[0.5, 0.5])[0]
elif aa == "X":
amino_chosen = random.choice(["A", "R", "N", "D", "C",
"Q", "E", "G", "H", "I",
"L", "K", "M", "F", "P",
"S", "T", "W", "Y", "V"])[0]
else:
amino_chosen = aa
seq_list.append(amino_chosen)
return ''.join(seq_list) #+ input_seq[kmer_size+1:]
def readFasta(fasta_file_path: Union[str, Path]):
"""
This function reads a fasta file
Parameters
----------
fasta file path: string OR Path
Returns
-------
proteins : array of protein sequence (ordered)
protein_names : array of protein names (ordered)
"""
    proteins, protein_names = [], []
    protein_seq = ""  # guards against inputs whose first line is not a fasta header
with open(fasta_file_path) as fasta_file:
fasta_file_array = fasta_file.readlines()
for line_count, fasta_line in enumerate(fasta_file_array):
if (fasta_line[0] == ">"):
name = fasta_line.strip("\n")
protein_names.append(name)
                if line_count > 0:
                    proteins.append(protein_seq)
protein_seq = "" # renew sequence everytime fasta name is added.
else:
protein_seq += fasta_line.strip("\n")
proteins.append(protein_seq)
return proteins, protein_names
def get_kmer_size(hash_table) -> int:
"""
This function extracts the kmer size from
the hash table.
"""
kmer_size = 0
with open(hash_table, "rb") as hash_tb:
hash = pickle.load(hash_tb)
kmer_size = len(list(hash.keys())[0])
return kmer_size
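# Minimal usage sketch of the helpers above; 'proteins.fasta' is a hypothetical input file.
def example_clean_fasta(fasta_path='proteins.fasta'):
    proteins, protein_names = readFasta(fasta_path)
    cleaned = [clean_input_sequences(seq) for seq in proteins]
    return protein_names, cleaned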
|
[
"pickle.load",
"random.choice",
"numpy.random.choice"
] |
[((2668, 2688), 'pickle.load', 'pickle.load', (['hash_tb'], {}), '(hash_tb)\n', (2679, 2688), False, 'import pickle\n'), ((856, 901), 'numpy.random.choice', 'np.random.choice', (["['N', 'D']", '(1)'], {'p': '[0.5, 0.5]'}), "(['N', 'D'], 1, p=[0.5, 0.5])\n", (872, 901), True, 'import numpy as np\n'), ((956, 1001), 'numpy.random.choice', 'np.random.choice', (["['Q', 'E']", '(1)'], {'p': '[0.5, 0.5]'}), "(['Q', 'E'], 1, p=[0.5, 0.5])\n", (972, 1001), True, 'import numpy as np\n'), ((1056, 1101), 'numpy.random.choice', 'np.random.choice', (["['L', 'I']", '(1)'], {'p': '[0.5, 0.5]'}), "(['L', 'I'], 1, p=[0.5, 0.5])\n", (1072, 1101), True, 'import numpy as np\n'), ((1156, 1275), 'random.choice', 'random.choice', (["['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S', 'T', 'W', 'Y', 'V']"], {}), "(['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K',\n 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'])\n", (1169, 1275), False, 'import random\n')]
|
import numpy as np
import cv2
import os
from glob import glob
from tqdm import tqdm
img_h, img_w = 256, 256
means, stdevs = [], []
img_list = []
TRAIN_DATASET_PATH = 'data/Real/subset/train/B'
image_fns = glob(os.path.join(TRAIN_DATASET_PATH, '*.*'))
for single_img_path in tqdm(image_fns):
img = cv2.imread(single_img_path)
img = cv2.resize(img, (img_w, img_h))
img = img[:, :, :, np.newaxis]
img_list.append(img)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in range(3):
    pixels = imgs[:, :, i, :].ravel()  # flatten this channel's pixels into a 1-D array
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
# BGR --> RGB. Images read with OpenCV need this conversion; images read with PIL do not.
means.reverse()
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
# normMean = [0.35389897, 0.39104056, 0.34307468]
# normStd = [0.2158508, 0.23398565, 0.20874721]
# normMean1 = [0.47324282, 0.498616, 0.46873462]
# normStd1 = [0.2431127, 0.2601882, 0.25678185]
# [0.413570895, 0.44482827999999996, 0.40590465]
# [0.22948174999999998, 0.24708692499999999, 0.23276452999999997]
|
[
"tqdm.tqdm",
"numpy.concatenate",
"numpy.std",
"cv2.imread",
"numpy.mean",
"os.path.join",
"cv2.resize"
] |
[((278, 293), 'tqdm.tqdm', 'tqdm', (['image_fns'], {}), '(image_fns)\n', (282, 293), False, 'from tqdm import tqdm\n'), ((444, 476), 'numpy.concatenate', 'np.concatenate', (['img_list'], {'axis': '(3)'}), '(img_list, axis=3)\n', (458, 476), True, 'import numpy as np\n'), ((213, 252), 'os.path.join', 'os.path.join', (['TRAIN_DATASET_PATH', '"""*.*"""'], {}), "(TRAIN_DATASET_PATH, '*.*')\n", (225, 252), False, 'import os\n'), ((305, 332), 'cv2.imread', 'cv2.imread', (['single_img_path'], {}), '(single_img_path)\n', (315, 332), False, 'import cv2\n'), ((343, 374), 'cv2.resize', 'cv2.resize', (['img', '(img_w, img_h)'], {}), '(img, (img_w, img_h))\n', (353, 374), False, 'import cv2\n'), ((598, 613), 'numpy.mean', 'np.mean', (['pixels'], {}), '(pixels)\n', (605, 613), True, 'import numpy as np\n'), ((633, 647), 'numpy.std', 'np.std', (['pixels'], {}), '(pixels)\n', (639, 647), True, 'import numpy as np\n')]
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .. import QtCore, QtGui
from . import AbstractMPLDataView
from .. import AbstractDataView2D
import logging
logger = logging.getLogger(__name__)
class ContourView(AbstractDataView2D, AbstractMPLDataView):
"""
The ContourView provides a UI widget for viewing a number of 1-D
data sets as a contour plot, starting from dataset 0 at y = 0
"""
def __init__(self, fig, data_list=None, cmap=None, norm=None, *args,
**kwargs):
"""
__init__ docstring
Parameters
----------
fig : figure to draw the artists on
x_data : list
list of vectors of x-coordinates
y_data : list
list of vectors of y-coordinates
lbls : list
list of the names of each data set
cmap : colormap that matplotlib understands
norm : mpl.colors.Normalize
"""
# set some defaults
# no defaults yet
# call the parent constructors
super(ContourView, self).__init__(data_list=data_list, fig=fig,
cmap=cmap, norm=norm, *args,
**kwargs)
# create the matplotlib axes
self._ax = self._fig.add_subplot(1, 1, 1)
self._ax.set_aspect('equal')
# plot the data
self.replot()
def replot(self):
"""
Override
Replot the data after modifying a display parameter (e.g.,
offset or autoscaling) or adding new data
"""
# TODO: This class was originally written to convert a 1-D stack into a
# 2-D contour. Rewrite this replot method
# get the keys from the dict
keys = list(six.iterkeys(self._data))
# number of datasets in the data dict
num_keys = len(keys)
# cannot plot data if there are no keys
if num_keys < 1:
return
# set the local counter
counter = num_keys - 1
# @tacaswell Should it be required that all datasets are the same
# length?
num_coords = len(self._data[keys[0]][0])
# declare the array
self._data_arr = np.zeros((num_keys, num_coords))
# add the data to the main axes
for key in self._data.keys():
# get the (x,y) data from the dictionary
(x, y) = self._data[key]
# add the data to the array
self._data_arr[counter] = y
# decrement the counter
counter -= 1
# get the first dataset to get the x axis and number of y datasets
x, y = self._data[keys[0]]
y = np.arange(len(keys))
# TODO: Colormap initialization is not working properly.
self._ax.contourf(x, y, self._data_arr) # , cmap=colors.Colormap(self._cmap))
|
[
"numpy.zeros",
"logging.getLogger",
"six.iterkeys"
] |
[((2744, 2771), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2761, 2771), False, 'import logging\n'), ((4791, 4823), 'numpy.zeros', 'np.zeros', (['(num_keys, num_coords)'], {}), '((num_keys, num_coords))\n', (4799, 4823), True, 'import numpy as np\n'), ((4341, 4365), 'six.iterkeys', 'six.iterkeys', (['self._data'], {}), '(self._data)\n', (4353, 4365), False, 'import six\n')]
|
import math
import numpy as np
from uam_simulator import my_utils
from uam_simulator import pathPlanning
from uam_simulator import orca
import gurobipy as grb
from gurobipy import GRB
import time as python_time
class Flightplan:
def __init__(self, t0, dt, positions, times=None):
self.start_time = t0
self.positions = positions
self.time_step = dt
self.times = None
if times is not None:
self.time_step = None
self.times = np.array(times)
else:
self.times = np.array([self.start_time + i * self.time_step for i in range(0, len(self.positions))])
if self.time_step is not None:
self.end_time = self.start_time + (len(self.positions) - 1) * self.time_step
else:
self.end_time=self.times[-1]
def get_planned_position_at(self, time, return_velocity=False, ignore_timed_out=False, debug=False):
""" Interpolates between the flight points """
if ignore_timed_out and time > self.end_time:
if return_velocity:
return None, None
else:
return None
n = len(self.positions)
if self.time_step is not None:
idx_float = (float(time) - float(self.start_time)) / float(self.time_step)
idx_low = min(math.floor(idx_float), n - 1)
idx_high = min(math.ceil(idx_float), n - 1)
if idx_low == idx_high:
# Indices are equal because idx_float is an int
if return_velocity:
if idx_high == n-1:
velocity = np.array([0, 0])
else:
velocity = (self.positions[idx_high+1]-self.positions[idx_high])/(self.times[idx_high+1]-self.times[idx_high])
return self.positions[idx_high], velocity
else:
return self.positions[idx_high]
else:
if time > self.times[-1]:
return np.copy(self.positions[-1])
idx_high = np.searchsorted(self.times, time)
idx_low = max(0, idx_high - 1)
if self.times[idx_high] == time or idx_low == idx_high:
if return_velocity:
if idx_high == n-1:
velocity = np.array([0, 0])
else:
velocity = (self.positions[idx_high+1]-self.positions[idx_high])/(self.times[idx_high+1]-self.times[idx_high])
return self.positions[idx_high], velocity
else:
return self.positions[idx_high]
idx_float = idx_low + (time - self.times[idx_low]) / (self.times[idx_high] - self.times[idx_low])
pos_high = self.positions[idx_high]
pos_low = self.positions[idx_low] # if time is exactly integer then returns the exact pos
if debug:
print(idx_float)
print(pos_high)
print(pos_low)
if return_velocity:
return pos_low + (pos_high - pos_low) * (idx_float - idx_low), (pos_high-pos_low)/(self.times[idx_high]-self.times[idx_low])
else:
return pos_low + (pos_high - pos_low) * (idx_float - idx_low)
def get_planned_trajectory_between(self, start_time, end_time, debug=False):
""" Returns trajectory between start_time and end_time"""
if (start_time - self.end_time) >= -1e-4 or (end_time - self.start_time) <= 1e-4:
return None, None
trajectory_end_time = min(end_time, self.end_time)
trajectory_start_time = max(start_time, self.start_time)
trajectory = []
times = []
if debug:
print('time step is '+str(self.time_step))
print('start_time is '+str(start_time))
print('end_time '+str(end_time))
print('positions '+str(self.positions))
print('times '+str(self.times))
print(start_time-self.end_time)
if self.time_step is None:
# self.times is sorted
[start_index, end_index] = np.searchsorted(self.times, [trajectory_start_time, trajectory_end_time])
temp = self.times[start_index]
if abs(self.times[start_index]-trajectory_start_time) > 1e-4:
# requires interpolation
# Since we already now the index we could avoid a second call to search sorted
trajectory.append(self.get_planned_position_at(trajectory_start_time))
times.append(trajectory_start_time)
for i in range(start_index, end_index):
trajectory.append(self.positions[i])
times.append(self.times[i])
# trajectory_end_time <= times[end_index]
if abs(self.times[end_index]-trajectory_end_time) > 1e-4:
# requires interpolation
trajectory.append(self.get_planned_position_at(trajectory_end_time))
times.append(trajectory_end_time)
else:
trajectory.append(self.positions[end_index])
times.append(trajectory_end_time)
else:
start_index_float = float((trajectory_start_time - self.start_time) / self.time_step)
end_index_float = float((trajectory_end_time - self.start_time) / self.time_step)
lower = math.ceil(start_index_float)
upper = min(math.floor(end_index_float), len(self.positions) - 1)
if lower != start_index_float:
pos_0 = self.get_planned_position_at(start_time)
trajectory.append(np.copy(pos_0))
times.append(trajectory_start_time)
for index in range(lower, upper + 1):
trajectory.append(self.positions[index])
# times.append(self.start_time+index*self.time_step)
times.append(self.times[index])
if upper != end_index_float:
pos_end = self.get_planned_position_at(end_time)
trajectory.append(pos_end)
times.append(trajectory_end_time)
return trajectory, times
def get_end_time(self):
if self.time_step is not None:
return self.start_time + (len(self.positions) - 1) * self.time_step
else:
return self.times[-1]
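# Minimal usage sketch of Flightplan interpolation with a fixed time step; the waypoints
# below are hypothetical.
def example_flightplan_query():
    waypoints = [np.array([0.0, 0.0]), np.array([10.0, 0.0]), np.array([10.0, 10.0])]
    plan = Flightplan(t0=0.0, dt=5.0, positions=waypoints)
    pos, vel = plan.get_planned_position_at(7.5, return_velocity=True)
    return pos, vel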
class Agent:
def __init__(self, env, radius, max_speed, start=None, end=None, start_time=0, agent_logic='dumb',
centralized_manager=None, algo_type=None, agent_dynamics=None, id=0, sensing_radius=10000,
flight_leg='initial'):
self.id = id
self.environment = env
self.centralized_manager = centralized_manager
self.agent_dynamics = agent_dynamics
if agent_logic == 'dumb':
protected_area = self.environment.get_protected_area()
else:
# All other agents can wait in place
protected_area = None
# Can't have random start and not random end (or vice versa)
if start is None or end is None:
self.start, self.goal = self.environment.get_random_start_and_end(protected_area_start=protected_area)
if np.linalg.norm(self.start - self.goal) < 10:
# Play one more time
# print('agent start and goal are close, redrawing at random')
self.start, self.goal = self.environment.get_random_start_and_end(protected_area_start=protected_area)
if np.linalg.norm(self.start - self.goal) < 10:
print('unlikely, agent start and goal are still close')
else:
self.start = start
self.goal = end
self.position = np.copy(self.start) # Passed by reference
self.new_position = np.copy(self.start)
self.radius = radius
self.orientation = 0
self.minSpeed = 0.0
self.maxSpeed = max_speed
self.sensing_radius = sensing_radius
self.desired_start_time = start_time
self.start_time = start_time # actual start time if a ground delay is planned
if np.linalg.norm(self.goal - self.start) == 0:
print(agent_logic)
print(start)
print(end)
print(np.linalg.norm(self.goal - self.start))
self.velocity = self.maxSpeed * (self.goal - self.start) / (np.linalg.norm(self.goal - self.start))
self.new_velocity = self.velocity
self.trajectory = []
self.trajectory_times = []
self.collision_avoidance_time = []
self.preflight_time = None
self.flightPlan = None
self.status = 'ok'
self.agent_logic = agent_logic
self.tolerance = self.environment.tolerance
self.t_removed_from_sim=None
if agent_logic == 'dumb':
self.ownship = False
else:
self.ownship = True
self.flight_status = 'initialized'
self.algo_type = algo_type
self.cumulative_density=0
self.density=0
self.n_steps=0
self.flight_leg=flight_leg
def get_predicted_end_time(self):
if self.flightPlan is not None:
return self.flightPlan.end_time
else:
print('Agent: in order to get the predicted end time a flight plan must exist')
return self.start_time
def compute_next_move(self, current_time, dt, debug=False, density=0):
""" Store the next position in self.new_position. The position is updated when move is called """
if self.agent_logic == 'dumb':
self.new_position = self.compute_straight_move(self.position, self.goal, self.maxSpeed, dt)
self.new_velocity = (self.new_position - self.position) / dt
if self.agent_logic == 'reactive':
self.cumulative_density += density
self.n_steps += 1
if self.algo_type is None:
self.algo_type = 'MVP'
self.new_velocity = self.collision_avoidance(dt, algo_type=self.algo_type)
self.new_velocity = self.velocity_update(self.new_velocity)
self.new_position += self.new_velocity * dt
if self.agent_logic == 'strategic':
# Follow flight plan (without consideration for kinematic properties)
self.new_position = self.flightPlan.get_planned_position_at(current_time + dt, debug=debug)
self.new_velocity = (self.new_position - self.position) / dt
if debug:
print('New position ' + str(self.new_position))
print('old position ' + str(self.position))
if self.trajectory == []:
self.trajectory.append(np.copy(self.position))
self.trajectory_times.append(current_time)
self.trajectory.append(np.copy(self.new_position))
self.trajectory_times.append(current_time + dt)
self.flight_status = 'ongoing'
def compute_straight_move(self, current_position, goal, speed, dt):
orientation = math.atan2(goal[1] - current_position[1], goal[0] - current_position[0])
d = np.linalg.norm(goal - current_position)
max_step_length = min(speed * dt, d) # slow down to arrive at the goal on the next time step
return current_position + np.array([math.cos(orientation), math.sin(orientation)]) * max_step_length
def move(self):
self.position = np.copy(self.new_position)
self.velocity = np.copy(self.new_velocity)
return self.position
def velocity_update(self, new_velocity):
# Introduce kinematic constraints
# For now just clamp the velocity and instantly change the orientation
v = np.linalg.norm(new_velocity)
v_clamped = my_utils.clamp(self.minSpeed, self.maxSpeed, v)
if self.agent_dynamics is None:
return new_velocity * v_clamped / v
else:
turn_angle = my_utils.get_angle(self.velocity, new_velocity)
            max_angle = 30 * math.pi / 180
            if abs(turn_angle) > max_angle:
                vel = self.velocity * v_clamped / np.linalg.norm(self.velocity)
                theta = math.copysign(max_angle, turn_angle)
return vel @ np.asarray([[math.cos(theta), math.sin(theta)], [-math.sin(theta), math.cos(theta)]])
else:
return new_velocity * v_clamped / v
def preflight(self, dt, algo_type='Straight', density=0):
        # Given the start/goal and the published flight plans of other agents, find a free path and publish it
self.density = density
if self.centralized_manager is None:
print('agent.py preflight error, a centralized manager must exist')
if algo_type == 'Straight':
timer_start = python_time.time()
plan = []
plan.append(self.start)
pos = np.copy(self.start)
d = np.linalg.norm(self.goal - pos)
# Larger time steps require larger tolerance
# TODO tolerances are a bit of a mess
while d > self.maxSpeed * dt:
pos = self.compute_straight_move(pos, self.goal, self.maxSpeed, dt)
d = np.linalg.norm(self.goal - pos)
plan.append(pos)
if d != 0:
plan.append(self.goal)
self.flightPlan = Flightplan(self.start_time, dt, plan)
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'LocalVO':
timer_start = python_time.time()
local_planner = pathPlanning.Local_VO(self.start, self.goal, self.start_time, self.maxSpeed, self.centralized_manager, self.tolerance)
success, plan, times = local_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
## Debug
if len(times) < 2:
print('the plan is too short')
print('agent start '+ str(self.start))
print('agent goal '+str(self.goal))
print('agent plan pos '+str(plan))
print('agent plan times ' + str(times))
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'Decoupled':
timer_start = python_time.time()
decoupled_planner = pathPlanning.DecoupledApproach(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager, self.tolerance)
success, plan, times = decoupled_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'SIPP':
timer_start = python_time.time()
sipp_planner = pathPlanning.SIPP(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager, self.tolerance)
success, plan, times = sipp_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'A_star_8':
timer_start = python_time.time()
astar_planner = pathPlanning.AStar_8grid(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager)
success, plan, times = astar_planner.search()
if not success:
self.flight_status = 'cancelled'
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
else:
print('The algo type ' + algo_type + ' is not implemented')
def can_safely_take_off(self, t):
if self.algo_type == 'straight':
return True
neighbors = self.environment.get_neighbors(self.position, self.radius)
for vehicle in neighbors:
if t >= vehicle.start_time and vehicle.id != self.id:
# if np.linalg.norm(self.position - vehicle.position) <= self.radius:
self.flight_status = 'waiting'
return False
self.start_time = t
return True
def collision_avoidance(self, dt, algo_type='MVP'):
# Given current position, next flight plan goal and surrounding vehicles decide where to go
# Based on Hoekstra Bluesky simulator
# The returned velocity might not be feasible
if algo_type == 'MVP_Bluesky':
timer_start = python_time.time()
neighbors = self.get_neighbors()
velocity_change = np.asarray([0.0, 0.0])
direction = self.goal - self.position
d = np.linalg.norm(direction)
desired_velocity = min(self.maxSpeed, d / dt) * direction / d
safety_factor = 1.10 # 10% safety factor (as in The effects of Swarming on a Voltage Potential-Based Conflict Resolution Algorithm, <NAME>)
# if d<=self.radius:
# dV=0
# else:
for neighbor in neighbors:
# Find Time of Closest Approach
delta_pos = self.position - neighbor.position
                dist = np.linalg.norm(delta_pos)
                delta_vel = desired_velocity - neighbor.velocity
                if np.linalg.norm(delta_vel) == 0:
                    t_cpa = 0
                else:
                    t_cpa = -np.dot(delta_pos, delta_vel) / np.dot(delta_vel, delta_vel)
dcpa = delta_pos+delta_vel*t_cpa
dabsH = np.linalg.norm(dcpa)
# If there is a conflict
if dabsH < self.radius:
# If head-on conflict
                    if dabsH <= 10:
                        dabsH = 10
dcpa[0] = delta_pos[1] / dist * dabsH
dcpa[1] = -delta_pos[0] / dist * dabsH
if self.radius*safety_factor < dist:
erratum = np.cos(np.arcsin((self.radius*safety_factor) / dist) - np.arcsin(dabsH / dist))
dV =(((self.radius*safety_factor) / erratum - dabsH) * dcpa)/(abs(t_cpa)*dabsH)
else:
# If already moving away from conflict (tcpa is negative) then just keep going
if t_cpa<=0:
dV = 0
else:
dV =(self.radius*safety_factor - dabsH)*dcpa/(abs(t_cpa)*dabsH)
velocity_change += dV
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return desired_velocity + velocity_change
elif algo_type == 'VO':
timer_start = python_time.time()
intruders = self.get_neighbors()
d = np.linalg.norm(self.goal - self.position)
speed = min(d / dt, self.maxSpeed)
if d == 0:
print('VO, this should not happen')
print('distance to goal is 0')
desired_velocity = (self.goal - self.position) * speed / d
model = setupMIQCP(intruders, desired_velocity, self)
model.optimize()
if model.status != GRB.Status.OPTIMAL:
print('Error gurobi failed to find a solution')
print(model.status)
vars = model.getVars()
if intruders != []:
# plotter([-1000,1000],[-1000,1000],100,[get_VO(intruders[0],self)],chosen_v=np.array([vars[0].x,vars[1].x]))
pass
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return np.array([vars[0].x, vars[1].x])
elif algo_type == 'ORCA':
timer_start = python_time.time()
reactive_solver = orca.ORCA()
vel=reactive_solver.compute_new_velocity(self, dt)
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return vel
elif algo_type == 'straight':
timer_start = python_time.time()
d = np.linalg.norm(self.goal - self.position)
speed = min(d / dt, self.maxSpeed)
desired_velocity = (self.goal - self.position) * speed / d
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return desired_velocity
else:
print(algo_type+' not implemented ')
def get_neighbors(self):
neighbors = self.environment.get_neighbors(self.position, self.sensing_radius)
if neighbors == []:
return []
else:
return neighbors[neighbors != self]
def get_nearest_neighbors(self, k, max_radius):
# Will return itself so query one more neighbor
neighbors = self.environment.get_nearest_neighbors(self.position, k+1, max_radius)
if neighbors == []:
return []
else:
return neighbors[neighbors != self]
def finish_flight(self, t,goal_pos=None, t_removed_from_sim=None):
self.flight_status = 'finished'
self.arrival_time = t
self.t_removed_from_sim=t_removed_from_sim
if goal_pos is not None:
self.trajectory.append(np.copy(goal_pos))
self.trajectory_times.append(t)
def log_agent(self):
agent_log = {'flight_status': self.flight_status,
'agent_type': self.agent_logic,
'desired_time_of_departure': self.desired_start_time,
'agent_id':self.id}
if self.flight_status== 'finished' or self.flight_status == 'ongoing':
agent_log['actual_time_of_departure'] = self.start_time
if self.flight_status == 'finished':
ideal_length = float(np.linalg.norm(self.goal - self.start))
actual_length = 0
if self.trajectory == []:
print('agent, empty trajectory ') # happens if start and goal are really close
print(self.start)
print(self.goal)
pos_0 = self.trajectory[0]
for pos in self.trajectory:
d = np.linalg.norm(pos - pos_0)
actual_length += d
pos_0 = np.copy(pos)
direction = self.goal - self.start
heading = math.atan2(direction[1], direction[0])
if self.agent_logic == 'reactive':
self.density = self.cumulative_density/self.n_steps - 1
agent_log['flight_status']= self.flight_status
agent_log['agent_type']= self.agent_logic
agent_log['length_ideal']= ideal_length
agent_log['actual_length']= actual_length
agent_log['ideal_time_of_arrival']= self.desired_start_time+ideal_length / self.maxSpeed
agent_log['actual_time_of_arrival']= self.arrival_time
if self.t_removed_from_sim is not None:
agent_log['time_removed_from_sim']=self.t_removed_from_sim
agent_log['heading']= heading
agent_log['density']= self.density
if self.agent_logic == 'strategic':
agent_log['time_to_preflight'] = self.preflight_time
elif self.agent_logic == 'reactive':
agent_log['average_time_to_plan_avoidance'] = sum(self.collision_avoidance_time) / len(self.collision_avoidance_time)
agent_log['total_planning_time'] = sum(self.collision_avoidance_time)
return agent_log
def get_VO(intruder_agent, ownship_agent):
if intruder_agent == ownship_agent:
print('get_VO this should not happen intruder and ownship are the same')
rel_pos = intruder_agent.position - ownship_agent.position
d = np.linalg.norm(rel_pos)
if d == 0:
print('the distance between the two agents is 0')
if ownship_agent.radius > d:
print('there is an intruder in the protected radius')
print(ownship_agent.position)
print(intruder_agent.position)
alpha = math.asin(ownship_agent.radius / d) # VO cone half-angle (>=0)
theta = math.atan2(rel_pos[1], rel_pos[0])
vector1 = [math.cos(theta + alpha), math.sin(theta + alpha)]
vector2 = [math.cos(theta - alpha), math.sin(theta - alpha)]
# must be greater
normal_1 = np.array([vector1[1], -vector1[0]]) # Rotated +90 degrees
constraint1 = lambda x, y: np.dot((np.array([x, y]) - intruder_agent.velocity) + 0.1 * normal_1, normal_1)
# must be smaller
normal_2 = np.array([-vector2[1], vector2[0]]) # Rotated -90 degrees
constraint2 = lambda x, y: np.dot((np.array([x, y]) - intruder_agent.velocity) + 0.1 * normal_2, normal_2)
return constraint1, constraint2
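# Hedged illustration (added; the helper name below is a placeholder): the two
# constraints returned by get_VO are affine in the candidate velocity (x, y),
# so their linear coefficients can be recovered by probing them at three
# points, which is exactly what setupMIQCP does below.
def _affine_coefficients(constraint):
    c = constraint(0, 0)          # constant term
    a = constraint(1, 0) - c      # x coefficient
    b = constraint(0, 1) - c      # y coefficient
    return a, b, c                # constraint(x, y) == a*x + b*y + c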
def setupMIQCP(intruders, desired_vel, ownship_agent):
""" Intruders should be an array of agents """
model = grb.Model('VO')
max_vel = ownship_agent.maxSpeed
model.addVar(lb=-max_vel, ub=max_vel, name='x')
model.addVar(lb=-max_vel, ub=max_vel, name='y')
model.addVars(2 * len(intruders), vtype=GRB.BINARY)
model.update()
X = model.getVars()
n_intruder = 0
for intruder in intruders:
constraints_or = get_VO(intruder, ownship_agent)
n_constraint = 0
for constraint in constraints_or:
c = constraint(0, 0)
a = constraint(1, 0) - c
b = constraint(0, 1) - c
# K must be arbitrarily large so that when the binary constraint is 1 the constraint is always respected
K = abs(a * max_vel) + abs(b * max_vel) + c
model.addConstr(a * X[0] + b * X[1] - K * X[2 + 2 * n_intruder + n_constraint] <= -c)
n_constraint += 1
model.addConstr(X[2 + 2 * n_intruder] + X[2 + 2 * n_intruder + 1] <= 1)
n_intruder += 1
model.addConstr(X[0] * X[0] + X[1] * X[1] <= max_vel ** 2)
model.setObjective(
(X[0] - desired_vel[0]) * (X[0] - desired_vel[0]) + (X[1] - desired_vel[1]) * (X[1] - desired_vel[1]),
GRB.MINIMIZE)
model.setParam("OutputFlag", 0)
model.setParam("FeasibilityTol", 1e-9)
model.update()
return model
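# Added note on the big-M constant K used above: with binary variable s, the row
#     a*x + b*y - K*s <= -c
# enforces the half-plane a*x + b*y + c <= 0 when s == 0, and is slack for every
# velocity in the [-max_vel, max_vel] box when s == 1, because
#     a*x + b*y + c <= abs(a*max_vel) + abs(b*max_vel) + c = K.
# Combined with s_1 + s_2 <= 1, at least one of the two velocity-obstacle
# half-planes is enforced for each intruder.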
|
[
"math.asin",
"math.atan2",
"math.copysign",
"numpy.linalg.norm",
"uam_simulator.pathPlanning.AStar_8grid",
"numpy.copy",
"numpy.arcsin",
"math.cos",
"math.ceil",
"uam_simulator.orca.ORCA",
"numpy.asarray",
"gurobipy.Model",
"math.sin",
"numpy.dot",
"math.floor",
"uam_simulator.my_utils.clamp",
"numpy.searchsorted",
"time.time",
"uam_simulator.pathPlanning.Local_VO",
"uam_simulator.pathPlanning.SIPP",
"numpy.array",
"uam_simulator.my_utils.get_angle",
"uam_simulator.pathPlanning.DecoupledApproach"
] |
[((25063, 25086), 'numpy.linalg.norm', 'np.linalg.norm', (['rel_pos'], {}), '(rel_pos)\n', (25077, 25086), True, 'import numpy as np\n'), ((25344, 25379), 'math.asin', 'math.asin', (['(ownship_agent.radius / d)'], {}), '(ownship_agent.radius / d)\n', (25353, 25379), False, 'import math\n'), ((25420, 25454), 'math.atan2', 'math.atan2', (['rel_pos[1]', 'rel_pos[0]'], {}), '(rel_pos[1], rel_pos[0])\n', (25430, 25454), False, 'import math\n'), ((25622, 25657), 'numpy.array', 'np.array', (['[vector1[1], -vector1[0]]'], {}), '([vector1[1], -vector1[0]])\n', (25630, 25657), True, 'import numpy as np\n'), ((25829, 25864), 'numpy.array', 'np.array', (['[-vector2[1], vector2[0]]'], {}), '([-vector2[1], vector2[0]])\n', (25837, 25864), True, 'import numpy as np\n'), ((26155, 26170), 'gurobipy.Model', 'grb.Model', (['"""VO"""'], {}), "('VO')\n", (26164, 26170), True, 'import gurobipy as grb\n'), ((7713, 7732), 'numpy.copy', 'np.copy', (['self.start'], {}), '(self.start)\n', (7720, 7732), True, 'import numpy as np\n'), ((7784, 7803), 'numpy.copy', 'np.copy', (['self.start'], {}), '(self.start)\n', (7791, 7803), True, 'import numpy as np\n'), ((10998, 11070), 'math.atan2', 'math.atan2', (['(goal[1] - current_position[1])', '(goal[0] - current_position[0])'], {}), '(goal[1] - current_position[1], goal[0] - current_position[0])\n', (11008, 11070), False, 'import math\n'), ((11083, 11122), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal - current_position)'], {}), '(goal - current_position)\n', (11097, 11122), True, 'import numpy as np\n'), ((11379, 11405), 'numpy.copy', 'np.copy', (['self.new_position'], {}), '(self.new_position)\n', (11386, 11405), True, 'import numpy as np\n'), ((11430, 11456), 'numpy.copy', 'np.copy', (['self.new_velocity'], {}), '(self.new_velocity)\n', (11437, 11456), True, 'import numpy as np\n'), ((11665, 11693), 'numpy.linalg.norm', 'np.linalg.norm', (['new_velocity'], {}), '(new_velocity)\n', (11679, 11693), True, 'import numpy as np\n'), ((11714, 11761), 'uam_simulator.my_utils.clamp', 'my_utils.clamp', (['self.minSpeed', 'self.maxSpeed', 'v'], {}), '(self.minSpeed, self.maxSpeed, v)\n', (11728, 11761), False, 'from uam_simulator import my_utils\n'), ((25470, 25493), 'math.cos', 'math.cos', (['(theta + alpha)'], {}), '(theta + alpha)\n', (25478, 25493), False, 'import math\n'), ((25495, 25518), 'math.sin', 'math.sin', (['(theta + alpha)'], {}), '(theta + alpha)\n', (25503, 25518), False, 'import math\n'), ((25535, 25558), 'math.cos', 'math.cos', (['(theta - alpha)'], {}), '(theta - alpha)\n', (25543, 25558), False, 'import math\n'), ((25560, 25583), 'math.sin', 'math.sin', (['(theta - alpha)'], {}), '(theta - alpha)\n', (25568, 25583), False, 'import math\n'), ((493, 508), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (501, 508), True, 'import numpy as np\n'), ((2067, 2100), 'numpy.searchsorted', 'np.searchsorted', (['self.times', 'time'], {}), '(self.times, time)\n', (2082, 2100), True, 'import numpy as np\n'), ((4101, 4174), 'numpy.searchsorted', 'np.searchsorted', (['self.times', '[trajectory_start_time, trajectory_end_time]'], {}), '(self.times, [trajectory_start_time, trajectory_end_time])\n', (4116, 4174), True, 'import numpy as np\n'), ((5371, 5399), 'math.ceil', 'math.ceil', (['start_index_float'], {}), '(start_index_float)\n', (5380, 5399), False, 'import math\n'), ((8111, 8149), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (8125, 8149), True, 'import numpy as np\n'), ((8361, 8399), 
'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (8375, 8399), True, 'import numpy as np\n'), ((10780, 10806), 'numpy.copy', 'np.copy', (['self.new_position'], {}), '(self.new_position)\n', (10787, 10806), True, 'import numpy as np\n'), ((11889, 11936), 'uam_simulator.my_utils.get_angle', 'my_utils.get_angle', (['self.velocity', 'new_velocity'], {}), '(self.velocity, new_velocity)\n', (11907, 11936), False, 'from uam_simulator import my_utils\n'), ((12728, 12746), 'time.time', 'python_time.time', ([], {}), '()\n', (12744, 12746), True, 'import time as python_time\n'), ((12823, 12842), 'numpy.copy', 'np.copy', (['self.start'], {}), '(self.start)\n', (12830, 12842), True, 'import numpy as np\n'), ((12859, 12890), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - pos)'], {}), '(self.goal - pos)\n', (12873, 12890), True, 'import numpy as np\n'), ((13363, 13381), 'time.time', 'python_time.time', ([], {}), '()\n', (13379, 13381), True, 'import time as python_time\n'), ((13536, 13554), 'time.time', 'python_time.time', ([], {}), '()\n', (13552, 13554), True, 'import time as python_time\n'), ((13583, 13705), 'uam_simulator.pathPlanning.Local_VO', 'pathPlanning.Local_VO', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager', 'self.tolerance'], {}), '(self.start, self.goal, self.start_time, self.maxSpeed,\n self.centralized_manager, self.tolerance)\n', (13604, 13705), False, 'from uam_simulator import pathPlanning\n'), ((14342, 14360), 'time.time', 'python_time.time', ([], {}), '()\n', (14358, 14360), True, 'import time as python_time\n'), ((14517, 14535), 'time.time', 'python_time.time', ([], {}), '()\n', (14533, 14535), True, 'import time as python_time\n'), ((14568, 14700), 'uam_simulator.pathPlanning.DecoupledApproach', 'pathPlanning.DecoupledApproach', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager', 'self.tolerance'], {}), '(self.start, self.goal, self.start_time, self\n .maxSpeed, self.centralized_manager, self.tolerance)\n', (14598, 14700), False, 'from uam_simulator import pathPlanning\n'), ((15090, 15108), 'time.time', 'python_time.time', ([], {}), '()\n', (15106, 15108), True, 'import time as python_time\n'), ((15260, 15278), 'time.time', 'python_time.time', ([], {}), '()\n', (15276, 15278), True, 'import time as python_time\n'), ((15306, 15424), 'uam_simulator.pathPlanning.SIPP', 'pathPlanning.SIPP', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager', 'self.tolerance'], {}), '(self.start, self.goal, self.start_time, self.maxSpeed,\n self.centralized_manager, self.tolerance)\n', (15323, 15424), False, 'from uam_simulator import pathPlanning\n'), ((15792, 15810), 'time.time', 'python_time.time', ([], {}), '()\n', (15808, 15810), True, 'import time as python_time\n'), ((15966, 15984), 'time.time', 'python_time.time', ([], {}), '()\n', (15982, 15984), True, 'import time as python_time\n'), ((16013, 16123), 'uam_simulator.pathPlanning.AStar_8grid', 'pathPlanning.AStar_8grid', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager'], {}), '(self.start, self.goal, self.start_time, self.\n maxSpeed, self.centralized_manager)\n', (16037, 16123), False, 'from uam_simulator import pathPlanning\n'), ((16608, 16626), 'time.time', 'python_time.time', ([], {}), '()\n', (16624, 16626), True, 'import time as python_time\n'), ((17621, 17639), 'time.time', 'python_time.time', ([], {}), 
'()\n', (17637, 17639), True, 'import time as python_time\n'), ((17715, 17737), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (17725, 17737), True, 'import numpy as np\n'), ((17804, 17829), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (17818, 17829), True, 'import numpy as np\n'), ((19653, 19671), 'time.time', 'python_time.time', ([], {}), '()\n', (19669, 19671), True, 'import time as python_time\n'), ((1330, 1351), 'math.floor', 'math.floor', (['idx_float'], {}), '(idx_float)\n', (1340, 1351), False, 'import math\n'), ((1387, 1407), 'math.ceil', 'math.ceil', (['idx_float'], {}), '(idx_float)\n', (1396, 1407), False, 'import math\n'), ((2016, 2043), 'numpy.copy', 'np.copy', (['self.positions[-1]'], {}), '(self.positions[-1])\n', (2023, 2043), True, 'import numpy as np\n'), ((5424, 5451), 'math.floor', 'math.floor', (['end_index_float'], {}), '(end_index_float)\n', (5434, 5451), False, 'import math\n'), ((7196, 7234), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.start - self.goal)'], {}), '(self.start - self.goal)\n', (7210, 7234), True, 'import numpy as np\n'), ((8253, 8291), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (8267, 8291), True, 'import numpy as np\n'), ((10670, 10692), 'numpy.copy', 'np.copy', (['self.position'], {}), '(self.position)\n', (10677, 10692), True, 'import numpy as np\n'), ((12118, 12154), 'math.copysign', 'math.copysign', (['max_angle', 'turn_angle'], {}), '(max_angle, turn_angle)\n', (12131, 12154), False, 'import math\n'), ((13144, 13175), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - pos)'], {}), '(self.goal - pos)\n', (13158, 13175), True, 'import numpy as np\n'), ((16335, 16353), 'time.time', 'python_time.time', ([], {}), '()\n', (16351, 16353), True, 'import time as python_time\n'), ((18303, 18328), 'numpy.linalg.norm', 'np.linalg.norm', (['delta_pos'], {}), '(delta_pos)\n', (18317, 18328), True, 'import numpy as np\n'), ((18653, 18673), 'numpy.linalg.norm', 'np.linalg.norm', (['dcpa'], {}), '(dcpa)\n', (18667, 18673), True, 'import numpy as np\n'), ((19858, 19876), 'time.time', 'python_time.time', ([], {}), '()\n', (19874, 19876), True, 'import time as python_time\n'), ((19938, 19979), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.position)'], {}), '(self.goal - self.position)\n', (19952, 19979), True, 'import numpy as np\n'), ((20704, 20722), 'time.time', 'python_time.time', ([], {}), '()\n', (20720, 20722), True, 'import time as python_time\n'), ((20816, 20848), 'numpy.array', 'np.array', (['[vars[0].x, vars[1].x]'], {}), '([vars[0].x, vars[1].x])\n', (20824, 20848), True, 'import numpy as np\n'), ((22450, 22467), 'numpy.copy', 'np.copy', (['goal_pos'], {}), '(goal_pos)\n', (22457, 22467), True, 'import numpy as np\n'), ((23586, 23624), 'math.atan2', 'math.atan2', (['direction[1]', 'direction[0]'], {}), '(direction[1], direction[0])\n', (23596, 23624), False, 'import math\n'), ((5620, 5634), 'numpy.copy', 'np.copy', (['pos_0'], {}), '(pos_0)\n', (5627, 5634), True, 'import numpy as np\n'), ((7495, 7533), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.start - self.goal)'], {}), '(self.start - self.goal)\n', (7509, 7533), True, 'import numpy as np\n'), ((12066, 12095), 'numpy.linalg.norm', 'np.linalg.norm', (['self.velocity'], {}), '(self.velocity)\n', (12080, 12095), True, 'import numpy as np\n'), ((14301, 14316), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (14309, 14316), True, 'import numpy as np\n'), 
((15049, 15064), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (15057, 15064), True, 'import numpy as np\n'), ((15751, 15766), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (15759, 15766), True, 'import numpy as np\n'), ((16567, 16582), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (16575, 16582), True, 'import numpy as np\n'), ((18413, 18438), 'numpy.linalg.norm', 'np.linalg.norm', (['delta_vel'], {}), '(delta_vel)\n', (18427, 18438), True, 'import numpy as np\n'), ((20909, 20927), 'time.time', 'python_time.time', ([], {}), '()\n', (20925, 20927), True, 'import time as python_time\n'), ((20958, 20969), 'uam_simulator.orca.ORCA', 'orca.ORCA', ([], {}), '()\n', (20967, 20969), False, 'from uam_simulator import orca\n'), ((21057, 21075), 'time.time', 'python_time.time', ([], {}), '()\n', (21073, 21075), True, 'import time as python_time\n'), ((22999, 23037), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (23013, 23037), True, 'import numpy as np\n'), ((23401, 23428), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos - pos_0)'], {}), '(pos - pos_0)\n', (23415, 23428), True, 'import numpy as np\n'), ((23496, 23508), 'numpy.copy', 'np.copy', (['pos'], {}), '(pos)\n', (23503, 23508), True, 'import numpy as np\n'), ((25720, 25736), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (25728, 25736), True, 'import numpy as np\n'), ((25927, 25943), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (25935, 25943), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1635, 1643), True, 'import numpy as np\n'), ((2323, 2339), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2331, 2339), True, 'import numpy as np\n'), ((11269, 11290), 'math.cos', 'math.cos', (['orientation'], {}), '(orientation)\n', (11277, 11290), False, 'import math\n'), ((11292, 11313), 'math.sin', 'math.sin', (['orientation'], {}), '(orientation)\n', (11300, 11313), False, 'import math\n'), ((18551, 18579), 'numpy.dot', 'np.dot', (['delta_vel', 'delta_vel'], {}), '(delta_vel, delta_vel)\n', (18557, 18579), True, 'import numpy as np\n'), ((21237, 21255), 'time.time', 'python_time.time', ([], {}), '()\n', (21253, 21255), True, 'import time as python_time\n'), ((21272, 21313), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.position)'], {}), '(self.goal - self.position)\n', (21286, 21313), True, 'import numpy as np\n'), ((21456, 21474), 'time.time', 'python_time.time', ([], {}), '()\n', (21472, 21474), True, 'import time as python_time\n'), ((18520, 18548), 'numpy.dot', 'np.dot', (['delta_pos', 'delta_vel'], {}), '(delta_pos, delta_vel)\n', (18526, 18548), True, 'import numpy as np\n'), ((12196, 12211), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (12204, 12211), False, 'import math\n'), ((12213, 12228), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (12221, 12228), False, 'import math\n'), ((12250, 12265), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (12258, 12265), False, 'import math\n'), ((19087, 19132), 'numpy.arcsin', 'np.arcsin', (['(self.radius * safety_factor / dist)'], {}), '(self.radius * safety_factor / dist)\n', (19096, 19132), True, 'import numpy as np\n'), ((19135, 19158), 'numpy.arcsin', 'np.arcsin', (['(dabsH / dist)'], {}), '(dabsH / dist)\n', (19144, 19158), True, 'import numpy as np\n'), ((12233, 12248), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (12241, 12248), False, 'import math\n')]
|
import random
import time
import numpy as np
import copy
from itertools import compress
random.seed(123)
# Remove columns from the adjacency matrix.
# TODO: needs additional scaling?
# Be careful not to modify the initial complete support matrix.
def get_sub_sampled_support(complete_support, node_to_keep):
index_array = complete_support[0][:] # make a copy to avoid modifying complete support
values = np.zeros(complete_support[1].shape)
index_array_sorted = index_array[:, 1].argsort()
j = 0
node_to_keep.sort()
for index_to_keep in node_to_keep:
while (j < len(index_array_sorted) and index_to_keep >= index_array[index_array_sorted[j]][1]):
if (index_to_keep == index_array[index_array_sorted[j]][1]):
values[index_array_sorted[j]] = complete_support[1][index_array_sorted[j]]
j += 1
sub_sampled_support = (index_array, values, complete_support[2])
return sub_sampled_support
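# Hedged usage sketch (illustrative only; the helper name and arguments are
# placeholders): `complete_support` is assumed to be the usual sparse tuple
# (indices, values, dense_shape) produced by GCN preprocessing, and
# `num_nodes` the total number of graph nodes.
def _example_sub_sampling(complete_support, num_nodes):
    # Keep a random 50% of the nodes; entries whose column corresponds to a
    # dropped node are zeroed out in the returned support.
    kept_nodes = get_random_percent(num_nodes, 50)
    return get_sub_sampled_support(complete_support, kept_nodes)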
# Return a train mask covering label_percent of the training set.
# If maintain_label_balance is True, keep the smallest number of labels per class in the training set that respects label_percent, except for 100%.
def get_train_mask(label_percent, y_train, initial_train_mask, maintain_label_balance=False):
train_index = np.argwhere(initial_train_mask).reshape(-1)
    train_mask = np.zeros((initial_train_mask.shape), dtype=bool)  # boolean array initialised to False
if maintain_label_balance:
ones_index = []
for i in range(y_train.shape[1]): # find the ones for each class
ones_index.append(train_index[np.argwhere(y_train[train_index, i] > 0).reshape(-1)])
if label_percent < 100:
smaller_num = min(
int(len(l) * (label_percent / 100))
for l in ones_index) # find smaller number of ones per class that respect the % constraint
for ones in ones_index:
random_index = random.sample(list(ones), smaller_num)
train_mask[random_index] = True # set the same number of ones for each class, so the set is balanced
else:
for ones in ones_index:
train_mask[ones] = True
else:
random_sampling_set_size = int((label_percent / 100) * train_index.shape[0])
random_list = random.sample(list(train_index), random_sampling_set_size)
train_mask[random_list] = True
label_percent = (100 * np.sum(train_mask) / train_index.shape[0])
return train_mask, label_percent
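# Hedged usage sketch (illustrative only; the helper name is a placeholder):
# `y_train` and `initial_train_mask` are assumed to be the usual one-hot label
# matrix and boolean node mask used elsewhere in this module.
def _example_train_mask_usage(y_train, initial_train_mask):
    # Keep roughly 10% of the training nodes, balanced across classes.
    train_mask, effective_percent = get_train_mask(
        10, y_train, initial_train_mask, maintain_label_balance=True)
    # Every class is truncated to the size of the smallest one, so
    # effective_percent can come out lower than the requested 10%.
    return train_mask, effective_percent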
# Returns a random list of indexes of the nodes to keep.
def get_random_percent(num_nodes, percent):
if percent > 100:
print("This is not how percentage works.")
exit()
random_sampling_set_size = int((percent * num_nodes) / 100)
return random.sample(range(num_nodes), random_sampling_set_size)
# Returns a list of indexes for the mask.
def get_list_from_mask(mask):
return list(compress(range(len(mask)), mask))
# Set the features of nodes that shouldn't be in the set to extreme values to make sure they are not used by the GCNN.
def modify_features_that_shouldnt_change_anything(features, note_to_keep):
note_doesnt_exist = [x for x in range(features[2][0]) if x not in note_to_keep]
a = np.where(np.isin(features[0][:, 0], note_doesnt_exist))
features[1][a[0]] = 10000000
|
[
"numpy.isin",
"numpy.sum",
"numpy.zeros",
"random.seed",
"numpy.argwhere"
] |
[((89, 105), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (100, 105), False, 'import random\n'), ((403, 438), 'numpy.zeros', 'np.zeros', (['complete_support[1].shape'], {}), '(complete_support[1].shape)\n', (411, 438), True, 'import numpy as np\n'), ((1324, 1370), 'numpy.zeros', 'np.zeros', (['initial_train_mask.shape'], {'dtype': 'bool'}), '(initial_train_mask.shape, dtype=bool)\n', (1332, 1370), True, 'import numpy as np\n'), ((3221, 3266), 'numpy.isin', 'np.isin', (['features[0][:, 0]', 'note_doesnt_exist'], {}), '(features[0][:, 0], note_doesnt_exist)\n', (3228, 3266), True, 'import numpy as np\n'), ((1263, 1294), 'numpy.argwhere', 'np.argwhere', (['initial_train_mask'], {}), '(initial_train_mask)\n', (1274, 1294), True, 'import numpy as np\n'), ((2400, 2418), 'numpy.sum', 'np.sum', (['train_mask'], {}), '(train_mask)\n', (2406, 2418), True, 'import numpy as np\n'), ((1562, 1602), 'numpy.argwhere', 'np.argwhere', (['(y_train[train_index, i] > 0)'], {}), '(y_train[train_index, i] > 0)\n', (1573, 1602), True, 'import numpy as np\n')]
|
'''
precision_and_recall.py
Run MATCH with PeTaL data.
Last modified on 10 August 2021.
DESCRIPTION
precision_and_recall.py produces three plots from results in MATCH/PeTaL.
These three plots appear in plots/YYYYMMDD_precision_recall and are
as follows:
- HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number
of labels predicted. Higher threshold means fewer labels get past the threshold.
- HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying
the threshold. As threshold decreases from 1 to 0, precision goes down but recall
goes up (because more labels get past the threshold).
- HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score
vary as threshold varies from 0 to 1.
OPTIONS
-m, --match PATH/TO/MATCH
Path of MATCH folder.
-p, --plots PATH/TO/plots
Path of plots folder.
-d, --dataset PeTaL
Name of dataset, e.g., "PeTaL".
-v, --verbose
Enable verbosity.
USAGE
python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose
Authors: <NAME> (<EMAIL>, <EMAIL>)
'''
import click
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
import logging
from collections import namedtuple
from tqdm import tqdm
Stats = namedtuple("Stats", "threshold topk precision recall f1")
@click.command()
@click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.')
@click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.')
@click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., "PeTaL".')
@click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.')
def main(match_path, plots_path, dataset, verbose):
"""Plots precision and recall and other statistics on graphs.
Args:
match_path (str): Path of MATCH folder.
plots_path (str): Path of plots folder.
verbose (bool): Verbose output.
"""
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s:%(name)s] %(message)s"
)
PRlogger = logging.getLogger("P&R")
DATASET = dataset
MODEL = 'MATCH'
res_labels = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy", allow_pickle=True)
res_scores = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy", allow_pickle=True)
test_labels = np.load(f"{match_path}/{DATASET}/test_labels.npy", allow_pickle=True)
train_labels = np.load(f"{match_path}/{DATASET}/train_labels.npy", allow_pickle=True)
if verbose:
PRlogger.info(f"Computing statistics by varying threshold for {MODEL} on {DATASET}.")
thresholds = list(x / 10000 for x in range(1, 10)) + \
list(x / 1000 for x in range(1, 10)) + \
list(x / 100 for x in range(1, 10)) + \
list(x / 20 for x in range(2, 19)) + \
list((90 + x) / 100 for x in range(1, 10)) + \
list((990 + x) / 1000 for x in range(1, 10)) + \
list((9990 + x) / 10000 for x in range(1, 10))
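    # (Added note) The list above samples thresholds densely near 0 and near 1
    # (steps of 1e-4, 1e-3 and 1e-2) and coarsely in the middle (steps of 0.05),
    # presumably because precision and recall change fastest at the extremes.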
ps = []
rs = []
ts = []
f1s = []
topks = []
for threshold in tqdm(thresholds):
stats = compute_stats(threshold, res_labels, res_scores, test_labels)
ps.append(stats.precision)
rs.append(stats.recall)
ts.append(threshold)
f1s.append(stats.f1)
topks.append(stats.topk)
'''
Make the following plots to assess the performance of the model.
Precision-recall curve
Precision, recall, and F1 score by varying threshold
Numbers of labels predicted by varying threshold
'''
ALL_PLOTS_PATH = plots_path
if not os.path.exists(ALL_PLOTS_PATH):
os.mkdir(ALL_PLOTS_PATH)
else:
if verbose:
PRlogger.info(f"You already have a plots directory at {ALL_PLOTS_PATH}.")
now = datetime.now()
date_str = now.strftime("%Y%m%d")
time_str = now.strftime("%H%M%S")
comment = f"precision_recall" # "_on_{DATASET}"
PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f"{date_str}_{comment}")
if not os.path.exists(PLOTS_PATH):
os.mkdir(PLOTS_PATH)
if verbose:
PRlogger.info(f"New plots directory at {PLOTS_PATH}")
else:
if verbose:
PRlogger.info(f"You already have a plots directory at {PLOTS_PATH}")
########################################
# PRECISION-RECALL CURVE
########################################
plt.grid()
plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold')
    plt.plot(rs, ps, linestyle='-')
plt.xlabel('Recall')
plt.xlim(0, 1)
plt.ylabel('Precision')
plt.ylim(0, 1)
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
########################################
# PRECISION, RECALL, AND F1 SCORE BY THRESHOLD
########################################
plt.grid()
plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}')
plt.plot(ts, ps, linestyle='-', label='Precision')
plt.plot(ts, rs, linestyle='-', label='Recall')
plt.plot(ts, f1s, linestyle='-', label='F1 score')
plt.xlabel('Threshold')
plt.xlim(0, 1)
plt.ylabel('Metrics')
plt.ylim(0, 1)
plt.legend()
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
########################################
# NUMBER OF LABELS PREDICTED BY THRESHOLD
########################################
plt.grid()
plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}')
plt.plot(ts, topks, linestyle='-', label='Number of Labels')
plt.xlabel('Threshold')
plt.xlim(0, 1)
plt.ylabel('Labels')
plt.legend()
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
def compute_stats(threshold, res_labels, res_scores, test_labels):
"""
compute_stats(threshold)
Parameters:
threshold: float, 0.0 < threshold < 1.0
res_labels: numpy array of predicted labels
res_scores: numpy array of predicted label scores
test_labels: numpy array of target labels
Returns:
Stats object containing
threshold
topk: average number of labels above threshold
precision: average precision across examples
recall: average recall across examples
f1: average F1 score across examples
Note:
precision, recall, and F1 scores are macro (averaged across examples, not labels)
"""
precisions = []
recalls = []
topks = []
f1s = []
for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels):
topk = np.argmax(res_score < threshold) # topk becomes the number of labels scoring above the threshold
precision = 1.0 if topk == 0 else np.mean([1 if x in test_label else 0 for x in res_label[:topk]])
recall = np.mean([1 if x in res_label[:topk] else 0 for x in test_label])
f1 = 0 if (precision + recall) == 0 else (2 * precision * recall) / (precision + recall)
topks.append(topk)
precisions.append(precision)
recalls.append(recall)
f1s.append(f1)
# print(res_label[:topk], precision, recall)
return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s))
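# Worked example (illustrative numbers only): with threshold = 0.5 and
# res_score = [0.9, 0.7, 0.4], np.argmax(res_score < threshold) gives topk = 2,
# i.e. two labels clear the threshold. If res_label[:2] = [A, B] and
# test_label = [A, C], then precision = 1/2, recall = 1/2 and F1 = 0.5 for this
# example; Stats reports the averages of these quantities over all examples
# (macro averaging across examples, not labels).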
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.load",
"matplotlib.pyplot.clf",
"numpy.argmax",
"click.option",
"logging.getLogger",
"numpy.mean",
"click.Path",
"os.path.join",
"os.path.exists",
"click.command",
"datetime.datetime.now",
"tqdm.tqdm",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"logging.basicConfig",
"matplotlib.pyplot.plot",
"collections.namedtuple",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1459, 1516), 'collections.namedtuple', 'namedtuple', (['"""Stats"""', '"""threshold topk precision recall f1"""'], {}), "('Stats', 'threshold topk precision recall f1')\n", (1469, 1516), False, 'from collections import namedtuple\n'), ((1519, 1534), 'click.command', 'click.command', ([], {}), '()\n', (1532, 1534), False, 'import click\n'), ((1746, 1850), 'click.option', 'click.option', (['"""--dataset"""', '"""-d"""', '"""dataset"""'], {'default': '"""PeTaL"""', 'help': '"""Name of dataset, e.g., "PeTaL"."""'}), '(\'--dataset\', \'-d\', \'dataset\', default=\'PeTaL\', help=\n \'Name of dataset, e.g., "PeTaL".\')\n', (1758, 1850), False, 'import click\n'), ((1847, 1969), 'click.option', 'click.option', (['"""--verbose"""', '"""-v"""'], {'type': 'click.BOOL', 'is_flag': '(True)', 'default': '(False)', 'required': '(False)', 'help': '"""Verbose output."""'}), "('--verbose', '-v', type=click.BOOL, is_flag=True, default=\n False, required=False, help='Verbose output.')\n", (1859, 1969), False, 'import click\n'), ((2244, 2333), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[%(asctime)s:%(name)s] %(message)s"""'}), "(level=logging.INFO, format=\n '[%(asctime)s:%(name)s] %(message)s')\n", (2263, 2333), False, 'import logging\n'), ((2366, 2390), 'logging.getLogger', 'logging.getLogger', (['"""P&R"""'], {}), "('P&R')\n", (2383, 2390), False, 'import logging\n'), ((2453, 2547), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy',\n allow_pickle=True)\n", (2460, 2547), True, 'import numpy as np\n'), ((2561, 2655), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy',\n allow_pickle=True)\n", (2568, 2655), True, 'import numpy as np\n'), ((2670, 2739), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/test_labels.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/test_labels.npy', allow_pickle=True)\n", (2677, 2739), True, 'import numpy as np\n'), ((2759, 2829), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/train_labels.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/train_labels.npy', allow_pickle=True)\n", (2766, 2829), True, 'import numpy as np\n'), ((3398, 3414), 'tqdm.tqdm', 'tqdm', (['thresholds'], {}), '(thresholds)\n', (3402, 3414), False, 'from tqdm import tqdm\n'), ((4132, 4146), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4144, 4146), False, 'from datetime import datetime\n'), ((4292, 4345), 'os.path.join', 'os.path.join', (['ALL_PLOTS_PATH', 'f"""{date_str}_{comment}"""'], {}), "(ALL_PLOTS_PATH, f'{date_str}_{comment}')\n", (4304, 4345), False, 'import os\n'), ((4741, 4751), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4749, 4751), True, 'from matplotlib import pyplot as plt\n'), ((4756, 4841), 'matplotlib.pyplot.title', 'plt.title', (['f"""Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold"""'], {}), "(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold'\n )\n", (4765, 4841), True, 'from matplotlib import pyplot as plt\n'), ((4841, 4872), 'matplotlib.pyplot.plot', 'plt.plot', (['ps', 'rs'], {'linestyle': '"""-"""'}), "(ps, rs, linestyle='-')\n", (4849, 4872), True, 'from matplotlib import pyplot as plt\n'), ((4877, 4897), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Recall"""'], {}), "('Recall')\n", (4887, 4897), True, 'from matplotlib import pyplot as plt\n'), ((4902, 4916), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (4910, 4916), True, 'from matplotlib import pyplot as plt\n'), ((4921, 4944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4931, 4944), True, 'from matplotlib import pyplot as plt\n'), ((4949, 4963), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4957, 4963), True, 'from matplotlib import pyplot as plt\n'), ((4981, 5046), 'os.path.join', 'os.path.join', (['PLOTS_PATH', 'f"""{time_str}_prc_{MODEL}_{DATASET}.png"""'], {}), "(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png')\n", (4993, 5046), False, 'import os\n'), ((5051, 5113), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'PLOT_PATH', 'facecolor': '"""w"""', 'transparent': '(False)'}), "(fname=PLOT_PATH, facecolor='w', transparent=False)\n", (5062, 5113), True, 'from matplotlib import pyplot as plt\n'), ((5174, 5183), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5181, 5183), True, 'from matplotlib import pyplot as plt\n'), ((5331, 5341), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5339, 5341), True, 'from matplotlib import pyplot as plt\n'), ((5346, 5434), 'matplotlib.pyplot.title', 'plt.title', (['f"""Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}"""'], {}), "(\n f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}')\n", (5355, 5434), True, 'from matplotlib import pyplot as plt\n'), ((5434, 5484), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'ps'], {'linestyle': '"""-"""', 'label': '"""Precision"""'}), "(ts, ps, linestyle='-', label='Precision')\n", (5442, 5484), True, 'from matplotlib import pyplot as plt\n'), ((5489, 5536), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'rs'], {'linestyle': '"""-"""', 'label': '"""Recall"""'}), "(ts, rs, linestyle='-', label='Recall')\n", (5497, 5536), True, 'from matplotlib import pyplot as plt\n'), ((5541, 5591), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'f1s'], {'linestyle': '"""-"""', 'label': '"""F1 score"""'}), "(ts, f1s, linestyle='-', label='F1 score')\n", (5549, 5591), True, 'from matplotlib import pyplot as plt\n'), ((5596, 5619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (5606, 5619), True, 'from matplotlib import pyplot as plt\n'), ((5624, 5638), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (5632, 5638), True, 'from matplotlib import pyplot as plt\n'), ((5643, 5664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Metrics"""'], {}), "('Metrics')\n", (5653, 5664), True, 'from matplotlib import pyplot as plt\n'), ((5669, 5683), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5677, 5683), True, 'from matplotlib import pyplot as plt\n'), ((5688, 5700), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5698, 5700), True, 'from matplotlib import pyplot as plt\n'), ((5718, 5784), 'os.path.join', 'os.path.join', (['PLOTS_PATH', 'f"""{time_str}_prf1_{MODEL}_{DATASET}.png"""'], {}), "(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png')\n", (5730, 5784), False, 'import os\n'), ((5789, 5851), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'PLOT_PATH', 'facecolor': '"""w"""', 'transparent': '(False)'}), "(fname=PLOT_PATH, facecolor='w', transparent=False)\n", (5800, 5851), True, 'from matplotlib import pyplot as plt\n'), ((5912, 5921), 
'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5919, 5921), True, 'from matplotlib import pyplot as plt\n'), ((6064, 6074), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6072, 6074), True, 'from matplotlib import pyplot as plt\n'), ((6079, 6157), 'matplotlib.pyplot.title', 'plt.title', (['f"""Number of Labels Predicted by Threshold for {MODEL} on {DATASET}"""'], {}), "(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}')\n", (6088, 6157), True, 'from matplotlib import pyplot as plt\n'), ((6162, 6222), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'topks'], {'linestyle': '"""-"""', 'label': '"""Number of Labels"""'}), "(ts, topks, linestyle='-', label='Number of Labels')\n", (6170, 6222), True, 'from matplotlib import pyplot as plt\n'), ((6227, 6250), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (6237, 6250), True, 'from matplotlib import pyplot as plt\n'), ((6255, 6269), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (6263, 6269), True, 'from matplotlib import pyplot as plt\n'), ((6274, 6294), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Labels"""'], {}), "('Labels')\n", (6284, 6294), True, 'from matplotlib import pyplot as plt\n'), ((6299, 6311), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6309, 6311), True, 'from matplotlib import pyplot as plt\n'), ((6329, 6397), 'os.path.join', 'os.path.join', (['PLOTS_PATH', 'f"""{time_str}_labels_{MODEL}_{DATASET}.png"""'], {}), "(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png')\n", (6341, 6397), False, 'import os\n'), ((6402, 6464), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'PLOT_PATH', 'facecolor': '"""w"""', 'transparent': '(False)'}), "(fname=PLOT_PATH, facecolor='w', transparent=False)\n", (6413, 6464), True, 'from matplotlib import pyplot as plt\n'), ((6525, 6534), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6532, 6534), True, 'from matplotlib import pyplot as plt\n'), ((3940, 3970), 'os.path.exists', 'os.path.exists', (['ALL_PLOTS_PATH'], {}), '(ALL_PLOTS_PATH)\n', (3954, 3970), False, 'import os\n'), ((3980, 4004), 'os.mkdir', 'os.mkdir', (['ALL_PLOTS_PATH'], {}), '(ALL_PLOTS_PATH)\n', (3988, 4004), False, 'import os\n'), ((4358, 4384), 'os.path.exists', 'os.path.exists', (['PLOTS_PATH'], {}), '(PLOTS_PATH)\n', (4372, 4384), False, 'import os\n'), ((4394, 4414), 'os.mkdir', 'os.mkdir', (['PLOTS_PATH'], {}), '(PLOTS_PATH)\n', (4402, 4414), False, 'import os\n'), ((1585, 1608), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1595, 1608), False, 'import click\n'), ((1690, 1713), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1700, 1713), False, 'import click\n'), ((7500, 7532), 'numpy.argmax', 'np.argmax', (['(res_score < threshold)'], {}), '(res_score < threshold)\n', (7509, 7532), True, 'import numpy as np\n'), ((7721, 7787), 'numpy.mean', 'np.mean', (['[(1 if x in res_label[:topk] else 0) for x in test_label]'], {}), '([(1 if x in res_label[:topk] else 0) for x in test_label])\n', (7728, 7787), True, 'import numpy as np\n'), ((8082, 8096), 'numpy.mean', 'np.mean', (['topks'], {}), '(topks)\n', (8089, 8096), True, 'import numpy as np\n'), ((8098, 8117), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (8105, 8117), True, 'import numpy as np\n'), ((8119, 8135), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (8126, 8135), True, 'import numpy as np\n'), ((8137, 8149), 'numpy.mean', 'np.mean', (['f1s'], {}), 
'(f1s)\n', (8144, 8149), True, 'import numpy as np\n'), ((7639, 7705), 'numpy.mean', 'np.mean', (['[(1 if x in test_label else 0) for x in res_label[:topk]]'], {}), '([(1 if x in test_label else 0) for x in res_label[:topk]])\n', (7646, 7705), True, 'import numpy as np\n')]
|
#################### Create new columns by combining the string columns of a DataFrame ####################
# Function used: apply
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
# df = pd.DataFrame({'id' : [1,2,10,20,100,200],
# "name":['aaa','bbb','ccc','ddd','eee','fff']})
# print(df)
#
# # Create a new column by transforming an existing column
# # Build a new id column from the original id column, unified to 5 digits so the widths match (missing digits are padded with leading zeros)
# df['id_2']=df['id'].apply(lambda x:"{:0>5d}".format(x))
# print(df)
# # id name id_2
# # 0 1 aaa 00001
# # 1 2 bbb 00002
# # 2 10 ccc 00010
# # 3 20 ddd 00020
# # 4 100 eee 00100
# # 5 200 fff 00200
#
# # # format(): reshapes the argument in () according to the format spec that precedes it.
# #
# # x=3.141592
# # print("{:.2f}".format(x))
# # # 3.14
# #
# # print("{:+.2f}".format(x))
# # # +3.14
# #
# # x=-3.141592
# # print("{:+.2f}".format(x))
# # # -3.14
# #
# # x=2.718
# # print("{:.0f}".format(x)) # print as an integer (rounded at the first decimal place)
# # # 3
# #
# # x=3.147592
# # print("{:.2f}".format(x)) # .2f (rounded at the third decimal place)
# # # 3.15
# #
# # x=5
# # print("{:0>2d}".format(x)) # 0>2d (width 2, padded with zeros)
# # # 05
# #
# # x=7777777777
# # print("{:0>5d}".format(x)) # 0>5d (width 5, padded with zeros; numbers wider than 5 are left unchanged)
# # # 7777777777
# # print("{:,}".format(x))
# # # 7,777,777,777
# #
# # x=0.25
# # print("{:.2%}".format(x))
# # # 25.00%
# #
#
# # name + id_2: rebuild => the two columns are combined (apply now operates on two columns)
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) # the axis must be specified (otherwise the result is NaN)
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 NaN
# # 1 2 bbb 00002 NaN
# # 2 10 ccc 00010 NaN
# # 3 20 ddd 00020 NaN
# # 4 100 eee 00100 NaN
# # 5 200 fff 00200 NaN
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # axis set to 1
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1)
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
#
# # Add a new column that shows id with decimal places
# df['id_3']=df['id'].apply(lambda x: "{:.2f}".format(x))
# print(df)
# # id name id_2 id_name id_3
# # 0 1 aaa 00001 00001_aaa 1.00
# # 1 2 bbb 00002 00002_bbb 2.00
# # 2 10 ccc 00010 00010_ccc 10.00
# # 3 20 ddd 00020 00020_ddd 20.00
# # 4 100 eee 00100 00100_eee 100.00
# # 5 200 fff 00200 00200_fff 200.00
#
# df['name_3']=df['name'].apply(lambda x:x.upper()) # upper(): convert to uppercase
# print(df)
# # id name id_2 id_name id_3 name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF
#
#
# # Add an id_name_3 column
# # id_name_3 => 1.00:AAA
#
# df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1)
# print(df)
# # id name id_2 id_name id_3 name_3 id_name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF
#
###################################################################################################################
# groupby aggregation functions
# 1. Grouping with a dictionary
# Above, a DataFrame was built from a dictionary: columns as keys, data as values
# data= : pass in the data and identify it with the columns and index arguments.
df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4'])
print(df)
# c1 c2 c3 c4 c5
# r1 0 1 2 3 4
# r2 5 6 7 8 9
# r3 10 11 12 13 14
# r4 15 16 17 18 19
# row_g1 = r1+r2 : create new rows by aggregating row-wise (elements in the same column are added: sum())
# row_g2 = r3+r4
mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 0 1 2 3 4
# row_g2 5 6 7 8 9
# row_g3 10 11 12 13 14
# row_g4 15 16 17 18 19
mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
print(gbr.mean())
# c1 c2 c3 c4 c5
# row_g1 2.5 3.5 4.5 5.5 6.5
# row_g2 12.5 13.5 14.5 15.5 16.5
print(gbr.std())
# c1 c2 c3 c4 c5
# row_g1 3.535534 3.535534 3.535534 3.535534 3.535534
# row_g2 3.535534 3.535534 3.535534 3.535534 3.535534
# col_g1 = c1+c2 : create new columns by aggregating column-wise (elements in the same row are added: sum()); axis=1 must be given
# col_g2 = c3+c4+c5
mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'}
gbc = df.groupby(mdc,axis=1) # axis=1 must be given
print(gbc.sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
print(type(mdr))
# <class 'dict'>
print(mdr)
# {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'}
# dic -> Series
# Grouping with a Series
msr = Series(mdr)
print(type(msr))
# <class 'pandas.core.series.Series'>
print(msr)
# r1 row_g1
# r2 row_g1
# r3 row_g2
# r4 row_g2
# dtype: object
print(df.groupby(msr).sum()) # same result as with the dictionary
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
msc = Series(mdc)
print(df.groupby(msc,axis=1).sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
# Grouping with a function
# Group with the function rgf declared below instead of a dictionary or Series (information about df's index labels is passed to x)
def rgf(x) :
if x == 'r1' or x == 'r2':
rg = 'row_g1'
else:
rg = 'row_g2'
return rg
# Group in the same way as with a dictionary or Series, building the DataFrame according to the result of the grouping function
print(df.groupby(rgf).sum())
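# Extra sketch (added for illustration; cgf is not part of the original notes):
# the same function-based grouping works along the columns when axis=1 is
# given, mirroring the dictionary and Series examples above.
def cgf(x):
    return 'col_g1' if x in ('c1', 'c2') else 'col_g2'
print(df.groupby(cgf, axis=1).sum())
#     col_g1  col_g2
# r1       1       9
# r2      11      24
# r3      21      39
# r4      31      54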
|
[
"numpy.arange",
"pandas.Series"
] |
[((5890, 5901), 'pandas.Series', 'Series', (['mdr'], {}), '(mdr)\n', (5896, 5901), False, 'from pandas import DataFrame, Series\n'), ((6222, 6233), 'pandas.Series', 'Series', (['mdc'], {}), '(mdc)\n', (6228, 6233), False, 'from pandas import DataFrame, Series\n'), ((4245, 4258), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (4254, 4258), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import yaml
from collections import Iterable, defaultdict
from itertools import cycle
import subprocess
import PIL
import math
import os
from PIL import Image
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.training import monitored_session
from datasets.plants import read_label_file
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
from matplotlib.font_manager import FontManager
import matplotlib
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
slim = tf.contrib.slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
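# Per-channel RGB means of the ImageNet training set; subtracted from the input in
# VGG/ResNet-style preprocessing (see pre_process_resnet below).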
OUTPUT_MODEL_NODE_NAMES_DICT = {
'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1',
'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1',
}
def define_tf_flags():
BATCH_SIZE = 100
tf.app.flags.DEFINE_integer(
'batch_size', BATCH_SIZE, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'max_num_batches', None,
'Max number of batches to evaluate by default use all.')
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/tfmodel/',
'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
'dataset_name', 'plants', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'validation',
'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'mobilenet_v1',
'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None,
'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
'eval_image_size', None, 'Eval image size')
FLAGS = tf.app.flags.FLAGS
def get_dataset_dir(config):
return get_config_value(config, 'dataset_dir')
def get_config_value(config, key):
return config.get(key) or getattr(FLAGS, key)
def get_checkpoint_dir_path(config):
return get_config_value(config, 'checkpoint_path')
def get_lastest_check_point(config):
checkpoint_path = get_checkpoint_dir_path(config)
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
return checkpoint_path
def inspect_tfrecords(tfrecords_filename):
record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)
examples = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
examples.append(example)
# print(example)
return examples
def get_info(config, checkpoint_path=None,
calculate_confusion_matrix=False):
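    # Build the full evaluation graph: dataset provider, preprocessing, network,
    # streaming metrics, optional confusion matrix and input gradients. Returns the
    # tensors and bookkeeping values the evaluation helpers below need.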
dataset_dir = get_dataset_dir(config)
model_name = get_model_name(config)
# tf.logging.set_verbosity(tf.logging.INFO)
tf.Graph().as_default()
tf_global_step = slim.get_or_create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir)
####################
# Select the model #
####################
num_classes = (dataset.num_classes - FLAGS.labels_offset)
network_fn = nets_factory.get_network_fn(
model_name,
num_classes=num_classes,
is_training=False)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
        num_epochs=1,  # read each image only once
# num_readers=1,
shuffle=False,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
# common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
raw_images = image
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
allow_smaller_final_batch=True,
capacity=5 * FLAGS.batch_size)
####################
# Define the model #
####################
logits, _ = network_fn(images)
if FLAGS.moving_average_decay:
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, tf_global_step)
variables_to_restore = variable_averages.variables_to_restore(
slim.get_model_variables())
variables_to_restore[tf_global_step.op.name] = tf_global_step
else:
variables_to_restore = slim.get_variables_to_restore()
predictions = tf.argmax(logits, 1)
one_hot_predictions = slim.one_hot_encoding(
predictions, dataset.num_classes - FLAGS.labels_offset)
labels = tf.squeeze(labels)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'Recall_5': slim.metrics.streaming_recall_at_k(
logits, labels, 5),
})
if calculate_confusion_matrix:
confusion_matrix = tf.confusion_matrix(labels=labels,
num_classes=num_classes,
predictions=predictions)
else:
confusion_matrix = None
# Print the summaries to screen.
for name, value in names_to_values.items():
summary_name = 'eval/%s' % name
op = tf.summary.scalar(summary_name, value, collections=[])
op = tf.Print(op, [value], summary_name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
# TODO(sguada) use num_epochs=1
if FLAGS.max_num_batches:
num_batches = FLAGS.max_num_batches
else:
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
checkpoint_path = checkpoint_path or get_lastest_check_point(config)
tf.logging.info('Evaluating %s' % checkpoint_path)
labels_to_names = read_label_file(dataset_dir)
probabilities = tf.nn.softmax(logits)
softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy(
one_hot_predictions, logits, label_smoothing=0.0, weights=1.0)
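    # Gradient of the cross-entropy loss w.r.t. the input images, used to render
    # saliency maps.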
grad_imgs = tf.gradients(softmax_cross_entropy_loss,
images)[0]
return {
'labels_to_names': labels_to_names,
'checkpoint_path': checkpoint_path,
'num_batches': num_batches,
'names_to_values': names_to_values,
'names_to_updates': names_to_updates,
'variables_to_restore': variables_to_restore,
'images': images,
'raw_images': raw_images,
'network_fn': network_fn,
'labels': labels,
'logits': logits,
'probabilities': probabilities,
'predictions': predictions,
'confusion_matrix': confusion_matrix,
'loss': softmax_cross_entropy_loss,
'grad_imgs': grad_imgs,
}
def get_monitored_session(checkpoint_path):
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
# scaffold=scaffold,
# master=master,
# config=config
)
return monitored_session.MonitoredSession(
session_creator=session_creator)
def plot_confusion_matrix(confusion_matrix, labels_to_names=None,
save_dir='.'):
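    # Render the confusion matrix as a seaborn heatmap; zero-count cells are masked
    # out and the ticks are labelled with class names when labels_to_names is given.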
import seaborn as sns
set_matplot_zh_font()
# ax = plt.subplot()
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(18, 15)
# https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black
# confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01,
# confusion_matrix)
cmap = plt.get_cmap('Accent')
# cmap = plt.get_cmap('coolwarm')
# cmap = plt.get_cmap('plasma')
# cmap = plt.get_cmap('Blues')
# cmap.set_bad(color='black')
mask = np.zeros_like(confusion_matrix)
mask[confusion_matrix == 0] = True
# sns.set(font_scale=1)
with sns.axes_style('darkgrid'):
sns.heatmap(confusion_matrix,
linewidths=0.2,
linecolor='#eeeeee',
xticklabels=True,
yticklabels=True,
mask=mask, annot=False, ax=ax, cmap=cmap)
n = confusion_matrix.shape[0]
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Confusion Matrix')
axis = [labels_to_names[i] if labels_to_names else i
for i in range(n)]
ax.xaxis.set_ticklabels(axis, rotation=270)
ax.yaxis.set_ticklabels(axis, rotation=0)
pic_path = os.path.join(save_dir, 'confusion_matrix.png')
plt.savefig(pic_path)
print(pic_path, 'saved')
print('plot shown')
plt.show()
def get_matplot_zh_font():
# From https://blog.csdn.net/kesalin/article/details/71214038
fm = FontManager()
mat_fonts = set(f.name for f in fm.ttflist)
output = subprocess.check_output('fc-list :lang=zh-tw -f "%{family}\n"',
shell=True)
zh_fonts = set(f.split(',', 1)[0] for f in output.split('\n'))
available = list(mat_fonts & zh_fonts)
return available
def set_matplot_zh_font():
available = get_matplot_zh_font()
if len(available) > 0:
        plt.rcParams['font.sans-serif'] = [available[0]]  # set the default font
plt.rcParams['axes.unicode_minus'] = False
def deprocess_image(x, target_std=0.15):
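    # Collapse the per-channel gradient to a single map, rescale it to the target
    # contrast and convert it to uint8 so it can be displayed as a saliency image.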
# normalize tensor
x = np.abs(x)
x = np.max(x, axis=2)
x -= x.mean()
std = x.std()
if std:
x /= std
x *= target_std
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
def plot_image_in_grids(image_list, n_columns, file_name=None):
image_table = chunks(image_list, n_columns)
n_row = len(image_table)
plt.figure(figsize=(15, 10))
i = 1
for row in image_table:
for col in row:
plt.subplot(n_row, n_columns, i)
plt.imshow(col)
i += 1
if file_name:
plt.savefig(file_name)
print(file_name, 'saved')
else:
print('plot shown')
plt.show()
def plot_saliency(saliency, image, file_name=None):
plt.figure(figsize=(15, 10))
plot_image_in_grids([
[saliency, image]
], file_name)
def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False):
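    # Evaluate the requested tensors batch by batch over the whole split and aggregate
    # the results: confusion matrices are summed, everything else is concatenated.
    # Results can optionally be cached to / restored from an HDF5 file.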
checkpoint_dir_path = get_checkpoint_dir_path(config)
if use_cached:
aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5')
if aggregated is not None:
return aggregated
calculate_confusion_matrix = True
info = get_info(config,
calculate_confusion_matrix=calculate_confusion_matrix)
num_batches = info['num_batches']
aggregated = {}
checkpoint_path = checkpoint_path or get_lastest_check_point(config)
with get_monitored_session(checkpoint_path) as sess:
for i in range(int(math.ceil(num_batches))):
print('batch #{} of {}'.format(i, num_batches))
params = {
k: v
for k, v in info.items()
if isinstance(v, tf.Tensor) and (not keys or k in keys)
}
try:
feed_dict = {}
res = sess.run(params, feed_dict=feed_dict)
except:
import traceback
traceback.print_exc()
raise
for k in res.keys():
value = res[k]
if k == 'confusion_matrix':
if k not in aggregated:
aggregated[k] = np.matrix(value)
else:
aggregated[k] += np.matrix(value)
else:
if k not in aggregated:
aggregated[k] = []
if isinstance(value, Iterable):
aggregated[k].extend(value)
else:
aggregated[k].append(value)
labels = res['labels']
print('len labels', len(labels))
all_labels = aggregated['labels']
print('all_labels length', len(all_labels))
print('all_labels unique length', len(set(all_labels)))
if use_cached:
save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated)
return aggregated
def _run_saliency_maps(config, use_cached=False):
checkpoint_path = get_lastest_check_point(config)
keys = [
'labels',
'images',
'grad_imgs',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
grad_imgs = aggregated['grad_imgs']
images = aggregated['images']
prefix = ''
save_saliency_maps(config, grad_imgs, images, prefix,
labels=aggregated['labels'])
def _run_info(config, use_cached=False):
checkpoint_path = get_lastest_check_point(config)
keys = [
'labels',
'images',
# 'raw_images',
'logits',
'probabilities',
'predictions',
'confusion_matrix',
# 'loss',
'grad_imgs',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
from collections import Counter
all_labels = aggregated['labels']
c = Counter(all_labels)
kv_pairs = sorted(dict(c).items(), key=lambda p: p[0])
for k, v in kv_pairs:
print(k, v)
def save_var(directory, file_name, info):
import h5py
info_file_path = os.path.join(directory, file_name)
f = h5py.File(info_file_path, 'w')
for k, v in info.items():
f[k] = v
f.close()
print(info_file_path, 'saved')
def load_var(directory, file_name):
import h5py
info_file_path = os.path.join(directory, file_name)
try:
with h5py.File(info_file_path, 'r') as f:
return {
k: f[k][:] for k in f.keys()
}
except IOError:
return None
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
return [l[i:i + n] for i in range(0, len(l), n)]
def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None):
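    # For up to 10 images per label, save a grid containing the saliency map, the
    # de-normalized input image and the saliency overlay.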
n = images.shape[0]
save_dir = 'saliency_maps'
labels_to_names = read_label_file(get_dataset_dir(config))
label_count_map = defaultdict(int)
try:
os.makedirs(save_dir)
except OSError:
pass
for j in range(n):
image = images[j]
grad_img = grad_imgs[j]
label = labels[j]
label_name = labels_to_names[label]
if label_count_map[label] >= 10:
continue
file_name = '{}/{}{:03d}.jpg'.format(
save_dir,
'{:02}_{}_{}'.format(
label, label_name.encode('utf-8'),
prefix) if labels is not None else prefix,
label_count_map[label])
saliency = deprocess_image(grad_img, target_std=0.3)
restored_image = ((image / 2 + 0.5) * 255).astype('uint8')
blend = get_image_with_saliency_map(restored_image, saliency)
plot_image_in_grids([
saliency,
restored_image,
blend,
], n_columns=2, file_name=file_name)
label_count_map[label] += 1
def _plot_roc(logits_list, labels, predictions, probabilities,
plot_all_classes=False, save_dir=None):
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
possible_labels = list(range(max(labels) + 1))
y_binary = label_binarize(labels, classes=possible_labels)
output_matrix = np.array(probabilities)
y_score_matrix = output_matrix
y_score_matrix = np.where(
y_score_matrix == np.max(y_score_matrix, axis=1)[:, None],
y_score_matrix, 0)
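    # Keep only each sample's highest class probability; all other scores are zeroed.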
tpr = {}
fpr = {}
roc_auc = {}
for i in range(len(possible_labels)):
y_scores = y_score_matrix[:, i]
fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores)
roc_auc[i] = auc(fpr[i], tpr[i])
    # Reference: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
y_score_matrix_ravel = y_score_matrix.ravel()
i_positive = y_score_matrix_ravel != 0
fpr["highest_probability"], tpr[
"highest_probability"], micro_thresholds = roc_curve(
y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive])
roc_auc["highest_probability"] = auc(fpr["highest_probability"],
tpr["highest_probability"])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], micro_thresholds = roc_curve(
y_binary.ravel(), y_score_matrix.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
n_classes = len(possible_labels)
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# key_series = 'micro'
key_series = 'highest_probability'
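    # argmax(TPR - FPR) picks the operating point that maximizes Youden's J statistic.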
i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series])
optimal_threshold_fpr = fpr[key_series][i_optimal_micro]
optimal_threshold_tpr = tpr[key_series][i_optimal_micro]
    optimal_threshold = hp_thresholds[i_optimal_micro]  # threshold of the highest_probability curve
print('optimal_threshold_fpr:', optimal_threshold_fpr)
print('optimal_threshold_tpr:', optimal_threshold_tpr)
print('optimal_threshold:', optimal_threshold)
# Plot all ROC curves
plt.figure()
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
if plot_all_classes:
for i, color in zip(range(n_classes), colors):
label = 'ROC curve of class {0} (area = {1:0.2f})'.format(
i, roc_auc[i])
label = None
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label=label)
plt.plot(fpr["highest_probability"], tpr["highest_probability"],
label='ROC curve (area = {0:0.2f})'
''.format(roc_auc["highest_probability"]),
color='blue', linestyle=':', linewidth=4)
# plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
pic_path = os.path.join(save_dir, 'roc_curve.png')
plt.savefig(pic_path)
print(pic_path, 'saved')
print('ROC curve shown')
plt.show()
def _roc_analysis(config, use_cached=False):
checkpoint_dir_path = get_checkpoint_dir_path(config)
keys = [
'logits',
'labels',
'predictions',
'probabilities',
]
info = _eval_tensors(config, keys=keys, use_cached=use_cached)
logits_list = info['logits']
labels = info['labels']
predictions = info['predictions']
probabilities = info['probabilities']
_plot_roc(logits_list, labels, predictions, probabilities,
save_dir=checkpoint_dir_path)
return
def inspect_datasets(config):
dataset_dir = get_dataset_dir(config)
examples = []
for i in range(5):
tfrecords_filename = os.path.join(
dataset_dir,
'plants_validation_{:05d}-of-00005.tfrecord'.format(i))
examples.extend(inspect_tfrecords(tfrecords_filename))
print(len(examples))
examples = []
for i in range(5):
tfrecords_filename = os.path.join(
dataset_dir,
'plants_train_{:05d}-of-00005.tfrecord'.format(i))
examples.extend(inspect_tfrecords(tfrecords_filename))
print(len(examples))
def resize(im, target_smallest_size):
resize_ratio = 1.0 * target_smallest_size / min(list(im.size))
target_size = tuple(int(resize_ratio * l) for l in im.size)
return im.resize(target_size, PIL.Image.BILINEAR)
def central_crop(im, w, h):
half_w = im.size[0] / 2
half_h = im.size[1] / 2
return im.crop(
(half_w - w / 2, half_h - h / 2, half_w + w / 2, half_h + h / 2))
def pre_process_resnet(im, coreml=False):
target_smallest_size = 224
im1 = resize(im, target_smallest_size)
im2 = central_crop(im1, target_smallest_size, target_smallest_size)
arr = np.asarray(im2).astype(np.float32)
if not coreml:
arr[:, :, 0] -= _R_MEAN
arr[:, :, 1] -= _G_MEAN
arr[:, :, 2] -= _B_MEAN
return arr
def central_crop_by_fraction(im, central_fraction):
w = im.size[0]
h = im.size[1]
return central_crop(im, w * central_fraction, h * central_fraction)
def pre_process_mobilenet(im, coreml=False):
    # See preprocess_for_eval in
    # https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py
im1 = central_crop_by_fraction(im, 0.875)
target_smallest_size = 224
im2 = im1.resize((target_smallest_size, target_smallest_size),
PIL.Image.BILINEAR)
arr = np.asarray(im2).astype(np.float32)
if not coreml:
arr /= 255.0
arr -= 0.5
arr *= 2.0
return arr
def pre_process(config, im, coreml=False):
model_name = get_model_name(config)
return {
'resnet_v2_50': pre_process_resnet,
'mobilenet_v1': pre_process_mobilenet,
}[model_name](im, coreml=coreml)
def get_model_name(config):
model_name = get_config_value(config, 'model_name')
return model_name
def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None):
# http://www.cnblogs.com/arkenstone/p/7551270.html
filenames = [
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),
('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),
# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),
]
for filename, label in filenames:
filename = dataset_dir_file(config, filename)
# image_np = cv2.imread(filename)
result = run_inference_on_file_pb(
config, filename, pb_file_path=pb_file_path,
dataset_dir=dataset_dir)
index = result['prediction_label']
print("Prediction label index:", index)
prediction_name = result['prediction_name']
print("Prediction name:", prediction_name)
print("Top 3 Prediction label index:", ' '.join(result['top_n_names']))
assert prediction_name == label
def dataset_dir_file(config, filename):
filename = os.path.join(get_dataset_dir(config), filename)
return filename
def run_inference_by_pb(config, image_np, pb_file_path=None):
checkpoint_dir_path = get_checkpoint_dir_path(config)
pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path
    with tf.gfile.GFile(pb_file_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return _run_inference_by_graph_def(config, graph_def, image_np)
def _run_inference_by_graph_def(config, graph_def, image_np,
enable_saliency_maps=False):
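    # Preprocess the PIL image, add a batch dimension, import the frozen graph and run
    # the softmax output (plus, optionally, the input-gradient tensor for saliency maps).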
model_name = get_model_name(config)
image_size = 224
image_np = pre_process(config, image_np)
image_np = cv2.resize(image_np, (image_size, image_size))
# expand dims to shape [None, 299, 299, 3]
image_np = np.expand_dims(image_np, 0)
graph = tf.import_graph_def(graph_def, name='')
with tf.Session(graph=graph) as sess:
input_tensor_name = "input:0"
# output_tensor_name = "resnet_v2_50/predictions/Reshape_1:0"
output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[
model_name] + ":0"
input_tensor = sess.graph.get_tensor_by_name(
input_tensor_name) # get input tensor
output_tensor = sess.graph.get_tensor_by_name(
output_tensor_name) # get output tensor
tensor_map = {
'logits': output_tensor,
}
if enable_saliency_maps:
tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name(
'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0')
result = sess.run(tensor_map, feed_dict={input_tensor: image_np})
return {
'logits': result['logits'],
'grad_imgs': result.get('grad_imgs'),
}
def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None):
labels_to_names = read_label_file(get_dataset_dir(config))
dataset_dir = get_dataset_dir(config)
filenames = [
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),
('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),
# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),
]
for filename, label in filenames:
filename = os.path.join(dataset_dir, filename)
image_np = PIL.Image.open(filename)
logits = run_inference_by_coreml(
config, image_np, coreml_file_path=coreml_file_path, )
print('logits', logits)
index = np.argmax(logits)
print("Prediction label index:", index)
prediction_name = labels_to_names[index]
print("Prediction name:", prediction_name)
index_list = np.argsort(logits)
print("Top 3 Prediction label index:",
index_list,
' '.join([labels_to_names[i] for i in list(index_list)]))
assert prediction_name == label
def run_inference_by_coreml(config, image_np, coreml_file_path=None):
import coremltools
import tfcoreml
model_name = get_model_name(config)
checkpoint_dir_path = get_checkpoint_dir_path(config)
frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path
coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path
image_np = pre_process(config, image_np, coreml=True)
image = Image.fromarray(image_np.astype('int8'), 'RGB')
input_tensor_shapes = {
"input:0": [1, image_np.shape[0], image_np.shape[1],
3]} # batch size is 1
output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + ":0"
coreml_model = coremltools.models.MLModel(coreml_model_file)
convert_model = False
# convert_model = True
if convert_model:
extra_args = {
'resnet_v2_50': {
'red_bias': -_R_MEAN,
'green_bias': -_G_MEAN,
'blue_bias': -_B_MEAN,
},
'mobilenet_v1': {
'red_bias': -1.0,
'green_bias': -1.0,
'blue_bias': -1.0,
'image_scale': 2.0 / 255.,
}
}[model_name]
coreml_model = tfcoreml.convert(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file.replace('.mlmodel',
'_test.mlmodel'),
input_name_shape_dict=input_tensor_shapes,
output_feature_names=[output_tensor_name],
image_input_names=['input:0'],
**extra_args
)
coreml_inputs = {'input__0': image}
coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False)
# example output: 'resnet_v2_50__predictions__Reshape_1__0'
probs = coreml_output[
output_tensor_name.replace('/', '__').replace(':', '__')].flatten()
return probs
def run_inference_on_file_pb(config, filename, pb_file_path=None,
dataset_dir=None):
labels_to_names = read_label_file(get_dataset_dir(config))
image_np = PIL.Image.open(filename)
logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[
'logits']
index = np.argmax(logits, 1)
prediction_name = labels_to_names[index[0]]
index_list = np.argsort(logits, 1)
top_n_names = list(reversed(
[labels_to_names[i] for i in list(index_list[0])]))
print('logits', logits)
result = {
'prediction_name': prediction_name,
'prediction_label': index[0],
'top_n_names': top_n_names,
'logits': logits.tolist(),
}
return result
def test_inference_by_model_files(config, dataset_dir=None,
frozen_graph_path=None,
coreml_file_path=None):
dataset_dir = dataset_dir or get_dataset_dir(config)
test_inference_by_pb(config, pb_file_path=frozen_graph_path,
dataset_dir=dataset_dir)
test_inference_by_coreml(config, coreml_file_path=coreml_file_path,
dataset_dir=dataset_dir)
def get_image_with_saliency_map(image_np, saliency):
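    # Blend the saliency map into the central square crop of the image: salient pixels
    # are painted in a contrasting colour (blue on bright regions, orange on dark ones)
    # with opacity proportional to the saliency intensity.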
image_np = np.copy(np.asarray(image_np))[:, :]
w, h = image_np.shape[0:2]
l = min(w, h)
saliency = cv2.resize(saliency, (l, l))
saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB)
canvas = image_np[:, :]
w_offset = int((w - l) / 2)
h_offset = int((h - l) / 2)
roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l]
intensify_factor = 3
alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1)
paint = np.copy(1 - alpha) * 255
overlap = roi_img[paint > 128]
if overlap.mean() + overlap.std() > 128:
color = np.array([0, 0, 255]).astype(float) / 255 # blue
else:
color = np.array([255, 200, 0]).astype(float) / 255 # orange
paint[:, :] *= color
roi_img = cv2.multiply(alpha, roi_img.astype(float))
roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int)
canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img
return canvas
def test_frozen_graph_saliency_map(config):
checkpoint_dir = config['checkpoint_path']
dataset_dir = get_dataset_dir(config)
frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb')
    filename = dataset_dir_file(config, '20180330/1lZsRrQzj/1lZsRrQzj_5.jpg')
labels_to_names = read_label_file(dataset_dir)
image_np = PIL.Image.open(filename)
results = run_inference_by_pb(config, image_np,
pb_file_path=frozen_graph_path)
logits = results['logits']
index = np.argmax(logits, 1)[0]
prediction_name = labels_to_names[index]
grad_imgs = results['grad_imgs']
saliency = deprocess_image(grad_imgs[0])
blend = get_image_with_saliency_map(image_np, saliency)
print(prediction_name)
plot_image_in_grids([
blend, image_np,
saliency,
], 2)
@click.group()
def cli():
pass
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def run_info(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_run_info(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
def test_models(config_file):
with open(config_file) as f:
config = yaml.load(f)
test_inference_by_model_files(config)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def plot_roc(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_roc_analysis(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def saliency_maps(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_run_saliency_maps(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def confusion_matrix(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
keys = [
'confusion_matrix',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
checkpoint_dir_path = get_checkpoint_dir_path(config)
dataset_dir = get_dataset_dir(config)
labels_to_names = read_label_file(dataset_dir)
plot_confusion_matrix(aggregated['confusion_matrix'],
labels_to_names=labels_to_names,
save_dir=checkpoint_dir_path)
if __name__ == '__main__':
define_tf_flags()
cli()
|
[
"tensorflow.app.flags.DEFINE_float",
"yaml.load",
"click.option",
"collections.defaultdict",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.interp",
"tensorflow.app.flags.DEFINE_integer",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"datasets.dataset_factory.get_dataset",
"numpy.max",
"tensorflow.squeeze",
"preprocessing.preprocessing_factory.get_preprocessing",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"matplotlib.pyplot.subplot",
"os.makedirs",
"tensorflow.argmax",
"PIL.Image.open",
"numpy.array",
"numpy.abs",
"itertools.cycle",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.Example",
"datasets.plants.read_label_file",
"click.group",
"math.ceil",
"matplotlib.use",
"tensorflow.import_graph_def",
"tensorflow.python.training.monitored_session.ChiefSessionCreator",
"numpy.matrix",
"click.argument",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"numpy.expand_dims",
"matplotlib.font_manager.FontManager",
"tensorflow.Print",
"tensorflow.app.flags.DEFINE_string",
"matplotlib.pyplot.xlabel",
"tensorflow.losses.softmax_cross_entropy",
"coremltools.models.MLModel",
"matplotlib.pyplot.title",
"tensorflow.confusion_matrix",
"nets.nets_factory.get_network_fn",
"tensorflow.logging.info",
"numpy.argmax",
"numpy.clip",
"os.path.join",
"tensorflow.train.ExponentialMovingAverage",
"seaborn.axes_style",
"numpy.copy",
"tensorflow.gradients",
"tensorflow.GraphDef",
"cv2.resize",
"h5py.File",
"matplotlib.pyplot.get_cmap",
"tensorflow.summary.scalar",
"subprocess.check_output",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.label_binarize",
"cv2.add",
"matplotlib.pyplot.savefig",
"seaborn.heatmap",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.batch",
"tensorflow.nn.softmax",
"numpy.zeros_like",
"traceback.print_exc",
"tensorflow.python.training.monitored_session.MonitoredSession",
"collections.Counter",
"tensorflow.gfile.IsDirectory",
"numpy.asarray",
"tensorflow.Session",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"tensorflow.add_to_collection",
"os.environ.get",
"sklearn.metrics.auc"
] |
[((33480, 33493), 'click.group', 'click.group', ([], {}), '()\n', (33491, 33493), False, 'import click\n'), ((33532, 33561), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (33546, 33561), False, 'import click\n'), ((33563, 33605), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (33575, 33605), False, 'import click\n'), ((33772, 33801), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (33786, 33801), False, 'import click\n'), ((33956, 33985), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (33970, 33985), False, 'import click\n'), ((33987, 34029), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (33999, 34029), False, 'import click\n'), ((34200, 34229), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (34214, 34229), False, 'import click\n'), ((34231, 34273), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (34243, 34273), False, 'import click\n'), ((34454, 34483), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (34468, 34483), False, 'import click\n'), ((34485, 34527), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (34497, 34527), False, 'import click\n'), ((1415, 1444), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (1429, 1444), False, 'import os\n'), ((1521, 1542), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1535, 1542), False, 'import matplotlib\n'), ((1852, 1949), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', 'BATCH_SIZE', '"""The number of samples in each batch."""'], {}), "('batch_size', BATCH_SIZE,\n 'The number of samples in each batch.')\n", (1879, 1949), True, 'import tensorflow as tf\n'), ((1959, 2072), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_num_batches"""', 'None', '"""Max number of batches to evaluate by default use all."""'], {}), "('max_num_batches', None,\n 'Max number of batches to evaluate by default use all.')\n", (1986, 2072), True, 'import tensorflow as tf\n'), ((2090, 2182), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""master"""', '""""""', '"""The address of the TensorFlow master to use."""'], {}), "('master', '',\n 'The address of the TensorFlow master to use.')\n", (2116, 2182), True, 'import tensorflow as tf\n'), ((2192, 2342), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_path"""', 'None', '"""The directory where the model was written to or an absolute path to a checkpoint file."""'], {}), "('checkpoint_path', None,\n 'The directory where the model was written to or an absolute path to a checkpoint file.'\n )\n", (2218, 2342), True, 'import tensorflow as tf\n'), ((2366, 2470), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""eval_dir"""', '"""/tmp/tfmodel/"""', '"""Directory where the results are saved to."""'], {}), "('eval_dir', '/tmp/tfmodel/',\n 'Directory where the results are saved to.')\n", (2392, 2470), True, 'import tensorflow as tf\n'), ((2488, 2604), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', 
(['"""num_preprocessing_threads"""', '(4)', '"""The number of threads used to create the batches."""'], {}), "('num_preprocessing_threads', 4,\n 'The number of threads used to create the batches.')\n", (2515, 2604), True, 'import tensorflow as tf\n'), ((2622, 2714), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_name"""', '"""plants"""', '"""The name of the dataset to load."""'], {}), "('dataset_name', 'plants',\n 'The name of the dataset to load.')\n", (2648, 2714), True, 'import tensorflow as tf\n'), ((2724, 2827), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_split_name"""', '"""validation"""', '"""The name of the train/test split."""'], {}), "('dataset_split_name', 'validation',\n 'The name of the train/test split.')\n", (2750, 2827), True, 'import tensorflow as tf\n'), ((2845, 2949), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', 'None', '"""The directory where the dataset files are stored."""'], {}), "('dataset_dir', None,\n 'The directory where the dataset files are stored.')\n", (2871, 2949), True, 'import tensorflow as tf\n'), ((2967, 3202), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""labels_offset"""', '(0)', '"""An offset for the labels in the dataset. This flag is primarily used to evaluate the VGG and ResNet architectures which do not use a background class for the ImageNet dataset."""'], {}), "('labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to evaluate the VGG and ResNet architectures which do not use a background class for the ImageNet dataset.'\n )\n", (2994, 3202), True, 'import tensorflow as tf\n'), ((3237, 3342), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_name"""', '"""mobilenet_v1"""', '"""The name of the architecture to evaluate."""'], {}), "('model_name', 'mobilenet_v1',\n 'The name of the architecture to evaluate.')\n", (3263, 3342), True, 'import tensorflow as tf\n'), ((3360, 3517), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""preprocessing_name"""', 'None', '"""The name of the preprocessing to use. If left as `None`, then the model_name flag is used."""'], {}), "('preprocessing_name', None,\n 'The name of the preprocessing to use. 
If left as `None`, then the model_name flag is used.'\n )\n", (3386, 3517), True, 'import tensorflow as tf\n'), ((3541, 3700), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""moving_average_decay"""', 'None', '"""The decay to use for the moving average.If left as None, then moving averages are not used."""'], {}), "('moving_average_decay', None,\n 'The decay to use for the moving average.If left as None, then moving averages are not used.'\n )\n", (3566, 3700), True, 'import tensorflow as tf\n'), ((3724, 3795), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""eval_image_size"""', 'None', '"""Eval image size"""'], {}), "('eval_image_size', None, 'Eval image size')\n", (3751, 3795), True, 'import tensorflow as tf\n'), ((4197, 4234), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4217, 4234), True, 'import tensorflow as tf\n'), ((4400, 4456), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', ([], {'path': 'tfrecords_filename'}), '(path=tfrecords_filename)\n', (4431, 4456), True, 'import tensorflow as tf\n'), ((5083, 5173), 'datasets.dataset_factory.get_dataset', 'dataset_factory.get_dataset', (['FLAGS.dataset_name', 'FLAGS.dataset_split_name', 'dataset_dir'], {}), '(FLAGS.dataset_name, FLAGS.dataset_split_name,\n dataset_dir)\n', (5110, 5173), False, 'from datasets import dataset_factory\n'), ((5334, 5421), 'nets.nets_factory.get_network_fn', 'nets_factory.get_network_fn', (['model_name'], {'num_classes': 'num_classes', 'is_training': '(False)'}), '(model_name, num_classes=num_classes,\n is_training=False)\n', (5361, 5421), False, 'from nets import nets_factory\n'), ((6271, 6349), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['preprocessing_name'], {'is_training': '(False)'}), '(preprocessing_name, is_training=False)\n', (6310, 6349), False, 'from preprocessing import preprocessing_factory\n'), ((6544, 6720), 'tensorflow.train.batch', 'tf.train.batch', (['[image, label]'], {'batch_size': 'FLAGS.batch_size', 'num_threads': 'FLAGS.num_preprocessing_threads', 'allow_smaller_final_batch': '(True)', 'capacity': '(5 * FLAGS.batch_size)'}), '([image, label], batch_size=FLAGS.batch_size, num_threads=\n FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True,\n capacity=5 * FLAGS.batch_size)\n', (6558, 6720), True, 'import tensorflow as tf\n'), ((7292, 7312), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (7301, 7312), True, 'import tensorflow as tf\n'), ((7441, 7459), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (7451, 7459), True, 'import tensorflow as tf\n'), ((8667, 8717), 'tensorflow.logging.info', 'tf.logging.info', (["('Evaluating %s' % checkpoint_path)"], {}), "('Evaluating %s' % checkpoint_path)\n", (8682, 8717), True, 'import tensorflow as tf\n'), ((8740, 8768), 'datasets.plants.read_label_file', 'read_label_file', (['dataset_dir'], {}), '(dataset_dir)\n', (8755, 8768), False, 'from datasets.plants import read_label_file\n'), ((8789, 8810), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (8802, 8810), True, 'import tensorflow as tf\n'), ((8844, 8942), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['one_hot_predictions', 'logits'], {'label_smoothing': '(0.0)', 'weights': '(1.0)'}), '(one_hot_predictions, logits,\n label_smoothing=0.0, weights=1.0)\n', (8875, 8942), True, 'import 
tensorflow as tf\n'), ((9745, 9834), 'tensorflow.python.training.monitored_session.ChiefSessionCreator', 'monitored_session.ChiefSessionCreator', ([], {'checkpoint_filename_with_path': 'checkpoint_path'}), '(checkpoint_filename_with_path=\n checkpoint_path)\n', (9782, 9834), False, 'from tensorflow.python.training import monitored_session\n'), ((9934, 10001), 'tensorflow.python.training.monitored_session.MonitoredSession', 'monitored_session.MonitoredSession', ([], {'session_creator': 'session_creator'}), '(session_creator=session_creator)\n', (9968, 10001), False, 'from tensorflow.python.training import monitored_session\n'), ((10211, 10225), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10223, 10225), True, 'import matplotlib.pyplot as plt\n'), ((10537, 10559), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Accent"""'], {}), "('Accent')\n", (10549, 10559), True, 'import matplotlib.pyplot as plt\n'), ((10715, 10746), 'numpy.zeros_like', 'np.zeros_like', (['confusion_matrix'], {}), '(confusion_matrix)\n', (10728, 10746), True, 'import numpy as np\n'), ((11475, 11521), 'os.path.join', 'os.path.join', (['save_dir', '"""confusion_matrix.png"""'], {}), "(save_dir, 'confusion_matrix.png')\n", (11487, 11521), False, 'import os\n'), ((11526, 11547), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pic_path'], {}), '(pic_path)\n', (11537, 11547), True, 'import matplotlib.pyplot as plt\n'), ((11605, 11615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11613, 11615), True, 'import matplotlib.pyplot as plt\n'), ((11720, 11733), 'matplotlib.font_manager.FontManager', 'FontManager', ([], {}), '()\n', (11731, 11733), False, 'from matplotlib.font_manager import FontManager\n'), ((11796, 11875), 'subprocess.check_output', 'subprocess.check_output', (['"""fc-list :lang=zh-tw -f "%{family}\n\\""""'], {'shell': '(True)'}), '("""fc-list :lang=zh-tw -f "%{family}\n\\"""", shell=True)\n', (11819, 11875), False, 'import subprocess\n'), ((12327, 12336), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (12333, 12336), True, 'import numpy as np\n'), ((12345, 12362), 'numpy.max', 'np.max', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (12351, 12362), True, 'import numpy as np\n'), ((12667, 12695), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (12677, 12695), True, 'import matplotlib.pyplot as plt\n'), ((13049, 13077), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (13059, 13077), True, 'import matplotlib.pyplot as plt\n'), ((16155, 16174), 'collections.Counter', 'Counter', (['all_labels'], {}), '(all_labels)\n', (16162, 16174), False, 'from collections import Counter\n'), ((16361, 16395), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (16373, 16395), False, 'import os\n'), ((16404, 16434), 'h5py.File', 'h5py.File', (['info_file_path', '"""w"""'], {}), "(info_file_path, 'w')\n", (16413, 16434), False, 'import h5py\n'), ((16606, 16640), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (16618, 16640), False, 'import os\n'), ((17161, 17177), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (17172, 17177), False, 'from collections import Iterable, defaultdict\n'), ((18377, 18424), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['labels'], {'classes': 'possible_labels'}), '(labels, classes=possible_labels)\n', (18391, 18424), False, 'from sklearn.preprocessing import 
label_binarize\n'), ((18446, 18469), 'numpy.array', 'np.array', (['probabilities'], {}), '(probabilities)\n', (18454, 18469), True, 'import numpy as np\n'), ((19248, 19307), 'sklearn.metrics.auc', 'auc', (["fpr['highest_probability']", "tpr['highest_probability']"], {}), "(fpr['highest_probability'], tpr['highest_probability'])\n", (19251, 19307), False, 'from sklearn.metrics import roc_curve, auc\n'), ((19536, 19567), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (19539, 19567), False, 'from sklearn.metrics import roc_curve, auc\n'), ((19861, 19883), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (19874, 19883), True, 'import numpy as np\n'), ((20117, 20148), 'sklearn.metrics.auc', 'auc', (["fpr['macro']", "tpr['macro']"], {}), "(fpr['macro'], tpr['macro'])\n", (20120, 20148), False, 'from sklearn.metrics import roc_curve, auc\n'), ((20238, 20282), 'numpy.argmax', 'np.argmax', (['(tpr[key_series] - fpr[key_series])'], {}), '(tpr[key_series] - fpr[key_series])\n', (20247, 20282), True, 'import numpy as np\n'), ((20663, 20675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20673, 20675), True, 'import matplotlib.pyplot as plt\n'), ((20690, 20737), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (20695, 20737), False, 'from itertools import cycle\n'), ((21322, 21342), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (21330, 21342), True, 'import matplotlib.pyplot as plt\n'), ((21347, 21368), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (21355, 21368), True, 'import matplotlib.pyplot as plt\n'), ((21373, 21406), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (21383, 21406), True, 'import matplotlib.pyplot as plt\n'), ((21411, 21443), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (21421, 21443), True, 'import matplotlib.pyplot as plt\n'), ((21448, 21470), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curve"""'], {}), "('ROC curve')\n", (21457, 21470), True, 'import matplotlib.pyplot as plt\n'), ((21475, 21504), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (21485, 21504), True, 'import matplotlib.pyplot as plt\n'), ((21520, 21559), 'os.path.join', 'os.path.join', (['save_dir', '"""roc_curve.png"""'], {}), "(save_dir, 'roc_curve.png')\n", (21532, 21559), False, 'import os\n'), ((21564, 21585), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pic_path'], {}), '(pic_path)\n', (21575, 21585), True, 'import matplotlib.pyplot as plt\n'), ((21648, 21658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21656, 21658), True, 'import matplotlib.pyplot as plt\n'), ((26251, 26297), 'cv2.resize', 'cv2.resize', (['image_np', '(image_size, image_size)'], {}), '(image_np, (image_size, image_size))\n', (26261, 26297), False, 'import cv2\n'), ((26360, 26387), 'numpy.expand_dims', 'np.expand_dims', (['image_np', '(0)'], {}), '(image_np, 0)\n', (26374, 26387), True, 'import numpy as np\n'), ((26401, 26440), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (26420, 26440), True, 'import tensorflow as tf\n'), ((29154, 29199), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['coreml_model_file'], {}), 
'(coreml_model_file)\n', (29180, 29199), False, 'import coremltools\n'), ((30575, 30599), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (30589, 30599), False, 'import PIL\n'), ((30709, 30729), 'numpy.argmax', 'np.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (30718, 30729), True, 'import numpy as np\n'), ((30795, 30816), 'numpy.argsort', 'np.argsort', (['logits', '(1)'], {}), '(logits, 1)\n', (30805, 30816), True, 'import numpy as np\n'), ((31777, 31805), 'cv2.resize', 'cv2.resize', (['saliency', '(l, l)'], {}), '(saliency, (l, l))\n', (31787, 31805), False, 'import cv2\n'), ((31821, 31863), 'cv2.cvtColor', 'cv2.cvtColor', (['saliency', 'cv2.COLOR_GRAY2RGB'], {}), '(saliency, cv2.COLOR_GRAY2RGB)\n', (31833, 31863), False, 'import cv2\n'), ((32785, 32832), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""frozen_graph.pb"""'], {}), "(checkpoint_dir, 'frozen_graph.pb')\n", (32797, 32832), False, 'import os\n'), ((32927, 32955), 'datasets.plants.read_label_file', 'read_label_file', (['dataset_dir'], {}), '(dataset_dir)\n', (32942, 32955), False, 'from datasets.plants import read_label_file\n'), ((32971, 32995), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (32985, 32995), False, 'import PIL\n'), ((34882, 34910), 'datasets.plants.read_label_file', 'read_label_file', (['dataset_dir'], {}), '(dataset_dir)\n', (34897, 34910), False, 'from datasets.plants import read_label_file\n'), ((4262, 4305), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4288, 4305), True, 'import tensorflow as tf\n'), ((4536, 4554), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (4552, 4554), True, 'import tensorflow as tf\n'), ((6928, 7005), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['FLAGS.moving_average_decay', 'tf_global_step'], {}), '(FLAGS.moving_average_decay, tf_global_step)\n', (6961, 7005), True, 'import tensorflow as tf\n'), ((7795, 7884), 'tensorflow.confusion_matrix', 'tf.confusion_matrix', ([], {'labels': 'labels', 'num_classes': 'num_classes', 'predictions': 'predictions'}), '(labels=labels, num_classes=num_classes, predictions=\n predictions)\n', (7814, 7884), True, 'import tensorflow as tf\n'), ((8155, 8209), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['summary_name', 'value'], {'collections': '[]'}), '(summary_name, value, collections=[])\n', (8172, 8209), True, 'import tensorflow as tf\n'), ((8223, 8258), 'tensorflow.Print', 'tf.Print', (['op', '[value]', 'summary_name'], {}), '(op, [value], summary_name)\n', (8231, 8258), True, 'import tensorflow as tf\n'), ((8267, 8315), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.SUMMARIES', 'op'], {}), '(tf.GraphKeys.SUMMARIES, op)\n', (8287, 8315), True, 'import tensorflow as tf\n'), ((8964, 9012), 'tensorflow.gradients', 'tf.gradients', (['softmax_cross_entropy_loss', 'images'], {}), '(softmax_cross_entropy_loss, images)\n', (8976, 9012), True, 'import tensorflow as tf\n'), ((10823, 10849), 'seaborn.axes_style', 'sns.axes_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (10837, 10849), True, 'import seaborn as sns\n'), ((10859, 11012), 'seaborn.heatmap', 'sns.heatmap', (['confusion_matrix'], {'linewidths': '(0.2)', 'linecolor': '"""#eeeeee"""', 'xticklabels': '(True)', 'yticklabels': '(True)', 'mask': 'mask', 'annot': '(False)', 'ax': 'ax', 'cmap': 'cmap'}), "(confusion_matrix, linewidths=0.2, linecolor='#eeeeee',\n xticklabels=True, 
yticklabels=True, mask=mask, annot=False, ax=ax, cmap\n =cmap)\n", (10870, 11012), True, 'import seaborn as sns\n'), ((12877, 12899), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (12888, 12899), True, 'import matplotlib.pyplot as plt\n'), ((12980, 12990), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12988, 12990), True, 'import matplotlib.pyplot as plt\n'), ((17195, 17216), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (17206, 17216), False, 'import os\n'), ((18784, 18819), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_binary[:, i]', 'y_scores'], {}), '(y_binary[:, i], y_scores)\n', (18793, 18819), False, 'from sklearn.metrics import roc_curve, auc\n'), ((18841, 18860), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (18844, 18860), False, 'from sklearn.metrics import roc_curve, auc\n'), ((19935, 19969), 'numpy.interp', 'np.interp', (['all_fpr', 'fpr[i]', 'tpr[i]'], {}), '(all_fpr, fpr[i], tpr[i])\n', (19944, 19969), True, 'import numpy as np\n'), ((25820, 25848), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_file_path'], {}), '(pb_file_path)\n', (25834, 25848), True, 'import tensorflow as tf\n'), ((25875, 25888), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (25886, 25888), True, 'import tensorflow as tf\n'), ((26450, 26473), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (26460, 26473), True, 'import tensorflow as tf\n'), ((27816, 27851), 'os.path.join', 'os.path.join', (['dataset_dir', 'filename'], {}), '(dataset_dir, filename)\n', (27828, 27851), False, 'import os\n'), ((27871, 27895), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (27885, 27895), False, 'import PIL\n'), ((28054, 28071), 'numpy.argmax', 'np.argmax', (['logits'], {}), '(logits)\n', (28063, 28071), True, 'import numpy as np\n'), ((28241, 28259), 'numpy.argsort', 'np.argsort', (['logits'], {}), '(logits)\n', (28251, 28259), True, 'import numpy as np\n'), ((32142, 32160), 'numpy.copy', 'np.copy', (['(1 - alpha)'], {}), '(1 - alpha)\n', (32149, 32160), True, 'import numpy as np\n'), ((33158, 33178), 'numpy.argmax', 'np.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (33167, 33178), True, 'import numpy as np\n'), ((33695, 33707), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (33704, 33707), False, 'import yaml\n'), ((33882, 33894), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (33891, 33894), False, 'import yaml\n'), ((34119, 34131), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (34128, 34131), False, 'import yaml\n'), ((34368, 34380), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (34377, 34380), False, 'import yaml\n'), ((34625, 34637), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (34634, 34637), False, 'import yaml\n'), ((4909, 4919), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4917, 4919), True, 'import tensorflow as tf\n'), ((12472, 12490), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (12479, 12490), True, 'import numpy as np\n'), ((12770, 12802), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', 'n_columns', 'i'], {}), '(n_row, n_columns, i)\n', (12781, 12802), True, 'import matplotlib.pyplot as plt\n'), ((12815, 12830), 'matplotlib.pyplot.imshow', 'plt.imshow', (['col'], {}), '(col)\n', (12825, 12830), True, 'import matplotlib.pyplot as plt\n'), ((16663, 16693), 'h5py.File', 'h5py.File', (['info_file_path', '"""r"""'], {}), "(info_file_path, 'r')\n", (16672, 16693), False, 'import h5py\n'), 
((20957, 21014), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr[i]', 'tpr[i]'], {'color': 'color', 'lw': 'lw', 'label': 'label'}), '(fpr[i], tpr[i], color=color, lw=lw, label=label)\n', (20965, 21014), True, 'import matplotlib.pyplot as plt\n'), ((23399, 23414), 'numpy.asarray', 'np.asarray', (['im2'], {}), '(im2)\n', (23409, 23414), True, 'import numpy as np\n'), ((24113, 24128), 'numpy.asarray', 'np.asarray', (['im2'], {}), '(im2)\n', (24123, 24128), True, 'import numpy as np\n'), ((31684, 31704), 'numpy.asarray', 'np.asarray', (['image_np'], {}), '(image_np)\n', (31694, 31704), True, 'import numpy as np\n'), ((32491, 32528), 'cv2.add', 'cv2.add', (['(paint * (1 - alpha))', 'roi_img'], {}), '(paint * (1 - alpha), roi_img)\n', (32498, 32528), False, 'import cv2\n'), ((13802, 13824), 'math.ceil', 'math.ceil', (['num_batches'], {}), '(num_batches)\n', (13811, 13824), False, 'import math\n'), ((18562, 18592), 'numpy.max', 'np.max', (['y_score_matrix'], {'axis': '(1)'}), '(y_score_matrix, axis=1)\n', (18568, 18592), True, 'import numpy as np\n'), ((14236, 14257), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14255, 14257), False, 'import traceback\n'), ((32263, 32284), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (32271, 32284), True, 'import numpy as np\n'), ((32339, 32362), 'numpy.array', 'np.array', (['[255, 200, 0]'], {}), '([255, 200, 0])\n', (32347, 32362), True, 'import numpy as np\n'), ((14473, 14489), 'numpy.matrix', 'np.matrix', (['value'], {}), '(value)\n', (14482, 14489), True, 'import numpy as np\n'), ((14557, 14573), 'numpy.matrix', 'np.matrix', (['value'], {}), '(value)\n', (14566, 14573), True, 'import numpy as np\n')]
|
'''
File: \resource.py
Project: NumberRecongization
Created Date: Monday March 26th 2018
Author: Huisama
-----
Last Modified: Saturday March 31st 2018 11:08:21 pm
Modified By: Huisama
-----
Copyright (c) 2018 Hui
'''
import os
import scipy.misc as scm
import random
import numpy as np
import PIL
# STD_WIDTH = 667
# STD_HEIGHT = 83
STD_WIDTH = 252
STD_HEIGHT = 40
import matplotlib.pyplot as plt
'''
This class stands for a dataset and provides data-processing operations
'''
class DataSet(object):
def __init__(self, data_dir, batch_size):
self.data_dir = data_dir
self.batch_size = batch_size
self.train_set_ratio = 0.8
self.validate_set_ratio = 0.1
'''
Get mean width and height of dataset
'''
def get_data_mean_size(self):
full_width, full_height = 0, 0
count = 0
def dummy(self, dir, file):
nonlocal full_width, full_height, count
filename = os.path.splitext(file)
if filename[1] == '.png':
fullfile = os.path.join(self.data_dir, dir, file)
width, height = self.get_size(fullfile)
full_width += width
full_height += height
print("%s, %s" % (width, height))
count += 1
self.lookup_dataset_dir(dummy)
return full_width / count, full_height / count
'''
Get width and height of a single image
'''
def get_size(self, image_file_path):
img = scm.imread(image_file_path)
return img.shape[1], img.shape[0]
'''
Load dataset
'''
def load_dataset(self):
self.neg_data = []
self.pos_data = []
self.poscount = 0
self.negcount = 0
def dummy(self, dir, file):
if file == 'dataset.txt':
# open and read in
with open(os.path.join(self.data_dir, dir, file)) as file:
for line in file:
newline = line.strip()
splittext = newline.split('\t')
if int(splittext[2]) == 1:
self.pos_data.append((
os.path.join(self.data_dir, dir, splittext[0]),
os.path.join(self.data_dir, dir, splittext[1]),
int(splittext[2])))
self.poscount += 1
else:
self.neg_data.append((
os.path.join(self.data_dir, dir, splittext[0]),
os.path.join(self.data_dir, dir, splittext[1]),
int(splittext[2])))
self.negcount += 1
self.lookup_dataset_dir(dummy)
# print("negcount: %d, poscount: %d" % (self.negcount, self.poscount))
return True
'''
    Check that every image has exactly 3 channels
'''
def check_image_channels(self):
def dummy(self, dir, file):
filename = os.path.splitext(file)
if filename[1] == '.png':
fullfile = os.path.join(self.data_dir, dir, file)
img = scm.imread(fullfile)
if img.shape[2] != 3:
print("Wrong image: %d", fullfile)
self.lookup_dataset_dir(dummy)
'''
Generate dataset after loading dataset
'''
def generate_dataset(self):
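        # Shuffle both classes, split each into train/validation/test by the configured ratios,
        # then pad the positive validation/test sets (sampling with replacement) so they match
        # the size of the negative sets.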
random.shuffle(self.neg_data)
random.shuffle(self.pos_data)
# total = len(self.data)
pos_total = len(self.pos_data)
pos_train_size = int(pos_total * self.train_set_ratio)
pos_validate_size = int(pos_total * self.validate_set_ratio)
# pos_test_size = pos_total - pos_train_size - pos_validate_size
neg_total = len(self.neg_data)
neg_train_size = int(neg_total * self.train_set_ratio)
neg_validate_size = int(neg_total * self.validate_set_ratio)
# neg_test_size = neg_total - neg_train_size - neg_validate_size
self.batch_index = 0
self.pos_train_set = self.pos_data[0 : pos_train_size]
pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size]
pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total]
self.neg_train_set = self.neg_data[0 : neg_train_size]
neg_validation_set = self.neg_data[neg_train_size : neg_train_size + neg_validate_size]
neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total]
dec = len(neg_validation_set) - len(pos_validation_set)
for _ in range(dec):
pos_validation_set.append(random.choice(self.pos_data))
dec = len(neg_test_set) - len(pos_test_set)
for _ in range(dec):
pos_test_set.append(random.choice(self.pos_data))
self.validation_set = []
self.validation_set.extend(pos_validation_set)
self.validation_set.extend(neg_validation_set)
self.test_set = []
self.test_set.extend(pos_test_set)
self.test_set.extend(neg_test_set)
'''
    Walk through the files in the dataset directory
'''
def lookup_dataset_dir(self, callback):
for _, dirs, _ in os.walk(self.data_dir):
for dir in dirs:
for _, _, files in os.walk(os.path.join(self.data_dir, dir)):
for file in files:
callback(self, dir, file)
'''
    Get image data
'''
def get_image_data(self, tp):
image1, image2 = scm.imread(tp[0]), scm.imread(tp[1])
newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH)))
newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH)))
# img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis]
img_comb = np.dstack((newimg1, newimg2))
return img_comb / 255.0
'''
    Get a batch from the dataset
'''
def next_batch(self, batch_size):
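        # Draw a class-balanced batch: half of the pairs are sampled (with replacement) from the
        # negative training set, the other half from the positive training set.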
random_neg = batch_size // 2
random_pos = batch_size - random_neg
org_pos_data = []
org_neg_data = []
for _ in range(random_pos):
org_pos_data.append(random.choice(self.pos_train_set))
for _ in range(random_neg):
org_neg_data.append(random.choice(self.neg_train_set))
pos_data = list(map(self.get_image_data, org_pos_data))
pos_labels = list(map(lambda e: e[2], org_pos_data))
neg_data = list(map(self.get_image_data, org_neg_data))
neg_labels = list(map(lambda e: e[2], org_neg_data))
pos_data.extend(neg_data)
pos_labels.extend(neg_labels)
return np.array(pos_data), np.array(pos_labels)
'''
Get validation dataset
'''
def get_validation_set(self):
data = np.array(list(map(self.get_image_data, self.validation_set)))
labels = np.array(list(map(lambda e: e[2], self.validation_set)))
return data, labels
'''
Get test dataset
'''
def get_test_set(self):
data = np.array(list(map(self.get_image_data, self.test_set)))
labels = np.array(list(map(lambda e: e[2], self.test_set)))
return data, labels
# obj = DataSet('./Pic', 8)
# obj.check_image_channels()
# obj.load_dataset()
# obj.generate_dataset()
# data, labels = obj.next_batch(8)
# while done != True:
# print(data[0][0].dtype)
# data, labels, done = obj.next_batch()
|
[
"numpy.dstack",
"random.shuffle",
"os.walk",
"random.choice",
"numpy.array",
"os.path.splitext",
"scipy.misc.imresize",
"os.path.join",
"scipy.misc.imread"
] |
[((1508, 1535), 'scipy.misc.imread', 'scm.imread', (['image_file_path'], {}), '(image_file_path)\n', (1518, 1535), True, 'import scipy.misc as scm\n'), ((3505, 3534), 'random.shuffle', 'random.shuffle', (['self.neg_data'], {}), '(self.neg_data)\n', (3519, 3534), False, 'import random\n'), ((3543, 3572), 'random.shuffle', 'random.shuffle', (['self.pos_data'], {}), '(self.pos_data)\n', (3557, 3572), False, 'import random\n'), ((5316, 5338), 'os.walk', 'os.walk', (['self.data_dir'], {}), '(self.data_dir)\n', (5323, 5338), False, 'import os\n'), ((5919, 5948), 'numpy.dstack', 'np.dstack', (['(newimg1, newimg2)'], {}), '((newimg1, newimg2))\n', (5928, 5948), True, 'import numpy as np\n'), ((960, 982), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (976, 982), False, 'import os\n'), ((3097, 3119), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (3113, 3119), False, 'import os\n'), ((5635, 5652), 'scipy.misc.imread', 'scm.imread', (['tp[0]'], {}), '(tp[0])\n', (5645, 5652), True, 'import scipy.misc as scm\n'), ((5654, 5671), 'scipy.misc.imread', 'scm.imread', (['tp[1]'], {}), '(tp[1])\n', (5664, 5671), True, 'import scipy.misc as scm\n'), ((5700, 5745), 'scipy.misc.imresize', 'scm.imresize', (['image1', '(STD_HEIGHT, STD_WIDTH)'], {}), '(image1, (STD_HEIGHT, STD_WIDTH))\n', (5712, 5745), True, 'import scipy.misc as scm\n'), ((5774, 5819), 'scipy.misc.imresize', 'scm.imresize', (['image2', '(STD_HEIGHT, STD_WIDTH)'], {}), '(image2, (STD_HEIGHT, STD_WIDTH))\n', (5786, 5819), True, 'import scipy.misc as scm\n'), ((6750, 6768), 'numpy.array', 'np.array', (['pos_data'], {}), '(pos_data)\n', (6758, 6768), True, 'import numpy as np\n'), ((6770, 6790), 'numpy.array', 'np.array', (['pos_labels'], {}), '(pos_labels)\n', (6778, 6790), True, 'import numpy as np\n'), ((1048, 1086), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'file'], {}), '(self.data_dir, dir, file)\n', (1060, 1086), False, 'import os\n'), ((3185, 3223), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'file'], {}), '(self.data_dir, dir, file)\n', (3197, 3223), False, 'import os\n'), ((3246, 3266), 'scipy.misc.imread', 'scm.imread', (['fullfile'], {}), '(fullfile)\n', (3256, 3266), True, 'import scipy.misc as scm\n'), ((4760, 4788), 'random.choice', 'random.choice', (['self.pos_data'], {}), '(self.pos_data)\n', (4773, 4788), False, 'import random\n'), ((4904, 4932), 'random.choice', 'random.choice', (['self.pos_data'], {}), '(self.pos_data)\n', (4917, 4932), False, 'import random\n'), ((6271, 6304), 'random.choice', 'random.choice', (['self.pos_train_set'], {}), '(self.pos_train_set)\n', (6284, 6304), False, 'import random\n'), ((6375, 6408), 'random.choice', 'random.choice', (['self.neg_train_set'], {}), '(self.neg_train_set)\n', (6388, 6408), False, 'import random\n'), ((5412, 5444), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir'], {}), '(self.data_dir, dir)\n', (5424, 5444), False, 'import os\n'), ((1887, 1925), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'file'], {}), '(self.data_dir, dir, file)\n', (1899, 1925), False, 'import os\n'), ((2240, 2286), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'splittext[0]'], {}), '(self.data_dir, dir, splittext[0])\n', (2252, 2286), False, 'import os\n'), ((2320, 2366), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'splittext[1]'], {}), '(self.data_dir, dir, splittext[1])\n', (2332, 2366), False, 'import os\n'), ((2580, 2626), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 
'splittext[0]'], {}), '(self.data_dir, dir, splittext[0])\n', (2592, 2626), False, 'import os\n'), ((2660, 2706), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'splittext[1]'], {}), '(self.data_dir, dir, splittext[1])\n', (2672, 2706), False, 'import os\n')]
|
import tensorflow as tf
import numpy as np
# training set. Contains a row of size 5 per training example. Each row is a sentence, with words replaced
# by their unique indices. The dataset below contains 6 unique words numbered 0-5. Ideally the word vectors for
# the words indexed 4 and 5 should be the same.
X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]])
# output dummy for testing purpose
y_train = np.array([0,1])
# Create the embeddings
with tf.name_scope("embeddings"):
    # Initialize the embedding matrix with random weights drawn from a uniform distribution.
embedding = tf.Variable(tf.random_uniform((6,
3), -1, 1))
# create the embedding layer
embed = tf.nn.embedding_lookup(embedding, X_train)
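    # embed has shape (2, 5, 3): 2 sentences x 5 words x 3-dimensional word vectors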
# So that we can apply 2-D convolution operations on top of the expanded single-channel embedded vectors
embedded_chars_expanded = tf.expand_dims(embed, -1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer());
result,result_expanded = sess.run([embed,embedded_chars_expanded]);
print(result_expanded.shape)
print(result)
print(result_expanded)
# OUTPUT
# result
# [[[ 0.89598155 0.4275496 0.00858593]
# [ 0.21602225 -0.44228792 -0.20533657]
# [ 0.9624436 -0.99176955 0.15964746]
# [-0.29004955 0.470721 0.00804782]
# [ 0.7497003 0.6044979 -0.5612638 ]]
#
# [[ 0.89598155 0.4275496 0.00858593]
# [ 0.21602225 -0.44228792 -0.20533657]
# [-0.48809385 -0.55618596 -0.73995876]
# [-0.29004955 0.470721 0.00804782]
# [ 0.7497003 0.6044979 -0.5612638 ]]]
# result_expanded - has a dimension of (2,5,3,1)
# [[[[-0.45975637]
# [-0.5756638 ]
# [ 0.7002065 ]]
#
# [[ 0.2708087 ]
# [ 0.7985747 ]
# [ 0.57897186]]
#
# [[ 0.6642673 ]
# [ 0.6548476 ]
# [ 0.00760126]]
#
# [[-0.7074845 ]
# [ 0.5100081 ]
# [ 0.7232883 ]]
#
# [[ 0.19342017]
# [-0.46509933]
# [ 0.8361807 ]]]
#
#
# [[[-0.45975637]
# [-0.5756638 ]
# [ 0.7002065 ]]
#
# [[ 0.2708087 ]
# [ 0.7985747 ]
# [ 0.57897186]]
#
# [[-0.90803576]
# [ 0.75451994]
# [ 0.8864901 ]]
#
# [[-0.7074845 ]
# [ 0.5100081 ]
# [ 0.7232883 ]]
#
# [[ 0.19342017]
# [-0.46509933]
# [ 0.8361807 ]]]]
|
[
"tensorflow.random_uniform",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.array",
"tensorflow.name_scope",
"tensorflow.expand_dims"
] |
[((324, 368), 'numpy.array', 'np.array', (['[[0, 1, 4, 2, 3], [0, 1, 5, 2, 3]]'], {}), '([[0, 1, 4, 2, 3], [0, 1, 5, 2, 3]])\n', (332, 368), True, 'import numpy as np\n'), ((406, 422), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (414, 422), True, 'import numpy as np\n'), ((453, 480), 'tensorflow.name_scope', 'tf.name_scope', (['"""embeddings"""'], {}), "('embeddings')\n", (466, 480), True, 'import tensorflow as tf\n'), ((695, 737), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'X_train'], {}), '(embedding, X_train)\n', (717, 737), True, 'import tensorflow as tf\n'), ((876, 901), 'tensorflow.expand_dims', 'tf.expand_dims', (['embed', '(-1)'], {}), '(embed, -1)\n', (890, 901), True, 'import tensorflow as tf\n'), ((908, 920), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (918, 920), True, 'import tensorflow as tf\n'), ((586, 618), 'tensorflow.random_uniform', 'tf.random_uniform', (['(6, 3)', '(-1)', '(1)'], {}), '((6, 3), -1, 1)\n', (603, 618), True, 'import tensorflow as tf\n'), ((943, 976), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (974, 976), True, 'import tensorflow as tf\n')]
|
from astropy.io import ascii, fits
from astropy.table import QTable, Table
import arviz as az
from astropy.coordinates import SkyCoord
from astropy import units as u
import os
import pymoc
from astropy import wcs
from astropy.table import vstack, hstack
import numpy as np
import xidplus
# # Applying XID+CIGALE to Extreme Starbursts
# In this notebook, we read in the data files and prepare them for fitting with XID+CIGALE, the SED prior model extension to XID+. Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of $> 10^{3}\mathrm{M_{\odot}yr^{-1}}$
# In[2]:
def process_prior(c,new_Table=None,
path_to_data=['../../../data/'],
field=['Lockman-SWIRE'],
path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'],
redshift_file=["/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits"],
redshift_prior=[0.1,2.0],
radius=6.0,
alt_model=False):
# Import required modules
# In[3]:
# In[4]:
# Set image and catalogue filenames
# In[5]:
#Folder containing maps
pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map
pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map
plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map
#output folder
output_folder='./'
# Load in images, noise maps, header info and WCS information
# In[6]:
#-----250-------------
hdulist = fits.open(pswfits)
im250phdu=hdulist[0].header
im250hdu=hdulist[1].header
im250=hdulist[1].data*1.0E3 #convert to mJy
nim250=hdulist[3].data*1.0E3 #convert to mJy
w_250 = wcs.WCS(hdulist[1].header)
pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
#-----350-------------
hdulist = fits.open(pmwfits)
im350phdu=hdulist[0].header
im350hdu=hdulist[1].header
im350=hdulist[1].data*1.0E3 #convert to mJy
nim350=hdulist[3].data*1.0E3 #convert to mJy
w_350 = wcs.WCS(hdulist[1].header)
pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
#-----500-------------
hdulist = fits.open(plwfits)
im500phdu=hdulist[0].header
im500hdu=hdulist[1].header
im500=hdulist[1].data*1.0E3 #convert to mJy
nim500=hdulist[3].data*1.0E3 #convert to mJy
w_500 = wcs.WCS(hdulist[1].header)
pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
# XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues so they cover the same area. It can also take in MOCs as selection functions to carry out additional cuts. Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position we are interested in. We will use a HEALPix order of 15 (the resolution: higher order means higher resolution)
moc=pymoc.util.catalog.catalog_to_moc(c,100,15)
# Load in catalogue you want to fit (and make any cuts). Here we use HELP's VO database and directly call it using PyVO
# In[10]:
import pyvo as vo
service = vo.dal.TAPService("https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap")
# In[11]:
resultset = service.search("SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',"+str(c.ra.deg[0])+", "+str(c.dec.deg[0])+", 0.028 ))")
# In[12]:
masterlist=resultset.table
def construct_prior(Table=None):
from astropy.coordinates import SkyCoord
        #first use the standard cut (i.e. not a star and detected in at least 3 opt/nir bands)
prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)]
#make skycoord from masterlist
catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec'])
#make skycoord from input table
c = SkyCoord(ra=Table['ra'], dec=Table['dec'])
#search around all of the new sources
idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec)
#for every new sources
for src in range(0,len(Table)):
#limit to matches around interested sources
ind = idxc == src
#if there are matches
if ind.sum() >0:
                #choose the closest and check if it is already in the prior list
in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id']
                #if it is not in the prior list
if in_prior.sum() <1:
print(in_prior.sum())
#add to appended sources
prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]])
return prior_list
# In[64]:
import astropy.units as u
#create table of candidate source
t = QTable([c.ra, c.dec], names=('ra', 'dec'))
#add candidate source to new sources table, create prior list
if new_Table is not None:
prior_list=construct_prior(vstack([t,new_Table]))
else:
prior_list = construct_prior(t)
if alt_model==True:
sep = 18
separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec
remove_ind = (separation > np.min(separation)) & (separation < sep)
prior_list.remove_rows(remove_ind)
    # ## Get Redshift and Uncertainty
    #
    # <NAME> defines a median and a hierarchical Bayes combination redshift. We need the uncertainty, so let's match via `help_id`
# In[26]:
photoz=Table.read(redshift_file[0])
# In[27]:
#help_id=np.empty((len(photoz)),dtype=np.dtype('U27'))
for i in range(0,len(photoz)):
photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8')
#photoz['help_id']=help_id
# In[28]:
from astropy.table import Column, MaskedColumn
prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list))
prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc'))
# In[29]:
photoz
# In[30]:
ii=0
for i in range(0,len(prior_list)):
ind=photoz['help_id'] == prior_list['help_id'][i]
try:
if photoz['z1_median'][ind]>0.0:
prior_list['redshift'][i]=photoz['z1_median'][ind]
prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])]))
#prior_list['redshift_unc'].mask[i]=False
#prior_list['redshift'].mask[i]=False
except ValueError:
None
# In[33]:
dist_matrix=np.zeros((len(prior_list),len(prior_list)))
from astropy.coordinates import SkyCoord
from astropy import units as u
for i in range(0,len(prior_list)):
for j in range(0,len(prior_list)):
if i>j:
coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs')
coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg)
dist_matrix[i,j] = coord1.separation(coord2).value
# In[35]:
ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0)
xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list)))
yy[ind]
# In[36]:
prior_list[yy[ind]]
# In[37]:
prior_list['redshift'].mask[yy[ind]]=True
# In[38]:
prior_list=prior_list[prior_list['redshift'].mask == False]
# In[39]:
prior_list
    # XID+ is built around two Python classes: a prior and a posterior class. There should be a prior class for each map being fitted. It is initialised with a map, noise map, primary header and map header, and can be set with a MOC. It also requires an input prior catalogue and point spread function.
#
# In[40]:
#---prior250--------
    prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertainty map, wcs info and primary header
prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)
#---prior350--------
prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)
prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior350.prior_bkg(-5.0,5)
#---prior500--------
prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)
prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior500.prior_bkg(-5.0,5)
    # Set PSF. For SPIRE, the PSF can be assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\mathrm{\mu m}$ respectively. Let's use the astropy module to construct a Gaussian PSF and assign it to the three XID+ prior classes.
# In[41]:
#pixsize array (size of pixels in arcseconds)
pixsize=np.array([pixsize250,pixsize350,pixsize500])
#point response function for the three bands
prfsize=np.array([18.15,25.15,36.3])
#use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355)
from astropy.convolution import Gaussian2DKernel
##---------fit using Gaussian beam-----------------------
prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)
prf250.normalize(mode='peak')
prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)
prf350.normalize(mode='peak')
prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)
prf500.normalize(mode='peak')
pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map
pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map
pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map
prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for grid (in pixel scale)
prior350.set_prf(prf350.array,pind350,pind350)
prior500.set_prf(prf500.array,pind500,pind500)
print('fitting '+ str(prior250.nsrc)+' sources \n')
print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels')
print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg))
    # Before fitting, the prior classes need to take the PSF and calculate how much each source contributes to each pixel. This process provides what we call a pointing matrix. Let's calculate the pointing matrix for each prior class
# In[43]:
prior250.get_pointing_matrix()
prior350.get_pointing_matrix()
prior500.get_pointing_matrix()
# In[44]:
return [prior250,prior350,prior500],prior_list
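# Minimal usage sketch (hypothetical coordinates; assumes the default data paths configured above exist):
#   from astropy.coordinates import SkyCoord
#   from astropy import units as u
#   c = SkyCoord(ra=[161.0] * u.deg, dec=[58.0] * u.deg)
#   priors, prior_list = process_prior(c, radius=6.0)
#   # priors holds the 250/350/500 micron xidplus prior objects ready for fitting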
def getSEDs(data, src, nsamp=30,category='posterior'):
import subprocess
if category=='posterior':
d=data.posterior
else:
d=data.prior
subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False)
agn = d.agn.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
z = d.redshift.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
sfr = d.sfr.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
fin = open("/Volumes/pdh_storage/cigale/pcigale_orig.ini")
fout = open("/Volumes/pdh_storage/cigale/pcigale.ini", "wt")
for line in fin:
if 'redshift =' in line:
fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \n')
elif 'fracAGN =' in line:
fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \n')
else:
fout.write(line)
fin.close()
fout.close()
p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/')
p.wait()
SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits')
# set more appropriate units for dust
from astropy.constants import L_sun, M_sun
SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value
SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value
wavelengths = []
fluxes = []
for i in range(0, nsamp):
sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id']))
wavelengths.append(sed_plot['wavelength'] / 1E3)
fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu'])
from astropy.table import vstack, hstack
return hstack(wavelengths), hstack(fluxes)
|
[
"astropy.convolution.Gaussian2DKernel",
"numpy.abs",
"numpy.argmin",
"numpy.arange",
"astropy.table.hstack",
"pymoc.util.catalog.catalog_to_moc",
"numpy.random.choice",
"subprocess.Popen",
"astropy.table.QTable",
"numpy.min",
"astropy.io.fits.open",
"pyvo.dal.TAPService",
"astropy.table.Table.read",
"numpy.tril",
"astropy.wcs.WCS",
"astropy.table.vstack",
"numpy.array",
"xidplus.prior",
"astropy.coordinates.SkyCoord"
] |
[((1773, 1791), 'astropy.io.fits.open', 'fits.open', (['pswfits'], {}), '(pswfits)\n', (1782, 1791), False, 'from astropy.io import ascii, fits\n'), ((1965, 1991), 'astropy.wcs.WCS', 'wcs.WCS', (['hdulist[1].header'], {}), '(hdulist[1].header)\n', (1972, 1991), False, 'from astropy import wcs\n'), ((2007, 2042), 'numpy.abs', 'np.abs', (['(3600.0 * w_250.wcs.cdelt[0])'], {}), '(3600.0 * w_250.wcs.cdelt[0])\n', (2013, 2042), True, 'import numpy as np\n'), ((2130, 2148), 'astropy.io.fits.open', 'fits.open', (['pmwfits'], {}), '(pmwfits)\n', (2139, 2148), False, 'from astropy.io import ascii, fits\n'), ((2322, 2348), 'astropy.wcs.WCS', 'wcs.WCS', (['hdulist[1].header'], {}), '(hdulist[1].header)\n', (2329, 2348), False, 'from astropy import wcs\n'), ((2364, 2399), 'numpy.abs', 'np.abs', (['(3600.0 * w_350.wcs.cdelt[0])'], {}), '(3600.0 * w_350.wcs.cdelt[0])\n', (2370, 2399), True, 'import numpy as np\n'), ((2487, 2505), 'astropy.io.fits.open', 'fits.open', (['plwfits'], {}), '(plwfits)\n', (2496, 2505), False, 'from astropy.io import ascii, fits\n'), ((2678, 2704), 'astropy.wcs.WCS', 'wcs.WCS', (['hdulist[1].header'], {}), '(hdulist[1].header)\n', (2685, 2704), False, 'from astropy import wcs\n'), ((2720, 2755), 'numpy.abs', 'np.abs', (['(3600.0 * w_500.wcs.cdelt[0])'], {}), '(3600.0 * w_500.wcs.cdelt[0])\n', (2726, 2755), True, 'import numpy as np\n'), ((3243, 3288), 'pymoc.util.catalog.catalog_to_moc', 'pymoc.util.catalog.catalog_to_moc', (['c', '(100)', '(15)'], {}), '(c, 100, 15)\n', (3276, 3288), False, 'import pymoc\n'), ((3465, 3552), 'pyvo.dal.TAPService', 'vo.dal.TAPService', (['"""https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap"""'], {}), "(\n 'https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap')\n", (3482, 3552), True, 'import pyvo as vo\n'), ((5204, 5246), 'astropy.table.QTable', 'QTable', (['[c.ra, c.dec]'], {'names': "('ra', 'dec')"}), "([c.ra, c.dec], names=('ra', 'dec'))\n", (5210, 5246), False, 'from astropy.table import QTable, Table\n'), ((5898, 5926), 'astropy.table.Table.read', 'Table.read', (['redshift_file[0]'], {}), '(redshift_file[0])\n', (5908, 5926), False, 'from astropy.table import QTable, Table\n'), ((8375, 8433), 'xidplus.prior', 'xidplus.prior', (['im250', 'nim250', 'im250phdu', 'im250hdu'], {'moc': 'moc'}), '(im250, nim250, im250phdu, im250hdu, moc=moc)\n', (8388, 8433), False, 'import xidplus\n'), ((8724, 8782), 'xidplus.prior', 'xidplus.prior', (['im350', 'nim350', 'im350phdu', 'im350hdu'], {'moc': 'moc'}), '(im350, nim350, im350phdu, im350hdu, moc=moc)\n', (8737, 8782), False, 'import xidplus\n'), ((8942, 9000), 'xidplus.prior', 'xidplus.prior', (['im500', 'nim500', 'im500phdu', 'im500hdu'], {'moc': 'moc'}), '(im500, nim500, im500phdu, im500hdu, moc=moc)\n', (8955, 9000), False, 'import xidplus\n'), ((9458, 9504), 'numpy.array', 'np.array', (['[pixsize250, pixsize350, pixsize500]'], {}), '([pixsize250, pixsize350, pixsize500])\n', (9466, 9504), True, 'import numpy as np\n'), ((9564, 9594), 'numpy.array', 'np.array', (['[18.15, 25.15, 36.3]'], {}), '([18.15, 25.15, 36.3])\n', (9572, 9594), True, 'import numpy as np\n'), ((9813, 9873), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(prfsize[0] / 2.355)'], {'x_size': '(101)', 'y_size': '(101)'}), '(prfsize[0] / 2.355, x_size=101, y_size=101)\n', (9829, 9873), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((9915, 9975), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(prfsize[1] / 2.355)'], {'x_size': '(101)', 'y_size': '(101)'}), 
'(prfsize[1] / 2.355, x_size=101, y_size=101)\n', (9931, 9975), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((10017, 10077), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(prfsize[2] / 2.355)'], {'x_size': '(101)', 'y_size': '(101)'}), '(prfsize[2] / 2.355, x_size=101, y_size=101)\n', (10033, 10077), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((11452, 11523), 'numpy.random.choice', 'np.random.choice', (['(d.chain.size * d.draw.size)'], {'size': 'nsamp', 'replace': '(False)'}), '(d.chain.size * d.draw.size, size=nsamp, replace=False)\n', (11468, 11523), True, 'import numpy as np\n'), ((12428, 12500), 'subprocess.Popen', 'subprocess.Popen', (["['pcigale', 'run']"], {'cwd': '"""/Volumes/pdh_storage/cigale/"""'}), "(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/')\n", (12444, 12500), False, 'import subprocess\n'), ((12526, 12592), 'astropy.table.Table.read', 'Table.read', (['"""/Volumes/pdh_storage/cigale/out//models-block-0.fits"""'], {}), "('/Volumes/pdh_storage/cigale/out//models-block-0.fits')\n", (12536, 12592), False, 'from astropy.table import QTable, Table\n'), ((4130, 4182), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "masterlist['ra']", 'dec': "masterlist['dec']"}), "(ra=masterlist['ra'], dec=masterlist['dec'])\n", (4138, 4182), False, 'from astropy.coordinates import SkyCoord\n'), ((4234, 4276), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "Table['ra']", 'dec': "Table['dec']"}), "(ra=Table['ra'], dec=Table['dec'])\n", (4242, 4276), False, 'from astropy.coordinates import SkyCoord\n'), ((13209, 13228), 'astropy.table.hstack', 'hstack', (['wavelengths'], {}), '(wavelengths)\n', (13215, 13228), False, 'from astropy.table import vstack, hstack\n'), ((13230, 13244), 'astropy.table.hstack', 'hstack', (['fluxes'], {}), '(fluxes)\n', (13236, 13244), False, 'from astropy.table import vstack, hstack\n'), ((5378, 5400), 'astropy.table.vstack', 'vstack', (['[t, new_Table]'], {}), '([t, new_Table])\n', (5384, 5400), False, 'from astropy.table import vstack, hstack\n'), ((7644, 7664), 'numpy.tril', 'np.tril', (['dist_matrix'], {}), '(dist_matrix)\n', (7651, 7664), True, 'import numpy as np\n'), ((7680, 7700), 'numpy.tril', 'np.tril', (['dist_matrix'], {}), '(dist_matrix)\n', (7687, 7700), True, 'import numpy as np\n'), ((10121, 10141), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(1)'], {}), '(0, 101, 1)\n', (10130, 10141), True, 'import numpy as np\n'), ((10213, 10233), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(1)'], {}), '(0, 101, 1)\n', (10222, 10233), True, 'import numpy as np\n'), ((10305, 10325), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(1)'], {}), '(0, 101, 1)\n', (10314, 10325), True, 'import numpy as np\n'), ((5527, 5572), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["prior_list['ra']", "prior_list['dec']"], {}), "(prior_list['ra'], prior_list['dec'])\n", (5535, 5572), False, 'from astropy.coordinates import SkyCoord\n'), ((5616, 5634), 'numpy.min', 'np.min', (['separation'], {}), '(separation)\n', (5622, 5634), True, 'import numpy as np\n'), ((7374, 7466), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "(prior_list['ra'][i] * u.deg)", 'dec': "(prior_list['dec'][i] * u.deg)", 'frame': '"""icrs"""'}), "(ra=prior_list['ra'][i] * u.deg, dec=prior_list['dec'][i] * u.deg,\n frame='icrs')\n", (7382, 7466), False, 'from astropy.coordinates import SkyCoord\n'), ((7481, 7555), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "(prior_list['ra'][j] * u.deg)", 'dec': 
"(prior_list['dec'][j] * u.deg)"}), "(ra=prior_list['ra'][j] * u.deg, dec=prior_list['dec'][j] * u.deg)\n", (7489, 7555), False, 'from astropy.coordinates import SkyCoord\n'), ((4781, 4800), 'numpy.argmin', 'np.argmin', (['d2d[ind]'], {}), '(d2d[ind])\n', (4790, 4800), True, 'import numpy as np\n'), ((6827, 6883), 'numpy.abs', 'np.abs', (["(photoz['z1_median'][ind] - photoz['z1_min'][ind])"], {}), "(photoz['z1_median'][ind] - photoz['z1_min'][ind])\n", (6833, 6883), True, 'import numpy as np\n'), ((6882, 6938), 'numpy.abs', 'np.abs', (["(photoz['z1_max'][ind] - photoz['z1_median'][ind])"], {}), "(photoz['z1_max'][ind] - photoz['z1_median'][ind])\n", (6888, 6938), True, 'import numpy as np\n'), ((5059, 5078), 'numpy.argmin', 'np.argmin', (['d2d[ind]'], {}), '(d2d[ind])\n', (5068, 5078), True, 'import numpy as np\n')]
|
"""
Project: RadarBook
File: optimum_binary_example.py
Created by: <NAME>
On: 10/11/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow
from numpy import arange, ceil
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.qt_compat import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class OptimumBinary(QMainWindow, Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
# Connect to the input boxes, when the user presses enter the form updates
self.number_of_pulses.returnPressed.connect(self._update_canvas)
self.target_type.currentIndexChanged.connect(self._update_canvas)
# Set up a figure for the plotting canvas
fig = Figure()
self.axes1 = fig.add_subplot(111)
self.my_canvas = FigureCanvas(fig)
# Add the canvas to the vertical layout
self.verticalLayout.addWidget(self.my_canvas)
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))
# Update the canvas for the first display
self._update_canvas()
def _update_canvas(self):
"""
Update the figure when the user changes an input value.
:return:
"""
# Get the parameters from the form
number_of_pulses = int(self.number_of_pulses.text())
# Get the selected target type from the form
target_type = self.target_type.currentText()
if target_type == 'Swerling 0':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 1':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 2':
alpha = 0.91
beta = -0.38
elif target_type == 'Swerling 3':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 4':
alpha = 0.873
beta = -0.27
# Calculate the optimum choice for M
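        # using the empirical fit M_optimum = ceil(10 ** beta * N ** alpha) for each number of pulses N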
np = arange(1, number_of_pulses+1)
m_optimum = [ceil(10.0 ** beta * n ** alpha) for n in np]
# Clear the axes for the updated plot
self.axes1.clear()
# Display the results
self.axes1.plot(np, m_optimum, '')
# Set the plot title and labels
self.axes1.set_title('Optimum M for Binary Integration', size=14)
self.axes1.set_xlabel('Number of Pulses', size=12)
self.axes1.set_ylabel('M', size=12)
# Set the tick label size
self.axes1.tick_params(labelsize=12)
# Turn on the grid
self.axes1.grid(linestyle=':', linewidth=0.5)
# Update the canvas
self.my_canvas.draw()
def start():
form = OptimumBinary() # Set the form
form.show() # Show the form
def main():
app = QApplication(sys.argv) # A new instance of QApplication
form = OptimumBinary() # Set the form
form.show() # Show the form
app.exec_() # Execute the app
if __name__ == '__main__':
main()
|
[
"numpy.ceil",
"matplotlib.backends.backend_qt5agg.FigureCanvas",
"matplotlib.figure.Figure",
"numpy.arange",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication"
] |
[((3146, 3168), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3158, 3168), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow\n'), ((1104, 1112), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (1110, 1112), False, 'from matplotlib.figure import Figure\n'), ((1181, 1198), 'matplotlib.backends.backend_qt5agg.FigureCanvas', 'FigureCanvas', (['fig'], {}), '(fig)\n', (1193, 1198), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((2337, 2368), 'numpy.arange', 'arange', (['(1)', '(number_of_pulses + 1)'], {}), '(1, number_of_pulses + 1)\n', (2343, 2368), False, 'from numpy import arange, ceil\n'), ((1352, 1391), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.my_canvas', 'self'], {}), '(self.my_canvas, self)\n', (1369, 1391), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((2388, 2419), 'numpy.ceil', 'ceil', (['(10.0 ** beta * n ** alpha)'], {}), '(10.0 ** beta * n ** alpha)\n', (2392, 2419), False, 'from numpy import arange, ceil\n')]
|
import math
import tensorflow as tf
import cv2
import numpy as np
from scipy import signal
def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray:
"""
Normalize the input image to a given range set by min and max parameter
Args:
image ([type]): [description]
new_min ([type], optional): [description]. Defaults to 0.
new_max ([type], optional): [description]. Defaults to 255.
Returns:
[np.ndarray]: Normalized image
"""
original_dtype = image.dtype
image = image.astype(np.float32)
image_min, image_max = np.min(image), np.max(image)
image = tf.cast(image, np.float32)
normalized_image = (new_max - new_min) / (image_max - image_min) * (image - image_min) + new_min
return tf.cast(normalized_image, original_dtype)
def normalize_kernel(kernel: np.array) -> np.ndarray:
return kernel / np.sum(kernel, axis=-1)
def gaussian_kernel2d(kernel_size: int, sigma: float, dtype=np.float32) -> np.ndarray:
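    # Build a kernel_size x kernel_size grid centred on the middle pixel and evaluate the 2-D Gaussian on it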
krange = np.arange(kernel_size)
x, y = np.meshgrid(krange, krange)
constant = np.round(kernel_size / 2)
x -= constant
y -= constant
kernel = 1 / (2 * math.pi * sigma**2) * np.math.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
return normalize_kernel(kernel)
def gaussian_filter(
image: np.ndarray, kernel_size: int,
sigma: float, dtype=np.float32, strides: int = 1
) -> np.ndarray:
"""
Apply convolution filter to image with gaussian image kernel
    TODO: Verify this method with tensorflow
https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy
Args:
image ([np.ndarray]): [description]
kernel_size ([int]): [description]
sigma ([float]): [description]
dtype ([type], optional): [description]. Defaults to np.float32.
strides ([int], optional): [description]. Defaults to 1.
Returns:
[np.ndarray]: [description]
"""
kernel = gaussian_kernel2d(kernel_size, sigma)
if len(image.shape) == 3:
image = image[np.newaxis, ...]
image = tf.cast(image, tf.float32)
image = image.astype(np.float32)
image = signal.convolve2d(image, kernel[:, :, np.newaxis, np.newaxis], mode='same', )[::strides, ::strides]
return image.astype(dtype)
def image_shape(image: np.ndarray, dtype=np.int32) -> np.ndarray:
shape = image.shape
shape = shape[:2] if len(image.shape) == 3 else shape[1:3]
return shape
def scale_shape(image: np.ndarray, scale: float):
shape = image_shape(image, np.float32)
shape = np.math.ceil(shape * scale)
return shape.astype(np.float32)
def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray:
assert len(image.shape) in (3, 4), 'The tensor must be of dimension 3 or 4'
image = image.astype(np.float32)
rescale_size = scale_shape(image, scale)
interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC)
rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation)
return rescaled_image.astype(dtype)
def read_image(filename: str, **kwargs) -> np.ndarray:
mode = kwargs.pop('mode', cv2.IMREAD_UNCHANGED)
return cv2.imread(filename, flags=mode)
def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray:
"""
#### Image Normalization
The first step for DIQA is to pre-process the images. The image is converted into grayscale,
and then a low-pass filter is applied. The low-pass filter is defined as:
\begin{align*}
\hat{I} = I_{gray} - I^{low}
\end{align*}
where the low-frequency image is the result of the following algorithm:
1. Blur the grayscale image.
2. Downscale it by a factor of SCALING_FACTOR.
3. Upscale it back to the original size.
The main reasons for this normalization are (1) the Human Visual System (HVS) is not sensitive to changes
in the low-frequency band, and (2) image distortions barely affect the low-frequency component of images.
Arguments:
image {np.ndarray} -- [description]
Returns:
np.ndarray -- [description]
"""
image = tf.cast(image, tf.float32)
image = tf.image.rgb_to_grayscale(image)
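    # Low-frequency component: (1) blur with a Gaussian filter, (2) downscale by SCALING_FACTOR,
    # (3) upscale back to the original size; it is then subtracted from the grayscale image.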
image_low = gaussian_filter(image, 16, 7 / 6)
image_low = rescale(image_low, SCALING_FACTOR, method=tf.image.ResizeMethod.BICUBIC)
image_low = tf.image.resize(image_low,
size=image_shape(image),
method=tf.image.ResizeMethod.BICUBIC)
return image - tf.cast(image_low, image.dtype)
|
[
"numpy.math.exp",
"numpy.meshgrid",
"tensorflow.image.rgb_to_grayscale",
"numpy.sum",
"scipy.signal.convolve2d",
"tensorflow.cast",
"cv2.imread",
"numpy.min",
"numpy.arange",
"numpy.max",
"numpy.math.ceil",
"numpy.round",
"cv2.resize"
] |
[((640, 666), 'tensorflow.cast', 'tf.cast', (['image', 'np.float32'], {}), '(image, np.float32)\n', (647, 666), True, 'import tensorflow as tf\n'), ((780, 821), 'tensorflow.cast', 'tf.cast', (['normalized_image', 'original_dtype'], {}), '(normalized_image, original_dtype)\n', (787, 821), True, 'import tensorflow as tf\n'), ((1024, 1046), 'numpy.arange', 'np.arange', (['kernel_size'], {}), '(kernel_size)\n', (1033, 1046), True, 'import numpy as np\n'), ((1058, 1085), 'numpy.meshgrid', 'np.meshgrid', (['krange', 'krange'], {}), '(krange, krange)\n', (1069, 1085), True, 'import numpy as np\n'), ((1101, 1126), 'numpy.round', 'np.round', (['(kernel_size / 2)'], {}), '(kernel_size / 2)\n', (1109, 1126), True, 'import numpy as np\n'), ((2100, 2126), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2107, 2126), True, 'import tensorflow as tf\n'), ((2586, 2613), 'numpy.math.ceil', 'np.math.ceil', (['(shape * scale)'], {}), '(shape * scale)\n', (2598, 2613), True, 'import numpy as np\n'), ((2989, 3049), 'cv2.resize', 'cv2.resize', (['image', 'rescale_size'], {'interpolation': 'interpolation'}), '(image, rescale_size, interpolation=interpolation)\n', (2999, 3049), False, 'import cv2\n'), ((3210, 3242), 'cv2.imread', 'cv2.imread', (['filename'], {'flags': 'mode'}), '(filename, flags=mode)\n', (3220, 3242), False, 'import cv2\n'), ((4164, 4190), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4171, 4190), True, 'import tensorflow as tf\n'), ((4203, 4235), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['image'], {}), '(image)\n', (4228, 4235), True, 'import tensorflow as tf\n'), ((599, 612), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (605, 612), True, 'import numpy as np\n'), ((614, 627), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (620, 627), True, 'import numpy as np\n'), ((898, 921), 'numpy.sum', 'np.sum', (['kernel'], {'axis': '(-1)'}), '(kernel, axis=-1)\n', (904, 921), True, 'import numpy as np\n'), ((1207, 1257), 'numpy.math.exp', 'np.math.exp', (['(-(x ** 2 + y ** 2) / (2 * sigma ** 2))'], {}), '(-(x ** 2 + y ** 2) / (2 * sigma ** 2))\n', (1218, 1257), True, 'import numpy as np\n'), ((2176, 2251), 'scipy.signal.convolve2d', 'signal.convolve2d', (['image', 'kernel[:, :, np.newaxis, np.newaxis]'], {'mode': '"""same"""'}), "(image, kernel[:, :, np.newaxis, np.newaxis], mode='same')\n", (2193, 2251), False, 'from scipy import signal\n'), ((4564, 4595), 'tensorflow.cast', 'tf.cast', (['image_low', 'image.dtype'], {}), '(image_low, image.dtype)\n', (4571, 4595), True, 'import tensorflow as tf\n')]
|
import numpy as np
from matplotlib import pyplot as plt
def loadFile(filename):
f = open(filename,'r')
text = f.read()
f.close()
rewards = []
steps = []
for line in text.split('\n'):
pieces = line.split(',')
if(len(pieces) == 2):
rewards.append(float(pieces[0]))
steps.append(int(pieces[1]))
return rewards,steps
def loadFiles(files):
rewards = []
steps = []
for f in files:
r,s = loadFile(f)
rewards.extend(r)
steps.extend(s)
return rewards,steps,
def plotResults(rewards,steps,outputFile):
plt.subplot(2,1,1)
plt.plot(rewards)
plt.xlabel('number of games played')
plt.ylabel('reward received per game')
plt.subplot(2,1,2)
plt.plot(steps)
plt.xlabel('number of games played')
plt.ylabel('number of actions taken per game')
plt.savefig(outputFile)
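# Smooth a series with a simple moving average over a sliding window of n games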
def Average(rewards,n):
return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)]
if(__name__ == "__main__"):
LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284']
SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147']
rewards,steps = loadFiles(['./SmallAgent/29-01-2018'])
rewards = Average(rewards,10)
steps = Average(steps,10)
plotResults(rewards,steps,"./test.png")
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((559, 579), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (570, 579), True, 'from matplotlib import pyplot as plt\n'), ((580, 597), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {}), '(rewards)\n', (588, 597), True, 'from matplotlib import pyplot as plt\n'), ((600, 636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of games played"""'], {}), "('number of games played')\n", (610, 636), True, 'from matplotlib import pyplot as plt\n'), ((639, 677), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reward received per game"""'], {}), "('reward received per game')\n", (649, 677), True, 'from matplotlib import pyplot as plt\n'), ((683, 703), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (694, 703), True, 'from matplotlib import pyplot as plt\n'), ((704, 719), 'matplotlib.pyplot.plot', 'plt.plot', (['steps'], {}), '(steps)\n', (712, 719), True, 'from matplotlib import pyplot as plt\n'), ((722, 758), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of games played"""'], {}), "('number of games played')\n", (732, 758), True, 'from matplotlib import pyplot as plt\n'), ((761, 807), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of actions taken per game"""'], {}), "('number of actions taken per game')\n", (771, 807), True, 'from matplotlib import pyplot as plt\n'), ((813, 836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputFile'], {}), '(outputFile)\n', (824, 836), True, 'from matplotlib import pyplot as plt\n'), ((872, 897), 'numpy.mean', 'np.mean', (['rewards[i:i + n]'], {}), '(rewards[i:i + n])\n', (879, 897), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA_MCCNN
#
# https://github.com/CNES/Pandora_MCCNN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the cost volume created by mc_cnn
"""
import unittest
import numpy as np
import torch
import torch.nn as nn
from mc_cnn.run import computes_cost_volume_mc_cnn_fast
from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer
from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator
from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator
# pylint: disable=no-self-use
class TestMCCNN(unittest.TestCase):
"""
    TestMCCNN class allows testing the cost volume created by mc_cnn
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1
self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1
def test_computes_cost_volume_mc_cnn_fast(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 5), np.nan)
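        # Columns where the disparity-shifted window falls outside the secondary image are left as NaN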
# disparity -2
cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
# disparity 0
cv_gt[:, :, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy()
# disparity 1
cv_gt[:, :3, 3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_fast_negative_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function with negative disparities
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity -4
# all nan
# disparity -3
cv_gt[:, 3:, 1] = cos(ref_feature[:, :, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy()
# disparity -2
cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_fast_positive_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function with positive disparities
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity 1
cv_gt[:, :3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 1] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
# disparity 3
cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy()
# disparity 4
# all nan
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def sad_cost(self, ref_features, sec_features):
"""
Useful to test the computes_cost_volume_mc_cnn_accurate function
"""
return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0)
def test_computes_cost_volume_mc_cnn_accurate(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 5), np.nan)
# disparity -2
cv_gt[:, 2:, 0] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
# disparity 0
cv_gt[:, :, 2] = self.sad_cost(ref_feature[:, :, :, :], sec_features[:, :, :, :]).cpu().detach().numpy()
# disparity 1
cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
        # Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimension
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function with negative disparities
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity -4
# all nan
# disparity -3
cv_gt[:, 3:, 1] = self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy()
# disparity -2
cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
        # Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimension
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function with positive disparities
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity 1
cv_gt[:, :3, 0] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
# disparity 3
cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :, 3:]).cpu().detach().numpy()
# disparity 4
# all nan
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
        # Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimension
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
# pylint: disable=invalid-name
# -> because changing the name here loses the reference to the actual name of the checked function
def test_MiddleburyGenerator(self):
"""
test the function MiddleburyGenerator
"""
        # Script used to create images_middlebury and samples_middlebury :
# pylint: disable=pointless-string-statement
"""
# shape 1, 2, 13, 13 : 1 exposures, 2 = left and right images
image_pairs_0 = np.zeros((1, 2, 13, 13))
# left
image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
# right
image_pairs_0[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) + 1
image_pairs_1 = np.zeros((1, 2, 13, 13))
image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) - 1
img_file = h5py.File('images_middlebury.hdf5', 'w')
img_0 = [image_pairs_0]
grp = img_file.create_group(str(0))
# 1 illumination
for light in range(len(img_0)):
dset = grp.create_dataset(str(light), data=img_0[light])
img_1 = [image_pairs_1]
grp = img_file.create_group(str(1))
for light in range(len(img_1)):
dset = grp.create_dataset(str(light), data=img_1[light])
sampl_file = h5py.File('sample_middlebury.hdf5', 'w')
# disparity of image_pairs_0
        x0 = np.array([[0., 5., 6., 1.],
                       [0., 7., 7., 1.]])
        # disparity of image_pairs_1
        x1 = np.array([[ 1., 7., 5., -1.],
                       [ 0., 0., 0., 0.]])
sampl_file.create_dataset(str(0), data=x0)
sampl_file.create_dataset(str(1), data=x1)
"""
# Positive disparity
cfg = {
"data_augmentation": False,
"dataset_neg_low": 1,
"dataset_neg_high": 1,
"dataset_pos": 0,
"augmentation_param": {
"vertical_disp": 0,
"scale": 0.8,
"hscale": 0.8,
"hshear": 0.1,
"trans": 0,
"rotate": 28,
"brightness": 1.3,
"contrast": 1.1,
"d_hscale": 0.9,
"d_hshear": 0.3,
"d_vtrans": 1,
"d_rotate": 3,
"d_brightness": 0.7,
"d_contrast": 1.1,
},
}
training_loader = MiddleburyGenerator("tests/sample_middlebury.hdf5", "tests/images_middlebury.hdf5", cfg)
# Patch of shape 3, 11, 11
        # With the first dimension = left patch, right positive patch, right negative patch
patch = training_loader.__getitem__(0)
x_ref_patch = 6
y_ref_patch = 5
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = 1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# negative disparity
patch = training_loader.__getitem__(2)
x_ref_patch = 5
y_ref_patch = 7
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = -1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# pylint: disable=invalid-name
# -> because changing the name here loses the reference to the actual name of the checked function
def test_DataFusionContestGenerator(self):
"""
test the function DataFusionContestGenerator
"""
# pylint: disable=pointless-string-statement
"""
        # Script used to create images_dfc.hdf5 and sample_dfc.hdf5:
# shape 2, 13, 13 : 2 = left and right images, row, col
image_pairs_0 = np.zeros((2, 13, 13))
# left
image_pairs_0[0, :, :] = np.tile(np.arange(13), (13, 1))
# right
image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1
image_pairs_1 = np.zeros((2, 13, 13))
image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1))
image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1)) - 1
img_file = h5py.File('images_dfc.hdf5', 'w')
img_file.create_dataset(str(0), data=image_pairs_0)
img_file.create_dataset(str(1), data=image_pairs_1)
sampl_file = h5py.File('sample_dfc.hdf5', 'w')
# disparity of image_pairs_0
x0 = np.array([[0., 5., 6., 1.],
[0., 7., 7., 1.]])
# disparity of image_pairs_1
x1 = np.array([[ 1., 7., 5., -1.],
[ 0., 0., 0., 0.]])
sampl_file.create_dataset(str(0), data=x0)
sampl_file.create_dataset(str(1), data=x1)
"""
# Positive disparity
cfg = {
"data_augmentation": False,
"dataset_neg_low": 1,
"dataset_neg_high": 1,
"dataset_pos": 0,
"vertical_disp": 0,
"augmentation_param": {
"scale": 0.8,
"hscale": 0.8,
"hshear": 0.1,
"trans": 0,
"rotate": 28,
"brightness": 1.3,
"contrast": 1.1,
"d_hscale": 0.9,
"d_hshear": 0.3,
"d_vtrans": 1,
"d_rotate": 3,
"d_brightness": 0.7,
"d_contrast": 1.1,
},
}
training_loader = DataFusionContestGenerator("tests/sample_dfc.hdf5", "tests/images_dfc.hdf5", cfg)
# Patch of shape 3, 11, 11
        # With the first dimension = left patch, right positive patch, right negative patch
patch = training_loader.__getitem__(0)
x_ref_patch = 6
y_ref_patch = 5
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = 1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# negative disparity
patch = training_loader.__getitem__(2)
x_ref_patch = 5
y_ref_patch = 7
patch_size = 5
gt_ref_patch = self.ref_img_1[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = -1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 7
gt_sec_pos_patch = self.sec_img_2[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 7
gt_sec_neg_patch = self.sec_img_2[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"numpy.full",
"numpy.stack",
"numpy.testing.assert_array_equal",
"torch.randn",
"torch.nn.CosineSimilarity",
"mc_cnn.run.computes_cost_volume_mc_cnn_fast",
"mc_cnn.dataset_generator.middlebury_generator.MiddleburyGenerator",
"numpy.arange",
"numpy.testing.assert_allclose",
"mc_cnn.dataset_generator.datas_fusion_contest_generator.DataFusionContestGenerator",
"mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer"
] |
[((21139, 21154), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21152, 21154), False, 'import unittest\n'), ((1942, 1986), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (1953, 1986), False, 'import torch\n'), ((2010, 2054), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (2021, 2054), False, 'import torch\n'), ((2070, 2107), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (2089, 2107), True, 'import torch.nn as nn\n'), ((2187, 2213), 'numpy.full', 'np.full', (['(4, 4, 5)', 'np.nan'], {}), '((4, 4, 5), np.nan)\n', (2194, 2213), True, 'import numpy as np\n'), ((2937, 3003), 'mc_cnn.run.computes_cost_volume_mc_cnn_fast', 'computes_cost_volume_mc_cnn_fast', (['ref_feature', 'sec_features', '(-2)', '(2)'], {}), '(ref_feature, sec_features, -2, 2)\n', (2969, 3003), False, 'from mc_cnn.run import computes_cost_volume_mc_cnn_fast\n'), ((3125, 3174), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (3151, 3174), True, 'import numpy as np\n'), ((3427, 3471), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (3438, 3471), False, 'import torch\n'), ((3495, 3539), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (3506, 3539), False, 'import torch\n'), ((3555, 3592), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (3574, 3592), True, 'import torch.nn as nn\n'), ((3672, 3698), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (3679, 3698), True, 'import numpy as np\n'), ((4222, 4289), 'mc_cnn.run.computes_cost_volume_mc_cnn_fast', 'computes_cost_volume_mc_cnn_fast', (['ref_feature', 'sec_features', '(-4)', '(-1)'], {}), '(ref_feature, sec_features, -4, -1)\n', (4254, 4289), False, 'from mc_cnn.run import computes_cost_volume_mc_cnn_fast\n'), ((4411, 4460), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (4437, 4460), True, 'import numpy as np\n'), ((4713, 4757), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (4724, 4757), False, 'import torch\n'), ((4781, 4825), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (4792, 4825), False, 'import torch\n'), ((4841, 4878), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (4860, 4878), True, 'import torch.nn as nn\n'), ((4958, 4984), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (4965, 4984), True, 'import numpy as np\n'), ((5503, 5568), 'mc_cnn.run.computes_cost_volume_mc_cnn_fast', 'computes_cost_volume_mc_cnn_fast', (['ref_feature', 'sec_features', '(1)', '(4)'], {}), '(ref_feature, sec_features, 1, 4)\n', (5535, 5568), False, 'from mc_cnn.run import computes_cost_volume_mc_cnn_fast\n'), ((5690, 5739), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (5716, 5739), True, 'import numpy as np\n'), ((6200, 6248), 'torch.randn', 'torch.randn', (['(1, 112, 
4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (6211, 6248), False, 'import torch\n'), ((6272, 6320), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (6283, 6320), False, 'import torch\n'), ((6401, 6427), 'numpy.full', 'np.full', (['(4, 4, 5)', 'np.nan'], {}), '((4, 4, 5), np.nan)\n', (6408, 6427), True, 'import numpy as np\n'), ((7232, 7247), 'mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer', 'AccMcCnnInfer', ([], {}), '()\n', (7245, 7247), False, 'from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer\n'), ((7567, 7616), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (7593, 7616), True, 'import numpy as np\n'), ((7876, 7924), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (7887, 7924), False, 'import torch\n'), ((7948, 7996), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (7959, 7996), False, 'import torch\n'), ((8077, 8103), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (8084, 8103), True, 'import numpy as np\n'), ((8676, 8691), 'mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer', 'AccMcCnnInfer', ([], {}), '()\n', (8689, 8691), False, 'from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer\n'), ((9012, 9061), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (9038, 9061), True, 'import numpy as np\n'), ((9322, 9370), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (9333, 9370), False, 'import torch\n'), ((9394, 9442), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (9405, 9442), False, 'import torch\n'), ((9523, 9549), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (9530, 9549), True, 'import numpy as np\n'), ((10117, 10132), 'mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer', 'AccMcCnnInfer', ([], {}), '()\n', (10130, 10132), False, 'from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer\n'), ((10451, 10500), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (10477, 10500), True, 'import numpy as np\n'), ((12976, 13068), 'mc_cnn.dataset_generator.middlebury_generator.MiddleburyGenerator', 'MiddleburyGenerator', (['"""tests/sample_middlebury.hdf5"""', '"""tests/images_middlebury.hdf5"""', 'cfg'], {}), "('tests/sample_middlebury.hdf5',\n 'tests/images_middlebury.hdf5', cfg)\n", (12995, 13068), False, 'from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator\n'), ((14329, 14397), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (14337, 14397), True, 'import numpy as np\n'), ((14513, 14558), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (14542, 14558), True, 'import numpy as np\n'), ((15729, 15797), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (15737, 15797), 
True, 'import numpy as np\n'), ((15913, 15958), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (15942, 15958), True, 'import numpy as np\n'), ((18131, 18216), 'mc_cnn.dataset_generator.datas_fusion_contest_generator.DataFusionContestGenerator', 'DataFusionContestGenerator', (['"""tests/sample_dfc.hdf5"""', '"""tests/images_dfc.hdf5"""', 'cfg'], {}), "('tests/sample_dfc.hdf5', 'tests/images_dfc.hdf5',\n cfg)\n", (18157, 18216), False, 'from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator\n'), ((19476, 19544), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (19484, 19544), True, 'import numpy as np\n'), ((19660, 19705), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (19689, 19705), True, 'import numpy as np\n'), ((20876, 20944), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (20884, 20944), True, 'import numpy as np\n'), ((21060, 21105), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (21089, 21105), True, 'import numpy as np\n'), ((1454, 1485), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1463, 1485), True, 'import numpy as np\n'), ((1609, 1640), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1618, 1640), True, 'import numpy as np\n'), ((1529, 1560), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1538, 1560), True, 'import numpy as np\n'), ((1684, 1715), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1693, 1715), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlite3
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
def add_truth(data, database):
data = data.sort_values('event_no').reset_index(drop = True)
with sqlite3.connect(database) as con:
query = 'select event_no, energy, interaction_type, pid from truth where event_no in %s'%str(tuple(data['event_no']))
truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True)
truth['track'] = 0
truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1), 'track'] = 1
add_these = []
for key in truth.columns:
if key not in data.columns:
add_these.append(key)
for key in add_these:
data[key] = truth[key]
return data
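# Note: add_truth labels an event as a track (track = 1) only when it is a charged-current
# muon-neutrino interaction (|pid| == 14 and interaction_type == 1); everything else is
# treated as a cascade (track = 0).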
def get_interaction_type(row):
if row["interaction_type"] == 1: # CC
particle_type = "nu_" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])]
return f"{particle_type} CC"
else:
return "NC"
def resolution_fn(r):
if len(r) > 1:
return (np.percentile(r, 84) - np.percentile(r, 16)) / 2.
else:
return np.nan
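# resolution_fn returns half of the 16th-84th percentile spread of the residuals, i.e. half the
# central 68% interval, which equals one standard deviation for Gaussian residuals.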
def add_energylog10(df):
df['energy_log10'] = np.log10(df['energy'])
return df
def get_error(residual):
rng = np.random.default_rng(42)
w = []
for i in range(150):
new_sample = rng.choice(residual, size = len(residual), replace = True)
w.append(resolution_fn(new_sample))
return np.std(w)
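# get_error estimates the statistical uncertainty on that width with a simple bootstrap:
# 150 resamples of the residuals (with replacement), recomputing the width each time and
# taking the standard deviation of the resampled widths.
# Minimal usage sketch (hypothetical residuals, in the same units as the target):
#   residuals = data['zenith_pred'] - data['zenith']
#   width, width_err = resolution_fn(residuals), get_error(residuals)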
def get_roc_and_auc(data, target):
fpr, tpr, _ = roc_curve(data[target], data[target+'_pred'])
auc_score = auc(fpr,tpr)
return fpr,tpr,auc_score
def plot_roc(target, runids, save_dir, save_as_csv = False):
width = 3.176*2
height = 2.388*2
fig = plt.figure(figsize = (width,height))
for runid in runids:
data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target))
database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)
if save_as_csv:
data = add_truth(data, database)
data = add_energylog10(data)
data.to_csv(save_dir + '/%s_%s.csv'%(runid, target))
pulses_cut_val = 20
if runid == 140021:
pulses_cut_val = 10
        fpr, tpr, auc_score = get_roc_and_auc(data, target)  # renamed to avoid shadowing sklearn's auc
        plt.plot(fpr, tpr, label=' %s : %s' % (runid, round(auc_score, 3)))
plt.legend()
plt.title('Track/Cascade Classification')
plt.ylabel('True Positive Rate', fontsize = 12)
plt.xlabel('False Positive Rate', fontsize = 12)
ymax = 0.3
x_text = 0.2
y_text = ymax - 0.05
y_sep = 0.1
plt.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)"%(runids[0], runids[1]), va='top', fontsize = 8)
plt.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize = 8)
plt.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training"%(10,20), va='top', fontsize = 8)
fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches="tight")
return
def calculate_width(data_sliced, target):
track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True)
cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True)
if target == 'energy':
residual_track = ((track[target + '_pred'] - track[target])/track[target])*100
residual_cascade = ((cascade[target + '_pred'] - cascade[target])/cascade[target])*100
elif target == 'zenith':
residual_track = (track[target + '_pred'] - track[target])*(360/(2*np.pi))
residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi))
else:
residual_track = (track[target + '_pred'] - track[target])
residual_cascade = (cascade[target + '_pred'] - cascade[target])
return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade)
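# calculate_width reports the energy resolution as a relative error in percent, converts the
# zenith residual from radians to degrees (factor 360 / 2*pi), and otherwise uses the raw
# residual; track and cascade events are evaluated separately.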
def get_width(df, target):
track_widths = []
cascade_widths = []
track_errors = []
cascade_errors = []
energy = []
bins = np.arange(0,3.1,0.1)
if target in ['zenith', 'energy', 'XYZ']:
for i in range(1,len(bins)):
print(bins[i])
idx = (df['energy_log10']> bins[i-1]) & (df['energy_log10'] < bins[i])
data_sliced = df.loc[idx, :].reset_index(drop = True)
energy.append(np.mean(data_sliced['energy_log10']))
track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target)
track_widths.append(track_width)
cascade_widths.append(cascade_width)
track_errors.append(track_error)
cascade_errors.append(cascade_error)
track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors})
cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors})
return track_plot_data, cascade_plot_data
else:
print('target not supported: %s'%target)
# Load data
def make_plot(target, runids, save_dir, save_as_csv = False):
colors = {140021: 'tab:blue', 140022: 'tab:orange'}
fig = plt.figure(constrained_layout = True)
ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6)
for runid in runids:
predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)
database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)
pulses_cut_val = 20
if runid == 140021:
pulses_cut_val = 10
df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True)
df = add_truth(df, database)
df = add_energylog10(df)
if save_as_csv:
df.to_csv(save_dir + '/%s_%s.csv'%(runid, target))
plot_data_track, plot_data_cascade = get_width(df, target)
ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black', alpha = 1)
ax1.fill_between(plot_data_track['mean'],plot_data_track['width'] - plot_data_track['width_error'], plot_data_track['width'] + plot_data_track['width_error'],color = colors[runid], alpha = 0.8 ,label = 'Track %s'%runid)
ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color = 'tab:blue', lw = 0.5, alpha = 1)
ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3, label = 'Cascade %s'%runid )
ax2 = ax1.twinx()
ax2.hist(df['energy_log10'], histtype = 'step', label = 'deposited energy', color = colors[runid])
#plt.title('$\\nu_{v,u,e}$', size = 20)
ax1.tick_params(axis='x', labelsize=6)
ax1.tick_params(axis='y', labelsize=6)
ax1.set_xlim((0,3.1))
leg = ax1.legend(frameon=False, fontsize = 8)
for line in leg.get_lines():
line.set_linewidth(4.0)
if target == 'energy':
ax1.set_ylim((0,175))
ymax = 23.
y_sep = 8
unit_tag = '(%)'
else:
unit_tag = '(deg.)'
if target == 'angular_res':
target = 'direction'
if target == 'XYZ':
target = 'vertex'
unit_tag = '(m)'
if target == 'zenith':
ymax = 10.
y_sep = 2.3
ax1.set_ylim((0,45))
plt.tick_params(right=False,labelright=False)
ax1.set_ylabel('%s Resolution %s'%(target.capitalize(), unit_tag), size = 10)
ax1.set_xlabel('Energy (log10 GeV)', size = 10)
x_text = 0.5
y_text = ymax - 2.
ax1.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)"%(runids[0], runids[1]), va='top', fontsize = 8)
ax1.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize = 8)
ax1.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training"%(10,20), va='top', fontsize = 8)
fig.suptitle("%s regression Upgrade MC using GNN"%target)
#fig.suptitle('%s Resolution'%target.capitalize(), size = 12)
fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches="tight")
return
runids = [140021, 140022]
targets = ['zenith', 'energy', 'track']
save_as_csv = True
save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv'
for target in targets:
if target != 'track':
make_plot(target, runids, save_dir, save_as_csv)
else:
plot_roc(target, runids, save_dir, save_as_csv)
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.subplot2grid",
"numpy.random.default_rng",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"pandas.DataFrame",
"numpy.std",
"numpy.log10",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.text",
"numpy.percentile",
"sqlite3.connect",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"pandas.read_sql",
"matplotlib.pyplot.xlabel"
] |
[((1250, 1272), 'numpy.log10', 'np.log10', (["df['energy']"], {}), "(df['energy'])\n", (1258, 1272), True, 'import numpy as np\n'), ((1323, 1348), 'numpy.random.default_rng', 'np.random.default_rng', (['(42)'], {}), '(42)\n', (1344, 1348), True, 'import numpy as np\n'), ((1520, 1529), 'numpy.std', 'np.std', (['w'], {}), '(w)\n', (1526, 1529), True, 'import numpy as np\n'), ((1584, 1631), 'sklearn.metrics.roc_curve', 'roc_curve', (['data[target]', "data[target + '_pred']"], {}), "(data[target], data[target + '_pred'])\n", (1593, 1631), False, 'from sklearn.metrics import roc_curve\n'), ((1646, 1659), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1649, 1659), False, 'from sklearn.metrics import auc\n'), ((1803, 1838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (1813, 1838), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2580), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2578, 2580), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2626), 'matplotlib.pyplot.title', 'plt.title', (['"""Track/Cascade Classification"""'], {}), "('Track/Cascade Classification')\n", (2594, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2631, 2676), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(12)'}), "('True Positive Rate', fontsize=12)\n", (2641, 2676), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(12)'}), "('False Positive Rate', fontsize=12)\n", (2693, 2729), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2956), 'matplotlib.pyplot.text', 'plt.text', (['x_text', '(y_text - 0 * y_sep)', "('IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)' % (runids[0], runids[1])\n )"], {'va': '"""top"""', 'fontsize': '(8)'}), "(x_text, y_text - 0 * y_sep, \n 'IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)' % (runids[0],\n runids[1]), va='top', fontsize=8)\n", (2817, 2956), True, 'import matplotlib.pyplot as plt\n'), ((2952, 3068), 'matplotlib.pyplot.text', 'plt.text', (['x_text', '(y_text - 1 * y_sep)', '"""Pulsemaps used: SplitInIcePulses_GraphSage_Pulses """'], {'va': '"""top"""', 'fontsize': '(8)'}), "(x_text, y_text - 1 * y_sep,\n 'Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ', va='top', fontsize=8)\n", (2960, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3207), 'matplotlib.pyplot.text', 'plt.text', (['x_text', '(y_text - 2 * y_sep)', "('n_pulses > (%s, %s) selection applied during training' % (10, 20))"], {'va': '"""top"""', 'fontsize': '(8)'}), "(x_text, y_text - 2 * y_sep, \n 'n_pulses > (%s, %s) selection applied during training' % (10, 20), va=\n 'top', fontsize=8)\n", (3079, 3207), True, 'import matplotlib.pyplot as plt\n'), ((4393, 4415), 'numpy.arange', 'np.arange', (['(0)', '(3.1)', '(0.1)'], {}), '(0, 3.1, 0.1)\n', (4402, 4415), True, 'import numpy as np\n'), ((5514, 5549), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (5524, 5549), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5616), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(6, 6)', '(0, 0)'], {'colspan': '(6)', 'rowspan': '(6)'}), '((6, 6), (0, 0), colspan=6, rowspan=6)\n', (5578, 5616), True, 'import matplotlib.pyplot as plt\n'), ((7910, 7956), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'right': '(False)', 'labelright': '(False)'}), '(right=False, 
labelright=False)\n', (7925, 7956), True, 'import matplotlib.pyplot as plt\n'), ((262, 287), 'sqlite3.connect', 'sqlite3.connect', (['database'], {}), '(database)\n', (277, 287), False, 'import sqlite3\n'), ((1880, 2054), 'pandas.read_csv', 'pd.read_csv', (["('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'\n % (runid, target))"], {}), "(\n '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'\n % (runid, target))\n", (1891, 2054), True, 'import pandas as pd\n'), ((5058, 5144), 'pandas.DataFrame', 'pd.DataFrame', (["{'mean': energy, 'width': track_widths, 'width_error': track_errors}"], {}), "({'mean': energy, 'width': track_widths, 'width_error':\n track_errors})\n", (5070, 5144), True, 'import pandas as pd\n'), ((5169, 5259), 'pandas.DataFrame', 'pd.DataFrame', (["{'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}"], {}), "({'mean': energy, 'width': cascade_widths, 'width_error':\n cascade_errors})\n", (5181, 5259), True, 'import pandas as pd\n'), ((1109, 1129), 'numpy.percentile', 'np.percentile', (['r', '(84)'], {}), '(r, 84)\n', (1122, 1129), True, 'import numpy as np\n'), ((1132, 1152), 'numpy.percentile', 'np.percentile', (['r', '(16)'], {}), '(r, 16)\n', (1145, 1152), True, 'import numpy as np\n'), ((4700, 4736), 'numpy.mean', 'np.mean', (["data_sliced['energy_log10']"], {}), "(data_sliced['energy_log10'])\n", (4707, 4736), True, 'import numpy as np\n'), ((438, 461), 'pandas.read_sql', 'pd.read_sql', (['query', 'con'], {}), '(query, con)\n', (449, 461), True, 'import pandas as pd\n'), ((6060, 6089), 'pandas.read_csv', 'pd.read_csv', (['predictions_path'], {}), '(predictions_path)\n', (6071, 6089), True, 'import pandas as pd\n')]
|
from preprocessing.vectorizers import Doc2VecVectorizer
from nnframework.data_builder import DataBuilder
import pandas as pd
import constants as const
import numpy as np
def generate_d2v_vectors(source_file):
df = pd.read_csv(source_file)
messages = df["Message"].values
vectorizer = Doc2VecVectorizer()
vectors = vectorizer.vectorize(messages)
return np.c_[df.iloc[:,0].values, vectors]
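# Returned array layout: column 0 is the original identifier (first CSV column), and the
# remaining columns are the Doc2Vec embedding of the corresponding message.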
if __name__ == '__main__':
# Generate vectors (with index)
output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED)
# Save vectors as npy file
np.save(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output)
|
[
"pandas.read_csv",
"numpy.save",
"preprocessing.vectorizers.Doc2VecVectorizer"
] |
[((219, 243), 'pandas.read_csv', 'pd.read_csv', (['source_file'], {}), '(source_file)\n', (230, 243), True, 'import pandas as pd\n'), ((298, 317), 'preprocessing.vectorizers.Doc2VecVectorizer', 'Doc2VecVectorizer', ([], {}), '()\n', (315, 317), False, 'from preprocessing.vectorizers import Doc2VecVectorizer\n'), ((579, 632), 'numpy.save', 'np.save', (['const.FILE_DOC2VEC_INPUTS_UNLABELLED', 'output'], {}), '(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output)\n', (586, 632), True, 'import numpy as np\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="M_qo7DmLJKLP"
# # Class-Conditional Bernoulli Mixture Model for EMNIST
# + [markdown] id="TU1pCzcIJHTm"
# ## Setup
#
# + id="400WanLyGA2C"
# !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
# %cd -q /pyprobml/scripts
# + id="k1rLl6dHH7Wh"
# !pip install -q superimport
# !pip install -q distrax
# + id="cLpBn5KQeB46"
from conditional_bernoulli_mix_lib import ClassConditionalBMM
from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class
from noisy_spelling_hmm import Word
from jax import vmap
import jax.numpy as jnp
import jax
from jax.random import PRNGKey, split
import numpy as np
from matplotlib import pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="ey9k06RweuKc" outputId="38131e5a-82fb-49db-c4d3-f4364a643152"
select_n = 25
dataset, targets = get_emnist_images_per_class(select_n)
dataset, targets = jnp.array(dataset), jnp.array(targets)
# + [markdown] id="KwNq7HYYLPO9"
# ## Initialization of Class Conditional BMMs
# + colab={"base_uri": "https://localhost:8080/"} id="UABtUDPjffFt" outputId="d873a708-542c-44e6-8c72-2c5908c7bbad"
n_mix = 30
n_char = 52
mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix))
p_min, p_max = 0.4, 0.6
n_pixels = 28 * 28
probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels)))
class_priors = jnp.array(np.full((n_char,), 1./n_char))
cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
# + [markdown] id="Qa95Fua5Kc3i"
# ## Full Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="PDzuEjs9Kewi" outputId="c81916c0-c6b7-45bd-d308-eab878afe281"
num_epochs, batch_size = 100, len(dataset)
losses = cbm_gd.fit_sgd(dataset.reshape((-1, n_pixels)), targets, batch_size, num_epochs = num_epochs)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="37mNMNrpInfh"
# ## EM Algorithm
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="FJeBzIKYfsUk" outputId="9d8db485-a251-4b1a-a6e5-93833c83dce6"
losses = cbm_em.fit_em(dataset, targets, 8)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="NjCQpoH1Iuuf"
# ## Plot of the Probabilities of Components Distribution
# + id="KkyAHDW4JgyM"
def plot_components_dist(cbm, n_mix):
fig = plt.figure(figsize=(45, 20))
for k in range(n_mix):
for cls in range(cbm.num_of_classes):
plt.subplot(n_mix ,cbm.num_of_classes, cbm.num_of_classes*k + cls +1)
plt.imshow(1 - cbm.model.components_distribution.distribution.probs[cls][k,:].reshape((28,28)), cmap = "gray")
plt.axis('off')
plt.tight_layout()
plt.show()
# + [markdown] id="J8KLkCWpNAeF"
# ### GD
# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="DSOiuNeAM8gl" outputId="dce9416a-b646-423d-b4bf-c78728db1cab"
plot_components_dist(cbm_gd, n_mix)
# + [markdown] id="FO31plUVNDSO"
# ### EM
# + id="ZM43qs6FfvlP" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="81a095f1-1099-4809-90a8-272dbed11662"
plot_components_dist(cbm_em, n_mix)
# + [markdown] id="IqRdcklzOeAY"
# ## Sampling
# + id="wgI6sFWKN4ax"
p1, p2, p3 = 0.4, 0.1, 2e-3
n_misspelled = 1 # number of misspelled words created for each class
vocab = ['book', 'bird', 'bond', 'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band']
rng_key = PRNGKey(0)
keys = [dev_array for dev_array in split(rng_key, len(vocab))]
# + id="x3GpZ8jbf11N" colab={"base_uri": "https://localhost:8080/"} outputId="5a348b69-bdf4-4f80-f059-1062ba2fbb88"
hmms = {word: Word(word, p1, p2, p3, n_char, "all", mixing_coeffs=cbm_em.model.mixture_distribution.probs,
initial_probs=cbm_em.model.components_distribution.distribution.probs, n_mix=n_mix) for word in vocab}
samples = jax.tree_multimap(lambda word, key: hmms[word].n_sample(n_misspelled, key), vocab, keys)
# + id="7VXVsobcg_KO" colab={"base_uri": "https://localhost:8080/"} outputId="3e915a79-7f5c-4131-d6ee-97f11c83d86f"
decoded_words = vmap(decode, in_axes = (0, None, None))(jnp.array(samples)[:, :, :, -1].reshape((n_misspelled * len(vocab), -1)), n_char + 1, "all")
get_decoded_samples(decoded_words)
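# Note: vmap(decode, in_axes=(0, None, None)) maps `decode` over the leading batch axis only,
# so every sampled (possibly misspelled) word is decoded with the same alphabet size
# (n_char + 1) and the same "all" encoding mode.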
# + [markdown] id="xrRy8MG0afR8"
# ### Figure
# + id="O0-HaN5rQAvP"
def plot_samples(samples):
samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28))
fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10))
fig.subplots_adjust(hspace = .2, wspace=.001)
for i, ax in enumerate(axes.flatten()):
ax.imshow(samples[i], cmap="gray")
ax.set_axis_off()
fig.tight_layout()
plt.show()
# + id="EbZn9vrfhei4" colab={"base_uri": "https://localhost:8080/", "height": 728} outputId="114217bf-cadb-4331-82ef-b4844c038342"
plot_samples(samples)
# + [markdown] id="eNDmwV7EPyrR"
# ## Calculation of Log Likelihoods for Test Data
# + id="525MUl5HPe1K"
# noisy words
test_words = ['bo--', '-On-', 'b-N-', 'B---', '-OnD', 'b--D', '---D', '--Nd', 'B-nD', '-O--', 'b--d', '--n-']
test_images = fake_test_data(test_words, dataset, targets, n_char + 1, "all")
# + id="1dFCdVNgPYtJ"
def plot_log_likelihood(hmms, test_words, test_images, vocab):
fig, axes = plt.subplots(4, 3, figsize=(20, 10))
for i, (ax, img, word) in enumerate(zip(axes.flat, test_images, test_words)):
flattened_img = img.reshape((len(img), -1))
loglikelihoods = jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab)
loglikelihoods = jnp.array(loglikelihoods)
ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color="black")
ax.set_title(f'{word}')
plt.tight_layout()
plt.show()
# + id="qv-Df8GEhfC4" colab={"base_uri": "https://localhost:8080/", "height": 784} outputId="9be6abf3-0ecc-4ef5-e301-380c5eac38ff"
plot_log_likelihood(hmms, test_words, test_images, vocab)
|
[
"jax.nn.log_softmax",
"conditional_bernoulli_mix_utils.fake_test_data",
"jax.random.PRNGKey",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.full",
"conditional_bernoulli_mix_utils.get_decoded_samples",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"jax.vmap",
"matplotlib.pyplot.ylabel",
"noisy_spelling_hmm.Word",
"conditional_bernoulli_mix_utils.get_emnist_images_per_class",
"jax.numpy.array",
"numpy.random.uniform",
"conditional_bernoulli_mix_lib.ClassConditionalBMM",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((1137, 1174), 'conditional_bernoulli_mix_utils.get_emnist_images_per_class', 'get_emnist_images_per_class', (['select_n'], {}), '(select_n)\n', (1164, 1174), False, 'from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class\n'), ((1705, 1813), 'conditional_bernoulli_mix_lib.ClassConditionalBMM', 'ClassConditionalBMM', ([], {'mixing_coeffs': 'mixing_coeffs', 'probs': 'probs', 'class_priors': 'class_priors', 'n_char': 'n_char'}), '(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=\n class_priors, n_char=n_char)\n', (1724, 1813), False, 'from conditional_bernoulli_mix_lib import ClassConditionalBMM\n'), ((1818, 1926), 'conditional_bernoulli_mix_lib.ClassConditionalBMM', 'ClassConditionalBMM', ([], {'mixing_coeffs': 'mixing_coeffs', 'probs': 'probs', 'class_priors': 'class_priors', 'n_char': 'n_char'}), '(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=\n class_priors, n_char=n_char)\n', (1837, 1926), False, 'from conditional_bernoulli_mix_lib import ClassConditionalBMM\n'), ((2270, 2310), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'color': '"""k"""', 'linewidth': '(3)'}), "(losses, color='k', linewidth=3)\n", (2278, 2310), True, 'from matplotlib import pyplot as plt\n'), ((2311, 2334), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2321, 2334), True, 'from matplotlib import pyplot as plt\n'), ((2335, 2372), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Negative Log Likelihood"""'], {}), "('Negative Log Likelihood')\n", (2345, 2372), True, 'from matplotlib import pyplot as plt\n'), ((2373, 2383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2381, 2383), True, 'from matplotlib import pyplot as plt\n'), ((2613, 2653), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'color': '"""k"""', 'linewidth': '(3)'}), "(losses, color='k', linewidth=3)\n", (2621, 2653), True, 'from matplotlib import pyplot as plt\n'), ((2654, 2677), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2664, 2677), True, 'from matplotlib import pyplot as plt\n'), ((2678, 2715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Negative Log Likelihood"""'], {}), "('Negative Log Likelihood')\n", (2688, 2715), True, 'from matplotlib import pyplot as plt\n'), ((2716, 2726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2724, 2726), True, 'from matplotlib import pyplot as plt\n'), ((3944, 3954), 'jax.random.PRNGKey', 'PRNGKey', (['(0)'], {}), '(0)\n', (3951, 3954), False, 'from jax.random import PRNGKey, split\n'), ((4733, 4767), 'conditional_bernoulli_mix_utils.get_decoded_samples', 'get_decoded_samples', (['decoded_words'], {}), '(decoded_words)\n', (4752, 4767), False, 'from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class\n'), ((5598, 5661), 'conditional_bernoulli_mix_utils.fake_test_data', 'fake_test_data', (['test_words', 'dataset', 'targets', '(n_char + 1)', '"""all"""'], {}), "(test_words, dataset, targets, n_char + 1, 'all')\n", (5612, 5661), False, 'from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class\n'), ((1194, 1212), 'jax.numpy.array', 'jnp.array', (['dataset'], {}), '(dataset)\n', (1203, 1212), True, 'import jax.numpy as jnp\n'), ((1214, 1232), 'jax.numpy.array', 'jnp.array', (['targets'], {}), '(targets)\n', (1223, 1232), True, 'import jax.numpy as jnp\n'), ((1480, 1517), 
'numpy.full', 'np.full', (['(n_char, n_mix)', '(1.0 / n_mix)'], {}), '((n_char, n_mix), 1.0 / n_mix)\n', (1487, 1517), True, 'import numpy as np\n'), ((1578, 1636), 'numpy.random.uniform', 'np.random.uniform', (['p_min', 'p_max', '(n_char, n_mix, n_pixels)'], {}), '(p_min, p_max, (n_char, n_mix, n_pixels))\n', (1595, 1636), True, 'import numpy as np\n'), ((1664, 1696), 'numpy.full', 'np.full', (['(n_char,)', '(1.0 / n_char)'], {}), '((n_char,), 1.0 / n_char)\n', (1671, 1696), True, 'import numpy as np\n'), ((2889, 2917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(45, 20)'}), '(figsize=(45, 20))\n', (2899, 2917), True, 'from matplotlib import pyplot as plt\n'), ((3218, 3236), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3234, 3236), True, 'from matplotlib import pyplot as plt\n'), ((3239, 3249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3247, 3249), True, 'from matplotlib import pyplot as plt\n'), ((4149, 4335), 'noisy_spelling_hmm.Word', 'Word', (['word', 'p1', 'p2', 'p3', 'n_char', '"""all"""'], {'mixing_coeffs': 'cbm_em.model.mixture_distribution.probs', 'initial_probs': 'cbm_em.model.components_distribution.distribution.probs', 'n_mix': 'n_mix'}), "(word, p1, p2, p3, n_char, 'all', mixing_coeffs=cbm_em.model.\n mixture_distribution.probs, initial_probs=cbm_em.model.\n components_distribution.distribution.probs, n_mix=n_mix)\n", (4153, 4335), False, 'from noisy_spelling_hmm import Word\n'), ((4600, 4637), 'jax.vmap', 'vmap', (['decode'], {'in_axes': '(0, None, None)'}), '(decode, in_axes=(0, None, None))\n', (4604, 4637), False, 'from jax import vmap\n'), ((4950, 4998), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)', 'nrows': '(10)', 'figsize': '(4, 10)'}), '(ncols=4, nrows=10, figsize=(4, 10))\n', (4962, 4998), True, 'from matplotlib import pyplot as plt\n'), ((5187, 5197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5195, 5197), True, 'from matplotlib import pyplot as plt\n'), ((5765, 5801), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(3)'], {'figsize': '(20, 10)'}), '(4, 3, figsize=(20, 10))\n', (5777, 5801), True, 'from matplotlib import pyplot as plt\n'), ((6223, 6241), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6239, 6241), True, 'from matplotlib import pyplot as plt\n'), ((6246, 6256), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6254, 6256), True, 'from matplotlib import pyplot as plt\n'), ((6071, 6096), 'jax.numpy.array', 'jnp.array', (['loglikelihoods'], {}), '(loglikelihoods)\n', (6080, 6096), True, 'import jax.numpy as jnp\n'), ((2997, 3069), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_mix', 'cbm.num_of_classes', '(cbm.num_of_classes * k + cls + 1)'], {}), '(n_mix, cbm.num_of_classes, cbm.num_of_classes * k + cls + 1)\n', (3008, 3069), True, 'from matplotlib import pyplot as plt\n'), ((3199, 3214), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3207, 3214), True, 'from matplotlib import pyplot as plt\n'), ((4640, 4658), 'jax.numpy.array', 'jnp.array', (['samples'], {}), '(samples)\n', (4649, 4658), True, 'import jax.numpy as jnp\n'), ((4880, 4897), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (4888, 4897), True, 'import numpy as np\n'), ((6134, 6168), 'jax.nn.log_softmax', 'jax.nn.log_softmax', (['loglikelihoods'], {}), '(loglikelihoods)\n', (6152, 6168), False, 'import jax\n')]
|
__author__ = 'palmer'
# every method in smoothing should accept (im, **kwargs)
def median(im, **kwargs):
from scipy import ndimage
im = ndimage.filters.median_filter(im,**kwargs)
return im
def hot_spot_removal(xic, q=99.):
import numpy as np
xic_q = np.percentile(xic, q)
xic[xic > xic_q] = xic_q
return xic
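# Minimal usage sketch (hypothetical 64x64 image); `size` is forwarded unchanged to
# scipy.ndimage.filters.median_filter, and q=99. clips the brightest 1% of pixels.
if __name__ == '__main__':
    import numpy as np
    demo = np.random.rand(64, 64)
    demo = median(demo, size=3)
    demo = hot_spot_removal(demo, q=99.)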
|
[
"numpy.percentile",
"scipy.ndimage.filters.median_filter"
] |
[((141, 184), 'scipy.ndimage.filters.median_filter', 'ndimage.filters.median_filter', (['im'], {}), '(im, **kwargs)\n', (170, 184), False, 'from scipy import ndimage\n'), ((268, 289), 'numpy.percentile', 'np.percentile', (['xic', 'q'], {}), '(xic, q)\n', (281, 289), True, 'import numpy as np\n')]
|
# coding: utf-8
# MultiPerceptron
# Training driven by a queue
# Records the number of training steps in the model
# Training data comes from a generator instead of a CSV file
# Changed to a 3x11x4 NN model
# Added a score output
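# Pipeline overview (derived from the code below): a background thread generates random sensor
# triples, labels them with SensorGenerator, and enqueues them into a FIFOQueue; the 3-11-4
# multilayer perceptron (ReLU hidden layer, softmax score) dequeues batches, trains with Adam,
# and periodically checkpoints the model together with the recorded step count.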
import os
_FILE_DIR=os.path.abspath(os.path.dirname(__file__))
import time
import tensorflow as tf
import threading
from sklearn.utils import shuffle
import sys
sys.path.append(_FILE_DIR+'/..')
from generator import SensorGenerator
import numpy as np
tf.reset_default_graph()
MODEL_DIR=_FILE_DIR+"/model"
SUMMARY_LOG_DIR=_FILE_DIR+"/log"
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
n_nodes_hl1 = 11
data_cols = 3 # number of sensors: left45, front, right45
n_classes = 4 # number of prediction classes: stop, left, forward, right
batch_size = 100 # keep the batch size roughly between 10 and 100
chunk_size = 100 # capacity of the FIFOQueue
target_step = 10000000 # number of training steps
TEST_NUM = 10000 # number of test samples
generator = SensorGenerator()
def generate_random_train_data(batch_size):
CSVDATA=[]
    # Train classification of obstacles within 10 m
    #sensors = np.random.randint(0,1000,[batch_size,3])
    # Train classification of obstacles within 20 cm ahead
    #LEFT45 = np.random.randint(0,1000,batch_size)
    #FRONT = np.random.randint(0,20,batch_size)
    #RIGHT45 = np.random.randint(0,1000,batch_size)
    # Train classification for 20 cm-100 cm ahead and within 100 cm to the left/right
    #LEFT45 = np.random.randint(0,100,batch_size)
    #FRONT = np.random.randint(20,200,batch_size)
    #RIGHT45 = np.random.randint(0,100,batch_size)
    # Train classification of obstacles within 2 m
    #LEFT45 = np.random.randint(0,200,batch_size)
    #FRONT = np.random.randint(0,200,batch_size)
    #RIGHT45 = np.random.randint(0,200,batch_size)
    # Train classification of obstacles within 1 m
    #LEFT45 = np.random.randint(0,100,batch_size)
    #FRONT = np.random.randint(0,100,batch_size)
    #RIGHT45 = np.random.randint(0,100,batch_size)
    # Train classification of obstacles within 2 m
sensors = np.random.randint(0,200,[batch_size,3])
#sensors = np.c_[LEFT45,FRONT,RIGHT45]
for i in range(batch_size):
GENERATOR_RESULT = generator.driving_instruction(sensors[i])
CSVROW = np.hstack((sensors[i],GENERATOR_RESULT[0:4]))
CSVDATA.append(CSVROW)
CSVDATA = np.array(CSVDATA)
batch_data = CSVDATA[0:batch_size,0:data_cols]
batch_target = CSVDATA[0:batch_size,data_cols:]
return batch_data, batch_target
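# Expected shapes (inferred from the construction above): batch_data is (batch_size, 3) with the
# raw sensor readings, and batch_target is (batch_size, 4) holding the first four values returned
# by SensorGenerator.driving_instruction, used as the class targets.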
def load_and_enqueue(sess):
while True:
try:
batch_data, batch_target = generate_random_train_data(batch_size)
sess.run(enqueue_op, feed_dict={placeholder_input_data:batch_data, placeholder_input_target:batch_target})
except tf.errors.CancelledError as e:
break
print("finished enqueueing")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
with tf.variable_scope("input"):
placeholder_input_data = tf.placeholder('float', [None, data_cols], name='input_data') # for load_and_enqueue. use dequeue_data_op for prediction
placeholder_input_target = tf.placeholder('float', name='input_target') # for load_and_enqueue. use dequeue_target_op for prediction
placeholder_batch_size = tf.placeholder(tf.int32, name='batch_size') # need feed_dict in training sess.run(). don't need for prediction.
with tf.variable_scope("step"):
    placeholder_step = tf.placeholder(tf.int32, name='input_step') # placeholder for feeding the step value
    variable_step = tf.Variable(initial_value=0, name="step") # variable that records the training step
step_op = variable_step.assign(placeholder_step)
with tf.variable_scope("queue"):
queue = tf.FIFOQueue(
capacity=chunk_size, # enqueue size
dtypes=['float', 'float'],
shapes=[[data_cols], [n_classes]],
name='FIFOQueue'
)
# Enqueue and dequeue operations
enqueue_op = queue.enqueue_many([placeholder_input_data, placeholder_input_target], name='enqueue_op')
dequeue_data_op, dequeue_target_op = queue.dequeue_many(placeholder_batch_size, name='dequeue_op') # instead of data/target placeholder
with tf.variable_scope('neural_network_model'):
hidden_1_layer = {'weights':tf.Variable(weight_variable([data_cols, n_nodes_hl1])),
'biases':tf.Variable(bias_variable([n_nodes_hl1]))}
output_layer = {'weights':tf.Variable(weight_variable([n_nodes_hl1, n_classes])),
'biases':tf.Variable(bias_variable([n_classes])),}
l1 = tf.add(tf.matmul(dequeue_data_op,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
# 予測結果
prediction = tf.add(tf.matmul(l1,output_layer['weights']), output_layer['biases'], name='output_y')
# スコア
score = tf.nn.softmax(prediction, name='score')
with tf.variable_scope('loss'):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=dequeue_target_op)
loss_op = tf.reduce_mean(losses, name='cost')
tf.summary.scalar('loss', loss_op)
with tf.variable_scope('accuracy'):
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(dequeue_target_op, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name='accuracy')
tf.summary.scalar('accuracy', accuracy)
summary_op = tf.summary.merge_all()
train_op = tf.train.AdamOptimizer(0.0001).minimize(loss_op, name='train_op')
saver = tf.train.Saver(max_to_keep=1000)
test_data, test_target =generate_random_train_data(TEST_NUM)
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
if ckpt:
        # Get the path to the last saved model from the checkpoint file
last_model = ckpt.model_checkpoint_path
print("load {0}".format(last_model))
        # Load the trained model
saver.restore(sess, last_model)
LOAD_MODEL = True
else:
print("initialization")
        # Run variable initialization
init_op = tf.global_variables_initializer()
sess.run(init_op)
writer = tf.summary.FileWriter(SUMMARY_LOG_DIR, sess.graph)
start_time, start_clock = time.time(), time.clock()
# Start a thread to enqueue data asynchronously, and hide I/O latency.
coord = tf.train.Coordinator()
enqueue_thread = threading.Thread(target=load_and_enqueue, args=[sess])
    enqueue_thread.daemon = True  # isDaemon() only reads the flag; set it so the enqueue thread exits with the main thread
enqueue_thread.start()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    step = 0 # keep a variable so the final step count can be recorded in the model
try:
# check the accuracy before training (without feed_dict!)
print(sess.run(accuracy, feed_dict={placeholder_batch_size:chunk_size})) # check batch_size's data
        # Get the step restored from the checkpoint
_step = sess.run(variable_step)
print("learned step:{}".format(_step))
for step in range(_step+1, target_step+1):
batch_loss=0
w_summary=None
_, batch_loss, w_summary = sess.run([train_op, loss_op, summary_op],
feed_dict={placeholder_batch_size:batch_size})
if step % 1000 == 0:
if not w_summary is None:
writer.add_summary(w_summary, step)
ac = sess.run(accuracy, feed_dict={placeholder_batch_size:chunk_size}) # check batch_size's data
                # Check the accuracy on the test data
test_accuracy = accuracy.eval({'queue/dequeue_op:0':test_data,
'queue/dequeue_op:1':test_target})
if step % 10000 == 0:
print("Step:%d accuracy:%.8f test_accuracy:%.8f loss:%.8f time:%.8f clock:%.14f" % (step,ac,test_accuracy,batch_loss,time.time()-start_time,time.clock()-start_clock))
                # Save every 1,000,000 steps
if step % 1000000 == 0:
                    _step = sess.run(step_op,feed_dict={placeholder_step:step}) # record the current step in variable_step
saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
sess.run(queue.close(cancel_pending_enqueues=True))
except Exception as e:
# Report exceptions to the coodinator.
print(e)
coord.request_stop(e)
finally:
coord.request_stop()
coord.join(threads)
    # Save if training advanced beyond the restored step
if step > _step:
        _step = sess.run(step_op,feed_dict={placeholder_step:step}) # record the current step in variable_step
saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
    # Generate fresh test data and check the accuracy
test_data, test_target =generate_random_train_data(TEST_NUM)
print('Accuracy:',accuracy.eval({dequeue_data_op:test_data,
dequeue_target_op:test_target}))
    # Print the total number of steps
print('step:{}'.format(sess.run(variable_step)))
print("end")
|
[
"tensorflow.train.Coordinator",
"tensorflow.reset_default_graph",
"tensorflow.matmul",
"numpy.random.randint",
"tensorflow.Variable",
"tensorflow.truncated_normal",
"sys.path.append",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"os.path.dirname",
"os.path.exists",
"tensorflow.variable_scope",
"time.clock",
"tensorflow.train.start_queue_runners",
"generator.SensorGenerator",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.cast",
"tensorflow.FIFOQueue",
"tensorflow.summary.merge_all",
"tensorflow.train.get_checkpoint_state",
"threading.Thread",
"tensorflow.train.Saver",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"tensorflow.constant",
"numpy.hstack",
"os.makedirs",
"tensorflow.argmax",
"time.time",
"numpy.array",
"tensorflow.train.AdamOptimizer"
] |
[((278, 312), 'sys.path.append', 'sys.path.append', (["(_FILE_DIR + '/..')"], {}), "(_FILE_DIR + '/..')\n", (293, 312), False, 'import sys\n'), ((369, 393), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (391, 393), True, 'import tensorflow as tf\n'), ((776, 793), 'generator.SensorGenerator', 'SensorGenerator', ([], {}), '()\n', (791, 793), False, 'from generator import SensorGenerator\n'), ((5030, 5052), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5050, 5052), True, 'import tensorflow as tf\n'), ((5140, 5172), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1000)'}), '(max_to_keep=1000)\n', (5154, 5172), True, 'import tensorflow as tf\n'), ((153, 178), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'import os\n'), ((464, 489), 'os.path.exists', 'os.path.exists', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (478, 489), False, 'import os\n'), ((495, 517), 'os.makedirs', 'os.makedirs', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (506, 517), False, 'import os\n'), ((1670, 1712), 'numpy.random.randint', 'np.random.randint', (['(0)', '(200)', '[batch_size, 3]'], {}), '(0, 200, [batch_size, 3])\n', (1687, 1712), True, 'import numpy as np\n'), ((1964, 1981), 'numpy.array', 'np.array', (['CSVDATA'], {}), '(CSVDATA)\n', (1972, 1981), True, 'import numpy as np\n'), ((2518, 2556), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (2537, 2556), True, 'import tensorflow as tf\n'), ((2568, 2588), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2579, 2588), True, 'import tensorflow as tf\n'), ((2630, 2659), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (2641, 2659), True, 'import tensorflow as tf\n'), ((2671, 2691), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2682, 2691), True, 'import tensorflow as tf\n'), ((2698, 2724), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input"""'], {}), "('input')\n", (2715, 2724), True, 'import tensorflow as tf\n'), ((2755, 2816), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, data_cols]'], {'name': '"""input_data"""'}), "('float', [None, data_cols], name='input_data')\n", (2769, 2816), True, 'import tensorflow as tf\n'), ((2907, 2951), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'name': '"""input_target"""'}), "('float', name='input_target')\n", (2921, 2951), True, 'import tensorflow as tf\n'), ((3042, 3085), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""batch_size"""'}), "(tf.int32, name='batch_size')\n", (3056, 3085), True, 'import tensorflow as tf\n'), ((3161, 3186), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""step"""'], {}), "('step')\n", (3178, 3186), True, 'import tensorflow as tf\n'), ((3211, 3254), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""input_step"""'}), "(tf.int32, name='input_step')\n", (3225, 3254), True, 'import tensorflow as tf\n'), ((3286, 3327), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'name': '"""step"""'}), "(initial_value=0, name='step')\n", (3297, 3327), True, 'import tensorflow as tf\n'), ((3397, 3423), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""queue"""'], {}), "('queue')\n", (3414, 3423), True, 'import tensorflow as tf\n'), ((3437, 3555), 'tensorflow.FIFOQueue', 'tf.FIFOQueue', ([], {'capacity': 'chunk_size', 
'dtypes': "['float', 'float']", 'shapes': '[[data_cols], [n_classes]]', 'name': '"""FIFOQueue"""'}), "(capacity=chunk_size, dtypes=['float', 'float'], shapes=[[\n data_cols], [n_classes]], name='FIFOQueue')\n", (3449, 3555), True, 'import tensorflow as tf\n'), ((3896, 3937), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""neural_network_model"""'], {}), "('neural_network_model')\n", (3913, 3937), True, 'import tensorflow as tf\n'), ((4366, 4380), 'tensorflow.nn.relu', 'tf.nn.relu', (['l1'], {}), '(l1)\n', (4376, 4380), True, 'import tensorflow as tf\n'), ((4519, 4558), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['prediction'], {'name': '"""score"""'}), "(prediction, name='score')\n", (4532, 4558), True, 'import tensorflow as tf\n'), ((4565, 4590), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (4582, 4590), True, 'import tensorflow as tf\n'), ((4605, 4694), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'prediction', 'labels': 'dequeue_target_op'}), '(logits=prediction, labels=\n dequeue_target_op)\n', (4644, 4694), True, 'import tensorflow as tf\n'), ((4704, 4739), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {'name': '"""cost"""'}), "(losses, name='cost')\n", (4718, 4739), True, 'import tensorflow as tf\n'), ((4744, 4778), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss_op'], {}), "('loss', loss_op)\n", (4761, 4778), True, 'import tensorflow as tf\n'), ((4785, 4814), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (4802, 4814), True, 'import tensorflow as tf\n'), ((4976, 5015), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (4993, 5015), True, 'import tensorflow as tf\n'), ((5239, 5251), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5249, 5251), True, 'import tensorflow as tf\n'), ((5272, 5312), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (5301, 5312), True, 'import tensorflow as tf\n'), ((5704, 5754), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['SUMMARY_LOG_DIR', 'sess.graph'], {}), '(SUMMARY_LOG_DIR, sess.graph)\n', (5725, 5754), True, 'import tensorflow as tf\n'), ((5899, 5921), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (5919, 5921), True, 'import tensorflow as tf\n'), ((5943, 5997), 'threading.Thread', 'threading.Thread', ([], {'target': 'load_and_enqueue', 'args': '[sess]'}), '(target=load_and_enqueue, args=[sess])\n', (5959, 5997), False, 'import threading\n'), ((6069, 6121), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord', 'sess': 'sess'}), '(coord=coord, sess=sess)\n', (6097, 6121), True, 'import tensorflow as tf\n'), ((1873, 1919), 'numpy.hstack', 'np.hstack', (['(sensors[i], GENERATOR_RESULT[0:4])'], {}), '((sensors[i], GENERATOR_RESULT[0:4]))\n', (1882, 1919), True, 'import numpy as np\n'), ((4277, 4330), 'tensorflow.matmul', 'tf.matmul', (['dequeue_data_op', "hidden_1_layer['weights']"], {}), "(dequeue_data_op, hidden_1_layer['weights'])\n", (4286, 4330), True, 'import tensorflow as tf\n'), ((4417, 4455), 'tensorflow.matmul', 'tf.matmul', (['l1', "output_layer['weights']"], {}), "(l1, output_layer['weights'])\n", (4426, 4455), True, 'import tensorflow as tf\n'), ((4839, 4863), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), 
'(prediction, 1)\n', (4848, 4863), True, 'import tensorflow as tf\n'), ((4865, 4896), 'tensorflow.argmax', 'tf.argmax', (['dequeue_target_op', '(1)'], {}), '(dequeue_target_op, 1)\n', (4874, 4896), True, 'import tensorflow as tf\n'), ((4928, 4953), 'tensorflow.cast', 'tf.cast', (['correct', '"""float"""'], {}), "(correct, 'float')\n", (4935, 4953), True, 'import tensorflow as tf\n'), ((5065, 5095), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (5087, 5095), True, 'import tensorflow as tf\n'), ((5630, 5663), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5661, 5663), True, 'import tensorflow as tf\n'), ((5785, 5796), 'time.time', 'time.time', ([], {}), '()\n', (5794, 5796), False, 'import time\n'), ((5798, 5810), 'time.clock', 'time.clock', ([], {}), '()\n', (5808, 5810), False, 'import time\n'), ((7344, 7355), 'time.time', 'time.time', ([], {}), '()\n', (7353, 7355), False, 'import time\n'), ((7367, 7379), 'time.clock', 'time.clock', ([], {}), '()\n', (7377, 7379), False, 'import time\n')]
|
#
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from abc import ABC
import logging
import os
from os.path import abspath, dirname, join
import sys
import unittest
import torch
import random
import numpy as np
import pandas as pd
from merlion.models.defaults import DefaultDetector, DefaultDetectorConfig
from merlion.plot import plot_anoms_plotly
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import TimeSeries
from ts_datasets.anomaly import *
rootdir = dirname(dirname(dirname(abspath(__file__))))
logger = logging.getLogger(__name__)
def set_random_seeds():
torch.manual_seed(12345)
random.seed(12345)
np.random.seed(12345)
def get_train_test_splits(df: pd.DataFrame, metadata: pd.DataFrame, n: int) -> (pd.DataFrame, pd.DataFrame, np.ndarray):
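    # split rows on the metadata.trainval flag; keep the last n training rows, the first n test rows,
    # and the first n anomaly labels of the test portion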
train_df = df[metadata.trainval]
test_df = df[~metadata.trainval]
test_labels = pd.DataFrame(metadata[~metadata.trainval].anomaly)
return train_df.tail(n), test_df.head(n), test_labels[:n]
class Mixin(ABC):
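    # shared test logic; concrete subclasses only provide run_init() to set up the model and data splits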
def test_score(self):
print("-" * 80)
logger.info("test_score\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
test_ts = TimeSeries.from_pd(self.test_df)
score_ts = self.model.get_anomaly_score(test_ts)
scores = score_ts.to_pd().values.flatten()
min_score, max_score, sum_score = min(scores), max(scores), sum(scores)
logger.info(f"scores look like: {scores[:10]}")
logger.info(f"min score = {min_score}")
logger.info(f"max score = {max_score}")
logger.info(f"sum score = {sum_score}")
def test_save_load(self):
print("-" * 80)
logger.info("test_save_load\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
path = join(rootdir, "tmp", "default", "anom", "multi" if multi else "uni")
self.model.save(dirname=path)
loaded_model = DefaultDetector.load(dirname=path)
test_ts = TimeSeries.from_pd(self.test_df)
scores = self.model.get_anomaly_score(test_ts)
scores_np = scores.to_pd().values.flatten()
loaded_model_scores = loaded_model.get_anomaly_score(test_ts)
loaded_model_scores = loaded_model_scores.to_pd().values.flatten()
self.assertEqual(len(scores_np), len(loaded_model_scores))
alarms = self.model.post_rule(scores)
loaded_model_alarms = loaded_model.post_rule(scores)
self.assertSequenceEqual(list(alarms), list(loaded_model_alarms))
def test_plot(self):
try:
import plotly
print("-" * 80)
logger.info("test_plot\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
savedir = join(rootdir, "tmp", "default", "anom")
os.makedirs(savedir, exist_ok=True)
path = join(savedir, ("multi" if multi else "uni") + ".png")
test_ts = TimeSeries.from_pd(self.test_df)
fig = self.model.plot_anomaly_plotly(
time_series=test_ts, time_series_prev=train_ts, plot_time_series_prev=True
)
plot_anoms_plotly(fig, TimeSeries.from_pd(self.test_labels))
try:
import kaleido
fig.write_image(path, engine="kaleido")
except ImportError:
logger.info("kaleido not installed, not trying to save image")
except ImportError:
logger.info("plotly not installed, skipping test case")
class TestUnivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(
DefaultDetectorConfig(granularity="1h", threshold=AggregateAlarms(alm_threshold=1.5))
)
# Time series with anomalies in both train split and test split
df = pd.read_csv(join(rootdir, "data", "synthetic_anomaly", "horizontal_spike_anomaly.csv"))
df.timestamp = pd.to_datetime(df.timestamp, unit="s")
df = df.set_index("timestamp")
# Get training & testing splits
self.train_df = df.iloc[: -len(df) // 2, :1]
self.test_df = df.iloc[-len(df) // 2 :, :1]
self.test_labels = df.iloc[-len(df) // 2 :, -1:]
class TestMultivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(DefaultDetectorConfig(threshold=AggregateAlarms(alm_threshold=2)))
self.dataset = MSL(rootdir=join(rootdir, "data", "smap"))
df, metadata = self.dataset[0]
self.train_df, self.test_df, self.test_labels = get_train_test_splits(df, metadata, 2000)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.INFO
)
unittest.main()
|
[
"pandas.DataFrame",
"unittest.main",
"os.path.abspath",
"numpy.random.seed",
"os.makedirs",
"logging.basicConfig",
"torch.manual_seed",
"merlion.utils.TimeSeries.from_pd",
"merlion.models.defaults.DefaultDetector.load",
"merlion.post_process.threshold.AggregateAlarms",
"random.seed",
"pandas.to_datetime",
"os.path.join",
"logging.getLogger"
] |
[((716, 743), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (733, 743), False, 'import logging\n'), ((774, 798), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (791, 798), False, 'import torch\n'), ((803, 821), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (814, 821), False, 'import random\n'), ((826, 847), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (840, 847), True, 'import numpy as np\n'), ((1063, 1113), 'pandas.DataFrame', 'pd.DataFrame', (['metadata[~metadata.trainval].anomaly'], {}), '(metadata[~metadata.trainval].anomaly)\n', (1075, 1113), True, 'import pandas as pd\n'), ((5241, 5381), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'stream': 'sys.stdout', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n stream=sys.stdout, level=logging.INFO)\n", (5260, 5381), False, 'import logging\n'), ((5391, 5406), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5404, 5406), False, 'import unittest\n'), ((1387, 1420), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.train_df'], {}), '(self.train_df)\n', (1405, 1420), False, 'from merlion.utils import TimeSeries\n'), ((1475, 1507), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.test_df'], {}), '(self.test_df)\n', (1493, 1507), False, 'from merlion.utils import TimeSeries\n'), ((2097, 2130), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.train_df'], {}), '(self.train_df)\n', (2115, 2130), False, 'from merlion.utils import TimeSeries\n'), ((2215, 2283), 'os.path.join', 'join', (['rootdir', '"""tmp"""', '"""default"""', '"""anom"""', "('multi' if multi else 'uni')"], {}), "(rootdir, 'tmp', 'default', 'anom', 'multi' if multi else 'uni')\n", (2219, 2283), False, 'from os.path import abspath, dirname, join\n'), ((2345, 2379), 'merlion.models.defaults.DefaultDetector.load', 'DefaultDetector.load', ([], {'dirname': 'path'}), '(dirname=path)\n', (2365, 2379), False, 'from merlion.models.defaults import DefaultDetector, DefaultDetectorConfig\n'), ((2399, 2431), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.test_df'], {}), '(self.test_df)\n', (2417, 2431), False, 'from merlion.utils import TimeSeries\n'), ((4517, 4555), 'pandas.to_datetime', 'pd.to_datetime', (['df.timestamp'], {'unit': '"""s"""'}), "(df.timestamp, unit='s')\n", (4531, 4555), True, 'import pandas as pd\n'), ((686, 703), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (693, 703), False, 'from os.path import abspath, dirname, join\n'), ((3182, 3215), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.train_df'], {}), '(self.train_df)\n', (3200, 3215), False, 'from merlion.utils import TimeSeries\n'), ((3315, 3354), 'os.path.join', 'join', (['rootdir', '"""tmp"""', '"""default"""', '"""anom"""'], {}), "(rootdir, 'tmp', 'default', 'anom')\n", (3319, 3354), False, 'from os.path import abspath, dirname, join\n'), ((3367, 3402), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (3378, 3402), False, 'import os\n'), ((3422, 3475), 'os.path.join', 'join', (['savedir', "(('multi' if multi else 'uni') + '.png')"], {}), "(savedir, ('multi' if multi else 'uni') + '.png')\n", (3426, 3475), False, 'from os.path import abspath, dirname, join\n'), ((3499, 3531), 'merlion.utils.TimeSeries.from_pd', 
'TimeSeries.from_pd', (['self.test_df'], {}), '(self.test_df)\n', (3517, 3531), False, 'from merlion.utils import TimeSeries\n'), ((4418, 4492), 'os.path.join', 'join', (['rootdir', '"""data"""', '"""synthetic_anomaly"""', '"""horizontal_spike_anomaly.csv"""'], {}), "(rootdir, 'data', 'synthetic_anomaly', 'horizontal_spike_anomaly.csv')\n", (4422, 4492), False, 'from os.path import abspath, dirname, join\n'), ((3722, 3758), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.test_labels'], {}), '(self.test_labels)\n', (3740, 3758), False, 'from merlion.utils import TimeSeries\n'), ((5040, 5069), 'os.path.join', 'join', (['rootdir', '"""data"""', '"""smap"""'], {}), "(rootdir, 'data', 'smap')\n", (5044, 5069), False, 'from os.path import abspath, dirname, join\n'), ((4274, 4308), 'merlion.post_process.threshold.AggregateAlarms', 'AggregateAlarms', ([], {'alm_threshold': '(1.5)'}), '(alm_threshold=1.5)\n', (4289, 4308), False, 'from merlion.post_process.threshold import AggregateAlarms\n'), ((4970, 5002), 'merlion.post_process.threshold.AggregateAlarms', 'AggregateAlarms', ([], {'alm_threshold': '(2)'}), '(alm_threshold=2)\n', (4985, 5002), False, 'from merlion.post_process.threshold import AggregateAlarms\n')]
|
# treemodels.py
from __future__ import division
import gtk
from debug import *
import numpy as np
import matplotlib.pyplot as plt
class CountingActivitiesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for CountingActivity's in a Log."""
def __init__ (self, log):
gtk.GenericTreeModel.__init__ (self)
self.log = log
@property
def n_rows (self):
return len (self.log.counting_activities)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 2
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.log.counting_activities):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if len (self.log.counting_activities) == 0:
return None
activity = sorted (self.log.counting_activities)[row]
if col == 0:
return activity.name
elif col == 1:
return activity.unit
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class TimingActivitiesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for TimingActivity's in a Log."""
def __init__ (self, log):
gtk.GenericTreeModel.__init__ (self)
self.log = log
@property
def n_rows (self):
return len (self.log.timing_activities)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 1
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.log.timing_activities):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if len (self.log.timing_activities) == 0:
return None
activity = sorted (self.log.timing_activities)[row]
if col == 0:
return activity.name
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class CountingEntriesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for CountingEntry's in a Log."""
def __init__ (self, log, activity_name):
gtk.GenericTreeModel.__init__ (self)
self.log = log
self.activity_name = activity_name
@property
def entries (self):
return self.log.get_entries (self.activity_name)
@property
def n_rows (self):
return len (self.entries)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
        return 4  # date, count, error, note (on_get_value handles four columns)
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.entries):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if self.n_rows == 0:
return None
entry = self.entries[row]
if col == 0:
return str (entry.date)
elif col == 1:
return str (entry.n)
elif col == 2:
return str (entry.error)
elif col == 3:
return str (entry.note)
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class TimingEntriesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for TimingEntry's in a Log."""
def __init__ (self, log, activity_name):
gtk.GenericTreeModel.__init__ (self)
self.log = log
self.activity_name = activity_name
@property
def entries (self):
return self.log.get_entries (self.activity_name)
@property
def n_rows (self):
return len (self.entries)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
        return 3  # start time, end time, note (on_get_value handles three columns)
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.entries):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
def fmt (t):
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}'.format (
t.year, t.month, t.day, t.hour, t.minute)
if self.n_rows == 0:
return None
entry = self.entries[row]
if col == 0:
return fmt (entry.start_time)
elif col == 1:
return fmt (entry.end_time)
elif col == 2:
return str (entry.note)
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class ActivityDrawModel (gtk.GenericTreeModel):
"""Gtk TreeModel for drawing Activity's in a Log."""
def __init__ (self, activities):
gtk.GenericTreeModel.__init__ (self)
self.activities = sorted (activities)
self.checks = [
False for activity in self.activities]
n = len (self.activities)
##mpl_colors = [
## (0.0, 0.0, 1.0),
## (0.0, 0.5, 0.0),
## (1.0, 0.0, 0.0),
## (0.0, 0.75, 0.75),
## (0.75, 0.0, 0.75),
## (0.75, 0.75, 0.0),
## (0.0, 0.0, 0.0),
## (0.0, 0.0, 1.0) ]
##n_color = len (mpl_colors)
##self.colors = [
## gtk.gdk.Color (*mpl_colors[i % n_color]) for i in xrange (n)]
cm = plt.get_cmap ('rainbow')
self.colors = [
gtk.gdk.Color (*cm (i / n)[:3])
for i in xrange (n)]
self.color_tuples = [
cm (i / n)[:3]
for i in xrange (n)]
self.alphas = [
int (.8 * 65535) for activity in self.activities]
@property
def n_rows (self):
return len (self.activities)
# toggle
def toggle (self, path):
row = int (path)
self.checks[row] = not self.checks[row]
def toggle_all (self):
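        # check every row, unless all rows are already checked, in which case uncheck them all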
if np.sum (self.checks) == len (self.checks):
value = False
else:
value = True
for row in xrange (len (self.checks)):
self.checks[row] = value
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 3
def on_get_column_type (self, index):
if index == 0:
return bool
elif index == 1:
return str
elif index == 2:
return gtk.gdk.Pixbuf
def on_get_iter (self, path):
if self.n_rows:
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if self.n_rows == 0:
return None
activity = sorted (self.activities)[row]
if col == 0:
return self.checks[row]
elif col == 1:
return activity.name
else:
pb = gtk.gdk.Pixbuf (
gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16)
color = self.colors[row]
color_str = '{0:02x}{1:02x}{2:02x}{3:02x}'.format (
*map (int,
(color.red / 256, color.green / 256, color.blue / 256,
self.alphas[row] / 256)))
pb.fill (int (color_str, 16))
return pb
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
|
[
"gtk.GenericTreeModel.__init__",
"gtk.gdk.Pixbuf",
"numpy.sum",
"matplotlib.pyplot.get_cmap"
] |
[((286, 321), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (315, 321), False, 'import gtk\n'), ((1999, 2034), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (2028, 2034), False, 'import gtk\n'), ((3661, 3696), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (3690, 3696), False, 'import gtk\n'), ((5561, 5596), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (5590, 5596), False, 'import gtk\n'), ((7563, 7598), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (7592, 7598), False, 'import gtk\n'), ((8227, 8250), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (8239, 8250), True, 'import matplotlib.pyplot as plt\n'), ((8763, 8782), 'numpy.sum', 'np.sum', (['self.checks'], {}), '(self.checks)\n', (8769, 8782), True, 'import numpy as np\n'), ((9753, 9808), 'gtk.gdk.Pixbuf', 'gtk.gdk.Pixbuf', (['gtk.gdk.COLORSPACE_RGB', '(True)', '(8)', '(16)', '(16)'], {}), '(gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16)\n', (9767, 9808), False, 'import gtk\n')]
|
import sys
import torch
from torch.autograd import Variable
import numpy as np
import os
from os import path
import argparse
import random
import copy
from tqdm import tqdm
import pickle
from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, \
read_processed_scores, read_scores
from scipy.stats import spearmanr, pearsonr, kendalltau
import math
from torchvision import models
from resources import MODEL_WEIGHT_DIR
from resources import OUTPUTS_DIR
from matplotlib import pyplot as plt
import csv
def parse_split_data(sorted_scores, train_percent, dev_percent, prompt='structure'):
train = {}
dev = {}
test = {}
all = {}
topic_count = 0
for article_id, scores_list in tqdm(sorted_scores.items()):
entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
entry['sys_summ' + repr(sid)] = [s['scores'][prompt] for s in scores_list if s['summ_id'] == sid][
0] # that can be done more efficiently, but who cares...
rand = random.random()
all[article_id] = entry
if rand < train_percent:
train[article_id] = entry
elif rand < train_percent + dev_percent:
dev[article_id] = entry
else:
test[article_id] = entry
topic_count += 1
print("topics in parse_split_data", topic_count)
return train, dev, test, all
def parse_split_data_balanced(sorted_scores, train_percent, dev_percent, prompt='structure'):
train = {}
dev = {}
test = {}
all = {}
topic_count = 0
article_ids = list(sorted_scores.keys())
random.shuffle(article_ids)
num_articles = len(article_ids)
train_ids = article_ids[0:int(train_percent * num_articles)]
dev_ids = article_ids[int(train_percent * num_articles):int((train_percent + dev_percent) * num_articles)]
# test_ids=article_ids[int((train_percent+dev_percent)*num_articles):]
for article_id, scores_list in tqdm(sorted_scores.items()):
entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
entry['sys_summ' + repr(sid)] = [s['scores'][prompt] for s in scores_list if s['summ_id'] == sid][
0] # that can be done more efficiently, but who cares...
# rand = random.random()
all[article_id] = entry
if article_id in train_ids:
train[article_id] = entry
elif article_id in dev_ids:
dev[article_id] = entry
else:
test[article_id] = entry
topic_count += 1
print("topics in parse_split_data", topic_count)
return train, dev, test, all
def build_model(model_type, vec_length, learn_rate=None):
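    # 'linear' builds a single Linear layer; any other type builds a two-layer MLP with a ReLU in between.
    # When a learning rate is given, an Adam optimiser over the model parameters is returned as well.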
if 'linear' in model_type:
deep_model = torch.nn.Sequential(
torch.nn.Linear(vec_length, 1),
)
else:
deep_model = torch.nn.Sequential(
torch.nn.Linear(vec_length, int(vec_length / 2)),
torch.nn.ReLU(),
torch.nn.Linear(int(vec_length / 2), 1),
)
if learn_rate is not None:
optimiser = torch.optim.Adam(deep_model.parameters(), lr=learn_rate)
return deep_model, optimiser
else:
return deep_model
def deep_pair_train(vec_list, target, deep_model, optimiser, device):
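    # score both candidates of each pair, softmax over the pair, fit the [1,0]/[0,1]/[0.5,0.5]
    # preference target with BCE, and take one optimiser step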
# print(np.array(vec_list).shape)
input = Variable(torch.from_numpy(np.array(vec_list)).float())
# print(input)
if 'gpu' in device:
input = input.to('cuda')
value_variables = deep_model(input)
# print(value_variables)
softmax_layer = torch.nn.Softmax(dim=1)
pred = softmax_layer(value_variables)
# print(pred)
# print(np.array(target).shape, np.array(target).reshape(-1, 2, 1).shape)
target_variables = Variable(torch.from_numpy(np.array(target)).float()).view(-1, 2, 1)
# print(target_variables)
if 'gpu' in device:
target_variables = target_variables.to('cuda')
loss_fn = torch.nn.BCELoss()
loss = loss_fn(pred, target_variables)
# print(loss)
optimiser.zero_grad()
loss.backward()
optimiser.step()
return loss.cpu().item()
def deep_pair_train_loss_only(vec_list, target, deep_model, optimiser, device):
# print(np.array(vec_list).shape)
input = Variable(torch.from_numpy(np.array(vec_list)).float())
# print(input)
if 'gpu' in device:
input = input.to('cuda')
value_variables = deep_model(input)
# print(value_variables)
softmax_layer = torch.nn.Softmax(dim=1)
pred = softmax_layer(value_variables)
# print(pred)
# print(np.array(target).shape, np.array(target).reshape(-1, 2, 1).shape)
target_variables = Variable(torch.from_numpy(np.array(target)).float()).view(-1, 2, 1)
# print(target_variables)
if 'gpu' in device:
target_variables = target_variables.to('cuda')
loss_fn = torch.nn.BCELoss()
loss = loss_fn(pred, target_variables)
# print(loss)
return loss.cpu().item()
def build_pairs(entries):
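    # build every ordered summary pair per article, labelled [1,0], [0,1] or [0.5,0.5] from the human scores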
pair_list = []
topic_count = 0
summ_count = 0
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
        # really iterate over all pairs: j used to start from 1 (to avoid the pair i,j=0,0), but that also meant pairs i,j=x,0 could never be chosen; the i == j case is skipped explicitly instead
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
if i == j: continue
if entry[summ_ids[i]] > entry[summ_ids[j]]:
pref = [1, 0]
elif entry[summ_ids[i]] < entry[summ_ids[j]]:
pref = [0, 1]
else:
pref = [0.5, 0.5]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
# print(pair_list)
topic_count += 1
summ_count = summ_count + len(summ_ids)
print("topics", topic_count)
print("summ", summ_count)
return pair_list
def build_anno_pairs(entries, pair_anno_scores):
pair_list = []
topic_count = 0
summ_count = 0
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
        # really iterate over all pairs: j used to start from 1 (to avoid the pair i,j=0,0), but that also meant pairs i,j=x,0 could never be chosen; the i == j case is skipped explicitly instead
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
if i == j: continue
# get keys from dictionary
entry_keys = list(entry.keys())
# get pair preference from pair_anno_scores
for pair in pair_anno_scores[article_id]:
if pair['summ_id_i'] == int(entry_keys[i][8]) and pair['summ_id_j'] == int(entry_keys[j][8]):
if pair['pref'] == 1:
pref = [1, 0]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
else:
pref = [0, 1]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
elif pair['summ_id_j'] == int(entry_keys[i][8]) and pair['summ_id_i'] == int(entry_keys[j][8]):
if pair['pref'] == 1:
pref = [0, 1]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
else:
pref = [1, 0]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
topic_count += 1
summ_count = summ_count + len(summ_ids)
print("topics", topic_count)
print("summ", summ_count)
# print(pair_list)
return pair_list
def build_human_pair_scores(pair_list):
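    # per article, count how often each summary is the preferred one across all pairs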
human_pair_scores = {}
for entry in pair_list:
article_id = str(entry[0])
sum_id_i = str(entry[1])
sum_id_j = str(entry[2])
pref = entry[3]
summ_entry = {}
if article_id in human_pair_scores:
if pref == [1, 0]:
if sum_id_i in human_pair_scores[article_id]:
                    human_pair_scores[article_id][sum_id_i] += 1
else:
human_pair_scores[article_id][sum_id_i] = 1
else:
if sum_id_j in human_pair_scores[article_id]:
                    human_pair_scores[article_id][sum_id_j] += 1
else:
human_pair_scores[article_id][sum_id_j] = 1
else:
if pref == [1, 0]:
summ_entry[sum_id_i] = 1
summ_entry[sum_id_j] = 0
else:
summ_entry[sum_id_i] = 0
summ_entry[sum_id_j] = 1
human_pair_scores[article_id] = summ_entry
return human_pair_scores
# randomize_pref_order and double_prefs are only relevant if the learning function learns f(s0,s1)=pref. in our case, we learn f(s0)=pref[0] and f(s1)=pref[1], so this should be set to False
def build_pairs_majority_preferences(entries, sorted_scores, target_type='graded', ignore_ties=False,
randomize_pref_order=False, double_prefs=False):
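    # like build_pairs, but summaries with identical text are first de-duplicated and all annotator
    # preferences for a unique pair are aggregated into a (graded or binary) majority preference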
pair_list = []
topic_count = 0
anno_count = 0
summ_count = 0
entries_text = {}
# get summary text and matching id
for article_id, scores_list in tqdm(sorted_scores.items()):
temp_entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
# get summary text
s_text = [s['sys_summ'] for s in scores_list if s['summ_id'] == sid][0]
temp_entry['sys_summ' + repr(sid)] = s_text
# save in dictionary
entries_text[article_id] = temp_entry
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
# mapping from summary text to last summary id with that text. that's the one we will use
summ2id = {entries_text[article_id][summ_id]: summ_id for summ_id in summ_ids}
# put here the prefs for this article
article_prefs = {}
# still run through all pairs
        # really iterate over all pairs: j used to start from 1 (to avoid the pair i,j=0,0), but that also meant pairs i,j=x,0 could never be chosen; the i == j case is skipped explicitly instead
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
# run through dictionary containing summ_ids and matching text
# for key, value in entries_text[article_id].items():
# get text for current summaries i and j
# if key == summ_ids[i]:
# text_i = value
# elif key == summ_ids[j]:
# text_j = value
text_i = entries_text[article_id][summ_ids[i]]
text_j = entries_text[article_id][summ_ids[j]]
# check if text is identical, if yes skip
if i == j or text_i == text_j:
# print("DUPLICATE FOUND: TEXT i", text_i, "TEXT j", text_i)
continue
# get the unique summ ids
unique_summ_id_pair = [summ2id[text_i], summ2id[text_j]]
# some debug output
# noinspection PyUnreachableCode
if False:
print("%s vs. %s (IDs %s vs. %s)" % (
summ_ids[i], summ_ids[j], unique_summ_id_pair[0], unique_summ_id_pair[1]))
full_entry = sorted_scores[article_id]
print(" system %s with score %s (%s) vs." % (
full_entry[i]['sys_name'], full_entry[i]['scores']['redundancy'], entry[summ_ids[i]]))
print(" system %s with score %s (%s)" % (
full_entry[j]['sys_name'], full_entry[j]['scores']['redundancy'], entry[summ_ids[j]]))
print(
" \"%s...\" vs. \"%s...\"" % (full_entry[i]['sys_summ'][:20], full_entry[j]['sys_summ'][:20]))
# unique_summ_id_pair.sort()
if entry[summ_ids[i]] > entry[summ_ids[j]]:
pref = [1, 0]
elif entry[summ_ids[i]] < entry[summ_ids[j]]:
pref = [0, 1]
else:
pref = [0.5, 0.5]
# if entry[unique_summ_id_pair[0]] > entry[unique_summ_id_pair[1]]:
# pref = [1, 0]
# elif entry[unique_summ_id_pair[0]] > entry[unique_summ_id_pair[1]]:
# pref = [0, 1]
# else:
# # todo we could completely ignore ties. doesnt change much. low prio
# pref = [0.5, 0.5]
# sort the ids so that we get a unique key, so that (sys_summ0,sys_summ1) and (sys_summ1,sys_summ0) are the same
if unique_summ_id_pair[1] < unique_summ_id_pair[0]:
unique_summ_id_pair = unique_summ_id_pair[::-1]
pref = pref[::-1]
# convert to tuple, otherwise its not hashable for the dict
unique_summ_id_pair = tuple(unique_summ_id_pair)
# add up the pref to the total pref vector of the specific summary pair. create a new entry if not existing
article_prefs[unique_summ_id_pair] = article_prefs.get(unique_summ_id_pair,
np.array([0, 0])) + np.array(pref)
# transform to target
for unique_summ_id_pair, pref in article_prefs.items():
# depending on the mode, use binary target, or graded one
pref = (pref / (pref[0] + pref[1])).tolist()
if target_type == 'binary':
if pref[0] > pref[1]:
pref = [1, 0]
elif pref[0] < pref[1]:
                    pref = [0, 1]
else:
pref = [0.5, 0.5]
# skip if it is a tie and you want to ignore ties
if pref[0] != 0.5 or not ignore_ties:
# include the pref two times, once in one direction and once in the other direction
if double_prefs:
pair_list.append((article_id, unique_summ_id_pair[1], unique_summ_id_pair[0], pref[::-1]))
pair_list.append((article_id, unique_summ_id_pair[0], unique_summ_id_pair[1], pref))
else:
# include the pref in the reverse order by chance. this might be necessary if there is a bias in the distribution of the score, e.g. if they are ordered
if randomize_pref_order and bool(random.getrandbits(1)):
pair_list.append((article_id, unique_summ_id_pair[1], unique_summ_id_pair[0], pref[::-1]))
else:
pair_list.append((article_id, unique_summ_id_pair[0], unique_summ_id_pair[1], pref))
topic_count += 1
anno_count += len(summ_ids)
summ_count += len(summ2id)
print("topics", topic_count)
print("annotations", anno_count)
print("summ", summ_count)
print("summ pairs", len(pair_list))
return pair_list
def build_pair_vecs(vecs, pairs):
pair_vec_list = []
for aid, sid1, sid2, _ in pairs:
article_vec = list(vecs[aid]['article'])
s1_vec = list(vecs[aid][sid1])
s2_vec = list(vecs[aid][sid2])
pair_vec_list.append([article_vec + s1_vec, article_vec + s2_vec])
return pair_vec_list
def pair_train_rewarder(vec_dic, pairs, deep_model, optimiser, loss_only, batch_size=32, device='cpu'):
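    # shuffle the pairs, run them through the model in mini-batches and return the mean loss;
    # with loss_only=True the model is only evaluated, not updated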
loss_list = []
shuffled_pairs = pairs[:]
np.random.shuffle(shuffled_pairs)
vec_pairs = build_pair_vecs(vec_dic, shuffled_pairs)
# print('total number of pairs built: {}'.format(len(vec_pairs)))
for pointer in range(int((len(
pairs) - 1) / batch_size) + 1): # there was a bug here. when len(pairs) was a vielfaches of 32, then there was a last batch with [] causing an exception
vec_batch = vec_pairs[pointer * batch_size:(pointer + 1) * batch_size]
target_batch = shuffled_pairs[pointer * batch_size:(pointer + 1) * batch_size]
target_batch = [ee[-1] for ee in target_batch]
if loss_only:
loss = deep_pair_train_loss_only(vec_batch, target_batch, deep_model, optimiser, device)
else:
loss = deep_pair_train(vec_batch, target_batch, deep_model, optimiser, device)
loss_list.append(loss)
return np.mean(loss_list)
def test_rewarder(vec_list, human_scores, model, device, plot_file=None):
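    # correlate predicted scores with human scores, per topic and globally (Spearman/Pearson/Kendall);
    # optionally write a violin plot of the predicted-score distribution for each true score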
results = {'rho': [], 'rho_p': [], 'pcc': [], 'pcc_p': [], 'tau': [], 'tau_p': [], 'rho_global': [],
'pcc_global': [], 'tau_global': []}
true_scores_all = []
pred_scores_all = np.array([])
# print(human_scores)
# pred_scores_all = []
for article_id in human_scores:
entry = human_scores[article_id]
summ_ids = list(entry.keys())
if len(summ_ids) < 2: continue
concat_vecs = []
true_scores = []
for i in range(len(summ_ids)):
article_vec = list(vec_list[article_id]['article'])
summ_vec = list(vec_list[article_id][summ_ids[i]])
# print(np.array(concat_vecs).shape, np.array(article_vec).shape, np.array(summ_vec).shape)
concat_vecs.append(article_vec + summ_vec)
# print(np.array(concat_vecs).shape)
# print(entry[summ_ids[i]])
true_scores.append(entry[summ_ids[i]])
true_scores_all += true_scores # add scores for topic to list of all scores
input = Variable(torch.from_numpy(np.array(concat_vecs)).float())
if 'gpu' in device:
input = input.to('cuda')
model.eval()
with torch.no_grad():
# print(true_scores)
# print(np.array(true_scores).shape)
# print(input)
# print(input.shape)
# print(model(input).data.cpu().numpy())
# print(model(input).data.cpu().numpy().shape)
pred_scores = model(input).data.cpu().numpy().reshape(1, -1)[0]
pred_scores_all = np.concatenate((pred_scores_all, pred_scores), axis=0)
# pred_scores_all += pred_scores.tolist()
rho, rho_p = spearmanr(true_scores, pred_scores)
pcc, pcc_p = pearsonr(true_scores, pred_scores)
tau, tau_p = kendalltau(true_scores, pred_scores)
if not (math.isnan(rho) or math.isnan(pcc) or math.isnan(tau)):
results['rho'].append(rho)
results['rho_p'].append(rho_p)
results['pcc'].append(pcc)
results['pcc_p'].append(pcc_p)
results['tau'].append(tau)
results['tau_p'].append(tau_p)
rho = spearmanr(true_scores_all, pred_scores_all)[0]
pcc = pearsonr(true_scores_all, pred_scores_all)[0]
tau = kendalltau(true_scores_all, pred_scores_all)[0]
if not (math.isnan(rho) or math.isnan(pcc) or math.isnan(tau)):
results['rho_global'].append(rho)
results['pcc_global'].append(pcc)
results['tau_global'].append(tau)
if plot_file is not None:
fig, ax = plt.subplots()
# true_scores_all=np.array(true_scores_all)
# pred_scores_all=np.array(pred_scores_all)
unique = np.sort(np.unique(true_scores_all))
data_to_plot = [pred_scores_all[true_score == true_scores_all] for true_score in unique]
# bw_methods determines how soft the distribution curve will be. lower values are more sharp
ax.violinplot(data_to_plot, showmeans=True, showmedians=True, bw_method=0.2)
ax.scatter(true_scores_all + np.random.normal(0, 0.1, pred_scores_all.shape[0]), pred_scores_all, marker=".",
s=3, alpha=0.5)
ax.set_title('Comparison and distributions of true values to predicted score')
ax.set_xlabel('true scores')
ax.set_ylabel('predicted scores')
xticklabels = true_scores_all
ax.set_xticks(true_scores_all)
print("violin plot written to: %s" % plot_file)
plt.savefig(plot_file)
return results
def parse_args(argv):
ap = argparse.ArgumentParser("arguments for summary sampler")
ap.add_argument('-e', '--epoch_num', type=int, default=50)
ap.add_argument('-b', '--batch_size', type=int, default=32)
ap.add_argument('-tt', '--train_type', type=str, help='pairwise or regression', default='pairwise')
ap.add_argument('-tp', '--train_percent', type=float, help='how many data used for training', default=.64)
ap.add_argument('-dp', '--dev_percent', type=float, help='how many data used for dev', default=.16)
ap.add_argument('-lr', '--learn_rate', type=float, help='learning rate', default=3e-4)
ap.add_argument('-mt', '--model_type', type=str, help='deep/linear', default='linear')
ap.add_argument('-dv', '--device', type=str, help='cpu/gpu', default='gpu')
ap.add_argument('-se', '--seed', type=int, help='random seed number', default='1')
ap.add_argument('-fn', '--file_name', type=str, help='file name for csv output',
default='BetterRewardsStatistics_test.csv')
args = ap.parse_args(argv)
return args.epoch_num, args.batch_size, args.train_type, args.train_percent, args.dev_percent, args.learn_rate, args.model_type, args.device, args.seed, args.file_name
def main(argv):
epoch_num, batch_size, train_type, train_percent, dev_percent, learn_rate, model_type, device, seed, file_name = parse_args(
argv[1:])
print('\n=====Arguments====')
print('epoch num {}'.format(epoch_num))
print('batch size {}'.format(batch_size))
print('train type {}'.format(train_type))
print('train percent {}'.format(train_percent))
print('dev percent {}'.format(dev_percent))
print('learn rate {}'.format(learn_rate))
print('model type {}'.format(model_type))
print('device {}'.format(device))
print('seed {}'.format(seed))
print('file name {}'.format(file_name))
print('=====Arguments====\n')
csv_column_names = ['seed', 'learn_rate', 'model_type', 'train_pairs', 'dev_pairs', 'test_pairs', 'epoch_num',
'loss_train', 'loss_dev', 'loss_test', 'rho_train', 'rho_p_train', 'pcc_train', 'pcc_p_train',
'tau_train', 'tau_p_train', 'rho_train_global', 'pcc_train_global', 'tau_train_global',
'rho_dev', 'rho_p_dev', 'pcc_dev', 'pcc_p_dev', 'tau_dev', 'tau_p_dev',
'rho_dev_global', 'pcc_dev_global', 'tau_dev_global', 'rho_test', 'rho_p_test', 'pcc_test',
'pcc_p_test', 'tau_test', 'tau_p_test', 'rho_test_global', 'pcc_test_global', 'tau_test_global']
# check if csv_file exists
if path.exists(file_name):
csv_exists = True
else:
csv_exists = False
with open(file_name, 'a', newline='') as csv_file:
writer = csv.writer(csv_file)
# if a new csv_file is generated, write column names
if csv_exists is False:
writer.writerow(csv_column_names)
np.random.seed(seed=seed)
random.seed(seed)
torch.random.manual_seed(seed)
torch.manual_seed(seed)
if train_percent + dev_percent >= 1.:
print('ERROR! Train data percentage plus dev data percentage is {}! Make sure the sum is below 1.0!'.format(
train_percent + dev_percent))
exit(1)
BERT_VEC_LENGTH = 1024 # change this to 768 if you use bert-base
deep_model, optimiser = build_model(model_type, BERT_VEC_LENGTH * 2, learn_rate)
if 'gpu' in device:
deep_model.to('cuda')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# read human scores and vectors for summaries/docs, and split the train/dev/test set
sorted_scores = read_sorted_scores()
# read pair anno scores
pair_anno_scores = read_pair_anno_scores()
# train, dev, test, all = parse_split_data(sorted_scores, train_percent, dev_percent)
train, dev, test, all = parse_split_data_balanced(sorted_scores, train_percent, dev_percent)
# without majority preferences
# train_pairs = build_pairs(train)
# dev_pairs = build_pairs(dev)
# test_pairs = build_pairs(test)
# without majority preferences but with pair anno
train_pairs = build_anno_pairs(train, pair_anno_scores)
dev_pairs = build_anno_pairs(dev, pair_anno_scores)
test_pairs = build_anno_pairs(test, pair_anno_scores)
# with majority preferences
# train_pairs = build_pairs_majority_preferences(train, sorted_scores)
# dev_pairs = build_pairs_majority_preferences(dev, sorted_scores)
# test_pairs = build_pairs_majority_preferences(test, sorted_scores)
# with majority preferences and pair anno
# train_pairs = build_anno_pairs_majority_preferences(train, sorted_scores, pair_anno_scores)
# dev_pairs = build_anno_pairs_majority_preferences(dev, sorted_scores, pair_anno_scores)
# test_pairs = build_anno_pairs_majority_preferences(test, sorted_scores, pair_anno_scores)
# build human pair scores for pairs
train_anno = build_human_pair_scores(train_pairs)
dev_anno = build_human_pair_scores(dev_pairs)
test_anno = build_human_pair_scores(test_pairs)
print(len(train_pairs), len(dev_pairs), len(test_pairs))
# read bert vectors
with open('data/doc_summ_bert_vectors.pkl', 'rb') as ff:
all_vec_dic = pickle.load(ff)
pcc_list = []
weights_list = []
for ii in range(epoch_num + 1):
print('\n=====EPOCH {}====='.format(ii))
if ii == 0:
# do not train in epoch 0, just evaluate the performance of the randomly initialized model (sanity check and baseline)
loss_train = pair_train_rewarder(all_vec_dic, train_pairs, deep_model, optimiser, True, batch_size,
device)
else:
# from epoch 1 on, receive the data and learn from it. the loss is still the loss before fed with the training examples
loss_train = pair_train_rewarder(all_vec_dic, train_pairs, deep_model, optimiser, False, batch_size,
device)
loss_dev = pair_train_rewarder(all_vec_dic, dev_pairs, deep_model, optimiser, True, batch_size, device)
loss_test = pair_train_rewarder(all_vec_dic, test_pairs, deep_model, optimiser, True, batch_size, device)
csv_row = [seed, learn_rate, model_type, len(train_pairs), len(dev_pairs), len(test_pairs), ii, loss_train,
loss_dev, loss_test]
print('--> losses (train,dev,test)', loss_train, loss_dev, loss_test)
# Train-Data only
print("==Train==")
# results_train = test_rewarder(all_vec_dic, train, deep_model, device)
results_train = test_rewarder(all_vec_dic, train_anno, deep_model, device)
for metric in results_train:
print('{}\t{}'.format(metric, np.mean(results_train[metric])))
csv_row.append(np.mean(results_train[metric]))
print("==Dev==")
# results = test_rewarder(all_vec_dic, dev, deep_model, device)
results = test_rewarder(all_vec_dic, dev_anno, deep_model, device)
for metric in results:
print('{}\t{}'.format(metric, np.mean(results[metric])))
csv_row.append(np.mean(results[metric]))
# Test-Data only
print("==Test==")
# results_test = test_rewarder(all_vec_dic, test, deep_model, device)
results_test = test_rewarder(all_vec_dic, test_anno, deep_model, device)
for metric in results_test:
print('{}\t{}'.format(metric, np.mean(results_test[metric])))
csv_row.append(np.mean(results_test[metric]))
writer.writerow(csv_row)
pcc_list.append(np.mean(results['pcc']))
weights_list.append(copy.deepcopy(deep_model.state_dict()))
idx = np.argmax(pcc_list)
best_result = pcc_list[idx]
print('\n======Best results come from epoch no. {}====='.format(idx))
deep_model.load_state_dict(weights_list[idx])
output_pattern = 'batch{}_{}_trainPercent{}_seed{}_lrate{}_{}_epoch{}'.format(
batch_size, train_type, train_percent, seed, learn_rate, model_type, epoch_num
)
test_results = test_rewarder(all_vec_dic, test, deep_model, device,
os.path.join(OUTPUTS_DIR, output_pattern + '_onTest.pdf'))
test_rewarder(all_vec_dic, train, deep_model, device,
os.path.join(OUTPUTS_DIR, output_pattern + '_onTrain.pdf'))
test_rewarder(all_vec_dic, dev, deep_model, device, os.path.join(OUTPUTS_DIR, output_pattern + '_onDev.pdf'))
print('Its performance on the test set is:')
for metric in test_results:
print('{}\t{}'.format(metric, np.mean(test_results[metric])))
model_weight_name = 'pcc{0:.4f}_'.format(np.mean(test_results['pcc']))
model_weight_name += 'seed{}_epoch{}_batch{}_{}_trainPercent{}_lrate{}_{}.model'.format(
seed, epoch_num, batch_size, train_type, train_percent, learn_rate, model_type
)
torch.save(weights_list[idx], os.path.join(MODEL_WEIGHT_DIR, model_weight_name))
print('\nbest model weight saved to: {}'.format(os.path.join(MODEL_WEIGHT_DIR, model_weight_name)))
if __name__ == '__main__':
main(sys.argv)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.argmax",
"random.shuffle",
"numpy.mean",
"torch.nn.Softmax",
"pickle.load",
"numpy.random.normal",
"scipy.stats.kendalltau",
"torch.no_grad",
"os.path.join",
"numpy.unique",
"torch.nn.BCELoss",
"os.path.exists",
"random.seed",
"torch.nn.Linear",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle",
"math.isnan",
"scorer.data_helper.json_reader.read_pair_anno_scores",
"csv.writer",
"torch.random.manual_seed",
"torch.manual_seed",
"scipy.stats.pearsonr",
"random.random",
"numpy.concatenate",
"torch.nn.ReLU",
"scipy.stats.spearmanr",
"scorer.data_helper.json_reader.read_sorted_scores",
"numpy.array",
"random.getrandbits",
"matplotlib.pyplot.savefig"
] |
[((1669, 1696), 'random.shuffle', 'random.shuffle', (['article_ids'], {}), '(article_ids)\n', (1683, 1696), False, 'import random\n'), ((3637, 3660), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3653, 3660), False, 'import torch\n'), ((4015, 4033), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (4031, 4033), False, 'import torch\n'), ((4545, 4568), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (4561, 4568), False, 'import torch\n'), ((4923, 4941), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (4939, 4941), False, 'import torch\n'), ((16061, 16094), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_pairs'], {}), '(shuffled_pairs)\n', (16078, 16094), True, 'import numpy as np\n'), ((16916, 16934), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (16923, 16934), True, 'import numpy as np\n'), ((17214, 17226), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17222, 17226), True, 'import numpy as np\n'), ((20592, 20648), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""arguments for summary sampler"""'], {}), "('arguments for summary sampler')\n", (20615, 20648), False, 'import argparse\n'), ((23194, 23216), 'os.path.exists', 'path.exists', (['file_name'], {}), '(file_name)\n', (23205, 23216), False, 'from os import path\n'), ((1080, 1095), 'random.random', 'random.random', ([], {}), '()\n', (1093, 1095), False, 'import random\n'), ((18715, 18750), 'scipy.stats.spearmanr', 'spearmanr', (['true_scores', 'pred_scores'], {}), '(true_scores, pred_scores)\n', (18724, 18750), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((18772, 18806), 'scipy.stats.pearsonr', 'pearsonr', (['true_scores', 'pred_scores'], {}), '(true_scores, pred_scores)\n', (18780, 18806), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((18828, 18864), 'scipy.stats.kendalltau', 'kendalltau', (['true_scores', 'pred_scores'], {}), '(true_scores, pred_scores)\n', (18838, 18864), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19193, 19236), 'scipy.stats.spearmanr', 'spearmanr', (['true_scores_all', 'pred_scores_all'], {}), '(true_scores_all, pred_scores_all)\n', (19202, 19236), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19250, 19292), 'scipy.stats.pearsonr', 'pearsonr', (['true_scores_all', 'pred_scores_all'], {}), '(true_scores_all, pred_scores_all)\n', (19258, 19292), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19306, 19350), 'scipy.stats.kendalltau', 'kendalltau', (['true_scores_all', 'pred_scores_all'], {}), '(true_scores_all, pred_scores_all)\n', (19316, 19350), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19597, 19611), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19609, 19611), True, 'from matplotlib import pyplot as plt\n'), ((20516, 20538), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_file'], {}), '(plot_file)\n', (20527, 20538), True, 'from matplotlib import pyplot as plt\n'), ((23354, 23374), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (23364, 23374), False, 'import csv\n'), ((23523, 23548), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (23537, 23548), True, 'import numpy as np\n'), ((23557, 23574), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (23568, 23574), False, 'import random\n'), ((23583, 23613), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seed'], 
{}), '(seed)\n', (23607, 23613), False, 'import torch\n'), ((23622, 23645), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (23639, 23645), False, 'import torch\n'), ((24329, 24349), 'scorer.data_helper.json_reader.read_sorted_scores', 'read_sorted_scores', ([], {}), '()\n', (24347, 24349), False, 'from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, read_processed_scores, read_scores\n'), ((24411, 24434), 'scorer.data_helper.json_reader.read_pair_anno_scores', 'read_pair_anno_scores', ([], {}), '()\n', (24432, 24434), False, 'from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, read_processed_scores, read_scores\n'), ((28711, 28730), 'numpy.argmax', 'np.argmax', (['pcc_list'], {}), '(pcc_list)\n', (28720, 28730), True, 'import numpy as np\n'), ((2866, 2896), 'torch.nn.Linear', 'torch.nn.Linear', (['vec_length', '(1)'], {}), '(vec_length, 1)\n', (2881, 2896), False, 'import torch\n'), ((3034, 3049), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (3047, 3049), False, 'import torch\n'), ((18207, 18222), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18220, 18222), False, 'import torch\n'), ((18584, 18638), 'numpy.concatenate', 'np.concatenate', (['(pred_scores_all, pred_scores)'], {'axis': '(0)'}), '((pred_scores_all, pred_scores), axis=0)\n', (18598, 18638), True, 'import numpy as np\n'), ((19366, 19381), 'math.isnan', 'math.isnan', (['rho'], {}), '(rho)\n', (19376, 19381), False, 'import math\n'), ((19385, 19400), 'math.isnan', 'math.isnan', (['pcc'], {}), '(pcc)\n', (19395, 19400), False, 'import math\n'), ((19404, 19419), 'math.isnan', 'math.isnan', (['tau'], {}), '(tau)\n', (19414, 19419), False, 'import math\n'), ((19743, 19769), 'numpy.unique', 'np.unique', (['true_scores_all'], {}), '(true_scores_all)\n', (19752, 19769), True, 'import numpy as np\n'), ((26057, 26072), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (26068, 26072), False, 'import pickle\n'), ((29201, 29258), 'os.path.join', 'os.path.join', (['OUTPUTS_DIR', "(output_pattern + '_onTest.pdf')"], {}), "(OUTPUTS_DIR, output_pattern + '_onTest.pdf')\n", (29213, 29258), False, 'import os\n'), ((29344, 29402), 'os.path.join', 'os.path.join', (['OUTPUTS_DIR', "(output_pattern + '_onTrain.pdf')"], {}), "(OUTPUTS_DIR, output_pattern + '_onTrain.pdf')\n", (29356, 29402), False, 'import os\n'), ((29464, 29520), 'os.path.join', 'os.path.join', (['OUTPUTS_DIR', "(output_pattern + '_onDev.pdf')"], {}), "(OUTPUTS_DIR, output_pattern + '_onDev.pdf')\n", (29476, 29520), False, 'import os\n'), ((29734, 29762), 'numpy.mean', 'np.mean', (["test_results['pcc']"], {}), "(test_results['pcc'])\n", (29741, 29762), True, 'import numpy as np\n'), ((30001, 30050), 'os.path.join', 'os.path.join', (['MODEL_WEIGHT_DIR', 'model_weight_name'], {}), '(MODEL_WEIGHT_DIR, model_weight_name)\n', (30013, 30050), False, 'import os\n'), ((18881, 18896), 'math.isnan', 'math.isnan', (['rho'], {}), '(rho)\n', (18891, 18896), False, 'import math\n'), ((18900, 18915), 'math.isnan', 'math.isnan', (['pcc'], {}), '(pcc)\n', (18910, 18915), False, 'import math\n'), ((18919, 18934), 'math.isnan', 'math.isnan', (['tau'], {}), '(tau)\n', (18929, 18934), False, 'import math\n'), ((20092, 20142), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', 'pred_scores_all.shape[0]'], {}), '(0, 0.1, pred_scores_all.shape[0])\n', (20108, 20142), True, 'import numpy as np\n'), ((28599, 28622), 'numpy.mean', 'np.mean', (["results['pcc']"], {}), 
"(results['pcc'])\n", (28606, 28622), True, 'import numpy as np\n'), ((30108, 30157), 'os.path.join', 'os.path.join', (['MODEL_WEIGHT_DIR', 'model_weight_name'], {}), '(MODEL_WEIGHT_DIR, model_weight_name)\n', (30120, 30157), False, 'import os\n'), ((3443, 3461), 'numpy.array', 'np.array', (['vec_list'], {}), '(vec_list)\n', (3451, 3461), True, 'import numpy as np\n'), ((4351, 4369), 'numpy.array', 'np.array', (['vec_list'], {}), '(vec_list)\n', (4359, 4369), True, 'import numpy as np\n'), ((13857, 13871), 'numpy.array', 'np.array', (['pref'], {}), '(pref)\n', (13865, 13871), True, 'import numpy as np\n'), ((27744, 27774), 'numpy.mean', 'np.mean', (['results_train[metric]'], {}), '(results_train[metric])\n', (27751, 27774), True, 'import numpy as np\n'), ((28100, 28124), 'numpy.mean', 'np.mean', (['results[metric]'], {}), '(results[metric])\n', (28107, 28124), True, 'import numpy as np\n'), ((28502, 28531), 'numpy.mean', 'np.mean', (['results_test[metric]'], {}), '(results_test[metric])\n', (28509, 28531), True, 'import numpy as np\n'), ((29653, 29682), 'numpy.mean', 'np.mean', (['test_results[metric]'], {}), '(test_results[metric])\n', (29660, 29682), True, 'import numpy as np\n'), ((13837, 13853), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (13845, 13853), True, 'import numpy as np\n'), ((18076, 18097), 'numpy.array', 'np.array', (['concat_vecs'], {}), '(concat_vecs)\n', (18084, 18097), True, 'import numpy as np\n'), ((27680, 27710), 'numpy.mean', 'np.mean', (['results_train[metric]'], {}), '(results_train[metric])\n', (27687, 27710), True, 'import numpy as np\n'), ((28042, 28066), 'numpy.mean', 'np.mean', (['results[metric]'], {}), '(results[metric])\n', (28049, 28066), True, 'import numpy as np\n'), ((28439, 28468), 'numpy.mean', 'np.mean', (['results_test[metric]'], {}), '(results_test[metric])\n', (28446, 28468), True, 'import numpy as np\n'), ((3848, 3864), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (3856, 3864), True, 'import numpy as np\n'), ((4756, 4772), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (4764, 4772), True, 'import numpy as np\n'), ((15048, 15069), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (15066, 15069), False, 'import random\n')]
|
import numpy as np
from matplotlib import pyplot as plt
n = 100
x = list(range(0, n))
y = list(range(0, n))  # list, so the elements can be reassigned in the loop below
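# y = x + Gaussian noise (std 3) + a constant offset of 100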
for k in range(0, n):
y[k] = y[k] + 3*np.random.randn() + 100
plt.figure(figsize=(20,10))
plt.scatter(x, y)
plt.savefig("./images/rawData.png")
X = np.zeros([n,1])
target = np.zeros([n,1])
X[:,0] = x
target[:,0] = y
np.savetxt("X.txt", X, delimiter=",", fmt='%f')
np.savetxt("y.txt", target, delimiter=",", fmt='%f')
|
[
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig"
] |
[((162, 190), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (172, 190), True, 'from matplotlib import pyplot as plt\n'), ((190, 207), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (201, 207), True, 'from matplotlib import pyplot as plt\n'), ((208, 243), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/rawData.png"""'], {}), "('./images/rawData.png')\n", (219, 243), True, 'from matplotlib import pyplot as plt\n'), ((249, 265), 'numpy.zeros', 'np.zeros', (['[n, 1]'], {}), '([n, 1])\n', (257, 265), True, 'import numpy as np\n'), ((274, 290), 'numpy.zeros', 'np.zeros', (['[n, 1]'], {}), '([n, 1])\n', (282, 290), True, 'import numpy as np\n'), ((317, 364), 'numpy.savetxt', 'np.savetxt', (['"""X.txt"""', 'X'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "('X.txt', X, delimiter=',', fmt='%f')\n", (327, 364), True, 'import numpy as np\n'), ((365, 417), 'numpy.savetxt', 'np.savetxt', (['"""y.txt"""', 'target'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "('y.txt', target, delimiter=',', fmt='%f')\n", (375, 417), True, 'import numpy as np\n'), ((137, 154), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (152, 154), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# basic import
import os
import os.path as op
import sys
import time
sys.path.insert(0, op.join(op.dirname(__file__),'..','..'))
# python libs
import numpy as np
import xarray as xr
# custom libs
from teslakit.project_site import PathControl
from teslakit.extremes import FitGEV_KMA_Frechet
# --------------------------------------
# Test data storage
pc = PathControl()
p_tests = pc.p_test_data
p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet')
# input
p_npz = op.join(p_test, 'swell_1_Hs.npz')
# --------------------------------------
# Load data
npzf = np.load(p_npz)
bmus = npzf['arr_0']
n_clusters = npzf['arr_1']
var_wvs = npzf['arr_2']
print(bmus)
print(n_clusters)
print(var_wvs)
print()
# TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92
gp_pars = FitGEV_KMA_Frechet(
bmus, n_clusters, var_wvs)
print(gp_pars)
|
[
"numpy.load",
"os.path.dirname",
"teslakit.extremes.FitGEV_KMA_Frechet",
"teslakit.project_site.PathControl",
"os.path.join"
] |
[((408, 421), 'teslakit.project_site.PathControl', 'PathControl', ([], {}), '()\n', (419, 421), False, 'from teslakit.project_site import PathControl\n'), ((456, 515), 'os.path.join', 'op.join', (['p_tests', '"""ClimateEmulator"""', '"""gev_fit_kma_fretchet"""'], {}), "(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet')\n", (463, 515), True, 'import os.path as op\n'), ((533, 566), 'os.path.join', 'op.join', (['p_test', '"""swell_1_Hs.npz"""'], {}), "(p_test, 'swell_1_Hs.npz')\n", (540, 566), True, 'import os.path as op\n'), ((629, 643), 'numpy.load', 'np.load', (['p_npz'], {}), '(p_npz)\n', (636, 643), True, 'import numpy as np\n'), ((842, 887), 'teslakit.extremes.FitGEV_KMA_Frechet', 'FitGEV_KMA_Frechet', (['bmus', 'n_clusters', 'var_wvs'], {}), '(bmus, n_clusters, var_wvs)\n', (860, 887), False, 'from teslakit.extremes import FitGEV_KMA_Frechet\n'), ((143, 163), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (153, 163), True, 'import os.path as op\n')]
|
import os
import sys
import cv2
import time
import caffe
import numpy as np
import config
sys.path.append('../')
from fast_mtcnn import fast_mtcnn
from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72
from baidu import call_baidu_api
def create_net(model_dir, iter_num):
model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num)
proto_path = 'landmark.prototxt'
return caffe.Net(proto_path, model_path, caffe.TEST)
if __name__ == '__main__':
iter_num = int(sys.argv[1])
img_path = sys.argv[2]
model_dir = config.MODEL_DIR
if len(sys.argv) > 3:
model_dir = sys.argv[3]
img = cv2.imread(img_path)
net = create_net(model_dir, iter_num)
mtcnn = fast_mtcnn()
boxes = mtcnn(img_path)
for box in boxes:
if not is_valid_facebox(box):
continue
exp_box = expand_mtcnn_box(img, box)
cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]]
baidu_result = call_baidu_api(cropped, '')
baidu_lm = extract_baidu_lm72(baidu_result[0][-1])
for x, y in baidu_lm:
x = int(x + exp_box[0])
y = int(y + exp_box[1])
cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1)
h, w, _ = cropped.shape
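        # Preprocess the face crop: resize to the network input size, move to channel-first
        # layout and scale pixel values to [-1, 1].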
cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE))
cropped = np.swapaxes(cropped, 0, 2)
cropped = (cropped - 127.5) / 127.5
net.blobs['data'].data[0] = cropped
out = net.forward()
landmark = out['Dense2'][0]
for pt in landmark.reshape((config.LANDMARK_SIZE, 2)):
x, y = pt
x = x * w + exp_box[0]
y = y * h + exp_box[1]
cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1)
time.sleep(0.5)
cv2.imwrite('result.jpg', img)
|
[
"sys.path.append",
"gen_landmark.extract_baidu_lm72",
"fast_mtcnn.fast_mtcnn",
"baidu.call_baidu_api",
"gen_landmark.is_valid_facebox",
"cv2.imwrite",
"gen_landmark.expand_mtcnn_box",
"time.sleep",
"cv2.imread",
"numpy.swapaxes",
"caffe.Net",
"os.path.join",
"cv2.resize"
] |
[((90, 112), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (105, 112), False, 'import sys\n'), ((315, 380), 'os.path.join', 'os.path.join', (['model_dir', "('landmark_iter_%d.caffemodel' % iter_num)"], {}), "(model_dir, 'landmark_iter_%d.caffemodel' % iter_num)\n", (327, 380), False, 'import os\n'), ((429, 474), 'caffe.Net', 'caffe.Net', (['proto_path', 'model_path', 'caffe.TEST'], {}), '(proto_path, model_path, caffe.TEST)\n', (438, 474), False, 'import caffe\n'), ((664, 684), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (674, 684), False, 'import cv2\n'), ((740, 752), 'fast_mtcnn.fast_mtcnn', 'fast_mtcnn', ([], {}), '()\n', (750, 752), False, 'from fast_mtcnn import fast_mtcnn\n'), ((1807, 1837), 'cv2.imwrite', 'cv2.imwrite', (['"""result.jpg"""', 'img'], {}), "('result.jpg', img)\n", (1818, 1837), False, 'import cv2\n'), ((880, 906), 'gen_landmark.expand_mtcnn_box', 'expand_mtcnn_box', (['img', 'box'], {}), '(img, box)\n', (896, 906), False, 'from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72\n'), ((998, 1025), 'baidu.call_baidu_api', 'call_baidu_api', (['cropped', '""""""'], {}), "(cropped, '')\n", (1012, 1025), False, 'from baidu import call_baidu_api\n'), ((1045, 1084), 'gen_landmark.extract_baidu_lm72', 'extract_baidu_lm72', (['baidu_result[0][-1]'], {}), '(baidu_result[0][-1])\n', (1063, 1084), False, 'from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72\n'), ((1303, 1358), 'cv2.resize', 'cv2.resize', (['cropped', '(config.IMG_SIZE, config.IMG_SIZE)'], {}), '(cropped, (config.IMG_SIZE, config.IMG_SIZE))\n', (1313, 1358), False, 'import cv2\n'), ((1377, 1403), 'numpy.swapaxes', 'np.swapaxes', (['cropped', '(0)', '(2)'], {}), '(cropped, 0, 2)\n', (1388, 1403), True, 'import numpy as np\n'), ((1786, 1801), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1796, 1801), False, 'import time\n'), ((818, 839), 'gen_landmark.is_valid_facebox', 'is_valid_facebox', (['box'], {}), '(box)\n', (834, 839), False, 'from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72\n')]
|
import numpy
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
x = [1,2,3,4]
x=numpy.array(x)
print(x.shape)
x=x.reshape(2,-1)
print(x.shape)
print(x)
x=x.reshape(-1)
print(x.shape)
print(x)
y = [2,4,6,8]
#x*2=[2,4,6,8]
#x*x=[1,4,9,16]
#sum(x) = 10
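# Fit a cubic polynomial; since y = 2x exactly, the higher-order coefficients come out near zero.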
pf=numpy.polyfit(x,y,3)
print(pf)
print(type(pf))
model = numpy.poly1d(pf)
drv=model.deriv()
print(model([1,2,3,4]))
print(type(drv))
print(model)
print(drv)
coeff=r2_score(y, model(x))
print(coeff)
|
[
"numpy.poly1d",
"numpy.array",
"numpy.polyfit"
] |
[((119, 133), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (130, 133), False, 'import numpy\n'), ((294, 316), 'numpy.polyfit', 'numpy.polyfit', (['x', 'y', '(3)'], {}), '(x, y, 3)\n', (307, 316), False, 'import numpy\n'), ((349, 365), 'numpy.poly1d', 'numpy.poly1d', (['pf'], {}), '(pf)\n', (361, 365), False, 'import numpy\n')]
|
import numpy as np
import tensorflow as tf
import random
import _pickle as pkl
import matplotlib.pyplot as plt
from pylab import rcParams
import scipy
import scipy.stats as stats
from tensorflow.python.ops import gen_nn_ops
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32)
MEAN_IMAGE[:, :, :, 0] = 103.939
MEAN_IMAGE[:, :, :, 1] = 116.779
MEAN_IMAGE[:, :, :, 2] = 123.68
EPSILON = 1e-12
MIN_INPUT = -MEAN_IMAGE
MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE
def dataReader():
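    # Load four pickled batches of 20 ImageNet validation samples each, together with their labels.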
X = np.zeros((100, 227, 227, 3))
y = np.zeros(100)
for num in range(4):
with open(
"./ImagenetValidationSamples/imagenet_sample_{}.pkl".format(
num), "rb") as inputs:
dic_temp = pkl.load(inputs)
X[num * 20:num * 20 + 20] = dic_temp["X"]
y[num * 20:num * 20 + 20] = dic_temp["y"]
labels = dic_temp["labels"]
return X, y.astype(int), labels
class SimpleGradientAttack(object):
def __init__(self,
mean_image,
sess,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
target_map=None,
pixel_max=255.):
"""
Args:
            mean_image: The mean image of the data set (the images are assumed to be mean-subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
            NET.saliency_flatten is its flattened version.
            NET2: Surrogate neural network with the same structure and weights as the original network but
            with activations replaced by the softplus function
            (necessary only when the activation function of the original network
            does not have second-order gradients, e.g. ReLU). It's assumed that NET2.saliency is the
            saliency map tensor and NET2.saliency_flatten is its flattened version.
k_top: the topK parameter of the attack (refer to the original paper)
pixel_max: the maximum pixel value in the image.
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2] or\
NET.input.get_shape()[-1]!=test_image.shape[-1]:
            raise ValueError(
                "Model's input dimensions are not compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\
NET2.input.get_shape()[-1]!=test_image.shape[-1]:
            raise ValueError(
                "Surrogate model's input dimensions are not compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.create_attack_ops(NET2, test_image.shape[-3],
test_image.shape[-2])
self.update_new_image(test_image, original_label)
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
self.test_image,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def create_extra_ops(self, NET, w, h, k_top):
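        # Helper tensors: indices of the k_top largest saliency values and the
        # saliency-weighted (x, y) coordinate sums used as the map's mass center.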
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
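        # Attack objectives (each direction is the negative gradient of its loss w.r.t. the input):
        #   topK_loss: total saliency mass on the originally top-K pixels
        #   mass_center_loss: negative squared distance between the current and original saliency mass centers
        #   target_loss: weighted MSE to a target saliency map plus a term keeping the network output close to its original value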
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
self.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph:
self.original_label,
self.topk_ph:
self.original_topk,
self.mass_center_ph:
self.original_mass_center
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input:
feed,
NET.label_ph:
self.original_label,
self.topk_ph:
self.original_topk,
self.mass_center_ph:
self.original_mass_center
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
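        # Compute the attack direction for the chosen objective and return its elementwise sign
        # ("random" simply uses the sign of Gaussian noise).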
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.topK_direction,
in_image, self.NET2)
perturbation = np.reshape(perturbation, [w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.mass_center_direction, in_image,
self.NET2)
perturbation = np.reshape(perturbation, [w, h, c])
elif attack_method == "target":
self.use_target = True
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
in_image, self.NET2)
debug = self.run_model(self.sess, self.debug, in_image,
self.NET2)
print("MSE: ", debug)
perturbation = np.reshape(perturbation, [w, h, c])
return np.sign(perturbation)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
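        # Take one signed step of size alpha, rescale the accumulated perturbation so its
        # norm (default L-inf) stays within `bound`, then clip to the valid pixel range.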
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * np.sign(pert) - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
test_image_pert, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
test_image_pert, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1.
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection",
target=None):
"""
Args:
            attack_method: one of "mass_center", "topK", "target" or "random"
            epsilon: allowed maximum $\ell_\infty$ norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
            beta_0: parameter for the manipulation (target) attack
            beta_1: parameter for the manipulation (target) attack
Returns:
intersection: The portion of the top K salient pixels in the original picture that are in the
            top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
self.beta_0 = beta_0
self.beta_1 = beta_1
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
min_criterion = 1.
perturb_size = 0.
last_image = None
for counter in range(iters):
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
pass
if criterion == 1.:
return None
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
class IntegratedGradientsAttack(object):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
reference_image=None,
target_map=None,
pixel_max=255.):
"""
Args:
            mean_image: The mean image of the data set (the images are assumed to be mean-subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
            NET.saliency_flatten is its flattened version.
            NET2: Surrogate neural network with the same structure and weights as the original network but
            with activations replaced by the softplus function
            (necessary only when the activation function of the original network
            does not have second-order gradients, e.g. ReLU). It's assumed that NET2.saliency is the
            saliency map tensor and NET2.saliency_flatten is its flattened version.
k_top: the topK parameter of the attack (refer to the original paper)
num_steps: Number of steps in Integrated Gradients Algorithm
reference_image: Mean subtracted reference image of Integrated Gradients Algorithm
pixel_max: the maximum pixel value in the image.
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if sum([
NET.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
            raise ValueError(
                "Model's input dimensions are not compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if sum([
NET2.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
            raise ValueError(
                "Surrogate model's input dimensions are not compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
self.num_steps = num_steps
self.reference_image = np.zeros_like(
test_image) if reference_image is None else reference_image
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.create_attack_ops(self.NET2, test_image.shape[-3],
test_image.shape[-2])
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
counterfactuals = self.create_counterfactuals(test_image)
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.debug = topK_loss
NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
NET.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def create_counterfactuals(self, in_image):
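        # Integrated Gradients path: num_steps images linearly interpolated between the
        # reference image and the input.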
ref_subtracted = in_image - self.reference_image
counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\
for i in range(self.num_steps)])
return np.array(counterfactuals)
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
NET.reference_image:
self.reference_image,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.reference_image: self.reference_image,
NET.label_ph: self.original_label,
self.target_map_ph: self.target_map
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.reference_image: self.reference_image,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
counterfactuals = self.create_counterfactuals(in_image)
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(self.num_steps, w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.NET2.topK_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.NET2.mass_center_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "target":
self.use_target = True
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation,
[self.num_steps, w, h, c])
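        # Scale each step's gradient by its interpolation coefficient (i+1)/num_steps
        # (the derivative of that counterfactual w.r.t. the input) before summing over the path.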
perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\
for i in range(self.num_steps)]),0)
return np.sign(perturbation_summed)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * np.sign(pert) - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
counterfactuals = self.create_counterfactuals(test_image_pert)
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
counterfactuals, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
counterfactuals, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection"):
"""
Args:
            attack_method: one of "mass_center", "topK", "target" or "random"
            epsilon: allowed maximum $\ell_\infty$ norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
Returns:
intersection: The portion of the top K salient pixels in the original picture that are in the
            top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
self.beta_0 = beta_0
self.beta_1 = beta_1
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
min_criterion = 1.
for counter in range(iters):
# if counter % int(iters / 5) == 0:
# print("Iteration : {}".format(counter))
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
# print(pert.sum())
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
# print("attack")
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
                # print("label has changed")
pass
if min_criterion == 1.:
            # print(
            #     "The attack was not successful for maximum allowed perturbation size equal to {}"
            #     .format(epsilon))
# return 1., 1., self.original_confidence, 0.
return None
# print(
# '''For maximum allowed perturbation size equal to {}, the resulting perturbation size was equal to {}
# '''.format(epsilon,
# np.max(np.abs(self.test_image - self.perturbed_image))))
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
counterfactuals = self.create_counterfactuals(self.perturbed_image)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
class SmoothGradientsAttack(object):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
reference_image=None,
target_map=None,
pixel_max=255.):
"""
Args:
            mean_image: The mean image of the data set (the images are assumed to be mean-subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
            NET.saliency_flatten is its flattened version.
            NET2: Surrogate neural network with the same structure and weights as the original network but
            with activations replaced by the softplus function
            (necessary only when the activation function of the original network
            does not have second-order gradients, e.g. ReLU). It's assumed that NET2.saliency is the
            saliency map tensor and NET2.saliency_flatten is its flattened version.
            k_top: the topK parameter of the attack (refer to the original paper)
            num_steps: number of noisy copies of the input used for the SmoothGrad estimate
reference_image: not used
pixel_max: maximum pixel value in the input image
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if sum([
NET.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
            raise ValueError(
                "Model's input dimensions are not compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if sum([
NET2.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
            raise ValueError(
                "Surrogate model's input dimensions are not compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
self.num_steps = num_steps
self.reference_image = np.zeros_like(
test_image) if reference_image is None else reference_image
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.create_attack_ops(self.NET2, test_image.shape[-3],
test_image.shape[-2])
self.update_new_image(test_image, original_label)
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
counterfactuals = self.create_counterfactuals(test_image)
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
print("Pred: ", np.argmax(predicted_scores, 1))
print("Label: ", original_label)
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.debug = topK_loss
NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
NET.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def create_counterfactuals(self, in_image, noise_ratio=0.1):
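        # SmoothGrad-style sampling: num_steps noisy copies of the input, with Gaussian noise
        # whose std is a fraction (default 0.1) of the input's dynamic range.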
counterfactuals = np.array([
            in_image + np.random.normal(scale=noise_ratio *
(in_image.max() - in_image.min()),
size=in_image.shape)
for _ in range(self.num_steps)
])
return np.array(counterfactuals)
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.target_map_ph: self.target_map
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
counterfactuals = self.create_counterfactuals(in_image)
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(self.num_steps, w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.NET2.topK_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.NET2.mass_center_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "target":
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation,
[self.num_steps, w, h, c])
perturbation_summed = np.mean(perturbation, 0)
return np.sign(perturbation_summed)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * pert - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
counterfactuals = self.create_counterfactuals(test_image_pert)
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
counterfactuals, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
counterfactuals, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1.
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection"):
"""
Args:
            attack_method: one of "mass_center", "topK", "target" or "random"
            epsilon: allowed maximum $\ell_\infty$ norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
Returns:
intersection: The portion of the top K salient pixels in the original picture that are in the
            top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
self.original = self.test_image.copy()
if attack_method == 'target':
self.use_target = True
else:
self.use_target = False
self.beta_0 = beta_0
self.beta_1 = beta_1
min_criterion = 1.
last_image = None
for counter in range(iters):
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
pass
if criterion == 1.:
return None
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
counterfactuals = self.create_counterfactuals(self.perturbed_image)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
class UniGradientsAttack(SmoothGradientsAttack):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
radii=4,
reference_image=None,
target_map=None,
pixel_max=255.):
self.radii = radii / (255. / pixel_max)
super(UniGradientsAttack,
self).__init__(sess,
mean_image,
test_image,
original_label,
NET,
NET2=NET2,
                             k_top=k_top,
num_steps=num_steps,
reference_image=reference_image,
target_map=target_map,
                             pixel_max=pixel_max)
def create_counterfactuals(self, in_image):
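        # Uniform-noise variant: num_steps copies of the input perturbed with per-pixel noise
        # drawn uniformly from [-radii, radii].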
counterfactuals = np.array([
in_image +
np.random.uniform(-1, 1, size=in_image.shape) * self.radii
for _ in range(self.num_steps)
])
return np.array(counterfactuals)
|
[
"tensorflow.reduce_sum",
"numpy.abs",
"tensorflow.keras.losses.MSE",
"numpy.argmax",
"numpy.clip",
"tensorflow.ConfigProto",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.normal",
"_pickle.load",
"numpy.zeros_like",
"tensorflow.nn.top_k",
"tensorflow.placeholder",
"numpy.max",
"numpy.reshape",
"tensorflow.gradients",
"numpy.intersect1d",
"numpy.ones_like",
"tensorflow.reduce_mean",
"numpy.random.uniform",
"scipy.spatial.distance.cosine",
"scipy.stats.spearmanr",
"numpy.zeros",
"numpy.array",
"numpy.sign"
] |
[((237, 253), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (251, 253), True, 'import tensorflow as tf\n'), ((598, 626), 'numpy.zeros', 'np.zeros', (['(100, 227, 227, 3)'], {}), '((100, 227, 227, 3))\n', (606, 626), True, 'import numpy as np\n'), ((635, 648), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (643, 648), True, 'import numpy as np\n'), ((311, 337), 'numpy.zeros', 'np.zeros', (['(1, 227, 227, 3)'], {}), '((1, 227, 227, 3))\n', (319, 337), True, 'import numpy as np\n'), ((3924, 3981), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w * h]', 'name': '"""topk_ph"""'}), "(tf.float32, shape=[w * h], name='topk_ph')\n", (3938, 3981), True, 'import tensorflow as tf\n'), ((4088, 4148), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[2]', 'name': '"""mass_center_ph"""'}), "(tf.float32, shape=[2], name='mass_center_ph')\n", (4102, 4148), True, 'import tensorflow as tf\n'), ((4268, 4330), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w, h]', 'name': '"""target_map_ph"""'}), "(tf.float32, shape=[w, h], name='target_map_ph')\n", (4282, 4330), True, 'import tensorflow as tf\n'), ((4574, 4652), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_class]', 'name': '"""original_output_ph"""'}), "(tf.float32, shape=[None, num_class], name='original_output_ph')\n", (4588, 4652), True, 'import tensorflow as tf\n'), ((4740, 4781), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_0"""'}), "(tf.float32, name='beta_0')\n", (4754, 4781), True, 'import tensorflow as tf\n'), ((4807, 4848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_1"""'}), "(tf.float32, name='beta_1')\n", (4821, 4848), True, 'import tensorflow as tf\n'), ((5682, 5755), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[test_image.shape[-3] * test_image.shape[-2]]'], {}), '(self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])\n', (5692, 5755), True, 'import numpy as np\n'), ((5872, 5887), 'numpy.zeros', 'np.zeros', (['(w * h)'], {}), '(w * h)\n', (5880, 5887), True, 'import numpy as np\n'), ((6866, 6906), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['NET.saliency_flatten', 'k_top'], {}), '(NET.saliency_flatten, k_top)\n', (6877, 6906), True, 'import tensorflow as tf\n'), ((7204, 7254), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency_flatten * self.topk_ph)'], {}), '(NET.saliency_flatten * self.topk_ph)\n', (7217, 7254), True, 'import tensorflow as tf\n'), ((13282, 13303), 'numpy.sign', 'np.sign', (['perturbation'], {}), '(perturbation)\n', (13289, 13303), True, 'import numpy as np\n'), ((13932, 14002), 'numpy.clip', 'np.clip', (['out_image', '(-self.mean_image)', '(self.pixel_max - self.mean_image)'], {}), '(out_image, -self.mean_image, self.pixel_max - self.mean_image)\n', (13939, 14002), True, 'import numpy as np\n'), ((18257, 18281), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (18263, 18281), True, 'import numpy as np\n'), ((22283, 22340), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w * h]', 'name': '"""topk_ph"""'}), "(tf.float32, shape=[w * h], name='topk_ph')\n", (22297, 22340), True, 'import tensorflow as tf\n'), ((22447, 22507), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[2]', 'name': '"""mass_center_ph"""'}), "(tf.float32, shape=[2], name='mass_center_ph')\n", (22461, 22507), True, 'import tensorflow as tf\n'), ((22627, 22689), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w, h]', 'name': '"""target_map_ph"""'}), "(tf.float32, shape=[w, h], name='target_map_ph')\n", (22641, 22689), True, 'import tensorflow as tf\n'), ((22803, 22844), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_0"""'}), "(tf.float32, name='beta_0')\n", (22817, 22844), True, 'import tensorflow as tf\n'), ((22870, 22911), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_1"""'}), "(tf.float32, name='beta_1')\n", (22884, 22911), True, 'import tensorflow as tf\n'), ((23066, 23144), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_class]', 'name': '"""original_output_ph"""'}), "(tf.float32, shape=[None, num_class], name='original_output_ph')\n", (23080, 23144), True, 'import tensorflow as tf\n'), ((24615, 24688), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[test_image.shape[-3] * test_image.shape[-2]]'], {}), '(self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])\n', (24625, 24688), True, 'import numpy as np\n'), ((24805, 24820), 'numpy.zeros', 'np.zeros', (['(w * h)'], {}), '(w * h)\n', (24813, 24820), True, 'import numpy as np\n'), ((25238, 25278), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['NET.saliency_flatten', 'k_top'], {}), '(NET.saliency_flatten, k_top)\n', (25249, 25278), True, 'import tensorflow as tf\n'), ((25576, 25626), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency_flatten * self.topk_ph)'], {}), '(NET.saliency_flatten * self.topk_ph)\n', (25589, 25626), True, 'import tensorflow as tf\n'), ((26771, 26796), 'numpy.array', 'np.array', (['counterfactuals'], {}), '(counterfactuals)\n', (26779, 26796), True, 'import numpy as np\n'), ((32895, 32923), 'numpy.sign', 'np.sign', (['perturbation_summed'], {}), '(perturbation_summed)\n', (32902, 32923), True, 'import numpy as np\n'), ((33552, 33622), 'numpy.clip', 'np.clip', (['out_image', '(-self.mean_image)', '(self.pixel_max - self.mean_image)'], {}), '(out_image, -self.mean_image, self.pixel_max - self.mean_image)\n', (33559, 33622), True, 'import numpy as np\n'), ((38426, 38450), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (38432, 38450), True, 'import numpy as np\n'), ((42463, 42520), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w * h]', 'name': '"""topk_ph"""'}), "(tf.float32, shape=[w * h], name='topk_ph')\n", (42477, 42520), True, 'import tensorflow as tf\n'), ((42627, 42687), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[2]', 'name': '"""mass_center_ph"""'}), "(tf.float32, shape=[2], name='mass_center_ph')\n", (42641, 42687), True, 'import tensorflow as tf\n'), ((42807, 42869), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w, h]', 'name': '"""target_map_ph"""'}), "(tf.float32, shape=[w, h], name='target_map_ph')\n", (42821, 42869), True, 'import tensorflow as tf\n'), ((42983, 43024), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_0"""'}), "(tf.float32, name='beta_0')\n", (42997, 43024), True, 'import tensorflow as tf\n'), ((43050, 43091), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_1"""'}), "(tf.float32, name='beta_1')\n", (43064, 43091), True, 'import tensorflow as tf\n'), ((43246, 43324), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_class]', 'name': '"""original_output_ph"""'}), "(tf.float32, shape=[None, num_class], 
name='original_output_ph')\n", (43260, 43324), True, 'import tensorflow as tf\n'), ((44291, 44364), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[test_image.shape[-3] * test_image.shape[-2]]'], {}), '(self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])\n', (44301, 44364), True, 'import numpy as np\n'), ((44481, 44496), 'numpy.zeros', 'np.zeros', (['(w * h)'], {}), '(w * h)\n', (44489, 44496), True, 'import numpy as np\n'), ((45581, 45621), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['NET.saliency_flatten', 'k_top'], {}), '(NET.saliency_flatten, k_top)\n', (45592, 45621), True, 'import tensorflow as tf\n'), ((45919, 45969), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency_flatten * self.topk_ph)'], {}), '(NET.saliency_flatten * self.topk_ph)\n', (45932, 45969), True, 'import tensorflow as tf\n'), ((47179, 47204), 'numpy.array', 'np.array', (['counterfactuals'], {}), '(counterfactuals)\n', (47187, 47204), True, 'import numpy as np\n'), ((52460, 52484), 'numpy.mean', 'np.mean', (['perturbation', '(0)'], {}), '(perturbation, 0)\n', (52467, 52484), True, 'import numpy as np\n'), ((52500, 52528), 'numpy.sign', 'np.sign', (['perturbation_summed'], {}), '(perturbation_summed)\n', (52507, 52528), True, 'import numpy as np\n'), ((53148, 53218), 'numpy.clip', 'np.clip', (['out_image', '(-self.mean_image)', '(self.pixel_max - self.mean_image)'], {}), '(out_image, -self.mean_image, self.pixel_max - self.mean_image)\n', (53155, 53218), True, 'import numpy as np\n'), ((57542, 57566), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (57548, 57566), True, 'import numpy as np\n'), ((59693, 59718), 'numpy.array', 'np.array', (['counterfactuals'], {}), '(counterfactuals)\n', (59701, 59718), True, 'import numpy as np\n'), ((836, 852), '_pickle.load', 'pkl.load', (['inputs'], {}), '(inputs)\n', (844, 852), True, 'import _pickle as pkl\n'), ((6542, 6572), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (6551, 6572), True, 'import numpy as np\n'), ((6733, 6757), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (6739, 6757), True, 'import numpy as np\n'), ((6944, 6956), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (6953, 6956), True, 'import numpy as np\n'), ((6958, 6970), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (6967, 6970), True, 'import numpy as np\n'), ((7355, 7414), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((NET.mass_center - self.mass_center_ph) ** 2)'], {}), '((NET.mass_center - self.mass_center_ph) ** 2)\n', (7368, 7414), True, 'import tensorflow as tf\n'), ((7625, 7678), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.target_map_ph', 'NET.saliency'], {}), '(self.target_map_ph, NET.saliency)\n', (7644, 7678), True, 'import tensorflow as tf\n'), ((7704, 7760), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.original_output_ph', 'NET.output'], {}), '(self.original_output_ph, NET.output)\n', (7723, 7760), True, 'import tensorflow as tf\n'), ((12130, 12162), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(w, h, c)'}), '(size=(w, h, c))\n', (12146, 12162), True, 'import numpy as np\n'), ((13450, 13473), 'numpy.zeros_like', 'np.zeros_like', (['in_image'], {}), '(in_image)\n', (13463, 13473), True, 'import numpy as np\n'), ((14239, 14257), 'numpy.argmax', 'np.argmax', (['prob', '(1)'], {}), '(prob, 1)\n', (14248, 14257), True, 'import numpy as np\n'), ((18948, 18983), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), 
'(self.saliency2, [w * h])\n', (18958, 18983), True, 'import numpy as np\n'), ((22132, 22157), 'numpy.zeros_like', 'np.zeros_like', (['test_image'], {}), '(test_image)\n', (22145, 22157), True, 'import numpy as np\n'), ((23645, 23675), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (23654, 23675), True, 'import numpy as np\n'), ((23836, 23860), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (23842, 23860), True, 'import numpy as np\n'), ((25316, 25328), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (25325, 25328), True, 'import numpy as np\n'), ((25330, 25342), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (25339, 25342), True, 'import numpy as np\n'), ((25757, 25816), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((NET.mass_center - self.mass_center_ph) ** 2)'], {}), '((NET.mass_center - self.mass_center_ph) ** 2)\n', (25770, 25816), True, 'import tensorflow as tf\n'), ((26025, 26078), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.target_map_ph', 'NET.saliency'], {}), '(self.target_map_ph, NET.saliency)\n', (26044, 26078), True, 'import tensorflow as tf\n'), ((26104, 26160), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.original_output_ph', 'NET.output'], {}), '(self.original_output_ph, NET.output)\n', (26123, 26160), True, 'import tensorflow as tf\n'), ((31593, 31641), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.num_steps, w, h, c)'}), '(size=(self.num_steps, w, h, c))\n', (31609, 31641), True, 'import numpy as np\n'), ((33070, 33093), 'numpy.zeros_like', 'np.zeros_like', (['in_image'], {}), '(in_image)\n', (33083, 33093), True, 'import numpy as np\n'), ((33859, 33877), 'numpy.argmax', 'np.argmax', (['prob', '(1)'], {}), '(prob, 1)\n', (33868, 33877), True, 'import numpy as np\n'), ((39188, 39223), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (39198, 39223), True, 'import numpy as np\n'), ((42312, 42337), 'numpy.zeros_like', 'np.zeros_like', (['test_image'], {}), '(test_image)\n', (42325, 42337), True, 'import numpy as np\n'), ((45152, 45182), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (45161, 45182), True, 'import numpy as np\n'), ((45448, 45472), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (45454, 45472), True, 'import numpy as np\n'), ((45659, 45671), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (45668, 45671), True, 'import numpy as np\n'), ((45673, 45685), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (45682, 45685), True, 'import numpy as np\n'), ((46100, 46159), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((NET.mass_center - self.mass_center_ph) ** 2)'], {}), '((NET.mass_center - self.mass_center_ph) ** 2)\n', (46113, 46159), True, 'import tensorflow as tf\n'), ((46368, 46421), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.target_map_ph', 'NET.saliency'], {}), '(self.target_map_ph, NET.saliency)\n', (46387, 46421), True, 'import tensorflow as tf\n'), ((46447, 46503), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.original_output_ph', 'NET.output'], {}), '(self.original_output_ph, NET.output)\n', (46466, 46503), True, 'import tensorflow as tf\n'), ((51351, 51399), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.num_steps, w, h, c)'}), '(size=(self.num_steps, w, h, c))\n', (51367, 51399), True, 'import numpy as np\n'), ((52675, 52698), 'numpy.zeros_like', 
'np.zeros_like', (['in_image'], {}), '(in_image)\n', (52688, 52698), True, 'import numpy as np\n'), ((53455, 53473), 'numpy.argmax', 'np.argmax', (['prob', '(1)'], {}), '(prob, 1)\n', (53464, 53473), True, 'import numpy as np\n'), ((58304, 58339), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (58314, 58339), True, 'import numpy as np\n'), ((513, 537), 'numpy.ones_like', 'np.ones_like', (['MEAN_IMAGE'], {}), '(MEAN_IMAGE)\n', (525, 537), True, 'import numpy as np\n'), ((5796, 5831), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[w * h]'], {}), '(self.saliency1, [w * h])\n', (5806, 5831), True, 'import numpy as np\n'), ((7288, 7322), 'tensorflow.gradients', 'tf.gradients', (['topK_loss', 'NET.input'], {}), '(topK_loss, NET.input)\n', (7300, 7322), True, 'import tensorflow as tf\n'), ((7464, 7505), 'tensorflow.gradients', 'tf.gradients', (['mass_center_loss', 'NET.input'], {}), '(mass_center_loss, NET.input)\n', (7476, 7505), True, 'import tensorflow as tf\n'), ((12365, 12400), 'numpy.reshape', 'np.reshape', (['perturbation', '[w, h, c]'], {}), '(perturbation, [w, h, c])\n', (12375, 12400), True, 'import numpy as np\n'), ((18546, 18581), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (18556, 18581), True, 'import numpy as np\n'), ((24729, 24764), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[w * h]'], {}), '(self.saliency1, [w * h])\n', (24739, 24764), True, 'import numpy as np\n'), ((25690, 25724), 'tensorflow.gradients', 'tf.gradients', (['topK_loss', 'NET.input'], {}), '(topK_loss, NET.input)\n', (25702, 25724), True, 'import tensorflow as tf\n'), ((25865, 25906), 'tensorflow.gradients', 'tf.gradients', (['mass_center_loss', 'NET.input'], {}), '(mass_center_loss, NET.input)\n', (25877, 25906), True, 'import tensorflow as tf\n'), ((31856, 31907), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (31866, 31907), True, 'import numpy as np\n'), ((38786, 38821), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (38796, 38821), True, 'import numpy as np\n'), ((44405, 44440), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[w * h]'], {}), '(self.saliency1, [w * h])\n', (44415, 44440), True, 'import numpy as np\n'), ((45294, 45324), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (45303, 45324), True, 'import numpy as np\n'), ((46033, 46067), 'tensorflow.gradients', 'tf.gradients', (['topK_loss', 'NET.input'], {}), '(topK_loss, NET.input)\n', (46045, 46067), True, 'import tensorflow as tf\n'), ((46208, 46249), 'tensorflow.gradients', 'tf.gradients', (['mass_center_loss', 'NET.input'], {}), '(mass_center_loss, NET.input)\n', (46220, 46249), True, 'import tensorflow as tf\n'), ((51614, 51665), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (51624, 51665), True, 'import numpy as np\n'), ((57902, 57937), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (57912, 57937), True, 'import numpy as np\n'), ((7021, 7057), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * x_mesh)'], {}), '(NET.saliency * x_mesh)\n', (7034, 7057), True, 'import tensorflow as tf\n'), ((7081, 7117), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * y_mesh)'], {}), '(NET.saliency * 
y_mesh)\n', (7094, 7117), True, 'import tensorflow as tf\n'), ((7832, 7858), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['target_dis'], {}), '(target_dis)\n', (7846, 7858), True, 'import tensorflow as tf\n'), ((7912, 7938), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output_dis'], {}), '(output_dis)\n', (7926, 7938), True, 'import tensorflow as tf\n'), ((8035, 8071), 'tensorflow.gradients', 'tf.gradients', (['target_loss', 'NET.input'], {}), '(target_loss, NET.input)\n', (8047, 8071), True, 'import tensorflow as tf\n'), ((12659, 12694), 'numpy.reshape', 'np.reshape', (['perturbation', '[w, h, c]'], {}), '(perturbation, [w, h, c])\n', (12669, 12694), True, 'import numpy as np\n'), ((13635, 13648), 'numpy.sign', 'np.sign', (['pert'], {}), '(pert)\n', (13642, 13648), True, 'import numpy as np\n'), ((17952, 17998), 'numpy.abs', 'np.abs', (['(self.test_image - self.perturbed_image)'], {}), '(self.test_image - self.perturbed_image)\n', (17958, 17998), True, 'import numpy as np\n'), ((18619, 18655), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'self.top2'], {}), '(self.topK, self.top2)\n', (18633, 18655), True, 'import numpy as np\n'), ((25393, 25429), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * x_mesh)'], {}), '(NET.saliency * x_mesh)\n', (25406, 25429), True, 'import tensorflow as tf\n'), ((25453, 25489), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * y_mesh)'], {}), '(NET.saliency * y_mesh)\n', (25466, 25489), True, 'import tensorflow as tf\n'), ((26232, 26258), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['target_dis'], {}), '(target_dis)\n', (26246, 26258), True, 'import tensorflow as tf\n'), ((26312, 26338), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output_dis'], {}), '(output_dis)\n', (26326, 26338), True, 'import tensorflow as tf\n'), ((26435, 26471), 'tensorflow.gradients', 'tf.gradients', (['target_loss', 'NET.input'], {}), '(target_loss, NET.input)\n', (26447, 26471), True, 'import tensorflow as tf\n'), ((32178, 32229), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (32188, 32229), True, 'import numpy as np\n'), ((33255, 33268), 'numpy.sign', 'np.sign', (['pert'], {}), '(pert)\n', (33262, 33268), True, 'import numpy as np\n'), ((37615, 37661), 'numpy.abs', 'np.abs', (['(self.test_image - self.perturbed_image)'], {}), '(self.test_image - self.perturbed_image)\n', (37621, 37661), True, 'import numpy as np\n'), ((38859, 38895), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'self.top2'], {}), '(self.topK, self.top2)\n', (38873, 38895), True, 'import numpy as np\n'), ((45736, 45772), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * x_mesh)'], {}), '(NET.saliency * x_mesh)\n', (45749, 45772), True, 'import tensorflow as tf\n'), ((45796, 45832), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * y_mesh)'], {}), '(NET.saliency * y_mesh)\n', (45809, 45832), True, 'import tensorflow as tf\n'), ((46575, 46601), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['target_dis'], {}), '(target_dis)\n', (46589, 46601), True, 'import tensorflow as tf\n'), ((46655, 46681), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output_dis'], {}), '(output_dis)\n', (46669, 46681), True, 'import tensorflow as tf\n'), ((46778, 46814), 'tensorflow.gradients', 'tf.gradients', (['target_loss', 'NET.input'], {}), '(target_loss, NET.input)\n', (46790, 46814), True, 'import tensorflow as tf\n'), ((51936, 51987), 'numpy.reshape', 'np.reshape', (['perturbation', 
'[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (51946, 51987), True, 'import numpy as np\n'), ((57236, 57282), 'numpy.abs', 'np.abs', (['(self.test_image - self.perturbed_image)'], {}), '(self.test_image - self.perturbed_image)\n', (57242, 57282), True, 'import numpy as np\n'), ((57975, 58011), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'self.top2'], {}), '(self.topK, self.top2)\n', (57989, 58011), True, 'import numpy as np\n'), ((14883, 14947), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (14904, 14947), False, 'import scipy\n'), ((34578, 34642), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (34599, 34642), False, 'import scipy\n'), ((54174, 54238), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (54195, 54238), False, 'import scipy\n'), ((59564, 59609), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'in_image.shape'}), '(-1, 1, size=in_image.shape)\n', (59581, 59609), True, 'import numpy as np\n'), ((13231, 13266), 'numpy.reshape', 'np.reshape', (['perturbation', '[w, h, c]'], {}), '(perturbation, [w, h, c])\n', (13241, 13266), True, 'import numpy as np\n'), ((14494, 14525), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'top2'], {}), '(self.topK, top2)\n', (14508, 14525), True, 'import numpy as np\n'), ((15227, 15270), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mass_center1 - center2)'], {}), '(self.mass_center1 - center2)\n', (15241, 15270), True, 'import numpy as np\n'), ((15556, 15628), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (15585, 15628), False, 'import scipy\n'), ((32613, 32664), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (32623, 32664), True, 'import numpy as np\n'), ((34189, 34220), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'top2'], {}), '(self.topK, top2)\n', (34203, 34220), True, 'import numpy as np\n'), ((34922, 34965), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mass_center1 - center2)'], {}), '(self.mass_center1 - center2)\n', (34936, 34965), True, 'import numpy as np\n'), ((35251, 35323), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (35280, 35323), False, 'import scipy\n'), ((52336, 52387), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (52346, 52387), True, 'import numpy as np\n'), ((53785, 53816), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'top2'], {}), '(self.topK, top2)\n', (53799, 53816), True, 'import numpy as np\n'), ((54518, 54561), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mass_center1 - center2)'], {}), '(self.mass_center1 - center2)\n', (54532, 54561), True, 'import numpy as np\n'), ((54847, 54919), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (54876, 54919), False, 'import scipy\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 15:05:24 2018
@author: Hendry
"""
from read_data import *
from TokenizeSentences import *
import numpy as np
def onehot(data,nClass):
data2 = np.zeros([len(data),nClass])
for i in range(nClass):
data2[np.where(data==i),i]= 1
return data2
def get_text_idx(text,vocab,max_document_length):
text_array = np.zeros([len(text), max_document_length],dtype=np.int32)
for i,x in enumerate(text):
words = x
for j, w in enumerate(words):
if w in vocab:
text_array[i, j] = vocab[w]
else :
text_array[i, j] = vocab['the']
return text_array
def loaddata(w2v_model,typeOfClassify = 0,useTextsum= 1):
train_bodies = readRawData('train_bodies.csv')
if useTextsum == 0:
trainDocs = TokenizeSentences(splitData(train_bodies,1))
else:
f = open('./fnc_data/train_1.txt','r')
data = f.readlines()
f.close()
trainDocs = TokenizeSentences(data)
trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int')
train_stances = readRawData('train_stances.csv')
trainTitle = TokenizeSentences(splitData(train_stances,0))
trainTitleIdx = np.array(splitData(train_stances,1)).astype('int')
trainRes = np.array(splitData(train_stances,2))
trainRes[np.where(trainRes=='unrelated')]='0'
trainRes[np.where(trainRes=='agree')]='1'
trainRes[np.where(trainRes=='disagree')]='2'
trainRes[np.where(trainRes=='discuss')]='3'
trainRes =trainRes.astype('int')
maxDocLength = 0
for i in range(len(trainDocs)):
maxDocLength = max(maxDocLength,len(trainDocs[i]))
maxTitleLength = 0
for i in range(len(trainTitle)):
maxTitleLength = max(maxTitleLength,len(trainTitle[i]))
trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength)
trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength)
trainTitleDocs = [[] for i in range(len(trainTitle))]
for i in range(len(trainTitle)):
idx = np.where(trainDocsIdx==trainTitleIdx[i])
trainTitleDocs[i]=trainDocs[int(idx[0])]
trainTitleDocs = np.array(trainTitleDocs)
trainDocs = np.array(trainDocs)
trainTitle = np.array(trainTitle)
uniIdx = np.unique(trainTitleIdx)
uniIdxTest = uniIdx[round(0.95*len(uniIdx)):]
validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0])
for i in range(len(uniIdxTest)-1):
validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1]))
validIdx = sorted(validIdx)
fullIdx = list(range(len(trainTitleIdx)))
trainIdx = list(set(fullIdx).difference(set(validIdx)))
x1Train = trainTitleDocs[trainIdx]
x2Train = trainTitle[trainIdx]
trainRes = np.array(trainRes)
y0Train = trainRes[trainIdx]
x1Valid = trainTitleDocs[validIdx]
x2Valid = trainTitle[validIdx]
y0Valid = trainRes[validIdx]
if typeOfClassify==0:
yValid = onehot(y0Valid,4)
yTrain = onehot(y0Train,4)
elif typeOfClassify==1:
y0Train[y0Train>0]=1
y0Valid[y0Valid>0]=1
yValid = onehot(y0Valid,2)
yTrain = onehot(y0Train,2)
elif typeOfClassify==2:
x1Train = x1Train[y0Train>0]
x2Train = x2Train[y0Train>0]
y0Train = y0Train[y0Train>0]-1
x1Valid = x1Valid[y0Valid>0]
x2Valid = x2Valid[y0Valid>0]
y0Valid = y0Valid[y0Valid>0]-1
yValid = onehot(y0Valid,3)
yTrain = onehot(y0Train,3)
vocab_size = len(w2v_model.vocab_hash)
return x1Train, x1Valid, x2Train, x2Valid, yTrain, yValid, vocab_size
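
# Illustrative usage sketch (not part of the original script): it assumes a
# word2vec-style embedding model that exposes `vocab_hash` (e.g. one loaded
# with the `word2vec` package) and the FNC-1 CSV/text files referenced above.
if __name__ == '__main__':
    import word2vec
    w2v_model = word2vec.load('vectors.bin')  # placeholder path to pre-trained vectors
    x1Train, x1Valid, x2Train, x2Valid, yTrain, yValid, vocab_size = \
        loaddata(w2v_model, typeOfClassify=0, useTextsum=0)
    print('train/valid examples:', len(x1Train), len(x1Valid), 'vocab size:', vocab_size)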
|
[
"numpy.argwhere",
"numpy.where",
"numpy.array",
"numpy.unique"
] |
[((2079, 2103), 'numpy.array', 'np.array', (['trainTitleDocs'], {}), '(trainTitleDocs)\n', (2087, 2103), True, 'import numpy as np\n'), ((2117, 2136), 'numpy.array', 'np.array', (['trainDocs'], {}), '(trainDocs)\n', (2125, 2136), True, 'import numpy as np\n'), ((2151, 2171), 'numpy.array', 'np.array', (['trainTitle'], {}), '(trainTitle)\n', (2159, 2171), True, 'import numpy as np\n'), ((2182, 2206), 'numpy.unique', 'np.unique', (['trainTitleIdx'], {}), '(trainTitleIdx)\n', (2191, 2206), True, 'import numpy as np\n'), ((2267, 2310), 'numpy.argwhere', 'np.argwhere', (['(trainTitleIdx == uniIdxTest[0])'], {}), '(trainTitleIdx == uniIdxTest[0])\n', (2278, 2310), True, 'import numpy as np\n'), ((2639, 2657), 'numpy.array', 'np.array', (['trainRes'], {}), '(trainRes)\n', (2647, 2657), True, 'import numpy as np\n'), ((1307, 1340), 'numpy.where', 'np.where', (["(trainRes == 'unrelated')"], {}), "(trainRes == 'unrelated')\n", (1315, 1340), True, 'import numpy as np\n'), ((1354, 1383), 'numpy.where', 'np.where', (["(trainRes == 'agree')"], {}), "(trainRes == 'agree')\n", (1362, 1383), True, 'import numpy as np\n'), ((1397, 1429), 'numpy.where', 'np.where', (["(trainRes == 'disagree')"], {}), "(trainRes == 'disagree')\n", (1405, 1429), True, 'import numpy as np\n'), ((1443, 1474), 'numpy.where', 'np.where', (["(trainRes == 'discuss')"], {}), "(trainRes == 'discuss')\n", (1451, 1474), True, 'import numpy as np\n'), ((1974, 2016), 'numpy.where', 'np.where', (['(trainDocsIdx == trainTitleIdx[i])'], {}), '(trainDocsIdx == trainTitleIdx[i])\n', (1982, 2016), True, 'import numpy as np\n'), ((2382, 2429), 'numpy.argwhere', 'np.argwhere', (['(trainTitleIdx == uniIdxTest[i + 1])'], {}), '(trainTitleIdx == uniIdxTest[i + 1])\n', (2393, 2429), True, 'import numpy as np\n'), ((280, 299), 'numpy.where', 'np.where', (['(data == i)'], {}), '(data == i)\n', (288, 299), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Author: <NAME>.
# Created: Dec 11, 2014.
"""Some utility functions to handle images."""
import math
import numpy as np
import PIL.Image
from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT
import skimage.transform
def imcast(img, dtype, color_space="default"):
"""Cast the input image to a given data type.
Parameters
----------
img: ndarray
The input image.
    dtype: np.dtype
        The type that the output image will be cast into.
color_space: string, optional
The color space of the input image, which affects the casting operation.
Returns
-------
The output image that is cast into `dtype`.
Notes
-----
    * For `color_space=="default"`, we perform a linear scaling with the
      following range conventions:
* `np.uint8`: `[0, 255]`;
* `np.uint16`: `[0, 65535]`;
* `np.float32` and `np.float64`: `[0.0, 1.0]`.
For example, if the input `img` is of `np.uint8` type and the expected
`dtype` is `np.float32`, then the output will be
`np.asarray(img / 255., np.float32)`.
* For `color_space=="CIE-L*a*b*"`, the "normal" value ranges are
`0 <= L <= 100, -127 <= a, b <= 127`, and we perform the following cast:
* `np.uint8`: `L <- L * 255 / 100, a <- a + 128, b <- b + 128`;
* `np.uint16`: currently not supported;
* `np.float32` and `np.float64`: left as is.
"""
if img.dtype == dtype:
return img
if color_space == "default":
if dtype == np.uint8:
if img.dtype == np.uint16:
return np.asarray(img / 257, np.uint8)
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img * 255., np.uint8)
elif dtype == np.uint16:
if img.dtype == np.uint8:
return np.asarray(img, np.uint16) * 257
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img * 65535., np.uint16)
elif dtype == np.float32 or dtype == np.float64:
if img.dtype == np.uint8:
return np.asarray(img, dtype) / 255.
elif img.dtype == np.uint16:
return np.asarray(img, dtype) / 65535.
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img, dtype)
elif color_space == "CIE-L*a*b*":
if dtype == np.uint8:
if img.dtype == np.float32 or img.dtype == np.float64:
dst = np.empty(img.shape, np.uint8)
dst[:,:,0] = img[:,:,0] * 255. / 100.
dst[:,:,1] = img[:,:,1] + 128.
dst[:,:,2] = img[:,:,2] + 128.
return dst
elif dtype == np.float32 or dtype == np.float64:
if img.dtype == np.uint8:
dst = np.empty(img.shape, dtype)
dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100.
dst[:,:,1] = np.asarray(img[:,:,1], dtype) - 128.
dst[:,:,2] = np.asarray(img[:,:,2], dtype) - 128.
return dst
raise Exception(
"Unexpected conversion from '%s' to '%s' with '%s' color space" % \
(img.dtype, dtype, color_space))
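# Illustrative examples of the default-range convention documented above
# (not part of the original module):
#   imcast(np.array([[0, 255]], np.uint8), np.float32)   -> [[0.0, 1.0]]
#   imcast(np.array([[0.0, 1.0]], np.float32), np.uint8)  -> [[0, 255]]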
def imread(filename, dtype=np.uint8, color_space="default"):
"""Read the image followed by an :py:func:`imcast`."""
img = PIL.Image.open(filename)
if img.mode != "RGB":
img = img.convert("RGB")
if hasattr(img, "_getexif"):
try:
exif = img._getexif() or {}
except IOError:
exif = {}
orientation = exif.get(0x0112)
if orientation:
# see http://park2.wakwak.com/~tsuruzoh/Computer/Digicams/exif-e.html
# for explanation of the magical constants
# or see http://jpegclub.org/exif_orientation.html for a nice visual explanation
# also, rotations are counter-clockwise in PIL
orientation = int(orientation)
rotation = [None, None, ROTATE_180, None, ROTATE_270, ROTATE_270, ROTATE_90, ROTATE_90]
flip = [None, FLIP_LEFT_RIGHT, None, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None,
FLIP_LEFT_RIGHT, None]
orientation0 = orientation - 1 # it's 1-indexed per the EXIF spec
if 0 <= orientation0 < len(rotation):
if rotation[orientation0] is not None:
img = img.transpose(rotation[orientation0])
if flip[orientation0] is not None:
img = img.transpose(flip[orientation0])
return imcast(np.array(img), dtype, color_space)
def imwrite(filename, img, dtype=np.uint8, color_space="default"):
"""Perform an :py:func:`imcast` before writing to the output file."""
import scipy.misc
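    # Note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2;
    # with a modern SciPy this call needs an alternative such as imageio.imwrite.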
return scipy.misc.imsave(filename, imcast(img, dtype, color_space))
def imresize(img, size):
"""Resize the input image.
Parameters
----------
img: ndarray
The input image to be resized.
size: a scalar for `scale` or a 2-tuple for `(num_rows, num_cols)`
One of the `num_rows` or `num_cols` can be -1, which will be inferred
such that the output image has the same aspect ratio as the input.
Returns
-------
The resized image.
"""
if hasattr(size, "__len__"):
num_rows, num_cols = size
assert (num_rows > 0) or (num_cols > 0)
        if num_rows < 0:
            num_rows = int(round(num_cols * img.shape[0] / img.shape[1]))
        if num_cols < 0:
            num_cols = int(round(num_rows * img.shape[1] / img.shape[0]))
else:
num_rows = int(round(img.shape[0] * size))
num_cols = int(round(img.shape[1] * size))
return skimage.transform.resize(img, (num_rows, num_cols))
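# Illustrative examples (not part of the original module): for a 100x200 input,
# both calls below return a 50x100 result.
#   imresize(img, 0.5)
#   imresize(img, (50, -1))   # -1 infers num_cols from the aspect ratio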
def create_icon_mosaic(icons, icon_shape=None,
border_size=1, border_color=None, empty_color=None,
                       mosaic_shape=None, mosaic_dtype=float):
"""Create a mosaic of image icons.
Parameters
----------
icons: a list of `ndarray`s
A list of icons to be put together for mosaic. Currently we require all
icons to be multi-channel images of the same size.
icon_shape: 3-tuple, optional
The shape of icons in the output mosaic as `(num_rows, num_cols, num_channels)`.
    If not specified, use the shape of the first image in `icons`.
border_size: int, optional
The size of border.
border_color: 3-tuple, optional
The color of border, black if not specified.
empty_color: 3-tuple, optional
The color for empty cells, black if not specified.
mosaic_shape: 2-tuple, optional
    The shape of the output mosaic as `(num_icons_per_row,
    num_icons_per_col)`. If not specified, try to make a square mosaic
    according to the number of icons.
mosaic_dtype: dtype
The data type of output mosaic.
Returns
-------
The created mosaic image.
"""
# Set default parameters.
num_icons = len(icons)
assert num_icons > 0
if icon_shape is None:
icon_shape = icons[0].shape
assert len(icon_shape) == 3
num_channels = icon_shape[2]
if border_color is None:
border_color = np.zeros(num_channels)
if empty_color is None:
empty_color = np.zeros(num_channels)
if mosaic_shape is None:
num_cols = int(math.ceil(math.sqrt(num_icons)))
num_rows = int(math.ceil(float(num_icons) / num_cols))
mosaic_shape = (num_rows, num_cols)
mosaic_image_shape = (
mosaic_shape[0] * icon_shape[0] + (mosaic_shape[0]-1) * border_size,
mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size,
icon_shape[2])
# Create mosaic image and fill with border color.
mosaic_image = np.empty(mosaic_image_shape, dtype=mosaic_dtype)
  for c in range(mosaic_image.shape[2]):
mosaic_image[:,:,c] = border_color[c]
# Fill in the input icons.
  for idx in range(num_icons):
    i = idx // mosaic_shape[1]
j = idx % mosaic_shape[1]
iStart = i * (icon_shape[0] + border_size)
jStart = j * (icon_shape[1] + border_size)
mosaic_image[iStart:iStart+icon_shape[0],
jStart:jStart+icon_shape[1],:] = icons[idx]
# Fill the empty icons with empty colors.
  for idx in range(num_icons, mosaic_shape[0]*mosaic_shape[1]):
    i = idx // mosaic_shape[1]
j = idx % mosaic_shape[1]
iStart = i * (icon_shape[0] + border_size)
jStart = j * (icon_shape[1] + border_size)
    for c in range(mosaic_image.shape[2]):
mosaic_image[iStart:iStart+icon_shape[0],
jStart:jStart+icon_shape[1],c] = empty_color[c]
return mosaic_image
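# Hedged usage sketch (not part of the original module): tile ten random
# 8x8 RGB icons into a roughly square mosaic with a 2-pixel white border.
#   icons = [np.random.rand(8, 8, 3) for _ in range(10)]
#   mosaic = create_icon_mosaic(icons, border_size=2, border_color=np.ones(3))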
def image_size_from_file(filename):
"""Read the image size from a file.
    This function only loads the image header (rather than the whole
    rasterized data) in order to determine the image dimensions.
Parameters
----------
filename: string
The input image file.
Returns
-------
The 2-tuple for image size `(num_rows, num_cols)`.
"""
with PIL.Image.open(filename) as img:
width, height = img.size
return height, width
|
[
"math.sqrt",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.array"
] |
[((7824, 7872), 'numpy.empty', 'np.empty', (['mosaic_image_shape'], {'dtype': 'mosaic_dtype'}), '(mosaic_image_shape, dtype=mosaic_dtype)\n', (7832, 7872), True, 'import numpy as np\n'), ((4631, 4644), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4639, 4644), True, 'import numpy as np\n'), ((7259, 7281), 'numpy.zeros', 'np.zeros', (['num_channels'], {}), '(num_channels)\n', (7267, 7281), True, 'import numpy as np\n'), ((7332, 7354), 'numpy.zeros', 'np.zeros', (['num_channels'], {}), '(num_channels)\n', (7340, 7354), True, 'import numpy as np\n'), ((1634, 1665), 'numpy.asarray', 'np.asarray', (['(img / 257)', 'np.uint8'], {}), '(img / 257, np.uint8)\n', (1644, 1665), True, 'import numpy as np\n'), ((7417, 7437), 'math.sqrt', 'math.sqrt', (['num_icons'], {}), '(num_icons)\n', (7426, 7437), False, 'import math\n'), ((1758, 1791), 'numpy.asarray', 'np.asarray', (['(img * 255.0)', 'np.uint8'], {}), '(img * 255.0, np.uint8)\n', (1768, 1791), True, 'import numpy as np\n'), ((2562, 2591), 'numpy.empty', 'np.empty', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (2570, 2591), True, 'import numpy as np\n'), ((1885, 1911), 'numpy.asarray', 'np.asarray', (['img', 'np.uint16'], {}), '(img, np.uint16)\n', (1895, 1911), True, 'import numpy as np\n'), ((2010, 2046), 'numpy.asarray', 'np.asarray', (['(img * 65535.0)', 'np.uint16'], {}), '(img * 65535.0, np.uint16)\n', (2020, 2046), True, 'import numpy as np\n'), ((2884, 2910), 'numpy.empty', 'np.empty', (['img.shape', 'dtype'], {}), '(img.shape, dtype)\n', (2892, 2910), True, 'import numpy as np\n'), ((2164, 2186), 'numpy.asarray', 'np.asarray', (['img', 'dtype'], {}), '(img, dtype)\n', (2174, 2186), True, 'import numpy as np\n'), ((3013, 3044), 'numpy.asarray', 'np.asarray', (['img[:, :, 1]', 'dtype'], {}), '(img[:, :, 1], dtype)\n', (3023, 3044), True, 'import numpy as np\n'), ((3079, 3110), 'numpy.asarray', 'np.asarray', (['img[:, :, 2]', 'dtype'], {}), '(img[:, :, 2], dtype)\n', (3089, 3110), True, 'import numpy as np\n'), ((2258, 2280), 'numpy.asarray', 'np.asarray', (['img', 'dtype'], {}), '(img, dtype)\n', (2268, 2280), True, 'import numpy as np\n'), ((2382, 2404), 'numpy.asarray', 'np.asarray', (['img', 'dtype'], {}), '(img, dtype)\n', (2392, 2404), True, 'import numpy as np\n'), ((2940, 2971), 'numpy.asarray', 'np.asarray', (['img[:, :, 0]', 'dtype'], {}), '(img[:, :, 0], dtype)\n', (2950, 2971), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
import os
import sys
import numpy as np
from simulater import Simulater
from play_back import PlayBack, PlayBacks
COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT']
def get_max_command(target_dict):
return max([(v,k) for k,v in target_dict.items()])[1]
def simplify(command):
return command[0]
def print_Q(Q, x, y):
ret = []
for i in range(y):
ret.append(['0' for _ in range(x)])
for k in Q:
ret[k[1]][k[0]] = simplify(get_max_command(Q[k]))
for this_line in ret:
print(''.join(this_line))
if __name__ == '__main__':
# parameters
file_name = 'default.txt'
epoch_num = 1000
max_trial = 5000
gamma = 0.1
alpha = 0.1
epsilon = 0.5
# make simulater
sim = Simulater(file_name)
# initialize Q value
x, y = sim.map_size()
Q = {}
for i in range(x):
for j in range(y):
Q[(i, j)] = {_:np.random.normal() for _ in COMMAND}
#Q[(i, j)] = {_:0.0 for _ in COMMAND}
# main
minimum_pbs = None
for epoch in range(epoch_num):
sim.reset()
this_pbs = PlayBacks()
for i in range(max_trial):
# get current
current_x, current_y = sim.get_current()
# select_command
tmp_Q = Q[(current_x, current_y)]
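            # epsilon-greedy: exploit the best-known action with probability 1 - epsilon,
            # otherwise explore with a uniformly random action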
command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND)
current_value = tmp_Q[command]
# reward
reward = sim(command)
# update
next_x, next_y = sim.get_current()
next_max_command = get_max_command(Q[(next_x, next_y)])
next_value = Q[(next_x, next_y)][next_max_command]
tmp_Q[command] += alpha * (reward + gamma * next_value - current_value)
# play back
this_pbs.append(PlayBack((current_x, current_y),
command,
(next_x, next_y),
reward))
# end check
if sim.end_episode():
print('find goal')
epsilon *= 0.95
if epsilon < 0.05:
epsilon = 0.05
if minimum_pbs is None:
minimum_pbs = this_pbs
elif len(minimum_pbs) > len(this_pbs):
minimum_pbs = this_pbs
print(epsilon)
break
# update with minimum_pbs
if minimum_pbs is not None:
for pb in minimum_pbs:
tmp_Q = Q[pb.state]
current_value = tmp_Q[pb.action]
next_Q = Q[pb.next_state]
next_max_command = get_max_command(next_Q)
next_value = next_Q[next_max_command]
tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value)
sim.printing()
print('---')
print_Q(Q, x, y)
print('---')
|
[
"numpy.random.uniform",
"simulater.Simulater",
"play_back.PlayBacks",
"numpy.random.normal",
"numpy.random.choice",
"play_back.PlayBack"
] |
[((768, 788), 'simulater.Simulater', 'Simulater', (['file_name'], {}), '(file_name)\n', (777, 788), False, 'from simulater import Simulater\n'), ((1127, 1138), 'play_back.PlayBacks', 'PlayBacks', ([], {}), '()\n', (1136, 1138), False, 'from play_back import PlayBack, PlayBacks\n'), ((929, 947), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (945, 947), True, 'import numpy as np\n'), ((1412, 1437), 'numpy.random.choice', 'np.random.choice', (['COMMAND'], {}), '(COMMAND)\n', (1428, 1437), True, 'import numpy as np\n'), ((1898, 1965), 'play_back.PlayBack', 'PlayBack', (['(current_x, current_y)', 'command', '(next_x, next_y)', 'reward'], {}), '((current_x, current_y), command, (next_x, next_y), reward)\n', (1906, 1965), False, 'from play_back import PlayBack, PlayBacks\n'), ((1377, 1396), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1394, 1396), True, 'import numpy as np\n')]
|
from numpy.linalg import cholesky
import numpy as np
def senti2cate(x):
if x<=-0.6:
return 0
elif x>-0.6 and x<=-0.2:
return 1
elif x>-0.2 and x<0.2:
return 2
elif x>=0.2 and x<0.6:
return 3
elif x>=0.6:
return 4
def dcg_score(y_true, y_score, k=10):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gains = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
best = dcg_score(y_true, y_true, k)
actual = dcg_score(y_true, y_score, k)
return actual / best
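# Worked example (illustrative, not part of the original module):
# for y_true = [1, 0, 0] and y_score = [0.1, 0.9, 0.2] the score ranking puts the
# relevant item last, so DCG@3 = 0/log2(2) + 0/log2(3) + 1/log2(4) = 0.5, while the
# ideal DCG@3 is 1.0, and ndcg_score returns 0.5.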
def mrr_score(y_true, y_score):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order)
rr_score = y_true / (np.arange(len(y_true)) + 1)
return np.sum(rr_score) / np.sum(y_true)
def auc(label,score):
label=np.array(label)
score=np.array(score)
false_score = score[label==0]
positive_score = score[label==1]
num_positive = (label==1).sum()
num_negative = (label==0).sum()
positive_score = positive_score.reshape((num_positive,1))
positive_score = np.repeat(positive_score,num_negative,axis=1)
false_score = false_score.reshape((1,num_negative))
false_score = np.repeat(false_score,num_positive,axis=0)
return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean())
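# Worked example (illustrative, not part of the original module):
# auc([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]) compares every (positive, negative)
# score pair; 3 of the 4 pairs rank the positive higher, so the result is 0.75.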
def embedding(embfile,word_dict):
emb_dict = {}
with open(embfile,'rb')as f:
while True:
line = f.readline()
if len(line) == 0:
break
data = line.split()
word = data[0].decode()
if len(word) != 0:
vec = [float(x) for x in data[1:]]
if word in word_dict:
emb_dict[word] = vec
emb_table = [0]*len(word_dict)
dummy = np.zeros(300,dtype='float32')
all_emb = []
for i in emb_dict:
emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32')
all_emb.append(emb_table[word_dict[i][0]])
all_emb = np.array(all_emb,dtype='float32')
mu = np.mean(all_emb, axis=0)
Sigma = np.cov(all_emb.T)
norm = np.random.multivariate_normal(mu, Sigma, 1)
for i in range(len(emb_table)):
if type(emb_table[i]) == int:
emb_table[i] = np.reshape(norm, 300)
emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,))
emb_table = np.array(emb_table,dtype='float32')
return emb_table
|
[
"numpy.random.uniform",
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"numpy.mean",
"numpy.array",
"numpy.take",
"numpy.random.multivariate_normal",
"numpy.reshape",
"numpy.cov",
"numpy.repeat"
] |
[((379, 405), 'numpy.take', 'np.take', (['y_true', 'order[:k]'], {}), '(y_true, order[:k])\n', (386, 405), True, 'import numpy as np\n'), ((497, 522), 'numpy.sum', 'np.sum', (['(gains / discounts)'], {}), '(gains / discounts)\n', (503, 522), True, 'import numpy as np\n'), ((757, 779), 'numpy.take', 'np.take', (['y_true', 'order'], {}), '(y_true, order)\n', (764, 779), True, 'import numpy as np\n'), ((911, 926), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (919, 926), True, 'import numpy as np\n'), ((937, 952), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (945, 952), True, 'import numpy as np\n'), ((1179, 1226), 'numpy.repeat', 'np.repeat', (['positive_score', 'num_negative'], {'axis': '(1)'}), '(positive_score, num_negative, axis=1)\n', (1188, 1226), True, 'import numpy as np\n'), ((1299, 1343), 'numpy.repeat', 'np.repeat', (['false_score', 'num_positive'], {'axis': '(0)'}), '(false_score, num_positive, axis=0)\n', (1308, 1343), True, 'import numpy as np\n'), ((1916, 1946), 'numpy.zeros', 'np.zeros', (['(300)'], {'dtype': '"""float32"""'}), "(300, dtype='float32')\n", (1924, 1946), True, 'import numpy as np\n'), ((2131, 2165), 'numpy.array', 'np.array', (['all_emb'], {'dtype': '"""float32"""'}), "(all_emb, dtype='float32')\n", (2139, 2165), True, 'import numpy as np\n'), ((2174, 2198), 'numpy.mean', 'np.mean', (['all_emb'], {'axis': '(0)'}), '(all_emb, axis=0)\n', (2181, 2198), True, 'import numpy as np\n'), ((2211, 2228), 'numpy.cov', 'np.cov', (['all_emb.T'], {}), '(all_emb.T)\n', (2217, 2228), True, 'import numpy as np\n'), ((2242, 2285), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'Sigma', '(1)'], {}), '(mu, Sigma, 1)\n', (2271, 2285), True, 'import numpy as np\n'), ((2428, 2471), 'numpy.random.uniform', 'np.random.uniform', (['(-0.03)', '(0.03)'], {'size': '(300,)'}), '(-0.03, 0.03, size=(300,))\n', (2445, 2471), True, 'import numpy as np\n'), ((2486, 2522), 'numpy.array', 'np.array', (['emb_table'], {'dtype': '"""float32"""'}), "(emb_table, dtype='float32')\n", (2494, 2522), True, 'import numpy as np\n'), ((340, 359), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (350, 359), True, 'import numpy as np\n'), ((718, 737), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (728, 737), True, 'import numpy as np\n'), ((844, 860), 'numpy.sum', 'np.sum', (['rr_score'], {}), '(rr_score)\n', (850, 860), True, 'import numpy as np\n'), ((863, 877), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (869, 877), True, 'import numpy as np\n'), ((2028, 2066), 'numpy.array', 'np.array', (['emb_dict[i]'], {'dtype': '"""float32"""'}), "(emb_dict[i], dtype='float32')\n", (2036, 2066), True, 'import numpy as np\n'), ((2387, 2408), 'numpy.reshape', 'np.reshape', (['norm', '(300)'], {}), '(norm, 300)\n', (2397, 2408), True, 'import numpy as np\n')]
|
import numpy as np
import re
lineRegex = re.compile(r"(turn on|turn off|toggle) (\d+),(\d+) through (\d+),(\d+)")
def day6(fileName):
lights = np.zeros((1000, 1000), dtype=bool)
with open(fileName) as infile:
for line in infile:
match = lineRegex.match(line)
if match:
for x in range(int(match[2]), int(match[4]) + 1):
for y in range(int(match[3]), int(match[5]) + 1):
if match[1] == "turn on":
lights[y, x] = True
elif match[1] == "turn off":
lights[y, x] = False
elif match[1] == "toggle":
lights[y, x] = not lights[y, x]
else:
raise ValueError(f"Unknown directive: {match[1]}")
print(f"There are {lights.sum()} lights!")
def day6b(fileName):
lights = np.zeros((1000, 1000), dtype=int)
with open(fileName) as infile:
for line in infile:
match = lineRegex.match(line)
if match:
x1 = int(match[2])
x2 = int(match[4])
y1 = int(match[3])
y2 = int(match[5])
if match[1] == "turn on":
lights[y1:y2 + 1, x1:x2 + 1] += 1
elif match[1] == "turn off":
for x in range(x1, x2 + 1):
for y in range(y1, y2 + 1):
lights[y, x] = max(lights[y, x] - 1, 0)
elif match[1] == "toggle":
lights[y1:y2 + 1, x1:x2 + 1] += 2
else:
raise ValueError(f"Unknown directive: {match[1]}")
print(f"Brightness: {lights.sum()}")
#day6("6test.txt")
#day6("6.txt")
day6b("6btest.txt")
day6b("6.txt") #15343601
|
[
"numpy.zeros",
"re.compile"
] |
[((42, 117), 're.compile', 're.compile', (['"""(turn on|turn off|toggle) (\\\\d+),(\\\\d+) through (\\\\d+),(\\\\d+)"""'], {}), "('(turn on|turn off|toggle) (\\\\d+),(\\\\d+) through (\\\\d+),(\\\\d+)')\n", (52, 117), False, 'import re\n'), ((146, 180), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {'dtype': 'bool'}), '((1000, 1000), dtype=bool)\n', (154, 180), True, 'import numpy as np\n'), ((734, 767), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {'dtype': 'int'}), '((1000, 1000), dtype=int)\n', (742, 767), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
def discretize(value,action_dim,n_outputs):
discretization = tf.round(value)
discretization = tf.minimum(tf.constant(n_outputs-1, dtype=tf.float32,shape=[1,action_dim]),
tf.maximum(tf.constant(0, dtype=tf.float32,shape=[1,action_dim]), tf.to_float(discretization)))
return tf.to_int32(discretization)
if __name__=='__main__':
value=np.array((0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9))
a=discretize(value,value.shape[0],2)
with tf.Session() as sess:
print(a.eval())
|
[
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.round",
"tensorflow.to_int32",
"numpy.array",
"tensorflow.to_float"
] |
[((113, 128), 'tensorflow.round', 'tf.round', (['value'], {}), '(value)\n', (121, 128), True, 'import tensorflow as tf\n'), ((359, 386), 'tensorflow.to_int32', 'tf.to_int32', (['discretization'], {}), '(discretization)\n', (370, 386), True, 'import tensorflow as tf\n'), ((426, 484), 'numpy.array', 'np.array', (['(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)'], {}), '((0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9))\n', (434, 484), True, 'import numpy as np\n'), ((162, 229), 'tensorflow.constant', 'tf.constant', (['(n_outputs - 1)'], {'dtype': 'tf.float32', 'shape': '[1, action_dim]'}), '(n_outputs - 1, dtype=tf.float32, shape=[1, action_dim])\n', (173, 229), True, 'import tensorflow as tf\n'), ((534, 546), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (544, 546), True, 'import tensorflow as tf\n'), ((262, 317), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32', 'shape': '[1, action_dim]'}), '(0, dtype=tf.float32, shape=[1, action_dim])\n', (273, 317), True, 'import tensorflow as tf\n'), ((317, 344), 'tensorflow.to_float', 'tf.to_float', (['discretization'], {}), '(discretization)\n', (328, 344), True, 'import tensorflow as tf\n')]
|
import numpy as np
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
"""
Elastic deformation of images as described in
<NAME>, "Best Practices for
Convolutional Neural Networks applied to Visual
Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Modified from:
https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62
Modified to take 3D inputs
Deforms both the image and corresponding label file
Label volumes are interpolated via nearest neighbour
"""
def elastic_transform_3d(img_numpy, labels=None, alpha=1, sigma=20, c_val=0.0, method="linear"):
"""
:param img_numpy: 3D medical image modality
:param labels: 3D medical image labels
:param alpha: scaling factor of gaussian filter
:param sigma: standard deviation of random gaussian filter
:param c_val: fill value
:param method: interpolation method. supported methods : ("linear", "nearest")
:return: deformed image and/or label
"""
    assert img_numpy.ndim == 3, 'Wrong img shape, provide a 3D img'
    if labels is not None:
        assert img_numpy.shape == labels.shape, "Shapes of img and label do not match!"
shape = img_numpy.shape
# Define 3D coordinate system
coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2])
# Interpolated img
im_intrps = RegularGridInterpolator(coords, img_numpy,
method=method,
bounds_error=False,
fill_value=c_val)
# Get random elastic deformations
dx = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dy = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dz = gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
# Define sample points
x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]
indices = np.reshape(x + dx, (-1, 1)), \
np.reshape(y + dy, (-1, 1)), \
np.reshape(z + dz, (-1, 1))
    # Interpolate the 3D image
img_numpy = im_intrps(indices).reshape(shape)
# Interpolate labels
if labels is not None:
lab_intrp = RegularGridInterpolator(coords, labels,
method="nearest",
bounds_error=False,
fill_value=0)
labels = lab_intrp(indices).reshape(shape).astype(labels.dtype)
return img_numpy, labels
return img_numpy
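
# Minimal usage sketch (not part of the original module): deform a random 3-D
# volume together with a matching binary label map using the default settings.
if __name__ == "__main__":
    volume = np.random.rand(32, 32, 32)
    labels = (volume > 0.5).astype(np.uint8)
    warped_volume, warped_labels = elastic_transform_3d(volume, labels, alpha=1, sigma=4)
    print(warped_volume.shape, warped_labels.shape)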
|
[
"numpy.random.rand",
"scipy.interpolate.RegularGridInterpolator",
"numpy.arange",
"numpy.reshape"
] |
[((1499, 1599), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['coords', 'img_numpy'], {'method': 'method', 'bounds_error': '(False)', 'fill_value': 'c_val'}), '(coords, img_numpy, method=method, bounds_error=\n False, fill_value=c_val)\n', (1522, 1599), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((1397, 1416), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (1406, 1416), True, 'import numpy as np\n'), ((1418, 1437), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (1427, 1437), True, 'import numpy as np\n'), ((1439, 1458), 'numpy.arange', 'np.arange', (['shape[2]'], {}), '(shape[2])\n', (1448, 1458), True, 'import numpy as np\n'), ((2230, 2257), 'numpy.reshape', 'np.reshape', (['(x + dx)', '(-1, 1)'], {}), '(x + dx, (-1, 1))\n', (2240, 2257), True, 'import numpy as np\n'), ((2275, 2302), 'numpy.reshape', 'np.reshape', (['(y + dy)', '(-1, 1)'], {}), '(y + dy, (-1, 1))\n', (2285, 2302), True, 'import numpy as np\n'), ((2320, 2347), 'numpy.reshape', 'np.reshape', (['(z + dz)', '(-1, 1)'], {}), '(z + dz, (-1, 1))\n', (2330, 2347), True, 'import numpy as np\n'), ((2505, 2601), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['coords', 'labels'], {'method': '"""nearest"""', 'bounds_error': '(False)', 'fill_value': '(0)'}), "(coords, labels, method='nearest', bounds_error=\n False, fill_value=0)\n", (2528, 2601), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((1780, 1802), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1794, 1802), True, 'import numpy as np\n'), ((1905, 1927), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1919, 1927), True, 'import numpy as np\n'), ((2030, 2052), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (2044, 2052), True, 'import numpy as np\n')]
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General helper functions."""
from os import path
import numpy as np
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
from tensorflow_graphics.projects.nasa.lib import datasets
from tensorflow_graphics.projects.nasa.lib import models
import tensorflow_probability as tfp
from tqdm import trange
import trimesh
tf.disable_eager_execution()
tfd = tfp.distributions
def define_flags():
"""Define command line flags."""
flags = tf.app.flags
# Dataset Parameters
flags.DEFINE_enum("dataset", "amass",
list(k for k in datasets.dataset_dict.keys()),
"Name of the dataset.")
flags.DEFINE_string("data_dir", None, "Directory to load data from.")
flags.mark_flag_as_required("data_dir")
flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.")
flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.")
flags.DEFINE_integer("batch_size", 12, "Batch size.")
flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.")
flags.DEFINE_integer("subject", 0, "Index of the subject for training.")
# Model Parameters
flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()),
"Name of the model.")
flags.DEFINE_integer("n_parts", 24, "Number of parts.")
flags.DEFINE_integer("total_dim", 960,
"Dimension of the latent vector (in total).")
flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.")
flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.")
flags.DEFINE_bool("projection", True,
"Whether to use projected shape features.")
flags.DEFINE_float("level_set", 0.5, "The value of the level_set.")
flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.")
# Training Parameters
flags.DEFINE_float("lr", 1e-4, "Learning rate")
flags.DEFINE_string("train_dir", None, "Training directory.")
flags.mark_flag_as_required("train_dir")
flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.")
flags.DEFINE_integer("save_every", 5000,
"Number of steps to save checkpoint.")
flags.DEFINE_integer("summary_every", 500,
"Number of steps to save checkpoint.")
flags.DEFINE_float("label_w", 0.5, "Weight of labed vertices loss.")
flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.")
flags.DEFINE_bool("use_vert", True,
"Whether to use vertices on the mesh for training.")
flags.DEFINE_bool("use_joint", True,
"Whether to use joint-based transformation.")
flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.")
# Evalulation Parameters
flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.")
# Tracking Parameters
flags.DEFINE_float("theta_lr", 5e-4, "Learning rate")
flags.DEFINE_integer("max_steps_per_frame", 1792,
"Number of optimization steps for tracking each frame.")
flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"],
"Type of gradient to use in theta optimization.")
flags.DEFINE_integer("sample_track_vert", 1024,
"Number of vertex samples for tracking each frame.")
flags.DEFINE_integer("n_noisy_samples", 8,
"Number of noisy samples per vertex")
flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.")
flags.DEFINE_bool(
"left_trans", False,
"Whether to use left side transformation (True) or right side (False).")
flags.DEFINE_string("joint_data", None, "Path to load joint data.")
flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.")
flags.DEFINE_float("trans_range", 1., "The range of allowed translations.")
def gen_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=0):
"""Generating meshes given a trained NASA model."""
  scale = 1.1  # Scale of the padded bbox relative to the tight one.
level_set = hparams.level_set
latent_val = sess.run(latent, feed_dict)
mesh_extractor = mise.MISE(32, 3, level_set)
points = mesh_extractor.query()
gt_verts = batch_val["vert"].reshape([-1, 3])
gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0)
gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5
gt_scale = (gt_bbox[1] - gt_bbox[0]).max()
while points.shape[0] != 0:
orig_points = points
points = points.astype(np.float32)
points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution -
0.5) * scale
points = points * gt_scale + gt_center
n_points = points.shape[1]
values = []
for i in range(0, n_points,
100000): # Add this to prevent OOM due to points overload.
feed_dict[latent_holder] = latent_val
feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1)
value = sess.run(occ[:, idx], feed_dict)
values.append(value)
values = np.concatenate(values, axis=1)
values = values[0, :, 0].astype(np.float64)
mesh_extractor.update(orig_points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
try:
value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
value_grid, min(level_set, value_grid.max()))
del normals
verts -= 1
verts /= np.array([
value_grid.shape[0] - 3, value_grid.shape[1] - 3,
value_grid.shape[2] - 3
],
dtype=np.float32)
verts = scale * (verts - 0.5)
verts = verts * gt_scale + gt_center
faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
mesh = trimesh.Trimesh(vertices=verts, faces=faces)
return mesh
except: # pylint: disable=bare-except
return None
def save_mesh(sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
pth="meshes"):
"""Generate and save meshes to disk given a trained NASA model."""
name = batch_val["name"][0].decode("utf-8")
subject, motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
start = hparams.n_parts
for i in range(start, hparams.n_parts + 1):
mesh_model = gen_mesh(
sess,
feed_dict,
latent_holder,
point_holder,
latent,
occ,
batch_val,
hparams,
idx=i)
mesh_name = "full_pred.obj"
if mesh_model is not None:
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
mesh_model.export(fout, file_type="obj")
return subject, motion, frame, mesh_model
def save_pointcloud(data, hparams, pth="pointcloud"):
"""Save pointcloud to disk."""
name = data["name"][0].decode("utf-8")
unused_subject, unused_motion, frame = amass_name_helper(name)
pth = path.join(hparams.train_dir, pth, frame)
if not tf.io.gfile.isdir(pth):
tf.io.gfile.makedirs(pth)
mesh_name = "pointcloud.obj"
with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout:
pointcloud = data["vert"].reshape([-1, 3])
for v in pointcloud:
fout.write("v {0} {1} {2}\n".format(*v.tolist()))
def amass_name_helper(name):
name, frame = name.split("-")
subject = name[:5]
motion = name[6:]
return subject, motion, frame
def make_summary_feed_dict(
iou_hook,
iou,
best_hook,
best_iou,
):
feed_dict = {}
feed_dict[iou_hook] = iou
feed_dict[best_hook] = best_iou
return feed_dict
def parse_global_step(ckpt):
basename = path.basename(ckpt)
return int(basename.split("-")[-1])
def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ,
point, label, hparams):
"""Compute IoU."""
iou = 0.
eps = 1e-9
latent_val = sess.run(latent, feed_dict)
n_points = point.shape[2]
preds = []
for start in range(0, n_points, 100000):
feed_dict[point_holder] = point[:, :, start:start + 100000]
feed_dict[latent_holder] = latent_val
pred = sess.run(occ, feed_dict)
preds.append(pred)
pred = np.concatenate(preds, axis=2)
pred = (pred >= hparams.level_set).astype(np.float32)
label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1)
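  # IoU = |intersection| / |union| of the binarized prediction and label,
  # with eps guarding against an empty union.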
iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps)
return iou
def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans,
joints, hparams):
"""Compute the prior term as a glue loss."""
n_dims = hparams.n_dims
# Invert the transformation
r_inv = inv_transforms[..., :n_dims, :n_dims]
t_inv = inv_transforms[..., :n_dims, -1:]
r = tf.transpose(r_inv, [0, 2, 1])
t = -tf.matmul(r, t_inv)
transforms = tf.concat(
[tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2)
transforms = tf.matmul(transforms, inv_first_frame_trans)
# Compute transformations of father joints and apply it to vectors from frame0
father_transforms = tf.reduce_sum(
tf.expand_dims(transforms, axis=1) *
connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]),
axis=0)
end_pts_homo = tf.expand_dims(
tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1)
end_pts_transformed = tf.matmul(father_transforms, end_pts_homo)
end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims]
# Compute vectors in current configuration
pred_links = tf.reshape(joints, [hparams.n_parts, n_dims])
# Compute distance between links and transformed vectors
return tf.reduce_sum(tf.square(pred_links - end_pts_transformed))
def vanilla_theta_gradient(model_fn, batch_holder, hparams):
"""A vanilla gradient estimator for the pose, theta."""
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh")
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
def reparam_theta_gradient(model_fn, batch_holder, hparams):
"""A gradient estimaor for the pose, theta, using the reparam trick."""
sigma = hparams.bandwidth
n_samples = hparams.n_noisy_samples
latent_holder, latent, occ_eval = model_fn(batch_holder, None, None,
"gen_mesh")
if hparams.sample_vert > 0:
points = batch_holder["point"]
weights = batch_holder["weight"]
n_vert = tf.shape(points)[2]
sample_indices = tf.random.uniform([1, 1, hparams.sample_vert],
minval=0,
maxval=n_vert,
dtype=tf.int32)
points = tf.gather(points, sample_indices, axis=2, batch_dims=2)
weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2)
batch_holder["point"] = points
batch_holder["weight"] = weights
dist = tfd.Normal(loc=0., scale=sigma)
n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert
noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims))
unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None,
"gen_mesh")
occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])
occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3)
return latent_holder, latent, occ_eval, tf.reduce_mean(
tf.square(occ - hparams.level_set))
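# Illustrative sketch (hypothetical helper): the estimator above averages the
# occupancy over n_noisy_samples Gaussian perturbations of the query points,
# i.e. a Monte Carlo estimate of E_eps[f(x + eps)] with eps ~ N(0, sigma):
def _noise_averaged_eval_np(f, x, sigma=0.1, n_samples=8, seed=0):
  """Monte Carlo smoothing of f around x with Gaussian noise (numpy sketch)."""
  rng = np.random.RandomState(seed)
  noises = rng.normal(scale=sigma, size=(n_samples,) + np.shape(x))
  return np.mean([f(x + eps) for eps in noises], axis=0)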
def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss,
sess, k, hparams):
"""Optimize the pose, theta, during tracking."""
sess.run(reset_op)
loss_val = 0
glue_val = 0
with trange(hparams.max_steps_per_frame) as t:
for unused_i in t:
loss_val, unused_var, rec_val, glue_val = sess.run(
[loss, train_op, rec_loss, glue_loss], feed_dict)
t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(
k, rec_val, glue_val))
return loss_val, glue_val
|
[
"numpy.sum",
"numpy.maximum",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.disable_eager_execution",
"os.path.join",
"numpy.pad",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.io.gfile.makedirs",
"numpy.stack",
"trimesh.Trimesh",
"os.path.basename",
"tqdm.trange",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.reshape",
"numpy.concatenate",
"tensorflow.compat.v1.io.gfile.isdir",
"tensorflow_graphics.projects.nasa.lib.datasets.dataset_dict.keys",
"tensorflow_graphics.projects.nasa.lib.models.model_dict.keys",
"tensorflow.compat.v1.concat",
"numpy.expand_dims",
"tensorflow.compat.v1.random.uniform",
"tensorflow.compat.v1.ones_like",
"tensorflow_graphics.projects.cvxnet.lib.libmise.mise.MISE",
"numpy.array"
] |
[((980, 1008), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (1006, 1008), True, 'import tensorflow.compat.v1 as tf\n'), ((4902, 4929), 'tensorflow_graphics.projects.cvxnet.lib.libmise.mise.MISE', 'mise.MISE', (['(32)', '(3)', 'level_set'], {}), '(32, 3, level_set)\n', (4911, 4929), False, 'from tensorflow_graphics.projects.cvxnet.lib.libmise import mise\n'), ((7066, 7106), 'os.path.join', 'path.join', (['hparams.train_dir', 'pth', 'frame'], {}), '(hparams.train_dir, pth, frame)\n', (7075, 7106), False, 'from os import path\n'), ((7858, 7898), 'os.path.join', 'path.join', (['hparams.train_dir', 'pth', 'frame'], {}), '(hparams.train_dir, pth, frame)\n', (7867, 7898), False, 'from os import path\n'), ((8551, 8570), 'os.path.basename', 'path.basename', (['ckpt'], {}), '(ckpt)\n', (8564, 8570), False, 'from os import path\n'), ((9072, 9101), 'numpy.concatenate', 'np.concatenate', (['preds'], {'axis': '(2)'}), '(preds, axis=2)\n', (9086, 9101), True, 'import numpy as np\n'), ((9642, 9672), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['r_inv', '[0, 2, 1]'], {}), '(r_inv, [0, 2, 1])\n', (9654, 9672), True, 'import tensorflow.compat.v1 as tf\n'), ((9815, 9859), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['transforms', 'inv_first_frame_trans'], {}), '(transforms, inv_first_frame_trans)\n', (9824, 9859), True, 'import tensorflow.compat.v1 as tf\n'), ((10236, 10278), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['father_transforms', 'end_pts_homo'], {}), '(father_transforms, end_pts_homo)\n', (10245, 10278), True, 'import tensorflow.compat.v1 as tf\n'), ((10419, 10464), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['joints', '[hparams.n_parts, n_dims]'], {}), '(joints, [hparams.n_parts, n_dims])\n', (10429, 10464), True, 'import tensorflow.compat.v1 as tf\n'), ((12832, 12891), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['occ', '[1, hparams.n_parts + 1, -1, n_samples, 1]'], {}), '(occ, [1, hparams.n_parts + 1, -1, n_samples, 1])\n', (12842, 12891), True, 'import tensorflow.compat.v1 as tf\n'), ((12901, 12949), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['occ[:, hparams.n_parts:]'], {'axis': '(3)'}), '(occ[:, hparams.n_parts:], axis=3)\n', (12915, 12949), True, 'import tensorflow.compat.v1 as tf\n'), ((5787, 5817), 'numpy.concatenate', 'np.concatenate', (['values'], {'axis': '(1)'}), '(values, axis=1)\n', (5801, 5817), True, 'import numpy as np\n'), ((6015, 6076), 'numpy.pad', 'np.pad', (['value_grid', '(1)', '"""constant"""'], {'constant_values': '(-1000000.0)'}), "(value_grid, 1, 'constant', constant_values=-1000000.0)\n", (6021, 6076), True, 'import numpy as np\n'), ((6241, 6349), 'numpy.array', 'np.array', (['[value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.shape[2] - 3]'], {'dtype': 'np.float32'}), '([value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.\n shape[2] - 3], dtype=np.float32)\n', (6249, 6349), True, 'import numpy as np\n'), ((6476, 6540), 'numpy.stack', 'np.stack', (['[faces[..., 1], faces[..., 0], faces[..., 2]]'], {'axis': '(-1)'}), '([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)\n', (6484, 6540), True, 'import numpy as np\n'), ((6552, 6596), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'verts', 'faces': 'faces'}), '(vertices=verts, faces=faces)\n', (6567, 6596), False, 'import trimesh\n'), ((7116, 7138), 'tensorflow.compat.v1.io.gfile.isdir', 'tf.io.gfile.isdir', (['pth'], {}), '(pth)\n', (7133, 7138), True, 'import tensorflow.compat.v1 as tf\n'), ((7144, 7169), 
'tensorflow.compat.v1.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['pth'], {}), '(pth)\n', (7164, 7169), True, 'import tensorflow.compat.v1 as tf\n'), ((7908, 7930), 'tensorflow.compat.v1.io.gfile.isdir', 'tf.io.gfile.isdir', (['pth'], {}), '(pth)\n', (7925, 7930), True, 'import tensorflow.compat.v1 as tf\n'), ((7936, 7961), 'tensorflow.compat.v1.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['pth'], {}), '(pth)\n', (7956, 7961), True, 'import tensorflow.compat.v1 as tf\n'), ((9234, 9254), 'numpy.sum', 'np.sum', (['(pred * label)'], {}), '(pred * label)\n', (9240, 9254), True, 'import numpy as np\n'), ((9680, 9699), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['r', 't_inv'], {}), '(r, t_inv)\n', (9689, 9699), True, 'import tensorflow.compat.v1 as tf\n'), ((10303, 10343), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['end_pts_transformed'], {'axis': '(-1)'}), '(end_pts_transformed, axis=-1)\n', (10313, 10343), True, 'import tensorflow.compat.v1 as tf\n'), ((10548, 10591), 'tensorflow.compat.v1.square', 'tf.square', (['(pred_links - end_pts_transformed)'], {}), '(pred_links - end_pts_transformed)\n', (10557, 10591), True, 'import tensorflow.compat.v1 as tf\n'), ((10998, 11089), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['[1, 1, hparams.sample_vert]'], {'minval': '(0)', 'maxval': 'n_vert', 'dtype': 'tf.int32'}), '([1, 1, hparams.sample_vert], minval=0, maxval=n_vert,\n dtype=tf.int32)\n', (11015, 11089), True, 'import tensorflow.compat.v1 as tf\n'), ((11216, 11271), 'tensorflow.compat.v1.gather', 'tf.gather', (['points', 'sample_indices'], {'axis': '(2)', 'batch_dims': '(2)'}), '(points, sample_indices, axis=2, batch_dims=2)\n', (11225, 11271), True, 'import tensorflow.compat.v1 as tf\n'), ((11286, 11342), 'tensorflow.compat.v1.gather', 'tf.gather', (['weights', 'sample_indices'], {'axis': '(2)', 'batch_dims': '(2)'}), '(weights, sample_indices, axis=2, batch_dims=2)\n', (11295, 11342), True, 'import tensorflow.compat.v1 as tf\n'), ((12084, 12175), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['[1, 1, hparams.sample_vert]'], {'minval': '(0)', 'maxval': 'n_vert', 'dtype': 'tf.int32'}), '([1, 1, hparams.sample_vert], minval=0, maxval=n_vert,\n dtype=tf.int32)\n', (12101, 12175), True, 'import tensorflow.compat.v1 as tf\n'), ((12302, 12357), 'tensorflow.compat.v1.gather', 'tf.gather', (['points', 'sample_indices'], {'axis': '(2)', 'batch_dims': '(2)'}), '(points, sample_indices, axis=2, batch_dims=2)\n', (12311, 12357), True, 'import tensorflow.compat.v1 as tf\n'), ((12372, 12428), 'tensorflow.compat.v1.gather', 'tf.gather', (['weights', 'sample_indices'], {'axis': '(2)', 'batch_dims': '(2)'}), '(weights, sample_indices, axis=2, batch_dims=2)\n', (12381, 12428), True, 'import tensorflow.compat.v1 as tf\n'), ((13276, 13311), 'tqdm.trange', 'trange', (['hparams.max_steps_per_frame'], {}), '(hparams.max_steps_per_frame)\n', (13282, 13311), False, 'from tqdm import trange\n'), ((5652, 5699), 'numpy.expand_dims', 'np.expand_dims', (['points[:, i:i + 100000]'], {'axis': '(1)'}), '(points[:, i:i + 100000], axis=1)\n', (5666, 5699), True, 'import numpy as np\n'), ((8019, 8044), 'os.path.join', 'path.join', (['pth', 'mesh_name'], {}), '(pth, mesh_name)\n', (8028, 8044), False, 'from os import path\n'), ((9733, 9759), 'tensorflow.compat.v1.concat', 'tf.concat', (['[r, t]'], {'axis': '(-1)'}), '([r, t], axis=-1)\n', (9742, 9759), True, 'import tensorflow.compat.v1 as tf\n'), ((9985, 10019), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['transforms'], {'axis': 
'(1)'}), '(transforms, axis=1)\n', (9999, 10019), True, 'import tensorflow.compat.v1 as tf\n'), ((10957, 10973), 'tensorflow.compat.v1.shape', 'tf.shape', (['points'], {}), '(points)\n', (10965, 10973), True, 'import tensorflow.compat.v1 as tf\n'), ((11560, 11594), 'tensorflow.compat.v1.square', 'tf.square', (['(occ - hparams.level_set)'], {}), '(occ - hparams.level_set)\n', (11569, 11594), True, 'import tensorflow.compat.v1 as tf\n'), ((12043, 12059), 'tensorflow.compat.v1.shape', 'tf.shape', (['points'], {}), '(points)\n', (12051, 12059), True, 'import tensorflow.compat.v1 as tf\n'), ((13014, 13048), 'tensorflow.compat.v1.square', 'tf.square', (['(occ - hparams.level_set)'], {}), '(occ - hparams.level_set)\n', (13023, 13048), True, 'import tensorflow.compat.v1 as tf\n'), ((9275, 9298), 'numpy.maximum', 'np.maximum', (['pred', 'label'], {}), '(pred, label)\n', (9285, 9298), True, 'import numpy as np\n'), ((10160, 10190), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['end_pts[..., :1]'], {}), '(end_pts[..., :1])\n', (10172, 10190), True, 'import tensorflow.compat.v1 as tf\n'), ((1213, 1241), 'tensorflow_graphics.projects.nasa.lib.datasets.dataset_dict.keys', 'datasets.dataset_dict.keys', ([], {}), '()\n', (1239, 1241), False, 'from tensorflow_graphics.projects.nasa.lib import datasets\n'), ((1828, 1852), 'tensorflow_graphics.projects.nasa.lib.models.model_dict.keys', 'models.model_dict.keys', ([], {}), '()\n', (1850, 1852), False, 'from tensorflow_graphics.projects.nasa.lib import models\n'), ((5287, 5317), 'numpy.expand_dims', 'np.expand_dims', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (5301, 5317), True, 'import numpy as np\n'), ((7520, 7545), 'os.path.join', 'path.join', (['pth', 'mesh_name'], {}), '(pth, mesh_name)\n', (7529, 7545), False, 'from os import path\n')]
|
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.datasets import mnist
x_train = None
y_train = None
x_test = None
y_test = None
def init():
global x_train, y_train, x_test, y_test
(x_train_tmp, y_train_tmp), (x_test_tmp, y_test_tmp) = mnist.load_data()
x_train = x_train_tmp.reshape(-1, 784)
x_test = x_test_tmp.reshape(-1, 784)
train_size = x_train.shape[0]
test_size = x_test.shape[0]
y_train = np.zeros((train_size, 10))
for i in range(train_size):
y_train[i][y_train_tmp[i]] = 1
y_test = np.zeros((test_size, 10))
for i in range(test_size):
y_test[i][y_test_tmp[i]] = 1
pass
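    # note (illustrative alternative): in Keras 2.x the two loops above are
    # equivalent to keras.utils.to_categorical(y_train_tmp, 10) and
    # keras.utils.to_categorical(y_test_tmp, 10)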
if __name__ == '__main__':
import time
init()
model = Sequential()
model.add(Dense(units=1000, activation='sigmoid', input_dim=784))
model.add(Dense(units=500, activation='sigmoid'))
model.add(Dense(units=10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
start_time = time.time()
model.fit(x_train, y_train, epochs=10, batch_size=1000)
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=1000)
print(loss_and_metrics)
print('Total Time: ', (time.time() - start_time))
|
[
"keras.datasets.mnist.load_data",
"numpy.zeros",
"time.time",
"keras.layers.Dense",
"keras.models.Sequential"
] |
[((308, 325), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (323, 325), False, 'from keras.datasets import mnist\n'), ((490, 516), 'numpy.zeros', 'np.zeros', (['(train_size, 10)'], {}), '((train_size, 10))\n', (498, 516), True, 'import numpy as np\n'), ((601, 626), 'numpy.zeros', 'np.zeros', (['(test_size, 10)'], {}), '((test_size, 10))\n', (609, 626), True, 'import numpy as np\n'), ((773, 785), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (783, 785), False, 'from keras.models import Sequential\n'), ((1073, 1084), 'time.time', 'time.time', ([], {}), '()\n', (1082, 1084), False, 'import time\n'), ((800, 854), 'keras.layers.Dense', 'Dense', ([], {'units': '(1000)', 'activation': '"""sigmoid"""', 'input_dim': '(784)'}), "(units=1000, activation='sigmoid', input_dim=784)\n", (805, 854), False, 'from keras.layers import Dense\n'), ((870, 908), 'keras.layers.Dense', 'Dense', ([], {'units': '(500)', 'activation': '"""sigmoid"""'}), "(units=500, activation='sigmoid')\n", (875, 908), False, 'from keras.layers import Dense\n'), ((924, 961), 'keras.layers.Dense', 'Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (929, 961), False, 'from keras.layers import Dense\n'), ((1271, 1282), 'time.time', 'time.time', ([], {}), '()\n', (1280, 1282), False, 'import time\n')]
|
from __future__ import print_function
import sys, h5py as h5, numpy as np, yt, csv
from time import time, sleep
from PreFRBLE.file_system import *
from PreFRBLE.parameter import *
from time import time
def TimeElapsed( func, *args, **kwargs ):
""" measure time taken to compute function """
def MeasureTime():
t0 = time()
res = func( *args, **kwargs)
print( "{} took {} s".format( func.__name__, time()-t0 ) )
return res
return MeasureTime()
from time import sleep
## wrapper to write hdf5 files consistently
def Write2h5( filename='', datas=[], keys=[] ):
""" conveniently write datas to keys in filename. overwrite existing entries """
if type(keys) is str:
sys.exit( 'Write2h5 needs list of datas and keys' )
### small workaround to allow for parallel computation. Use with caution, might corrupt nodes in your h5 file. in that case, visit:
### https://stackoverflow.com/questions/47979751/recover-data-from-corrupted-file/61147632?noredirect=1#comment108190378_61147632
tries = 0
    while tries < 30:
        try:
            with h5.File( filename, 'a' ) as f:
                for data, key in zip( datas, keys ):
                    try:
                        f[key][()]
                        f.__delitem__( key )
                    except:
                        pass
                    f.create_dataset( key, data=data )
            break
        except:
            sleep(3e-2)
            tries += 1
else:
print( "couldn't write ", keys )
sys.exit(1)
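## example usage (illustrative only, hypothetical file and keys):
## Write2h5( filename='results.h5', datas=[np.zeros(3), np.ones(3)], keys=['grp/a', 'grp/b'] )
## existing entries under the same keys are overwritten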
## Read FRBcat
#FRB_dtype = [('ID','S'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','S'), ('tele','S')]
#FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','U10'),('tau','U10'),('host_redshift','U4'), ('tele','U10')]
FRB_dtype = [('ID','U9'),('DM','f'),('DM_gal','f'), ('RM','f'),('tau','f'),('host_redshift','f'), ('tele','U10')]
def GetFRBcat( telescopes=None, RM=None, tau=None, print_number=False ):
"""
read all FRBs in FRBcat, downloaded to frbcat_file
Parameters
----------
telescopes : list
list of considered telescopes, FRBs of other telescopes are ignored
RM : boolean
if True, only return FRBs observed with RM
tau : boolean
        if True, only return FRBs observed with temporal broadening
print_number : boolean
if True, print number of extractet FRBs
Returns
-------
FRBs : array
structured numpy.array containing values listed in FRBcat
"""
### read all FRBs from FRBcat
### optional: read only those FRBs observed by telescope with RM and tau
### print_number:True print number of extracted FRBs
FRBs = []
with open( frbcat_file, 'r') as f:
reader = csv.reader( f )
header = np.array(next(reader))
# header = np.array(reader.next())
i_ID = 0
i_DM = np.where( header == 'rmp_dm' )[0][0]
i_DM_gal = np.where( header == 'rop_mw_dm_limit' )[0][0]
i_RM = np.where( header == 'rmp_rm' )[0][0]
i_tau = np.where( header == 'rmp_scattering' )[0][0]
i_zs = np.where( header == 'rmp_redshift_host' )[0][0]
i_tele = np.where( header == 'telescope' )[0][0]
i_s = [i_ID, i_DM, i_DM_gal, i_RM, i_tau, i_zs, i_tele] ## order must fit order of FRB_dtype
for row in reader:
if telescopes and ( row[i_tele] not in [telescopes_FRBcat[tele] for tele in telescopes] ) :
continue
if tau and ( row[i_tau] == 'null' ) :
continue
if RM and ( row[i_RM] == 'null' ) :
continue
FRBs.append( tuple( [ decode(row[i].split('&')[0], dtype) for i, dtype in zip( i_s, np.array(FRB_dtype)[:,1] ) ] ) )
return np.array( FRBs, dtype=FRB_dtype )
def decode( string, dtype='U' ):
""" short wrapper to decode byte-strings read from FRBcat """
if 'f' in dtype:
if 'null' in string:
return float('NaN')
return float(string)
return string
def GetFRBsMeasures( measure='DM', FRBs=None ):
""" returns measures of FRBs in FRBcat read with GetFRBcat() """
if measure == 'DM':
return FRBs['DM']-FRBs['DM_gal']
elif measure == 'RM':
return FRBs['RM']
## flocker to keep parallel processes from writing to same file simultaneously
## provided by derpston, https://github.com/derpston/python-simpleflock/blob/master/src/simpleflock.py#L14
import os, fcntl, errno
class SimpleFlock:
"""Provides the simplest possible interface to flock-based file locking. Intended for use with the `with` syntax. It will create/truncate/delete the lock file as necessary."""
def __init__(self, path, timeout = None):
self._path = path
self._timeout = timeout
self._fd = None
def __enter__(self):
self._fd = os.open(self._path, os.O_CREAT)
start_lock_search = time()
while True:
try:
fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# Lock acquired!
return
except (OSError, IOError) as ex:
if ex.errno != errno.EAGAIN: # Resource temporarily unavailable
raise
elif self._timeout is not None and time() > (start_lock_search + self._timeout):
# Exceeded the user-specified timeout.
print( "timeout exceeded" )
raise
# TODO It would be nice to avoid an arbitrary sleep here, but spinning
# without a delay is also undesirable.
sleep(0.1)
def __exit__(self, *args):
fcntl.flock(self._fd, fcntl.LOCK_UN)
os.close(self._fd)
self._fd = None
# Try to remove the lock file, but don't try too hard because it is
# unnecessary. This is mostly to help the user see whether a lock
# exists by examining the filesystem.
try:
os.unlink(self._path)
except:
pass
''' USAGE
with SimpleFlock("locktest", 2): ## "locktest" is a temporary file that tells whether the lock is active
## perform action on the locked file(s)
## file is locked when with starts until its left
## if file is locked, code is paused until lock is released, then with is performed
'''
def first(iterable, condition = lambda x: True):
"""
Returns the first item in the `iterable` that satisfies the `condition`.
If the condition is not given, returns the first item of the iterable.
    Returns -1 if no item satisfying the condition is found.
>>> first( (1,2,3), condition=lambda x: x % 2 == 0)
2
>>> first(range(3, 100))
3
>>> first( (1,2,3), condition=lambda x: x > 9)
-1
THANKS TO Caridorc
https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition
"""
try:
return next(x for x in iterable if condition(x))
except:
return -1
## wrapper to show time needed for some function
'''
def HowLong( f, *args, print_additional='', **kwargs ):
""" wrapper to print the time needed to call function f """
t0 = time()
ret = f( *args, **kwargs )
t = time() - t0
print( "Running %s took %i minutes and %.1f seconds %s" % (f.__name__, t//60, t%60, print_additional ) )
return ret
'''
|
[
"os.open",
"h5py.File",
"csv.reader",
"os.unlink",
"fcntl.flock",
"time.sleep",
"time.time",
"numpy.where",
"numpy.array",
"os.close",
"sys.exit"
] |
[((3834, 3865), 'numpy.array', 'np.array', (['FRBs'], {'dtype': 'FRB_dtype'}), '(FRBs, dtype=FRB_dtype)\n', (3842, 3865), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((333, 339), 'time.time', 'time', ([], {}), '()\n', (337, 339), False, 'from time import time\n'), ((735, 784), 'sys.exit', 'sys.exit', (['"""Write2h5 needs list of datas and keys"""'], {}), "('Write2h5 needs list of datas and keys')\n", (743, 784), False, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((1473, 1484), 'time.sleep', 'sleep', (['(0.03)'], {}), '(0.03)\n', (1478, 1484), False, 'from time import sleep\n'), ((1585, 1596), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1593, 1596), False, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((2814, 2827), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2824, 2827), False, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((4911, 4942), 'os.open', 'os.open', (['self._path', 'os.O_CREAT'], {}), '(self._path, os.O_CREAT)\n', (4918, 4942), False, 'import os, fcntl, errno\n'), ((4969, 4975), 'time.time', 'time', ([], {}), '()\n', (4973, 4975), False, 'from time import time\n'), ((5667, 5703), 'fcntl.flock', 'fcntl.flock', (['self._fd', 'fcntl.LOCK_UN'], {}), '(self._fd, fcntl.LOCK_UN)\n', (5678, 5703), False, 'import os, fcntl, errno\n'), ((5710, 5728), 'os.close', 'os.close', (['self._fd'], {}), '(self._fd)\n', (5718, 5728), False, 'import os, fcntl, errno\n'), ((1124, 1146), 'h5py.File', 'h5.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (1131, 1146), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((5619, 5629), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (5624, 5629), False, 'from time import sleep\n'), ((5962, 5983), 'os.unlink', 'os.unlink', (['self._path'], {}), '(self._path)\n', (5971, 5983), False, 'import os, fcntl, errno\n'), ((2953, 2981), 'numpy.where', 'np.where', (["(header == 'rmp_dm')"], {}), "(header == 'rmp_dm')\n", (2961, 2981), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((3009, 3046), 'numpy.where', 'np.where', (["(header == 'rop_mw_dm_limit')"], {}), "(header == 'rop_mw_dm_limit')\n", (3017, 3046), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((3070, 3098), 'numpy.where', 'np.where', (["(header == 'rmp_rm')"], {}), "(header == 'rmp_rm')\n", (3078, 3098), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((3123, 3159), 'numpy.where', 'np.where', (["(header == 'rmp_scattering')"], {}), "(header == 'rmp_scattering')\n", (3131, 3159), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((3183, 3222), 'numpy.where', 'np.where', (["(header == 'rmp_redshift_host')"], {}), "(header == 'rmp_redshift_host')\n", (3191, 3222), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((3248, 3279), 'numpy.where', 'np.where', (["(header == 'telescope')"], {}), "(header == 'telescope')\n", (3256, 3279), True, 'import sys, h5py as h5, numpy as np, yt, csv\n'), ((5020, 5072), 'fcntl.flock', 'fcntl.flock', (['self._fd', '(fcntl.LOCK_EX | fcntl.LOCK_NB)'], {}), '(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n', (5031, 5072), False, 'import os, fcntl, errno\n'), ((430, 436), 'time.time', 'time', ([], {}), '()\n', (434, 436), False, 'from time import time\n'), ((5307, 5313), 'time.time', 'time', ([], {}), '()\n', (5311, 5313), False, 'from time import time\n'), ((3790, 3809), 'numpy.array', 'np.array', (['FRB_dtype'], {}), '(FRB_dtype)\n', (3798, 3809), True, 'import sys, h5py as h5, numpy as np, yt, csv\n')]
|
#!/usr/bin/env python
"""SequenceMotifDecomposer is a motif finder algorithm.
@author: <NAME>
@email: <EMAIL>
"""
import logging
import multiprocessing as mp
import os
from collections import defaultdict
from eden import apply_async
import numpy as np
from scipy.sparse import vstack
from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence
from itertools import izip
import time
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import SGDClassifier
from sklearn.cluster import MiniBatchKMeans
from eden.sequence import Vectorizer
from StringIO import StringIO
from Bio import SeqIO
from Bio.Align.Applications import MuscleCommandline
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from corebio.seq import Alphabet, SeqList
import weblogolib as wbl
from scipy.cluster.hierarchy import linkage
import regex as re
from collections import Counter
from sklearn import metrics
from eden.util.NeedlemanWunsh import edit_distance
import random
import pylab as plt
import joblib
from scipy.optimize import curve_fit
import multiprocessing
logger = logging.getLogger(__name__)
def sigmoid(x, a, b):
"""sigmoid."""
return 1 / (1 + np.exp(-(x - a) / b))
class PValueEvaluator(object):
"""Fit a parametrized sigmoid on the empirical cumulative distribution."""
def __init__(self, random_state=1):
"""Constructor."""
self.random_state = random_state
self.a = -4
self.b = 1
def ecdf(self, x):
"""Empirical cumulative distribution function."""
xs = np.sort(x)
ys = np.arange(1, len(xs) + 1) / float(len(xs))
return xs, ys
def fit(self, scores):
"""fit."""
if scores:
xs, ys = self.ecdf(scores)
popt, pcov = curve_fit(sigmoid, xs, ys)
self.a, self.b = popt
else:
logger.debug('Warning: reverting to default values')
logger.debug('ECDF fit on %d values' % (len(scores)))
logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b))
def predict(self, value):
"""pvalue."""
y = sigmoid(value, self.a, self.b)
p_val = 1 - y
return p_val
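# minimal usage sketch (hypothetical names and values): fit the sigmoid on an
# observed list of scores, then convert a new score into a p-value
def _pvalue_evaluator_example(scores, value):
    """Illustrative only: returns the p-value of `value` given sample `scores`."""
    ev = PValueEvaluator()
    ev.fit(list(scores))
    return ev.predict(value)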
def ecdf(x):
"""Empirical cumulative distribution function."""
xs = np.sort(x)
ys = np.arange(1, len(xs) + 1) / float(len(xs))
return xs, ys
def letter_regex(k, size, regex_th=0.3):
"""letter_regex."""
code = []
for letter, count in k:
if count / float(size) > regex_th:
if letter != '-':
code.append(letter)
if len(code) == 0:
code_str = None
elif len(code) == 1:
code_str = code[0]
else:
code_str = '(' + '|'.join(code) + ')'
return code_str
def consensus_regex(trimmed_align_seqs, regex_th):
"""consensus_regex."""
cluster = []
for h, align_seq in trimmed_align_seqs:
str_list = [c for c in align_seq]
concat_str = np.array(str_list, dtype=np.dtype('a'))
cluster.append(concat_str)
cluster = np.vstack(cluster)
size = len(trimmed_align_seqs)
code = ''
for i, row in enumerate(cluster.T):
c = Counter(row)
k = c.most_common()
l = letter_regex(k, size, regex_th=regex_th)
if l:
code += l
return code
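# illustrative example (hypothetical input): per column, letters whose frequency
# exceeds regex_th are kept and multiple survivors are joined into an alternation
def _consensus_regex_example():
    """Illustrative only: expected to return 'ACG(U|A)'."""
    seqs = [('h1', 'ACGU'), ('h2', 'ACGA'), ('h3', 'ACGU')]
    return consensus_regex(seqs, regex_th=0.3)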
def find_occurrences(needle, haystack):
"""find_occurrences."""
for h, s in haystack:
matches = re.findall(needle, s, overlapped=True)
if len(matches):
yield 1
else:
yield 0
def occurrences(needle, haystack):
"""occurrences."""
counts = sum(find_occurrences(needle, haystack))
size = len(haystack)
return counts, float(counts) / size
def extract_consensus(seqs, motives, regex_th):
"""extract_consensus."""
for id in motives:
c_regex = consensus_regex(motives[id]['trimmed_align_seqs'], regex_th)
counts, freq = occurrences(c_regex, seqs)
yield freq, id, c_regex, counts, motives[id]['consensus_seq']
def plot_location(needle, haystack,
cluster_id=None, nbins=20, size=(17, 2), fname=None):
"""plot_location."""
locs = []
for h, s in haystack:
for match in re.finditer(needle, s):
s = match.start()
e = match.end()
m = s + (e - s) / 2
locs.append(m)
plt.figure(figsize=size)
n, bins, patches = plt.hist(
locs, nbins, normed=0, facecolor='blue', alpha=0.3)
plt.grid()
plt.title(needle)
plt.xlabel('Position')
plt.ylabel('Num occurrences')
if fname:
plt.draw()
figname = '%s_loc_%d.png' % (fname, cluster_id)
plt.savefig(
figname, bbox_inches='tight', transparent=True, pad_inches=0)
else:
figname = None
plt.show()
plt.close()
return figname
def extract_location(needle, haystack):
"""extract_location."""
locs = []
for h, s in haystack:
for match in re.finditer(needle, s):
s = match.start()
e = match.end()
m = s + (e - s) / 2
locs.append(m)
if locs:
avg_loc = np.percentile(locs, 50)
std_loc = np.percentile(locs, 70) - np.percentile(locs, 30)
else:
avg_loc = -1
std_loc = 0
return avg_loc, std_loc
def hits(motives, ids=None):
"""hits."""
for i in ids:
for h, s in motives[i]['seqs']:
tokens = h.split('<loc>')
seq_id = tokens[0]
begin, end = tokens[1].split(':')
yield (seq_id, int(begin), int(end), i)
def compute_cooccurence(motives, ids=None):
"""compute_cooccurence."""
if ids is None:
ids = [id for id in motives]
seqs_summary = defaultdict(list)
for seq_id, begin, end, i in hits(motives, ids=ids):
seqs_summary[seq_id].append((begin, end, i))
distances = defaultdict(list)
size = max(id for id in motives) + 1
cooccurence_mtx = np.zeros((size, size))
for seq_id in sorted(seqs_summary):
cluster_ids = [cluster_id
for begin, end, cluster_id in seqs_summary[seq_id]]
centers = defaultdict(list)
for begin, end, cluster_id in seqs_summary[seq_id]:
centers[cluster_id].append(begin + (end - begin) / 2)
cluster_ids = set(cluster_ids)
for i in cluster_ids:
for j in cluster_ids:
cooccurence_mtx[i, j] += 1
if i != j:
# find closest instance j from any instance in i
d_ij = []
for c_i in centers[i]:
for c_j in centers[j]:
d_ij.append(abs(c_i - c_j))
selected_abs = min(d_ij)
for c_i in centers[i]:
for c_j in centers[j]:
if selected_abs == abs(c_i - c_j):
selected = c_i - c_j
distances[(i, j)].append(selected)
cooccurence_mtx = np.nan_to_num(cooccurence_mtx)
orig_cooccurence_mtx = cooccurence_mtx.copy()
cooccurence_list = []
for i, row in enumerate(cooccurence_mtx):
norm = row[i]
if norm != 0:
row /= norm
else:
row = np.zeros(row.shape)
row[i] = 0
cooccurence_list.append(row)
norm_cooccurence_mtx = np.vstack(cooccurence_list)
return orig_cooccurence_mtx, norm_cooccurence_mtx, distances
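# note on the normalization above: cooccurence_mtx[i, j] counts sequences hit by
# both motif i and motif j, so after dividing row i by its diagonal entry the
# value at (i, j) is the fraction of motif-i sequences that also contain motif j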
def plot_distance(cluster_id_i,
cluster_id_j,
regex_i,
regex_j,
distances,
nbins=5,
size=(6, 2),
fname=None):
"""plot_distance."""
ds = distances[(cluster_id_i, cluster_id_j)]
plt.figure(figsize=size)
n, bins, patches = plt.hist(
ds, nbins, normed=0, facecolor='green', alpha=0.3)
plt.grid()
plt.title('%s vs %s' % (regex_i, regex_j))
plt.xlabel('Relative position')
plt.ylabel('Num occurrences')
if fname:
plt.draw()
figname = '%s_dist_%d_vs_%d.png' % (fname, cluster_id_i, cluster_id_j)
plt.savefig(
figname, bbox_inches='tight', transparent=True, pad_inches=0)
else:
figname = None
plt.show()
plt.close()
return figname
def mean_shift_decomposition(sig, half_windw_size=5):
"""mean_shift_decomposition."""
sig_len = len(sig)
for i in range(half_windw_size, sig_len - half_windw_size):
min_sig = np.min(sig[i - half_windw_size:i + half_windw_size])
if min_sig == sig[i]:
yield i
def box_decomposition(sig, half_windw_size=5):
"""box_decomposition."""
ids = list(mean_shift_decomposition(sig, half_windw_size))
for i in range(len(ids) - 1):
start = ids[i]
end = ids[i + 1]
width = end - start
val = sum(sig[start:end])
yield val, start, end, width
def cumulative_score(seqs, smod):
"""cumulative_score."""
    median_len = int(np.median([len(s) for h, s in seqs]))
sigs = None
for scores in smod.score(seqs):
sig = np.array(scores)
if len(sig) != median_len:
logger.debug('Length mismatch: %d != %d' % (len(sig), median_len))
if sigs is None:
if len(sig) >= median_len:
sigs = sig[:median_len]
else:
if len(sig) >= median_len:
sigs = sigs + sig[:median_len]
sig = np.array(sigs) / float(len(seqs))
return sig
def trim_seqs(seqs, smod, half_windw_size=7):
"""trim_seqs."""
sig = cumulative_score(seqs, smod)
val, start, end, width = max(box_decomposition(sig, half_windw_size))
logger.debug('val:%.1f beg:%s end:%s width:%s' % (val, start, end, width))
for h, s in seqs:
if s[start:end]:
yield (h, s[start:end])
def plot_cumulative_score(smod,
seqs,
size=(6, 2),
fname=None):
"""plot_cumulative_score."""
sig = cumulative_score(seqs, smod)
plt.figure(figsize=size)
sigp = np.copy(sig)
sigp[sigp < 0] = 0
plt.bar(range(len(sigp)), sigp, alpha=0.3, color='g')
sign = np.copy(sig)
sign[sign >= 0] = 0
plt.bar(range(len(sign)), sign, alpha=0.3, color='r')
plt.grid()
plt.xlabel('Position')
plt.ylabel('Importance score')
if fname:
plt.draw()
figname = '%s_importance.png' % (fname)
plt.savefig(
figname, bbox_inches='tight', transparent=True, pad_inches=0)
else:
figname = None
plt.show()
plt.close()
return figname
# ------------------------------------------------------------------------------
def serial_pre_process(iterable, vectorizer=None):
"""serial_pre_process."""
data_matrix = vectorizer.transform(iterable)
return data_matrix
def chunks(iterable, n):
"""chunks."""
iterable = iter(iterable)
    while True:
        items = []
        for i in range(n):
            try:
                it = iterable.next()
            except StopIteration:
                # yield the final partial block instead of silently dropping it
                if items:
                    yield items
                return
            items.append(it)
        yield items
def multiprocess_vectorize(iterable,
vectorizer=None,
pos_block_size=100,
n_jobs=-1):
"""multiprocess_vectorize."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(iterable, pos_block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Vectorizing')
start_time = time.time()
matrices = []
for i, p in enumerate(results):
loc_start_time = time.time()
pos_data_matrix = p.get()
matrices += pos_data_matrix
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
size = pos_data_matrix.shape
logger.debug('%d %s (%.2f secs) (delta: %.2f)' %
(i, size, d_time, d_loc_time))
pool.close()
pool.join()
data_matrix = vstack(matrices)
return data_matrix
def multiprocess_fit(pos_iterable, neg_iterable,
vectorizer=None,
estimator=None,
pos_block_size=100,
neg_block_size=100,
n_jobs=-1):
"""multiprocess_fit."""
start_time = time.time()
classes = np.array([1, -1])
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
pos_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(pos_iterable, pos_block_size)]
neg_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(neg_iterable, neg_block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Fitting')
start_time = time.time()
for i, (p, n) in enumerate(izip(pos_results, neg_results)):
loc_start_time = time.time()
pos_data_matrix = p.get()
y = [1] * pos_data_matrix.shape[0]
neg_data_matrix = n.get()
y += [-1] * neg_data_matrix.shape[0]
y = np.array(y)
data_matrix = vstack([pos_data_matrix, neg_data_matrix])
estimator.partial_fit(data_matrix, y, classes=classes)
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
size = pos_data_matrix.shape
logger.debug('%d %s (%.2f secs) (delta: %.2f)' %
(i, size, d_time, d_loc_time))
pool.close()
pool.join()
return estimator
def multiprocess_performance(pos_iterable, neg_iterable,
vectorizer=None,
estimator=None,
pos_block_size=100,
neg_block_size=100,
n_jobs=-1):
"""multiprocess_performance."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
pos_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(pos_iterable, pos_block_size)]
neg_results = [apply_async(
pool, serial_pre_process,
args=(seqs, vectorizer))
for seqs in chunks(neg_iterable, neg_block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Performance evaluation')
start_time = time.time()
preds = []
binary_preds = []
true_targets = []
for i, (p, n) in enumerate(izip(pos_results, neg_results)):
loc_start_time = time.time()
pos_data_matrix = p.get()
y = [1] * pos_data_matrix.shape[0]
neg_data_matrix = n.get()
y += [-1] * neg_data_matrix.shape[0]
y = np.array(y)
true_targets.append(y)
data_matrix = vstack([pos_data_matrix, neg_data_matrix])
pred = estimator.decision_function(data_matrix)
preds.append(pred)
binary_pred = estimator.predict(data_matrix)
binary_preds.append(binary_pred)
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
size = pos_data_matrix.shape
logger.debug('%d %s (%.2f secs) (delta: %.2f)' %
(i, size, d_time, d_loc_time))
pool.close()
pool.join()
preds = np.hstack(preds)
binary_preds = np.hstack(binary_preds)
true_targets = np.hstack(true_targets)
return preds, binary_preds, true_targets
def serial_subarray(iterable,
vectorizer=None,
estimator=None,
min_subarray_size=5,
max_subarray_size=10):
"""serial_subarray."""
annotated_seqs = vectorizer.annotate(iterable, estimator=estimator)
subarrays_items = []
for (orig_header, orig_seq), (seq, score) in zip(iterable, annotated_seqs):
subarrays = compute_max_subarrays_sequence(
seq=seq, score=score,
min_subarray_size=min_subarray_size,
max_subarray_size=max_subarray_size,
margin=1,
output='all')
subseqs = []
for subarray in subarrays:
subseq_seq = subarray['subarray_string']
begin = subarray['begin']
end = subarray['end']
score = subarray['score']
header = orig_header
header += '<loc>%d:%d<loc>' % (begin, end)
header += '<score>%.4f<score>' % (score)
header += '<subseq>%s<subseq>' % (subseq_seq)
subseq = (header, seq)
subseqs.append(subseq)
subarrays_items += subseqs
return subarrays_items
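# each selected subarray is returned with its provenance packed into the header
# as '<loc>begin:end<loc><score>...<score><subseq>...<subseq>'; the
# _decompose_header method of SequenceMotifDecomposer unpacks these tags again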
def multiprocess_subarray(iterable,
vectorizer=None,
estimator=None,
min_subarray_size=5,
max_subarray_size=10,
block_size=100,
n_jobs=-1):
"""multiprocess_subarray."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
results = [apply_async(
pool, serial_subarray,
args=(seqs,
vectorizer,
estimator,
min_subarray_size,
max_subarray_size))
for seqs in chunks(iterable, block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Annotating')
start_time = time.time()
subarrays_items = []
for i, p in enumerate(results):
loc_start_time = time.time()
subarrays_item = p.get()
subarrays_items += subarrays_item
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
logger.debug('%d (%.2f secs) (delta: %.2f)' %
(i, d_time, d_loc_time))
pool.close()
pool.join()
return subarrays_items
def serial_score(iterable,
vectorizer=None,
estimator=None):
"""serial_score."""
annotated_seqs = vectorizer.annotate(iterable, estimator=estimator)
scores = [score for seq, score in annotated_seqs]
return scores
def multiprocess_score(iterable,
vectorizer=None,
estimator=None,
block_size=100,
n_jobs=-1):
"""multiprocess_score."""
start_time = time.time()
if n_jobs == -1:
pool = mp.Pool()
else:
pool = mp.Pool(n_jobs)
results = [apply_async(
pool, serial_score,
args=(seqs,
vectorizer,
estimator))
for seqs in chunks(iterable, block_size)]
logger.debug('Setup %.2f secs' % (time.time() - start_time))
logger.debug('Predicting')
start_time = time.time()
scores_items = []
for i, p in enumerate(results):
loc_start_time = time.time()
scores = p.get()
scores_items += scores
d_time = time.time() - start_time
d_loc_time = time.time() - loc_start_time
logger.debug('%d (%.2f secs) (delta: %.2f)' %
(i, d_time, d_loc_time))
pool.close()
pool.join()
return scores_items
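# the multiprocess_* helpers above share a single pattern: split the input into
# blocks with chunks(), submit each block through eden.apply_async on a
# multiprocessing Pool, then collect per-block results in submission order with p.get()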
# ------------------------------------------------------------------------------
def _fasta_to_fasta(lines):
seq = ""
for line in lines:
if line:
if line[0] == '>':
if seq:
yield seq
seq = ""
line_str = str(line)
yield line_str.strip()
else:
line_str = line.split()
if line_str:
seq += str(line_str[0]).strip()
if seq:
yield seq
# ------------------------------------------------------------------------------
class MuscleAlignWrapper(object):
"""A wrapper to perform Muscle Alignment on sequences."""
def __init__(self,
diags=False,
maxiters=16,
maxhours=None,
# TODO: check if this alphabet is required
# it over-rides tool.alphabet
alphabet='dna', # ['dna', 'rna', 'protein']
):
"""Initialize an instance."""
self.diags = diags
self.maxiters = maxiters
self.maxhours = maxhours
if alphabet == 'protein':
self.alphabet = IUPAC.protein
elif alphabet == 'rna':
self.alphabet = IUPAC.unambiguous_rna
else:
self.alphabet = IUPAC.unambiguous_dna
def _seq_to_stdin_fasta(self, seqs):
# seperating headers
headers, instances = [list(x) for x in zip(*seqs)]
instances_seqrecord = []
for i, j in enumerate(instances):
instances_seqrecord.append(
SeqRecord(Seq(j, self.alphabet), id=str(i)))
handle = StringIO()
SeqIO.write(instances_seqrecord, handle, "fasta")
data = handle.getvalue()
return headers, data
def _perform_ma(self, data):
params = {'maxiters': 7}
if self.diags is True:
params['diags'] = True
if self.maxhours is not None:
params['maxhours'] = self.maxhours
muscle_cline = MuscleCommandline(**params)
stdout, stderr = muscle_cline(stdin=data)
return stdout
def _fasta_to_seqs(self, headers, stdout):
out = list(_fasta_to_fasta(stdout.split('\n')))
motif_seqs = [''] * len(headers)
for i in range(len(out[:-1]))[::2]:
id = int(out[i].split(' ')[0].split('>')[1])
motif_seqs[id] = out[i + 1]
return zip(headers, motif_seqs)
def transform(self, seqs=[]):
"""Carry out alignment."""
headers, data = self._seq_to_stdin_fasta(seqs)
stdout = self._perform_ma(data)
aligned_seqs = self._fasta_to_seqs(headers, stdout)
return aligned_seqs
# ------------------------------------------------------------------------------
class Weblogo(object):
"""A wrapper of weblogolib for creating sequence."""
def __init__(self,
output_format='png', # ['eps','png','png_print','jpeg']
stacks_per_line=40,
sequence_type='dna', # ['protein','dna','rna']
ignore_lower_case=False,
# ['bits','nats','digits','kT','kJ/mol','kcal/mol','probability']
units='bits',
first_position=1,
logo_range=list(),
# composition = 'auto',
scale_stack_widths=True,
error_bars=True,
title='',
figure_label='',
show_x_axis=True,
x_label='',
show_y_axis=True,
y_label='',
y_axis_tic_spacing=1.0,
show_ends=False,
# ['auto','base','pairing','charge','chemistry','classic','monochrome']
color_scheme='classic',
resolution=96,
fineprint='',
):
"""Initialize an instance."""
options = wbl.LogoOptions()
options.stacks_per_line = stacks_per_line
options.sequence_type = sequence_type
options.ignore_lower_case = ignore_lower_case
options.unit_name = units
options.first_index = first_position
if logo_range:
options.logo_start = logo_range[0]
options.logo_end = logo_range[1]
options.scale_width = scale_stack_widths
options.show_errorbars = error_bars
if title:
options.title = title
if figure_label:
options.logo_label = figure_label
options.show_xaxis = show_x_axis
if x_label:
options.xaxis_label = x_label
options.show_yaxis = show_y_axis
if y_label:
options.yaxis_label = y_label
options.yaxis_tic_interval = y_axis_tic_spacing
options.show_ends = show_ends
options.color_scheme = wbl.std_color_schemes[color_scheme]
options.resolution = resolution
if fineprint:
options.fineprint = fineprint
self.options = options
self.output_format = output_format
def create_logo(self, seqs=[]):
"""Create sequence logo for input sequences."""
# seperate headers
headers, instances = [list(x)
for x in zip(*seqs)]
        if self.options.sequence_type == 'rna':
alphabet = Alphabet('ACGU')
        elif self.options.sequence_type == 'protein':
alphabet = Alphabet('ACDEFGHIKLMNPQRSTVWY')
else:
alphabet = Alphabet('AGCT')
motif_corebio = SeqList(alist=instances, alphabet=alphabet)
data = wbl.LogoData().from_seqs(motif_corebio)
format = wbl.LogoFormat(data, self.options)
if self.output_format == 'png':
return wbl.png_formatter(data, format)
elif self.output_format == 'png_print':
return wbl.png_print_formatter(data, format)
elif self.output_format == 'jpeg':
return wbl.jpeg_formatter(data, format)
else:
return wbl.eps_formatter(data, format)
# ------------------------------------------------------------------------------
class SequenceMotifDecomposer(BaseEstimator, ClassifierMixin):
"""SequenceMotifDecomposer."""
def __init__(self,
complexity=5,
n_clusters=10,
min_subarray_size=4,
max_subarray_size=10,
estimator=SGDClassifier(warm_start=True),
class_estimator=SGDClassifier(),
clusterer=MiniBatchKMeans(),
pos_block_size=300,
neg_block_size=300,
n_jobs=-1):
"""Construct."""
self.complexity = complexity
self.n_clusters = n_clusters
self.min_subarray_size = min_subarray_size
self.max_subarray_size = max_subarray_size
self.pos_block_size = pos_block_size
self.neg_block_size = neg_block_size
self.n_jobs = n_jobs
self.vectorizer = Vectorizer(complexity=complexity,
auto_weights=True,
nbits=15)
self.estimator = estimator
self.class_estimator = class_estimator
self.clusterer = clusterer
self.clusterer_is_fit = False
def save(self, model_name):
"""save."""
joblib.dump(self, model_name, compress=1)
def load(self, obj):
"""load."""
self.__dict__.update(joblib.load(obj).__dict__)
def fit(self, pos_seqs=None, neg_seqs=None):
"""fit."""
try:
self.estimator = multiprocess_fit(
pos_seqs, neg_seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
pos_block_size=self.pos_block_size,
neg_block_size=self.neg_block_size,
n_jobs=self.n_jobs)
self.fit_decomposition(neg_seqs)
return self
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def performance(self, pos_seqs=None, neg_seqs=None):
"""performance."""
try:
y_pred, y_binary, y_test = multiprocess_performance(
pos_seqs, neg_seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
pos_block_size=self.pos_block_size,
neg_block_size=self.neg_block_size,
n_jobs=self.n_jobs)
# confusion matrix
cm = metrics.confusion_matrix(y_test, y_binary)
np.set_printoptions(precision=2)
logger.info('Confusion matrix:')
logger.info(cm)
# classification
logger.info('Classification:')
logger.info(metrics.classification_report(y_test, y_binary))
# roc
logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred)))
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _decompose_header(self, header):
score = header.split('<score>')[1]
score = float(score)
loc = header.split('<loc>')[1]
begin, end = loc.split(':')
begin = int(begin)
end = int(end)
subseq = header.split('<subseq>')[1]
orig_header = header.split('<loc>')[0]
return orig_header, score, begin, end, subseq
def decompose(self, seqs=None, p_value=0.05):
"""decomposition_scores."""
try:
subarrays_items = multiprocess_subarray(
seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
min_subarray_size=self.min_subarray_size,
max_subarray_size=self.max_subarray_size,
block_size=self.pos_block_size,
n_jobs=self.n_jobs)
for header, seq in subarrays_items:
components = self._decompose_header(header)
orig_header, score, begin, end, subseq = components
p = self.compute_p_value(score)
if p <= p_value:
yield orig_header, begin, end, p, subseq
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def decomposition_scores(self, seqs=None):
"""decomposition_scores."""
try:
subarrays_items = multiprocess_subarray(
seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
min_subarray_size=self.min_subarray_size,
max_subarray_size=self.max_subarray_size,
block_size=self.pos_block_size,
n_jobs=self.n_jobs)
for header, seq in subarrays_items:
yield self._decompose_header(header)
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def fit_decomposition(self, seqs=None):
"""fit_decomposition."""
self.a, self.b = -4, 1
scores = [score for header, score, begin, end, subseq in
self.decomposition_scores(seqs)]
if scores:
xs, ys = ecdf(scores)
popt, pcov = curve_fit(sigmoid, xs, ys)
self.a, self.b = popt
else:
logger.debug('Warning: reverting to default values')
logger.debug('ECDF fit on %d values' % (len(scores)))
logger.debug('Optimal params: a:%.2f b:%.2f' % (self.a, self.b))
def compute_p_value(self, value):
"""p_value."""
y = sigmoid(value, self.a, self.b)
p_val = 1 - y
return p_val
def compute_clusters(self, seqs=None, p_value=0.05):
"""compute_clusters."""
try:
subsequences = []
iterable = self.decompose(seqs, p_value=p_value)
for header, begin, end, p, subseq in iterable:
new_header = header
new_header += '<loc>' + str(begin) + ':'
new_header += str(end) + '<loc>'
subsequences.append((new_header, subseq))
if not subsequences:
raise Exception('No subarray was selected. Increase p_value.')
logger.debug('Working on: %d fragments' % len(subsequences))
n = multiprocessing.cpu_count()
            pos_block_size = max(1, len(subsequences) / n)
data_matrix = multiprocess_vectorize(
subsequences,
vectorizer=self.vectorizer,
pos_block_size=pos_block_size,
n_jobs=self.n_jobs)
logger.debug('Clustering')
logger.debug('working on %d instances' % data_matrix.shape[0])
start_time = time.time()
self.clusterer.set_params(n_clusters=self.n_clusters)
if self.clusterer_is_fit:
preds = self.class_estimator.predict(data_matrix)
else:
preds = self.clusterer.fit_predict(data_matrix)
self.class_estimator.fit(data_matrix, preds)
self.clusterer_is_fit = True
dtime = time.time() - start_time
logger.debug('...done in %.2f secs' % (dtime))
self.clusters = defaultdict(list)
for pred, seq in zip(preds, subsequences):
self.clusters[pred].append(seq)
logger.debug('After clustering, %d motives' % len(self.clusters))
return self.clusters
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def score(self, seqs=None):
"""fit."""
try:
for score in multiprocess_score(seqs,
vectorizer=self.vectorizer,
estimator=self.estimator,
block_size=self.pos_block_size,
n_jobs=self.n_jobs):
yield score
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _order_clusters(self, clusters, complexity=3):
sep = ' ' * (complexity * 2)
# join all sequences in a cluster with enough space that
        # k-mers don't interfere
cluster_seqs = []
for cluster_id in clusters:
if len(clusters[cluster_id]) > 0:
seqs = [s for h, s in clusters[cluster_id]]
seq = sep.join(seqs)
cluster_seqs.append(seq)
# vectorize the seqs and compute their gram matrix K
cluster_vecs = Vectorizer(complexity).transform(cluster_seqs)
gram_matrix = metrics.pairwise.pairwise_kernels(
cluster_vecs, metric='linear')
c = linkage(gram_matrix, method='single')
orders = []
for id1, id2 in c[:, 0:2]:
if id1 < len(cluster_seqs):
orders.append(int(id1))
if id2 < len(cluster_seqs):
orders.append(int(id2))
return orders
def _compute_consensus_seq(self, align_seqs):
cluster = []
for h, align_seq in align_seqs:
str_list = [c for c in align_seq]
concat_str = np.array(str_list, dtype=np.dtype('a'))
cluster.append(concat_str)
cluster = np.vstack(cluster)
seq = ''
for i, row in enumerate(cluster.T):
c = Counter(row)
k = c.most_common()
seq += k[0][0]
return seq
def _compute_score(self, align_seqs, min_freq=0.8):
dim = len(align_seqs)
cluster = []
for h, align_seq in align_seqs:
str_list = [c for c in align_seq]
concat_str = np.array(str_list, dtype=np.dtype('a'))
cluster.append(concat_str)
cluster = np.vstack(cluster)
score = 0
to_be_removed = []
for i, row in enumerate(cluster.T):
c = Counter(row)
k = c.most_common()
if k[0][0] == '-':
to_be_removed.append(i)
val = k[1][1]
else:
val = k[0][1]
if float(val) / dim >= min_freq:
score += 1
trimmed_align_seqs = []
for h, align_seq in align_seqs:
trimmed_align_seq = [a for i, a in enumerate(align_seq)
if i not in to_be_removed]
trimmed_align_seqs.append((h, ''.join(trimmed_align_seq)))
return score, trimmed_align_seqs
def _is_high_quality(self,
seqs,
min_score=4,
min_freq=0.6,
min_cluster_size=10,
sample_size=200):
ma = MuscleAlignWrapper(alphabet='rna')
if len(seqs) > sample_size:
sample_seqs = random.sample(seqs, sample_size)
else:
sample_seqs = seqs
align_seqs = ma.transform(seqs=sample_seqs)
score, trimmed_align_seqs = self._compute_score(align_seqs,
min_freq=min_freq)
if score >= min_score and len(align_seqs) > min_cluster_size:
return True
else:
return False
def compute_motif(self,
seqs=None,
min_score=4,
min_freq=0.6,
min_cluster_size=10,
regex_th=.3,
sample_size=200):
"""compute_motif."""
ma = MuscleAlignWrapper(alphabet='rna')
if len(seqs) > sample_size:
sample_seqs = random.sample(seqs, sample_size)
else:
sample_seqs = seqs
align_seqs = ma.transform(seqs=sample_seqs)
score, trimmed_align_seqs = self._compute_score(align_seqs,
min_freq=min_freq)
if score >= min_score and len(align_seqs) > min_cluster_size:
consensus_seq = self._compute_consensus_seq(trimmed_align_seqs)
regex_seq = consensus_regex(trimmed_align_seqs, regex_th)
motif = {'consensus_seq': consensus_seq,
'regex_seq': regex_seq,
'trimmed_align_seqs': trimmed_align_seqs,
'align_seqs': align_seqs,
'seqs': seqs}
return True, motif
else:
return False, None
def compute_motives(self,
clusters,
min_score=4,
min_freq=0.6,
min_cluster_size=10,
regex_th=.3,
sample_size=200):
"""compute_motives."""
if not clusters:
raise Exception('Error: No clusters.')
mcs = min_cluster_size
logger.debug('Alignment')
motives = dict()
for cluster_id in clusters:
start_time = time.time()
# align with muscle
is_high_quality, motif = self.compute_motif(
seqs=clusters[cluster_id],
min_score=min_score,
min_freq=min_freq,
min_cluster_size=mcs,
regex_th=regex_th,
sample_size=sample_size)
if is_high_quality:
motives[cluster_id] = motif
dtime = time.time() - start_time
logger.debug(
'Cluster %d (#%d) (%.2f secs)' %
(cluster_id, len(clusters[cluster_id]), dtime))
logger.debug('After motives computation, %d motives' % len(motives))
return motives
def _identify_mergeable_clusters(self, motives, similarity_th=0.8):
for i in motives:
for j in motives:
if j > i:
seq_i = motives[i]['consensus_seq']
seq_j = motives[j]['consensus_seq']
nw_score = edit_distance(seq_i, seq_j, gap_penalty=-1)
rel_nw_score = 2 * nw_score / (len(seq_i) + len(seq_j))
if rel_nw_score > similarity_th:
yield rel_nw_score, i, j
def merge(self,
motives,
similarity_th=0.5,
min_score=4,
min_freq=0.5,
min_cluster_size=10,
regex_th=.3,
sample_size=200):
"""merge."""
while True:
ms = sorted([m for m in self._identify_mergeable_clusters(
motives, similarity_th=similarity_th)], reverse=True)
success = False
for rel_nw_score, i, j in ms:
if motives.get(i, None) and motives.get(j, None):
n_i = len(motives[i]['seqs'])
n_j = len(motives[j]['seqs'])
seqs = motives[i]['seqs'] + motives[j]['seqs']
is_high_quality, motif = self.compute_motif(
seqs=seqs,
min_score=min_score,
min_freq=min_freq,
min_cluster_size=min_cluster_size,
regex_th=regex_th,
sample_size=sample_size)
if is_high_quality:
info1 = 'Joining: %d (#%d), %d (#%d) score: %.2f' % \
(i, n_i, j, n_j, rel_nw_score)
info2 = ' deleting: %d [%d is now #%d]' % \
(j, i, n_i + n_j)
logger.debug(info1 + info2)
# update motives
motives[i] = motif
del motives[j]
success = True
if success is False:
break
# TODO: run the predictor to learn the new class definition
logger.debug('After merge, %d motives' % len(motives))
return motives
def quality_filter(self,
seqs=None,
motives=None,
freq_th=None,
std_th=None):
"""quality_filter."""
_motives = dict()
for cluster_id in motives:
regex_seq = motives[cluster_id]['regex_seq']
counts, freq = occurrences(regex_seq, seqs)
motives[cluster_id]['freq'] = freq
motives[cluster_id]['counts'] = counts
avg, std = extract_location(regex_seq, seqs)
motives[cluster_id]['avg_pos'] = avg
motives[cluster_id]['std_pos'] = std
if freq_th is None or freq >= freq_th:
if std_th is None or std <= std_th:
_motives[cluster_id] = motives[cluster_id]
if len(_motives) == 0:
logger.warning('Quality filter is too strict. Ignoring filter.')
return motives
else:
logger.debug('After quality filter, %d motives' % len(_motives))
return _motives
def select_motives(self,
seqs=None,
p_value=0.05,
similarity_th=0.5,
min_score=4,
min_freq=0.5,
min_cluster_size=10,
regex_th=.3,
sample_size=200,
freq_th=None,
std_th=None):
"""select_motives."""
orig_clusters = self.compute_clusters(seqs, p_value=p_value)
motives = self.compute_motives(
orig_clusters,
min_score=min_score,
min_freq=min_freq,
min_cluster_size=min_cluster_size,
regex_th=regex_th,
sample_size=sample_size)
motives = self.merge(
motives,
similarity_th=similarity_th,
min_score=min_score,
min_freq=min_freq,
min_cluster_size=min_cluster_size,
regex_th=regex_th,
sample_size=sample_size)
motives = self.quality_filter(
seqs,
motives,
freq_th=freq_th,
std_th=std_th)
return motives
def compute_logo(self,
cluster_id=None,
motif=None):
"""compute_logo."""
alphabet = 'rna'
color_scheme = 'classic'
wb = Weblogo(output_format='png',
sequence_type=alphabet,
resolution=200,
stacks_per_line=60,
units='bits',
color_scheme=color_scheme)
logo_image = wb.create_logo(seqs=motif['trimmed_align_seqs'])
logo_txt = []
info = ' - num subarrays: %d' % len(motif['seqs'])
logo_txt.append(info)
info = ' - consensus sequence: %s' % motif['consensus_seq']
logo_txt.append(info)
info = ' - consensus regex: %s' % motif['regex_seq']
logo_txt.append(info)
return logo_image, logo_txt
def compute_logos(self,
motives,
ids=None):
"""compute_logos."""
if motives:
if ids is None:
ids = [cluster_id for cluster_id in motives]
logos = dict()
for cluster_id in ids:
logo_image, logo_txt = self.compute_logo(
cluster_id=cluster_id,
motif=motives[cluster_id])
logos[cluster_id] = (logo_image, logo_txt)
return logos
else:
logger.warning(
'No logo to compute. Try more permissive parameters.')
def _save_logo(self, logo, cluster_id, fname):
imagename = '%s_logo_cl_%d.png' % (fname, cluster_id)
with open(imagename, 'wb') as f:
f.write(logo)
return imagename
def _wrap_image(self, fname, fill_width=True, output_type='screen'):
pwd = os.getcwd()
url = pwd + '/' + fname
txt = []
if fill_width:
if output_type == 'pdf':
txt.append('<p align="left"><img src="file://' + url +
'" style="width: 100%"></p>')
else:
txt.append('<p align="left"><img src="' + fname +
'" style="width: 100%"></p>')
else:
if output_type == 'pdf':
txt.append('<p align="left"><img src="file://' + url +
'"></p>')
else:
txt.append('<p align="left"><img src="' + fname +
'"></p>')
return '\n'.join(txt)
def report(self,
pos_seqs,
all_seqs,
motives,
nbins=40,
size=(17, 2),
output_type='screen',
fname=None):
"""Report in markdown format."""
txt = []
if motives:
_, norm_cooccurence_mtx, distances = compute_cooccurence(motives)
info = '### Summary: %d motives' % len(motives)
txt.append(info)
figname = plot_cumulative_score(
self, pos_seqs, size=size, fname=fname)
txt.append(self._wrap_image(figname, output_type=output_type))
for freq, cluster_id in sorted([(motives[i]['freq'], i)
for i in motives], reverse=True):
info = ' - %.2s %s' % \
(cluster_id, motives[cluster_id]['consensus_seq'])
txt.append(info)
for freq, cluster_id in sorted([(motives[i]['freq'], i)
for i in motives], reverse=True):
info = '#### Motif id: %d' % cluster_id
txt.append(info)
logo_image, logo_txts = self.compute_logo(
cluster_id, motif=motives[cluster_id])
figname = self._save_logo(logo_image, cluster_id, fname)
for logo_txt in logo_txts:
txt.append(logo_txt)
co = motives[cluster_id]['counts']
fr = motives[cluster_id]['freq']
info = ' - num occurrences of regex: %d' % (co)
txt.append(info)
info = ' - freq of occurrences of regex: %.2f' % (fr)
txt.append(info)
av = motives[cluster_id]['avg_pos']
st = motives[cluster_id]['std_pos']
info = ' - average location: %.1f +- %.1f' % (av, st)
txt.append(info)
txt.append(self._wrap_image(figname,
fill_width=False,
output_type=output_type))
regex_i = motives[cluster_id]['regex_seq']
figname = plot_location(
regex_i, all_seqs, cluster_id=cluster_id,
nbins=nbins, size=size, fname=fname)
txt.append(self._wrap_image(figname, output_type=output_type))
for j in motives:
                    regex_i = motives[cluster_id]['regex_seq']
if j != cluster_id:
regex_j = motives[j]['regex_seq']
ds = distances[(cluster_id, j)]
info = ' - num co-occurences %d %s vs %d %s: %d' % \
(cluster_id, regex_i, j, regex_j, len(ds))
txt.append(info)
if len(ds):
figname = plot_distance(
cluster_id, j,
regex_i, regex_j,
distances,
nbins=nbins, size=size, fname=fname)
txt.append(self._wrap_image(
figname,
output_type=output_type))
txt.append('_' * 100)
else:
logger.warning(
'No motives to report. Try more permissive parameters.')
txt = '\n'.join(txt)
return txt
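# Illustrative end-to-end usage (a sketch; `finder` is assumed to be an instance
# of this class and `pos_seqs` / `all_seqs` whatever sequence containers the
# methods above expect):
#
#     motives = finder.select_motives(seqs=pos_seqs, p_value=0.05, min_score=4)
#     markdown = finder.report(pos_seqs, all_seqs, motives, fname='motif')
#
# select_motives clusters the sequences, builds motif models, merges similar
# clusters and filters them by frequency and positional spread; report renders
# logos, locations and co-occurrences of the surviving motives as markdown.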
|
[
"pylab.close",
"sklearn.cluster.MiniBatchKMeans",
"weblogolib.jpeg_formatter",
"Bio.Seq.Seq",
"Bio.SeqIO.write",
"numpy.nan_to_num",
"weblogolib.eps_formatter",
"random.sample",
"scipy.cluster.hierarchy.linkage",
"joblib.dump",
"eden.apply_async",
"sklearn.metrics.classification_report",
"collections.defaultdict",
"pylab.figure",
"numpy.exp",
"StringIO.StringIO",
"multiprocessing.cpu_count",
"pylab.title",
"numpy.set_printoptions",
"numpy.copy",
"sklearn.linear_model.SGDClassifier",
"eden.util.iterated_maximum_subarray.compute_max_subarrays_sequence",
"pylab.draw",
"pylab.ylabel",
"weblogolib.LogoData",
"pylab.xlabel",
"collections.Counter",
"pylab.hist",
"sklearn.metrics.roc_auc_score",
"corebio.seq.SeqList",
"pylab.grid",
"numpy.hstack",
"numpy.percentile",
"numpy.sort",
"pylab.savefig",
"numpy.min",
"Bio.Align.Applications.MuscleCommandline",
"sklearn.metrics.pairwise.pairwise_kernels",
"scipy.optimize.curve_fit",
"multiprocessing.Pool",
"weblogolib.png_formatter",
"eden.util.NeedlemanWunsh.edit_distance",
"numpy.vstack",
"weblogolib.LogoFormat",
"weblogolib.LogoOptions",
"pylab.show",
"scipy.sparse.vstack",
"regex.findall",
"os.getcwd",
"numpy.dtype",
"numpy.zeros",
"eden.sequence.Vectorizer",
"time.time",
"corebio.seq.Alphabet",
"numpy.array",
"itertools.izip",
"sklearn.metrics.confusion_matrix",
"joblib.load",
"weblogolib.png_print_formatter",
"logging.getLogger",
"regex.finditer"
] |
[((1145, 1172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1162, 1172), False, 'import logging\n'), ((2325, 2335), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (2332, 2335), True, 'import numpy as np\n'), ((3092, 3110), 'numpy.vstack', 'np.vstack', (['cluster'], {}), '(cluster)\n', (3101, 3110), True, 'import numpy as np\n'), ((4503, 4527), 'pylab.figure', 'plt.figure', ([], {'figsize': 'size'}), '(figsize=size)\n', (4513, 4527), True, 'import pylab as plt\n'), ((4551, 4611), 'pylab.hist', 'plt.hist', (['locs', 'nbins'], {'normed': '(0)', 'facecolor': '"""blue"""', 'alpha': '(0.3)'}), "(locs, nbins, normed=0, facecolor='blue', alpha=0.3)\n", (4559, 4611), True, 'import pylab as plt\n'), ((4625, 4635), 'pylab.grid', 'plt.grid', ([], {}), '()\n', (4633, 4635), True, 'import pylab as plt\n'), ((4640, 4657), 'pylab.title', 'plt.title', (['needle'], {}), '(needle)\n', (4649, 4657), True, 'import pylab as plt\n'), ((4662, 4684), 'pylab.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (4672, 4684), True, 'import pylab as plt\n'), ((4689, 4718), 'pylab.ylabel', 'plt.ylabel', (['"""Num occurrences"""'], {}), "('Num occurrences')\n", (4699, 4718), True, 'import pylab as plt\n'), ((4959, 4970), 'pylab.close', 'plt.close', ([], {}), '()\n', (4968, 4970), True, 'import pylab as plt\n'), ((5889, 5906), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5900, 5906), False, 'from collections import defaultdict\n'), ((6034, 6051), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6045, 6051), False, 'from collections import defaultdict\n'), ((6115, 6137), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (6123, 6137), True, 'import numpy as np\n'), ((7196, 7226), 'numpy.nan_to_num', 'np.nan_to_num', (['cooccurence_mtx'], {}), '(cooccurence_mtx)\n', (7209, 7226), True, 'import numpy as np\n'), ((7552, 7579), 'numpy.vstack', 'np.vstack', (['cooccurence_list'], {}), '(cooccurence_list)\n', (7561, 7579), True, 'import numpy as np\n'), ((7961, 7985), 'pylab.figure', 'plt.figure', ([], {'figsize': 'size'}), '(figsize=size)\n', (7971, 7985), True, 'import pylab as plt\n'), ((8009, 8068), 'pylab.hist', 'plt.hist', (['ds', 'nbins'], {'normed': '(0)', 'facecolor': '"""green"""', 'alpha': '(0.3)'}), "(ds, nbins, normed=0, facecolor='green', alpha=0.3)\n", (8017, 8068), True, 'import pylab as plt\n'), ((8082, 8092), 'pylab.grid', 'plt.grid', ([], {}), '()\n', (8090, 8092), True, 'import pylab as plt\n'), ((8097, 8139), 'pylab.title', 'plt.title', (["('%s vs %s' % (regex_i, regex_j))"], {}), "('%s vs %s' % (regex_i, regex_j))\n", (8106, 8139), True, 'import pylab as plt\n'), ((8144, 8175), 'pylab.xlabel', 'plt.xlabel', (['"""Relative position"""'], {}), "('Relative position')\n", (8154, 8175), True, 'import pylab as plt\n'), ((8180, 8209), 'pylab.ylabel', 'plt.ylabel', (['"""Num occurrences"""'], {}), "('Num occurrences')\n", (8190, 8209), True, 'import pylab as plt\n'), ((8473, 8484), 'pylab.close', 'plt.close', ([], {}), '()\n', (8482, 8484), True, 'import pylab as plt\n'), ((10268, 10292), 'pylab.figure', 'plt.figure', ([], {'figsize': 'size'}), '(figsize=size)\n', (10278, 10292), True, 'import pylab as plt\n'), ((10304, 10316), 'numpy.copy', 'np.copy', (['sig'], {}), '(sig)\n', (10311, 10316), True, 'import numpy as np\n'), ((10409, 10421), 'numpy.copy', 'np.copy', (['sig'], {}), '(sig)\n', (10416, 10421), True, 'import numpy as np\n'), ((10508, 10518), 'pylab.grid', 'plt.grid', ([], {}), '()\n', 
(10516, 10518), True, 'import pylab as plt\n'), ((10523, 10545), 'pylab.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (10533, 10545), True, 'import pylab as plt\n'), ((10550, 10580), 'pylab.ylabel', 'plt.ylabel', (['"""Importance score"""'], {}), "('Importance score')\n", (10560, 10580), True, 'import pylab as plt\n'), ((10813, 10824), 'pylab.close', 'plt.close', ([], {}), '()\n', (10822, 10824), True, 'import pylab as plt\n'), ((11519, 11530), 'time.time', 'time.time', ([], {}), '()\n', (11528, 11530), False, 'import time\n'), ((11883, 11894), 'time.time', 'time.time', ([], {}), '()\n', (11892, 11894), False, 'import time\n'), ((12346, 12362), 'scipy.sparse.vstack', 'vstack', (['matrices'], {}), '(matrices)\n', (12352, 12362), False, 'from scipy.sparse import vstack\n'), ((12672, 12683), 'time.time', 'time.time', ([], {}), '()\n', (12681, 12683), False, 'import time\n'), ((12698, 12715), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (12706, 12715), True, 'import numpy as np\n'), ((13229, 13240), 'time.time', 'time.time', ([], {}), '()\n', (13238, 13240), False, 'import time\n'), ((14286, 14297), 'time.time', 'time.time', ([], {}), '()\n', (14295, 14297), False, 'import time\n'), ((14826, 14837), 'time.time', 'time.time', ([], {}), '()\n', (14835, 14837), False, 'import time\n'), ((15735, 15751), 'numpy.hstack', 'np.hstack', (['preds'], {}), '(preds)\n', (15744, 15751), True, 'import numpy as np\n'), ((15771, 15794), 'numpy.hstack', 'np.hstack', (['binary_preds'], {}), '(binary_preds)\n', (15780, 15794), True, 'import numpy as np\n'), ((15814, 15837), 'numpy.hstack', 'np.hstack', (['true_targets'], {}), '(true_targets)\n', (15823, 15837), True, 'import numpy as np\n'), ((17406, 17417), 'time.time', 'time.time', ([], {}), '()\n', (17415, 17417), False, 'import time\n'), ((17867, 17878), 'time.time', 'time.time', ([], {}), '()\n', (17876, 17878), False, 'import time\n'), ((18805, 18816), 'time.time', 'time.time', ([], {}), '()\n', (18814, 18816), False, 'import time\n'), ((19197, 19208), 'time.time', 'time.time', ([], {}), '()\n', (19206, 19208), False, 'import time\n'), ((1613, 1623), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (1620, 1623), True, 'import numpy as np\n'), ((3198, 3210), 'collections.Counter', 'Counter', (['row'], {}), '(row)\n', (3205, 3210), False, 'from collections import Counter\n'), ((3305, 3317), 'collections.Counter', 'Counter', (['row'], {}), '(row)\n', (3312, 3317), False, 'from collections import Counter\n'), ((3565, 3603), 'regex.findall', 're.findall', (['needle', 's'], {'overlapped': '(True)'}), '(needle, s, overlapped=True)\n', (3575, 3603), True, 'import regex as re\n'), ((4358, 4380), 'regex.finditer', 're.finditer', (['needle', 's'], {}), '(needle, s)\n', (4369, 4380), True, 'import regex as re\n'), ((4741, 4751), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (4749, 4751), True, 'import pylab as plt\n'), ((4816, 4889), 'pylab.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(figname, bbox_inches='tight', transparent=True, pad_inches=0)\n", (4827, 4889), True, 'import pylab as plt\n'), ((4944, 4954), 'pylab.show', 'plt.show', ([], {}), '()\n', (4952, 4954), True, 'import pylab as plt\n'), ((5121, 5143), 'regex.finditer', 're.finditer', (['needle', 's'], {}), '(needle, s)\n', (5132, 5143), True, 'import regex as re\n'), ((5293, 5316), 'numpy.percentile', 'np.percentile', (['locs', '(50)'], {}), '(locs, 50)\n', (5306, 5316), True, 'import numpy as np\n'), 
((6305, 6322), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6316, 6322), False, 'from collections import defaultdict\n'), ((8232, 8242), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (8240, 8242), True, 'import pylab as plt\n'), ((8330, 8403), 'pylab.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(figname, bbox_inches='tight', transparent=True, pad_inches=0)\n", (8341, 8403), True, 'import pylab as plt\n'), ((8458, 8468), 'pylab.show', 'plt.show', ([], {}), '()\n', (8466, 8468), True, 'import pylab as plt\n'), ((8701, 8753), 'numpy.min', 'np.min', (['sig[i - half_windw_size:i + half_windw_size]'], {}), '(sig[i - half_windw_size:i + half_windw_size])\n', (8707, 8753), True, 'import numpy as np\n'), ((9310, 9326), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (9318, 9326), True, 'import numpy as np\n'), ((9655, 9669), 'numpy.array', 'np.array', (['sigs'], {}), '(sigs)\n', (9663, 9669), True, 'import numpy as np\n'), ((10603, 10613), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (10611, 10613), True, 'import pylab as plt\n'), ((10670, 10743), 'pylab.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(figname, bbox_inches='tight', transparent=True, pad_inches=0)\n", (10681, 10743), True, 'import pylab as plt\n'), ((10798, 10808), 'pylab.show', 'plt.show', ([], {}), '()\n', (10806, 10808), True, 'import pylab as plt\n'), ((11567, 11576), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (11574, 11576), True, 'import multiprocessing as mp\n'), ((11602, 11617), 'multiprocessing.Pool', 'mp.Pool', (['n_jobs'], {}), '(n_jobs)\n', (11609, 11617), True, 'import multiprocessing as mp\n'), ((11634, 11696), 'eden.apply_async', 'apply_async', (['pool', 'serial_pre_process'], {'args': '(seqs, vectorizer)'}), '(pool, serial_pre_process, args=(seqs, vectorizer))\n', (11645, 11696), False, 'from eden import apply_async\n'), ((11974, 11985), 'time.time', 'time.time', ([], {}), '()\n', (11983, 11985), False, 'import time\n'), ((12752, 12761), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (12759, 12761), True, 'import multiprocessing as mp\n'), ((12787, 12802), 'multiprocessing.Pool', 'mp.Pool', (['n_jobs'], {}), '(n_jobs)\n', (12794, 12802), True, 'import multiprocessing as mp\n'), ((12823, 12885), 'eden.apply_async', 'apply_async', (['pool', 'serial_pre_process'], {'args': '(seqs, vectorizer)'}), '(pool, serial_pre_process, args=(seqs, vectorizer))\n', (12834, 12885), False, 'from eden import apply_async\n'), ((12980, 13042), 'eden.apply_async', 'apply_async', (['pool', 'serial_pre_process'], {'args': '(seqs, vectorizer)'}), '(pool, serial_pre_process, args=(seqs, vectorizer))\n', (12991, 13042), False, 'from eden import apply_async\n'), ((13272, 13302), 'itertools.izip', 'izip', (['pos_results', 'neg_results'], {}), '(pos_results, neg_results)\n', (13276, 13302), False, 'from itertools import izip\n'), ((13330, 13341), 'time.time', 'time.time', ([], {}), '()\n', (13339, 13341), False, 'import time\n'), ((13510, 13521), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (13518, 13521), True, 'import numpy as np\n'), ((13544, 13586), 'scipy.sparse.vstack', 'vstack', (['[pos_data_matrix, neg_data_matrix]'], {}), '([pos_data_matrix, neg_data_matrix])\n', (13550, 13586), False, 'from scipy.sparse import vstack\n'), ((14334, 14343), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (14341, 14343), True, 'import multiprocessing as 
mp\n'), ((14369, 14384), 'multiprocessing.Pool', 'mp.Pool', (['n_jobs'], {}), '(n_jobs)\n', (14376, 14384), True, 'import multiprocessing as mp\n'), ((14405, 14467), 'eden.apply_async', 'apply_async', (['pool', 'serial_pre_process'], {'args': '(seqs, vectorizer)'}), '(pool, serial_pre_process, args=(seqs, vectorizer))\n', (14416, 14467), False, 'from eden import apply_async\n'), ((14562, 14624), 'eden.apply_async', 'apply_async', (['pool', 'serial_pre_process'], {'args': '(seqs, vectorizer)'}), '(pool, serial_pre_process, args=(seqs, vectorizer))\n', (14573, 14624), False, 'from eden import apply_async\n'), ((14928, 14958), 'itertools.izip', 'izip', (['pos_results', 'neg_results'], {}), '(pos_results, neg_results)\n', (14932, 14958), False, 'from itertools import izip\n'), ((14986, 14997), 'time.time', 'time.time', ([], {}), '()\n', (14995, 14997), False, 'import time\n'), ((15166, 15177), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (15174, 15177), True, 'import numpy as np\n'), ((15231, 15273), 'scipy.sparse.vstack', 'vstack', (['[pos_data_matrix, neg_data_matrix]'], {}), '([pos_data_matrix, neg_data_matrix])\n', (15237, 15273), False, 'from scipy.sparse import vstack\n'), ((16296, 16455), 'eden.util.iterated_maximum_subarray.compute_max_subarrays_sequence', 'compute_max_subarrays_sequence', ([], {'seq': 'seq', 'score': 'score', 'min_subarray_size': 'min_subarray_size', 'max_subarray_size': 'max_subarray_size', 'margin': '(1)', 'output': '"""all"""'}), "(seq=seq, score=score, min_subarray_size=\n min_subarray_size, max_subarray_size=max_subarray_size, margin=1,\n output='all')\n", (16326, 16455), False, 'from eden.util.iterated_maximum_subarray import compute_max_subarrays_sequence\n'), ((17454, 17463), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (17461, 17463), True, 'import multiprocessing as mp\n'), ((17489, 17504), 'multiprocessing.Pool', 'mp.Pool', (['n_jobs'], {}), '(n_jobs)\n', (17496, 17504), True, 'import multiprocessing as mp\n'), ((17521, 17633), 'eden.apply_async', 'apply_async', (['pool', 'serial_subarray'], {'args': '(seqs, vectorizer, estimator, min_subarray_size, max_subarray_size)'}), '(pool, serial_subarray, args=(seqs, vectorizer, estimator,\n min_subarray_size, max_subarray_size))\n', (17532, 17633), False, 'from eden import apply_async\n'), ((17965, 17976), 'time.time', 'time.time', ([], {}), '()\n', (17974, 17976), False, 'import time\n'), ((18853, 18862), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (18860, 18862), True, 'import multiprocessing as mp\n'), ((18888, 18903), 'multiprocessing.Pool', 'mp.Pool', (['n_jobs'], {}), '(n_jobs)\n', (18895, 18903), True, 'import multiprocessing as mp\n'), ((18920, 18987), 'eden.apply_async', 'apply_async', (['pool', 'serial_score'], {'args': '(seqs, vectorizer, estimator)'}), '(pool, serial_score, args=(seqs, vectorizer, estimator))\n', (18931, 18987), False, 'from eden import apply_async\n'), ((19292, 19303), 'time.time', 'time.time', ([], {}), '()\n', (19301, 19303), False, 'import time\n'), ((21299, 21309), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (21307, 21309), False, 'from StringIO import StringIO\n'), ((21318, 21367), 'Bio.SeqIO.write', 'SeqIO.write', (['instances_seqrecord', 'handle', '"""fasta"""'], {}), "(instances_seqrecord, handle, 'fasta')\n", (21329, 21367), False, 'from Bio import SeqIO\n'), ((21672, 21699), 'Bio.Align.Applications.MuscleCommandline', 'MuscleCommandline', ([], {}), '(**params)\n', (21689, 21699), False, 'from Bio.Align.Applications import MuscleCommandline\n'), 
((23594, 23611), 'weblogolib.LogoOptions', 'wbl.LogoOptions', ([], {}), '()\n', (23609, 23611), True, 'import weblogolib as wbl\n'), ((25205, 25248), 'corebio.seq.SeqList', 'SeqList', ([], {'alist': 'instances', 'alphabet': 'alphabet'}), '(alist=instances, alphabet=alphabet)\n', (25212, 25248), False, 'from corebio.seq import Alphabet, SeqList\n'), ((25322, 25356), 'weblogolib.LogoFormat', 'wbl.LogoFormat', (['data', 'self.options'], {}), '(data, self.options)\n', (25336, 25356), True, 'import weblogolib as wbl\n'), ((26088, 26118), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'warm_start': '(True)'}), '(warm_start=True)\n', (26101, 26118), False, 'from sklearn.linear_model import SGDClassifier\n'), ((26153, 26168), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (26166, 26168), False, 'from sklearn.linear_model import SGDClassifier\n'), ((26197, 26214), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {}), '()\n', (26212, 26214), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((26665, 26727), 'eden.sequence.Vectorizer', 'Vectorizer', ([], {'complexity': 'complexity', 'auto_weights': '(True)', 'nbits': '(15)'}), '(complexity=complexity, auto_weights=True, nbits=15)\n', (26675, 26727), False, 'from eden.sequence import Vectorizer\n'), ((27018, 27059), 'joblib.dump', 'joblib.dump', (['self', 'model_name'], {'compress': '(1)'}), '(self, model_name, compress=1)\n', (27029, 27059), False, 'import joblib\n'), ((34655, 34719), 'sklearn.metrics.pairwise.pairwise_kernels', 'metrics.pairwise.pairwise_kernels', (['cluster_vecs'], {'metric': '"""linear"""'}), "(cluster_vecs, metric='linear')\n", (34688, 34719), False, 'from sklearn import metrics\n'), ((34745, 34782), 'scipy.cluster.hierarchy.linkage', 'linkage', (['gram_matrix'], {'method': '"""single"""'}), "(gram_matrix, method='single')\n", (34752, 34782), False, 'from scipy.cluster.hierarchy import linkage\n'), ((35300, 35318), 'numpy.vstack', 'np.vstack', (['cluster'], {}), '(cluster)\n', (35309, 35318), True, 'import numpy as np\n'), ((35803, 35821), 'numpy.vstack', 'np.vstack', (['cluster'], {}), '(cluster)\n', (35812, 35821), True, 'import numpy as np\n'), ((45971, 45982), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45980, 45982), False, 'import os\n'), ((1236, 1256), 'numpy.exp', 'np.exp', (['(-(x - a) / b)'], {}), '(-(x - a) / b)\n', (1242, 1256), True, 'import numpy as np\n'), ((1832, 1858), 'scipy.optimize.curve_fit', 'curve_fit', (['sigmoid', 'xs', 'ys'], {}), '(sigmoid, xs, ys)\n', (1841, 1858), False, 'from scipy.optimize import curve_fit\n'), ((5335, 5358), 'numpy.percentile', 'np.percentile', (['locs', '(70)'], {}), '(locs, 70)\n', (5348, 5358), True, 'import numpy as np\n'), ((5361, 5384), 'numpy.percentile', 'np.percentile', (['locs', '(30)'], {}), '(locs, 30)\n', (5374, 5384), True, 'import numpy as np\n'), ((7449, 7468), 'numpy.zeros', 'np.zeros', (['row.shape'], {}), '(row.shape)\n', (7457, 7468), True, 'import numpy as np\n'), ((12073, 12084), 'time.time', 'time.time', ([], {}), '()\n', (12082, 12084), False, 'import time\n'), ((12119, 12130), 'time.time', 'time.time', ([], {}), '()\n', (12128, 12130), False, 'import time\n'), ((13667, 13678), 'time.time', 'time.time', ([], {}), '()\n', (13676, 13678), False, 'import time\n'), ((13713, 13724), 'time.time', 'time.time', ([], {}), '()\n', (13722, 13724), False, 'import time\n'), ((15468, 15479), 'time.time', 'time.time', ([], {}), '()\n', (15477, 15479), False, 'import time\n'), ((15514, 15525), 'time.time', 
'time.time', ([], {}), '()\n', (15523, 15525), False, 'import time\n'), ((18069, 18080), 'time.time', 'time.time', ([], {}), '()\n', (18078, 18080), False, 'import time\n'), ((18115, 18126), 'time.time', 'time.time', ([], {}), '()\n', (18124, 18126), False, 'import time\n'), ((19377, 19388), 'time.time', 'time.time', ([], {}), '()\n', (19386, 19388), False, 'import time\n'), ((19423, 19434), 'time.time', 'time.time', ([], {}), '()\n', (19432, 19434), False, 'import time\n'), ((25000, 25016), 'corebio.seq.Alphabet', 'Alphabet', (['"""ACGU"""'], {}), "('ACGU')\n", (25008, 25016), False, 'from corebio.seq import Alphabet, SeqList\n'), ((25417, 25448), 'weblogolib.png_formatter', 'wbl.png_formatter', (['data', 'format'], {}), '(data, format)\n', (25434, 25448), True, 'import weblogolib as wbl\n'), ((28240, 28282), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'y_binary'], {}), '(y_test, y_binary)\n', (28264, 28282), False, 'from sklearn import metrics\n'), ((28295, 28327), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (28314, 28327), True, 'import numpy as np\n'), ((31111, 31137), 'scipy.optimize.curve_fit', 'curve_fit', (['sigmoid', 'xs', 'ys'], {}), '(sigmoid, xs, ys)\n', (31120, 31137), False, 'from scipy.optimize import curve_fit\n'), ((32189, 32216), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (32214, 32216), False, 'import multiprocessing\n'), ((32614, 32625), 'time.time', 'time.time', ([], {}), '()\n', (32623, 32625), False, 'import time\n'), ((33118, 33135), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (33129, 33135), False, 'from collections import defaultdict\n'), ((35396, 35408), 'collections.Counter', 'Counter', (['row'], {}), '(row)\n', (35403, 35408), False, 'from collections import Counter\n'), ((35927, 35939), 'collections.Counter', 'Counter', (['row'], {}), '(row)\n', (35934, 35939), False, 'from collections import Counter\n'), ((36844, 36876), 'random.sample', 'random.sample', (['seqs', 'sample_size'], {}), '(seqs, sample_size)\n', (36857, 36876), False, 'import random\n'), ((37640, 37672), 'random.sample', 'random.sample', (['seqs', 'sample_size'], {}), '(seqs, sample_size)\n', (37653, 37672), False, 'import random\n'), ((38970, 38981), 'time.time', 'time.time', ([], {}), '()\n', (38979, 38981), False, 'import time\n'), ((3028, 3041), 'numpy.dtype', 'np.dtype', (['"""a"""'], {}), "('a')\n", (3036, 3041), True, 'import numpy as np\n'), ((11806, 11817), 'time.time', 'time.time', ([], {}), '()\n', (11815, 11817), False, 'import time\n'), ((13156, 13167), 'time.time', 'time.time', ([], {}), '()\n', (13165, 13167), False, 'import time\n'), ((14738, 14749), 'time.time', 'time.time', ([], {}), '()\n', (14747, 14749), False, 'import time\n'), ((17791, 17802), 'time.time', 'time.time', ([], {}), '()\n', (17800, 17802), False, 'import time\n'), ((19121, 19132), 'time.time', 'time.time', ([], {}), '()\n', (19130, 19132), False, 'import time\n'), ((25094, 25126), 'corebio.seq.Alphabet', 'Alphabet', (['"""ACDEFGHIKLMNPQRSTVWY"""'], {}), "('ACDEFGHIKLMNPQRSTVWY')\n", (25102, 25126), False, 'from corebio.seq import Alphabet, SeqList\n'), ((25164, 25180), 'corebio.seq.Alphabet', 'Alphabet', (['"""AGCT"""'], {}), "('AGCT')\n", (25172, 25180), False, 'from corebio.seq import Alphabet, SeqList\n'), ((25264, 25278), 'weblogolib.LogoData', 'wbl.LogoData', ([], {}), '()\n', (25276, 25278), True, 'import weblogolib as wbl\n'), ((25516, 25553), 
'weblogolib.png_print_formatter', 'wbl.png_print_formatter', (['data', 'format'], {}), '(data, format)\n', (25539, 25553), True, 'import weblogolib as wbl\n'), ((27135, 27151), 'joblib.load', 'joblib.load', (['obj'], {}), '(obj)\n', (27146, 27151), False, 'import joblib\n'), ((28498, 28545), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_test', 'y_binary'], {}), '(y_test, y_binary)\n', (28527, 28545), False, 'from sklearn import metrics\n'), ((33004, 33015), 'time.time', 'time.time', ([], {}), '()\n', (33013, 33015), False, 'import time\n'), ((34586, 34608), 'eden.sequence.Vectorizer', 'Vectorizer', (['complexity'], {}), '(complexity)\n', (34596, 34608), False, 'from eden.sequence import Vectorizer\n'), ((21246, 21267), 'Bio.Seq.Seq', 'Seq', (['j', 'self.alphabet'], {}), '(j, self.alphabet)\n', (21249, 21267), False, 'from Bio.Seq import Seq\n'), ((25616, 25648), 'weblogolib.jpeg_formatter', 'wbl.jpeg_formatter', (['data', 'format'], {}), '(data, format)\n', (25634, 25648), True, 'import weblogolib as wbl\n'), ((25682, 25713), 'weblogolib.eps_formatter', 'wbl.eps_formatter', (['data', 'format'], {}), '(data, format)\n', (25699, 25713), True, 'import weblogolib as wbl\n'), ((28605, 28642), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (28626, 28642), False, 'from sklearn import metrics\n'), ((35228, 35241), 'numpy.dtype', 'np.dtype', (['"""a"""'], {}), "('a')\n", (35236, 35241), True, 'import numpy as np\n'), ((35731, 35744), 'numpy.dtype', 'np.dtype', (['"""a"""'], {}), "('a')\n", (35739, 35744), True, 'import numpy as np\n'), ((39400, 39411), 'time.time', 'time.time', ([], {}), '()\n', (39409, 39411), False, 'import time\n'), ((39974, 40017), 'eden.util.NeedlemanWunsh.edit_distance', 'edit_distance', (['seq_i', 'seq_j'], {'gap_penalty': '(-1)'}), '(seq_i, seq_j, gap_penalty=-1)\n', (39987, 40017), False, 'from eden.util.NeedlemanWunsh import edit_distance\n')]
|
import yaml
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import torch
import torchvision
import matplotlib.pyplot as plt
import seaborn as sns
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torchvision import datasets,models
import math
import torch.optim as optim
from torch.optim import lr_scheduler
import copy
import time
from PIL import Image
from datetime import datetime
from utils import *
data_dir = '.'
test_path = os.path.join(data_dir, 'test')
sample_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))
sample_sub['path'] = sample_sub['file_name'].apply(lambda x: os.path.join(test_path, x))
# Get configs from config file
with open("config.yaml", 'r') as stream:
    config_dict = yaml.safe_load(stream)
batch_size = config_dict['batch_size']
learning_rate = config_dict['lr']
model_pth = config_dict['model_pth']
train_data = config_dict['train_data']
valid_data = config_dict['valid_data']
test_data = config_dict['test_data']
# Apply transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data_transforms = {
'train': transforms.Compose([
transforms.Resize((230, 230)),
        transforms.RandomRotation(30),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
normalize
]),
'valid': transforms.Compose([
transforms.Resize((400, 400)),
transforms.CenterCrop((224, 224)),
transforms.ToTensor(),
normalize
]),
'test': transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize
]),
}
# Load dataloaders
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'valid']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle= True, num_workers=0)
for x in ['train', 'valid']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Trains Model
def train_model2(model, criterion, optimizer,
num_epochs=3, dataloaders= dataloaders, print_progress=False):
"""
:param model: Model type object
:param criterion: Loss function
:param optimizer: Optimizer
:param num_epochs: Number of epochs
:param dataloaders: Dataloaders, must be a dictionary having train and val as keys
:param print_progress: prints progress if true
:return: trained model object
"""
min_val_loss = np.Inf
best_model_wts = copy.deepcopy(model.state_dict())
since = time.time()
best_epoch = -1
for epoch in range(num_epochs):
valid_loss = 0.0
train_loss = 0.0
model.train()
running_corrects = 0
for iter1, (inputs, labels) in enumerate(dataloaders['train']):
inputs = inputs.to(device)
inputs = inputs.type(torch.float)
labels = labels.to(device)
labels = labels.type(torch.long)
optimizer.zero_grad()
out = model(inputs)
_, preds = torch.max(out, 1)
# out = torch.mul(out,100)
loss = criterion(out, labels)
loss.backward()
optimizer.step()
train_loss += loss.item() * inputs.size(0)
# running_corrects += torch.sum(preds == labels.data)
if print_progress:
print(
f"Epoch: {epoch}\t{100 * (iter1 + 1) / len(dataloaders['train']):.2f}" + '%',
end='\r')
else:
print()
with torch.no_grad():
model.eval()
for iter2, (inputs, labels) in enumerate(dataloaders['valid']):
inputs = inputs.to(device)
inputs = inputs.type(torch.float)
labels = labels.to(device)
labels = labels.type(torch.long)
output1 = model(inputs)
_, preds1 = torch.max(output1, 1)
# output1 = torch.mul(output1,100).to(device)
loss = criterion(output1, labels)
valid_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds1 == labels.data)
print(
f'Epoch: {epoch}\t{100 * (iter2 + 1) / len(dataloaders["valid"]):.2f} %',
end='\r')
        len_train1 = len(dataloaders['train'].dataset)
len_val1 = len(dataloaders['valid'].dataset)
train_loss = train_loss / len_train1
valid_loss = valid_loss / len_val1
if print_progress:
print(
f'\nEpoch: {epoch + 1} \tTraining Loss: {math.sqrt(train_loss):.4f} \tValidation Loss: {math.sqrt(valid_loss):.4f}')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print(f'Accuracy : {100 * running_corrects / len_val1} %')
if valid_loss < min_val_loss:
min_val_loss = valid_loss
best_epoch = epoch
best_model_wts = copy.deepcopy(model.state_dict())
print('Best val Loss: {:4f}'.format(math.sqrt(min_val_loss)))
print(f'Epoch completed: {epoch+1}')
print(f'Best Epoch: {best_epoch+1}')
model.load_state_dict(best_model_wts)
return model
def process_image(img_path):
"""
:param img_path: Path of image to be processed
:returns processed numpy array
Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a Numpy array
"""
img = Image.open(img_path)
# Resize
if img.size[0] > img.size[1]:
img.thumbnail((10000, 256))
else:
img.thumbnail((256, 10000))
# Crop Image
left_margin = (img.width - 224) / 2
bottom_margin = (img.height - 224) / 2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
img = img.crop((left_margin, bottom_margin, right_margin,
top_margin))
# Normalize
img = np.array(img) / 255
mean = np.array([0.485, 0.456, 0.406]) # provided mean
std = np.array([0.229, 0.224, 0.225]) # provided std
img = (img - mean) / std
return img
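# Illustrative follow-up (hypothetical file name): process_image returns an
# HWC numpy array, so it has to be transposed before being fed to the model.
#
#     img = process_image('some_image.jpg')                # HWC float array
#     tensor = torch.from_numpy(img.transpose((2, 0, 1)))  # to CHW
#     tensor = tensor.unsqueeze(0).float().to(device)      # add batch dimension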
# Load test dataset from class defined in utils
test_dataset = TestDataset(test_path, sample_sub, data_transforms['test'])
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Load Class to idx dictionary
class_to_idx = image_datasets['valid'].class_to_idx
idx_to_class = {val: key for key, val in class_to_idx.items()}
def predict(model_path, dataloader, print_progress=False):
"""
:param model_path: Path of Model used for prediction
:param dataloader: Test DataLoader
:param print_progress: Prints progress if True
:return: Prediction(as a list) on test folder defined by config file
"""
model = torch.load(model_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
predictions = {}
with torch.no_grad():
for ii, (images, _, img_names) in enumerate(dataloader, start=1):
if print_progress:
if ii % 5 == 0:
print('Batch {}/{}'.format(ii, len(dataloader)))
images = images.to(device)
logps = model(images)
ps = torch.exp(logps)
# Top indices
_, top_indices = ps.topk(1)
top_indices = top_indices.detach().cpu().numpy().tolist()
# Convert indices to classes
top_classes = [idx_to_class[idx[0]] for idx in top_indices]
# print("Img:" ,img_names)
for i, img_name in enumerate(img_names):
predictions[img_name] = top_classes[i]
print('\nPrediction Generation Completed')
return predictions
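# Example of turning the predictions into a submission file (illustrative;
# assumes the TestDataset yields the same file names that appear in sample_sub):
#
#     preds = predict(model_pth, test_loader, print_progress=True)
#     sample_sub['label'] = sample_sub['file_name'].map(preds)
#     sample_sub[['file_name', 'label']].to_csv('submission.csv', index=False)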
|
[
"yaml.safe_load",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomRotation",
"torch.load",
"torch.exp",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomHorizontalFlip",
"math.sqrt",
"torch.cuda.is_available",
"torch.max",
"torch.sum",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Resize",
"torchvision.transforms.RandomVerticalFlip",
"time.time",
"PIL.Image.open",
"numpy.array",
"torchvision.transforms.ToTensor"
] |
[((596, 626), 'os.path.join', 'os.path.join', (['data_dir', '"""test"""'], {}), "(data_dir, 'test')\n", (608, 626), False, 'import os\n'), ((876, 898), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (890, 898), False, 'import yaml\n'), ((1167, 1242), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1187, 1242), True, 'import torchvision.transforms as transforms\n'), ((6924, 7003), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(test_dataset, batch_size=batch_size, shuffle=False)\n', (6951, 7003), False, 'import torch\n'), ((653, 700), 'os.path.join', 'os.path.join', (['data_dir', '"""sample_submission.csv"""'], {}), "(data_dir, 'sample_submission.csv')\n", (665, 700), False, 'import os\n'), ((2101, 2203), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['image_datasets[x]'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(image_datasets[x], batch_size=batch_size,\n shuffle=True, num_workers=0)\n', (2128, 2203), False, 'import torch\n'), ((3033, 3044), 'time.time', 'time.time', ([], {}), '()\n', (3042, 3044), False, 'import time\n'), ((6118, 6138), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (6128, 6138), False, 'from PIL import Image\n'), ((6617, 6648), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (6625, 6648), True, 'import numpy as np\n'), ((6677, 6708), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (6685, 6708), True, 'import numpy as np\n'), ((7482, 7504), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (7492, 7504), False, 'import torch\n'), ((764, 790), 'os.path.join', 'os.path.join', (['test_path', 'x'], {}), '(test_path, x)\n', (776, 790), False, 'import os\n'), ((1944, 1969), 'os.path.join', 'os.path.join', (['data_dir', 'x'], {}), '(data_dir, x)\n', (1956, 1969), False, 'import os\n'), ((2402, 2427), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2425, 2427), False, 'import torch\n'), ((6585, 6598), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (6593, 6598), True, 'import numpy as np\n'), ((7656, 7671), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7669, 7671), False, 'import torch\n'), ((1308, 1337), 'torchvision.transforms.Resize', 'transforms.Resize', (['(230, 230)'], {}), '((230, 230))\n', (1325, 1337), True, 'import torchvision.transforms as transforms\n'), ((1348, 1377), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (1373, 1377), True, 'import torchvision.transforms as transforms\n'), ((1389, 1415), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224)'], {}), '(224)\n', (1410, 1415), True, 'import torchvision.transforms as transforms\n'), ((1426, 1459), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1457, 1459), True, 'import torchvision.transforms as transforms\n'), ((1470, 1501), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (1499, 1501), True, 'import torchvision.transforms as transforms\n'), ((1512, 1533), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1531, 1533), True, 'import torchvision.transforms as 
transforms\n'), ((1607, 1636), 'torchvision.transforms.Resize', 'transforms.Resize', (['(400, 400)'], {}), '((400, 400))\n', (1624, 1636), True, 'import torchvision.transforms as transforms\n'), ((1647, 1680), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224, 224)'], {}), '((224, 224))\n', (1668, 1680), True, 'import torchvision.transforms as transforms\n'), ((1691, 1712), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1710, 1712), True, 'import torchvision.transforms as transforms\n'), ((1785, 1814), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1802, 1814), True, 'import torchvision.transforms as transforms\n'), ((1825, 1846), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1844, 1846), True, 'import torchvision.transforms as transforms\n'), ((3554, 3571), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (3563, 3571), False, 'import torch\n'), ((5261, 5272), 'time.time', 'time.time', ([], {}), '()\n', (5270, 5272), False, 'import time\n'), ((7542, 7567), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7565, 7567), False, 'import torch\n'), ((7978, 7994), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (7987, 7994), False, 'import torch\n'), ((4081, 4096), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4094, 4096), False, 'import torch\n'), ((4468, 4489), 'torch.max', 'torch.max', (['output1', '(1)'], {}), '(output1, 1)\n', (4477, 4489), False, 'import torch\n'), ((4701, 4733), 'torch.sum', 'torch.sum', (['(preds1 == labels.data)'], {}), '(preds1 == labels.data)\n', (4710, 4733), False, 'import torch\n'), ((5669, 5692), 'math.sqrt', 'math.sqrt', (['min_val_loss'], {}), '(min_val_loss)\n', (5678, 5692), False, 'import math\n'), ((5161, 5182), 'math.sqrt', 'math.sqrt', (['train_loss'], {}), '(train_loss)\n', (5170, 5182), False, 'import math\n'), ((5208, 5229), 'math.sqrt', 'math.sqrt', (['valid_loss'], {}), '(valid_loss)\n', (5217, 5229), False, 'import math\n')]
|
import more_itertools as mit
import numpy as np
# Methods to do dynamic error thresholding on timeseries data
# Implementation inspired by: https://arxiv.org/pdf/1802.04431.pdf
def get_forecast_errors(y_hat,
y_true,
window_size=5,
batch_size=30,
smoothing_percent=0.05,
smoothed=True):
"""
Calculates the forecasting error for two arrays of data. If smoothed errors desired,
runs EWMA.
Args:
y_hat (list): forecasted values. len(y_hat)==len(y_true).
y_true (list): true values. len(y_hat)==len(y_true).
window_size (int):
batch_size (int):
smoothing_percent (float):
smoothed (bool): whether the returned errors should be smoothed with EWMA.
Returns:
(list): error residuals. Smoothed if specified by user.
"""
errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)]
if not smoothed:
return errors
historical_error_window = int(window_size * batch_size * smoothing_percent)
moving_avg = []
for i in range(len(errors)):
left_window = i - historical_error_window
right_window = i + historical_error_window + 1
if left_window < 0:
left_window = 0
if right_window > len(errors):
right_window = len(errors)
moving_avg.append(np.mean(errors[left_window:right_window]))
return moving_avg
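# Note on the defaults of get_forecast_errors: with window_size=5, batch_size=30
# and smoothing_percent=0.05 the half-width is int(5 * 30 * 0.05) = 7, i.e.
# interior points are averaged over a centered window of 2 * 7 + 1 = 15 errors.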
def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):
"""
Extracts anomalies from the errors.
Args:
y_true ():
smoothed_errors ():
window_size (int):
batch_size (int):
error_buffer (int):
Returns:
"""
if len(y_true) <= batch_size * window_size:
raise ValueError("Window size (%s) larger than y_true (len=%s)."
% (batch_size, len(y_true)))
num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)
anomalies_indices = []
for i in range(num_windows + 1):
prev_index = i * batch_size
curr_index = (window_size * batch_size) + (i * batch_size)
        if i == num_windows:
curr_index = len(y_true)
window_smoothed_errors = smoothed_errors[prev_index:curr_index]
window_y_true = y_true[prev_index:curr_index]
epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)
window_anom_indices = get_anomalies(
window_smoothed_errors,
window_y_true,
sd_threshold,
i,
anomalies_indices,
error_buffer
)
# get anomalies from inverse of smoothed errors
# This was done in the implementation of NASA paper but
# wasn't referenced in the paper
# we get the inverse by flipping around the mean
mu = np.mean(window_smoothed_errors)
smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]
epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)
inv_anom_indices = get_anomalies(
smoothed_errors_inv,
window_y_true,
sd_inv,
i,
anomalies_indices,
len(y_true)
)
anomalies_indices = list(set(anomalies_indices + inv_anom_indices))
anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])
# group anomalies
anomalies_indices = sorted(list(set(anomalies_indices)))
anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]
anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]
# generate "scores" for anomalies based on the max distance from epsilon for each sequence
anomalies_scores = []
for e_seq in anomaly_sequences:
denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)
score = max([
abs(smoothed_errors[x] - epsilon) / denominator
for x in range(e_seq[0], e_seq[1])
])
anomalies_scores.append(score)
return anomaly_sequences, anomalies_scores
def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0):
"""Helper method for `extract_anomalies` method.
Calculates the epsilon (threshold) for anomalies.
"""
mu = np.mean(smoothed_errors)
sigma = np.std(smoothed_errors)
max_epsilon = 0
sd_threshold = sd_limit
    # The threshold is determined dynamically by testing multiple Zs.
# z is drawn from an ordered set of positive values representing the
# number of standard deviations above mean(smoothed_errors)
# here we iterate in increments of 0.5 on the range that the NASA paper found to be good
for z in np.arange(2.5, sd_limit, 0.5):
epsilon = mu + (sigma * z)
below_epsilon, below_indices, above_epsilon = [], [], []
for i in range(len(smoothed_errors)):
e = smoothed_errors[i]
if e < epsilon:
# save to compute delta mean and delta std
# these are important for epsilon calculation
below_epsilon.append(e)
below_indices.append(i)
if e > epsilon:
# above_epsilon values are anomalies
for j in range(0, error_buffer):
if (i + j) not in above_epsilon and (i + j) < len(smoothed_errors):
above_epsilon.append(i + j)
if (i - j) not in above_epsilon and (i - j) >= 0:
above_epsilon.append(i - j)
if len(above_epsilon) == 0:
continue
# generate sequences
above_epsilon = sorted(list(set(above_epsilon)))
groups = [list(group) for group in mit.consecutive_groups(above_epsilon)]
above_sequences = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]
mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu
sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma
epsilon = (mean_perc_decrease + sd_perc_decrease) /\
(len(above_sequences)**2 + len(above_epsilon))
# update the largest epsilon we've seen so far
if epsilon > max_epsilon:
sd_threshold = z
max_epsilon = epsilon
# sd_threshold can be multiplied by sigma to get epsilon
return max_epsilon, sd_threshold
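# Summary of compute_threshold: every candidate z defines epsilon = mu + z * sigma;
# z is then scored with (delta_mu / mu + delta_sigma / sigma) / (|E_seq|**2 + |e_a|),
# where delta_mu and delta_sigma are the drops in mean and std obtained by removing
# the errors above epsilon, E_seq are the resulting anomalous sequences and e_a the
# anomalous points. The z with the highest score is returned as sd_threshold,
# together with that best score (max_epsilon).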
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
"""
Helper method to get anomalies.
"""
mu = np.mean(smoothed_errors)
sigma = np.std(smoothed_errors)
epsilon = mu + (z * sigma)
# compare to epsilon
errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
smoothed_errors,
epsilon,
y_true,
error_buffer,
window,
all_anomalies
)
if len(errors_seq) > 0:
anomaly_indices = prune_anomalies(
errors_seq,
smoothed_errors,
max_error_below_e,
anomaly_indices
)
return anomaly_indices
def group_consecutive_anomalies(smoothed_errors,
epsilon,
y_true,
error_buffer,
window,
all_anomalies,
batch_size=30):
upper_percentile, lower_percentile = np.percentile(y_true, [95, 5])
accepted_range = upper_percentile - lower_percentile
minimum_index = 100 # have a cutoff value for anomalies until model is trained enough
anomaly_indices = []
max_error_below_e = 0
for i in range(len(smoothed_errors)):
if smoothed_errors[i] <= epsilon or smoothed_errors[i] <= 0.05 * accepted_range:
# not an anomaly
continue
for j in range(error_buffer):
if (i + j) < len(smoothed_errors) and (i + j) not in anomaly_indices:
if (i + j) > minimum_index:
anomaly_indices.append(i + j)
if (i - j) < len(smoothed_errors) and (i - j) not in anomaly_indices:
if (i - j) > minimum_index:
anomaly_indices.append(i - j)
# get all the errors that are below epsilon and which
# weren't identified as anomalies to process them
for i in range(len(smoothed_errors)):
adjusted_index = i + (window - 1) * batch_size
if smoothed_errors[i] > max_error_below_e and adjusted_index not in all_anomalies:
if i not in anomaly_indices:
max_error_below_e = smoothed_errors[i]
# group anomalies into continuous sequences
anomaly_indices = sorted(list(set(anomaly_indices)))
groups = [list(group) for group in mit.consecutive_groups(anomaly_indices)]
e_seq = [(g[0], g[-1]) for g in groups if g[0] != g[-1]]
return e_seq, anomaly_indices, max_error_below_e
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
""" Helper method that removes anomalies which don't meet
a minimum separation from next anomaly.
"""
# min accepted perc decrease btwn max errors in anomalous sequences
MIN_PERCENT_DECREASE = 0.05
e_seq_max, smoothed_errors_max = [], []
for error_seq in e_seq:
if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
e_seq_max.append(max(sliced_errors))
smoothed_errors_max.append(max(sliced_errors))
smoothed_errors_max.sort(reverse=True)
if max_error_below_e > 0:
smoothed_errors_max.append(max_error_below_e)
indices_remove = []
for i in range(len(smoothed_errors_max)):
if i < len(smoothed_errors_max) - 1:
delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
perc_change = delta / smoothed_errors_max[i]
if perc_change < MIN_PERCENT_DECREASE:
indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))
for index in sorted(indices_remove, reverse=True):
del e_seq[index]
pruned_indices = []
for i in anomaly_indices:
for error_seq in e_seq:
if i >= error_seq[0] and i <= error_seq[1]:
pruned_indices.append(i)
return pruned_indices
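if __name__ == "__main__":
    # Minimal smoke test on synthetic data (illustrative only; all values below
    # are made-up assumptions, not part of the original module).
    rng = np.random.RandomState(0)
    y_hat = np.sin(np.linspace(0, 20, 400))
    y_true = y_hat + rng.normal(0, 0.05, 400)
    y_true[250:255] += 3.0  # inject an anomalous burst
    smoothed = get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30)
    sequences, scores = extract_anomalies(
        y_true, smoothed, window_size=5, batch_size=30, error_buffer=5)
    print("anomalous sequences:", sequences)
    print("scores:", scores)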
|
[
"numpy.std",
"more_itertools.consecutive_groups",
"numpy.percentile",
"numpy.mean",
"numpy.arange"
] |
[((4460, 4484), 'numpy.mean', 'np.mean', (['smoothed_errors'], {}), '(smoothed_errors)\n', (4467, 4484), True, 'import numpy as np\n'), ((4497, 4520), 'numpy.std', 'np.std', (['smoothed_errors'], {}), '(smoothed_errors)\n', (4503, 4520), True, 'import numpy as np\n'), ((4884, 4913), 'numpy.arange', 'np.arange', (['(2.5)', 'sd_limit', '(0.5)'], {}), '(2.5, sd_limit, 0.5)\n', (4893, 4913), True, 'import numpy as np\n'), ((6688, 6712), 'numpy.mean', 'np.mean', (['smoothed_errors'], {}), '(smoothed_errors)\n', (6695, 6712), True, 'import numpy as np\n'), ((6725, 6748), 'numpy.std', 'np.std', (['smoothed_errors'], {}), '(smoothed_errors)\n', (6731, 6748), True, 'import numpy as np\n'), ((7589, 7619), 'numpy.percentile', 'np.percentile', (['y_true', '[95, 5]'], {}), '(y_true, [95, 5])\n', (7602, 7619), True, 'import numpy as np\n'), ((2982, 3013), 'numpy.mean', 'np.mean', (['window_smoothed_errors'], {}), '(window_smoothed_errors)\n', (2989, 3013), True, 'import numpy as np\n'), ((1428, 1469), 'numpy.mean', 'np.mean', (['errors[left_window:right_window]'], {}), '(errors[left_window:right_window])\n', (1435, 1469), True, 'import numpy as np\n'), ((3676, 3717), 'more_itertools.consecutive_groups', 'mit.consecutive_groups', (['anomalies_indices'], {}), '(anomalies_indices)\n', (3698, 3717), True, 'import more_itertools as mit\n'), ((3986, 4010), 'numpy.mean', 'np.mean', (['smoothed_errors'], {}), '(smoothed_errors)\n', (3993, 4010), True, 'import numpy as np\n'), ((4013, 4036), 'numpy.std', 'np.std', (['smoothed_errors'], {}), '(smoothed_errors)\n', (4019, 4036), True, 'import numpy as np\n'), ((8938, 8977), 'more_itertools.consecutive_groups', 'mit.consecutive_groups', (['anomaly_indices'], {}), '(anomaly_indices)\n', (8960, 8977), True, 'import more_itertools as mit\n'), ((5908, 5945), 'more_itertools.consecutive_groups', 'mit.consecutive_groups', (['above_epsilon'], {}), '(above_epsilon)\n', (5930, 5945), True, 'import more_itertools as mit\n'), ((6062, 6084), 'numpy.mean', 'np.mean', (['below_epsilon'], {}), '(below_epsilon)\n', (6069, 6084), True, 'import numpy as np\n'), ((6127, 6148), 'numpy.std', 'np.std', (['below_epsilon'], {}), '(below_epsilon)\n', (6133, 6148), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 21:27:18 2019
@author: biyef
"""
from PIL import Image, ImageFilter
import tensorflow as tf
import matplotlib.pyplot as plt
import mnist_lenet5_backward
import mnist_lenet5_forward
import numpy as np
def imageprepare():
im = Image.open('D:/workspace/machine-learning/mnist/img/origin-9.png')
plt.imshow(im)
plt.show()
#print(type(im.getdata()))
tv = list(im.getdata())
tva = [(255-x)*1.0/255.0 for x in tv]
#return np.asarray(im)
return tva
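# Note (assumption): the np.reshape call below expects len(result) to equal
# IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS, so the input PNG must already be a
# single-channel image of exactly IMAGE_SIZE x IMAGE_SIZE pixels;
# imageprepare() does not resize it.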
result=imageprepare()
#x = tf.placeholder(tf.float32, [None, 784])
#x = result
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32,[1,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.NUM_CHANNELS])
#x = tf.placeholder(tf.float32, [None, 784])
#ipt = imageprepare()
#y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])
#y = mnist_lenet5_forward.forward(x,False,None)
# x = tf.placeholder(tf.float32,[
# [ipt],
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.NUM_CHANNELS])
# y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])
# y = mnist_lenet5_forward.forward(x,False,None)
#
# ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)
# ema_restore = ema.variables_to_restore()
# saver = tf.train.Saver(ema_restore)
#
# correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#image = tf.image.decode_png('D:/workspace/machine-learning/mnist/img/origin-2.png')
# image = tf.cast(image, tf.float32)
y_conv = mnist_lenet5_forward.forward(x,False,None)
#eva = mnist_lenet5_forward.forward([image],False,None)
#prediction = tf.argmax(y,1)
saver = tf.train.Saver()
with tf.Session(graph=g) as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
saver.restore(sess, ckpt.model_checkpoint_path)
reshaped_xs = np.reshape([result],(
1,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.NUM_CHANNELS))
# reshaped_x = np.reshape([ipt],(
# [ipt],
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.IMAGE_SIZE,
# mnist_lenet5_forward.NUM_CHANNELS))
# accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:[2]})
prediction=tf.argmax(y_conv,1)
predint=prediction.eval(feed_dict={x: reshaped_xs}, session=sess)
print('recognize result:')
print(predint[0])
|
[
"matplotlib.pyplot.show",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.imshow",
"tensorflow.argmax",
"tensorflow.Session",
"PIL.Image.open",
"mnist_lenet5_forward.forward",
"tensorflow.placeholder",
"numpy.reshape",
"tensorflow.Graph",
"tensorflow.train.get_checkpoint_state"
] |
[((282, 348), 'PIL.Image.open', 'Image.open', (['"""D:/workspace/machine-learning/mnist/img/origin-9.png"""'], {}), "('D:/workspace/machine-learning/mnist/img/origin-9.png')\n", (292, 348), False, 'from PIL import Image, ImageFilter\n'), ((353, 367), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (363, 367), True, 'import matplotlib.pyplot as plt\n'), ((372, 382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (380, 382), True, 'import matplotlib.pyplot as plt\n'), ((653, 789), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE,\n mnist_lenet5_forward.NUM_CHANNELS]'], {}), '(tf.float32, [1, mnist_lenet5_forward.IMAGE_SIZE,\n mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])\n', (667, 789), True, 'import tensorflow as tf\n'), ((1805, 1849), 'mnist_lenet5_forward.forward', 'mnist_lenet5_forward.forward', (['x', '(False)', 'None'], {}), '(x, False, None)\n', (1833, 1849), False, 'import mnist_lenet5_forward\n'), ((1954, 1970), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1968, 1970), True, 'import tensorflow as tf\n'), ((1981, 2000), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (1991, 2000), True, 'import tensorflow as tf\n'), ((2028, 2061), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2059, 2061), True, 'import tensorflow as tf\n'), ((2104, 2172), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['mnist_lenet5_backward.MODEL_SAVE_PATH'], {}), '(mnist_lenet5_backward.MODEL_SAVE_PATH)\n', (2133, 2172), True, 'import tensorflow as tf\n'), ((2251, 2381), 'numpy.reshape', 'np.reshape', (['[result]', '(1, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE,\n mnist_lenet5_forward.NUM_CHANNELS)'], {}), '([result], (1, mnist_lenet5_forward.IMAGE_SIZE,\n mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))\n', (2261, 2381), True, 'import numpy as np\n'), ((2748, 2768), 'tensorflow.argmax', 'tf.argmax', (['y_conv', '(1)'], {}), '(y_conv, 1)\n', (2757, 2768), True, 'import tensorflow as tf\n'), ((614, 624), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (622, 624), True, 'import tensorflow as tf\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import jsonlines
import torch
import random
import numpy as np
import _pickle as cPickle
class Flickr30kRetrievalDatabase(torch.utils.data.Dataset):
def __init__(self, imdb_path, dataset_type, test_id_file_path, hard_neg_file_path):
super().__init__()
self._dataset_type = dataset_type
self._load_annotations(imdb_path, test_id_file_path, hard_neg_file_path)
self._metadata = {}
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, x):
self._metadata = x
def _load_annotations(self, imdb_path, test_id_path, hard_neg_file_path):
if self._dataset_type != "train":
self.imgs = []
with jsonlines.open(imdb_path) as reader:
# Build an index which maps image id with a list of caption annotations.
entries = []
imgid2entry = {}
count = 0
remove_ids = []
if test_id_path:
remove_ids = np.load(test_id_path)
remove_ids = [int(x) for x in remove_ids]
for annotation in reader:
image_id = int(annotation["img_path"].split(".")[0])
if self._dataset_type != "train":
self.imgs.append(image_id)
if self._dataset_type == "train" and int(image_id) in remove_ids:
continue
imgid2entry[image_id] = []
for sentences in annotation["sentences"]:
entries.append({"caption": sentences, "image_id": image_id})
imgid2entry[image_id].append(count)
count += 1
self._entries = entries
self.imgid2entry = imgid2entry
self.image_id_list = [*self.imgid2entry]
if self._dataset_type == "train":
with open(hard_neg_file_path, "rb") as f:
image_info = cPickle.load(f)
for key, value in image_info.items():
setattr(self, key, value)
self.train_imgId2pool = {
imageId: i for i, imageId in enumerate(self.train_image_list)
}
self.db_size = len(self._entries)
def __len__(self):
return self.db_size
def __getitem__(self, idx):
entry = self._entries[idx]
if self._dataset_type != "train":
return entry, self.imgs
image_id = entry["image_id"]
while True:
# sample a random image:
img_id2 = random.choice(self.image_id_list)
if img_id2 != image_id:
break
entry2 = self._entries[random.choice(self.imgid2entry[img_id2])]
# random image wrong
while True:
# sample a random image:
img_id3 = random.choice(self.image_id_list)
if img_id3 != image_id:
break
entry3 = self._entries[self.imgid2entry[img_id3][0]]
if self._dataset_type == "train":
# random hard caption.
rand_img_id_pool = self.train_hard_pool[self.train_imgId2pool[image_id]]
pool_img_idx = int(
rand_img_id_pool[np.random.randint(1, len(rand_img_id_pool))]
)
img_id4 = self.train_image_list[pool_img_idx]
else:
while True:
# sample a random image:
img_id4 = random.choice(self.image_id_list)
if img_id4 != image_id:
break
entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]
return [entry, entry2, entry3, entry4]
|
[
"_pickle.load",
"numpy.load",
"jsonlines.open",
"random.choice"
] |
[((902, 927), 'jsonlines.open', 'jsonlines.open', (['imdb_path'], {}), '(imdb_path)\n', (916, 927), False, 'import jsonlines\n'), ((2703, 2736), 'random.choice', 'random.choice', (['self.image_id_list'], {}), '(self.image_id_list)\n', (2716, 2736), False, 'import random\n'), ((2827, 2867), 'random.choice', 'random.choice', (['self.imgid2entry[img_id2]'], {}), '(self.imgid2entry[img_id2])\n', (2840, 2867), False, 'import random\n'), ((2978, 3011), 'random.choice', 'random.choice', (['self.image_id_list'], {}), '(self.image_id_list)\n', (2991, 3011), False, 'import random\n'), ((3714, 3754), 'random.choice', 'random.choice', (['self.imgid2entry[img_id4]'], {}), '(self.imgid2entry[img_id4])\n', (3727, 3754), False, 'import random\n'), ((1189, 1210), 'numpy.load', 'np.load', (['test_id_path'], {}), '(test_id_path)\n', (1196, 1210), True, 'import numpy as np\n'), ((2104, 2119), '_pickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (2116, 2119), True, 'import _pickle as cPickle\n'), ((3582, 3615), 'random.choice', 'random.choice', (['self.image_id_list'], {}), '(self.image_id_list)\n', (3595, 3615), False, 'import random\n')]
|
""" Script for generating a reversed dictionary """
import argparse
import numpy as np
import sys
def parse_arguments(args_to_parse):
description = "Load a *.npy archive of a dictionary and swap (reverse) the dictionary keys and values around"
parser = argparse.ArgumentParser(description=description)
general = parser.add_argument_group('General options')
general.add_argument(
'-i', '--input-file', type=str, required=True,
help="The file path to the word vector dictionary into *.npy format"
)
general.add_argument(
'-o', '--output-file', type=str, required=True,
help="The target file to save the reversed dictionary"
)
args = parser.parse_args(args_to_parse)
return args
def main(args):
wordvec = np.load(args.input_file).item()
reversed_wordvec = {str(v): k for k, v in wordvec.items()}
np.save(args.output_file, reversed_wordvec)
if __name__=='__main__':
args = parse_arguments(sys.argv[1:])
main(args)
|
[
"numpy.load",
"numpy.save",
"argparse.ArgumentParser"
] |
[((264, 312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (287, 312), False, 'import argparse\n'), ((877, 920), 'numpy.save', 'np.save', (['args.output_file', 'reversed_wordvec'], {}), '(args.output_file, reversed_wordvec)\n', (884, 920), True, 'import numpy as np\n'), ((778, 802), 'numpy.load', 'np.load', (['args.input_file'], {}), '(args.input_file)\n', (785, 802), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import glob
import sys
sys.path.append("../")
import calipy
Rt_path = "./CameraData/Rt.json"
TVRt_path = "./Cameradata/TVRt.json"
Rt_back_to_front = calipy.Transform(Rt_path).inv()
Rt_TV_to_back = calipy.Transform(TVRt_path)
Rt_TV_to_front = Rt_back_to_front.dot(Rt_TV_to_back)
#origin = calipy.Transform()
#ren = calipy.vtkRenderer()
#ren.addCamera("front_cam", Rt_TV_to_front.inv().R, Rt_TV_to_front.inv().T, cs=0.3)
#ren.addCamera("back_cam", Rt_TV_to_back.inv().R, Rt_TV_to_back.inv().T, cs=0.5)
#TV_width = 1.70
#TV_height = 0.95
#objectPoints = np.array( [ [0,0,0],
# [TV_width, 0, 0],
# [0, TV_height,0],
# [TV_width, TV_height, 0] ] ).astype(np.float64)
#tvpoints_on_camera = np.transpose(objectPoints)
#ren.addLines("TV", np.transpose(tvpoints_on_camera), [0,1,3,2,0])
##ren.addCamera("TV_origin", TVRt.R, TVRt.T, cs=0.5)
#ren.render()
#exit()
origin = calipy.Transform()
ren = calipy.vtkRenderer()
ren.addCamera("front_cam", cs=0.5)
ren.addCamera("back_cam", Rt_back_to_front.R, Rt_back_to_front.T, cs=0.5)
TV_width = 1.70
TV_height = 0.95
objectPoints = np.array( [ [0,0,0],
[TV_width, 0, 0],
[0, TV_height,0],
[TV_width, TV_height, 0] ] ).astype(np.float64)
tvpoints_on_camera = Rt_TV_to_front.move(np.transpose(objectPoints))
ren.addLines("TV", np.transpose(tvpoints_on_camera), [0,1,3,2,0])
#ren.addCamera("TV_origin", TVRt.R, TVRt.T, cs=0.5)
ren.render()
Rt_back_to_front.saveJson("./CameraData/Rt_back_to_front.json")
Rt_TV_to_front.saveJson("./CameraData/Rt_TV_to_front.json")
|
[
"sys.path.append",
"numpy.transpose",
"calipy.Transform",
"calipy.vtkRenderer",
"numpy.array"
] |
[((53, 75), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (68, 75), False, 'import sys\n'), ((229, 256), 'calipy.Transform', 'calipy.Transform', (['TVRt_path'], {}), '(TVRt_path)\n', (245, 256), False, 'import calipy\n'), ((950, 968), 'calipy.Transform', 'calipy.Transform', ([], {}), '()\n', (966, 968), False, 'import calipy\n'), ((975, 995), 'calipy.vtkRenderer', 'calipy.vtkRenderer', ([], {}), '()\n', (993, 995), False, 'import calipy\n'), ((1353, 1379), 'numpy.transpose', 'np.transpose', (['objectPoints'], {}), '(objectPoints)\n', (1365, 1379), True, 'import numpy as np\n'), ((1400, 1432), 'numpy.transpose', 'np.transpose', (['tvpoints_on_camera'], {}), '(tvpoints_on_camera)\n', (1412, 1432), True, 'import numpy as np\n'), ((181, 206), 'calipy.Transform', 'calipy.Transform', (['Rt_path'], {}), '(Rt_path)\n', (197, 206), False, 'import calipy\n'), ((1155, 1243), 'numpy.array', 'np.array', (['[[0, 0, 0], [TV_width, 0, 0], [0, TV_height, 0], [TV_width, TV_height, 0]]'], {}), '([[0, 0, 0], [TV_width, 0, 0], [0, TV_height, 0], [TV_width,\n TV_height, 0]])\n', (1163, 1243), True, 'import numpy as np\n')]
|
from .generator_traj import generate_traj, EmptyError
from .motion_type import random_rot
from ..features.prePostTools import traj_to_dist
import numpy as np
def generate_n_steps(N, nstep, ndim, sub=False, noise_level=0.25):
add = 0
if ndim == 3:
add = 1
size = nstep
X_train = np.zeros((N, size, (5 + add)))
if sub:
Y_trains = np.zeros((N, size, 10))
Y_train_cat = np.zeros((N, 27))
else:
Y_trains = np.zeros((N, size, 7))
Y_train_cat = np.zeros((N, 12))
Y_train_traj = []
# 12
for i in range(N):
# for i in range(1000):
# if i % 1000 == 0:
# print i
sigma = max(np.random.normal(0.5, 1), 0.05)
step = max(np.random.normal(1, 1), 0.2)
tryagain = True
while tryagain:
try:
clean = 4
if size >= 50:
clean = 8
clean = False
"""
ModelN,Model_num,s,sc,real_traj,norm,Z = generate_traj(size,sub=True,
clean=clean,diff_sigma=2.0,
delta_sigma_directed=1.,ndim=ndim,
anisentropy=0.1,deltav=0.2,rho_fixed=False)
"""
clean = 4
ModelN, Model_num, s, sc, real_traj, norm, Z = generate_traj(size, sub=sub,
clean=clean, diff_sigma=2.0,
delta_sigma_directed=6., ndim=ndim,
anisentropy=0.1, deltav=.4, rho_fixed=False,
random_rotation=False)
mu = 2
Ra0 = [0, 1.]
alpharot = 2 * 3.14 * np.random.random()
dt = real_traj[1:] - real_traj[:-1]
std = np.mean(np.sum(dt**2, axis=1) / 3)**0.5
noise_l = noise_level * np.random.rand()
real_traj += np.random.normal(0, noise_l * std, real_traj.shape)
real_traj = random_rot(real_traj, alpharot, ndim=ndim)
# print real_traj.shape
alligned_traj, normed, alpha, _ = traj_to_dist(real_traj, ndim=ndim)
simple = True
if not simple:
real_traj1 = np.array([Propertie(real_traj[::, 0]).smooth(2),
Propertie(real_traj[::, 1]).smooth(2)])
alligned_traj1, normed1, alpha1, _ = traj_to_dist(real_traj1.T, ndim=ndim)
real_traj2 = np.array([Propertie(real_traj[::, 0]).smooth(5),
Propertie(real_traj[::, 1]).smooth(5)])
alligned_traj2, normed2, alpha2, _ = traj_to_dist(real_traj2.T, ndim=ndim)
normed = np.concatenate((normed[::, :4], normed1[::, :4], normed2), axis=1)
for zero in Z:
normed[zero, ::] = 0
tryagain = False
except:
tryagain = True
Y_train_traj.append(real_traj)
X_train[i] = normed
Y_trains[i][range(size), np.array(sc, dtype=np.int)] = 1
Y_train_cat[i, Model_num] = 1
return X_train, Y_trains, Y_train_cat, Y_train_traj
# print np.sum(np.isnan(X_train))
|
[
"numpy.sum",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.random.normal",
"numpy.random.rand",
"numpy.concatenate"
] |
[((307, 335), 'numpy.zeros', 'np.zeros', (['(N, size, 5 + add)'], {}), '((N, size, 5 + add))\n', (315, 335), True, 'import numpy as np\n'), ((370, 393), 'numpy.zeros', 'np.zeros', (['(N, size, 10)'], {}), '((N, size, 10))\n', (378, 393), True, 'import numpy as np\n'), ((416, 433), 'numpy.zeros', 'np.zeros', (['(N, 27)'], {}), '((N, 27))\n', (424, 433), True, 'import numpy as np\n'), ((463, 485), 'numpy.zeros', 'np.zeros', (['(N, size, 7)'], {}), '((N, size, 7))\n', (471, 485), True, 'import numpy as np\n'), ((508, 525), 'numpy.zeros', 'np.zeros', (['(N, 12)'], {}), '((N, 12))\n', (516, 525), True, 'import numpy as np\n'), ((683, 707), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (699, 707), True, 'import numpy as np\n'), ((734, 756), 'numpy.random.normal', 'np.random.normal', (['(1)', '(1)'], {}), '(1, 1)\n', (750, 756), True, 'import numpy as np\n'), ((2264, 2315), 'numpy.random.normal', 'np.random.normal', (['(0)', '(noise_l * std)', 'real_traj.shape'], {}), '(0, noise_l * std, real_traj.shape)\n', (2280, 2315), True, 'import numpy as np\n'), ((3454, 3480), 'numpy.array', 'np.array', (['sc'], {'dtype': 'np.int'}), '(sc, dtype=np.int)\n', (3462, 3480), True, 'import numpy as np\n'), ((2043, 2061), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2059, 2061), True, 'import numpy as np\n'), ((2218, 2234), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2232, 2234), True, 'import numpy as np\n'), ((3125, 3189), 'numpy.concatenate', 'np.concatenate', (['(normed[:, :4], normed1[:, :4], normed2)'], {'axis': '(1)'}), '((normed[:, :4], normed1[:, :4], normed2), axis=1)\n', (3139, 3189), True, 'import numpy as np\n'), ((2145, 2168), 'numpy.sum', 'np.sum', (['(dt ** 2)'], {'axis': '(1)'}), '(dt ** 2, axis=1)\n', (2151, 2168), True, 'import numpy as np\n')]
|
import numpy as np, networkx as nx, math
from scipy.sparse.csgraph import dijkstra
from scipy.sparse import csr_matrix, identity
def make_Zs(Y,ind1,ind0,pscores1,pscores0,subsample=False):
"""Generates vector of Z_i's, used to construct HT estimator.
Parameters
----------
Y : numpy float array
n-dimensional outcome vector.
ind1 : numpy boolean array
n-dimensional vector of indicators for first exposure mapping.
ind0 : numpy boolean array
n-dimensional vector of indicators for second exposure mapping.
pscores1 : numpy float array
n-dimensional vector of probabilities of first exposure mapping for each unit.
pscores0 : numpy float array
n-dimensional vector of probabilities of second exposure mapping for each unit
subsample : numpy boolean array
When set to an object that's not a numpy array, the function will define subsample to be an n-dimensional array of ones, meaning it is assumed that all n units are included in the population. Otherwise, it must be an boolean array of the same dimension as Z where True components indicate population inclusion.
Returns
-------
n-dimensional numpy float array, where entries corresponding to the True entries of subsample are equal to the desired Z's, and entries corresponding to False subsample entries are set to -1000.
"""
if type(subsample) != np.ndarray: subsample = np.ones(Y.size, dtype=bool)
i1 = ind1[subsample]
i0 = ind0[subsample]
ps1 = pscores1[subsample]
ps0 = pscores0[subsample]
weight1 = i1.copy().astype('float')
weight0 = i0.copy().astype('float')
weight1[weight1 == 1] = i1[weight1 == 1] / ps1[weight1 == 1]
weight0[weight0 == 1] = i0[weight0 == 1] / ps0[weight0 == 1]
Z = np.ones(Y.size) * (-1000) # filler entries that won't be used
Z[subsample] = Y[subsample] * (weight1 - weight0)
return Z
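# A minimal usage sketch for make_Zs; the outcomes, indicators and propensity scores
# below are made up purely for illustration.
def _make_Zs_demo():
    n = 5
    Y = np.arange(n, dtype=float)
    ind1 = np.array([True, False, True, False, True])
    ind0 = ~ind1
    pscores1 = np.full(n, 0.6)
    pscores0 = np.full(n, 0.4)
    # Z_i = Y_i * (1{exposure 1}/p1_i - 1{exposure 0}/p0_i), the HT building block
    return make_Zs(Y, ind1, ind0, pscores1, pscores0)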
def network_SE(Zs, A, subsample=False, K=0, exp_nbhd=True, disp=False, b=-1):
"""Network-dependence robust standard errors.
Returns our standard errors for the sample mean of each array in Zs.
Parameters
----------
Zs : a list of numpy float arrays
Each array is n-dimensional.
A : NetworkX undirected graph
Graph on n nodes. NOTE: Assumes nodes are labeled 0 through n-1, so that the data for node i is given by the ith component of each array in Zs.
subsample : numpy boolean array
When set to an object that's not a numpy array, the function will define subsample to be an n-dimensional array of ones, meaning it is assumed that all n units are included in the population. Otherwise, it must be an boolean array of the same dimension as each array in Zs where True components indicate population inclusion.
K : integer
K used to define the K-neighborhood exposure mapping.
exp_nbhd : boolean
Boolean for whether neighborhood growth is exponential (True) or polynomial (False). Used to determine recommended bandwidth.
b : float
User-specified bandwidth. If a negative value is specified, function will compute our recommended bandwidth choice.
disp : boolean
Boolean for whether to also return more than just the SE (see below).
Returns
-------
SE : float
List of network-dependence robust standard error, one for each array of Zs.
APL : float
Average path length of A.
b : int
Bandwidth.
PSD_failure : list of booleans
True if substitute PSD variance estimator needed to be used for that component of Zs.
"""
if type(Zs) == np.ndarray:
is_list = False
Z_list = [Zs] # handle case where Zs is just an array
else:
is_list = True
Z_list = Zs
if type(subsample) != np.ndarray:
subsample = np.ones(Z_list[0].size, dtype=bool) # handle case where subsample is False
n = subsample.sum()
SEs = []
PSD_failures = []
if b == 0:
for Z in Z_list:
SEs.append(Z[subsample].std() / math.sqrt(subsample.sum())) # iid SE
APL = 0
PSD_failures.append(False)
else:
# compute path distances
G = nx.to_scipy_sparse_matrix(A, nodelist=range(A.number_of_nodes()), format='csr')
dist_matrix = dijkstra(csgraph=G, directed=False, unweighted=True)
Gcc = [A.subgraph(c).copy() for c in sorted(nx.connected_components(A), key=len, reverse=True)]
giant = [i for i in Gcc[0]] # set of nodes in giant component
APL = dist_matrix[np.ix_(giant,giant)].sum() / len(giant) / (len(giant)-1) # average path length
# default bandwidth
if b < 0:
b = round(APL/2) if exp_nbhd else round(APL**(1/3)) # rec bandwidth
b = max(2*K,b)
weights = dist_matrix <= b # weight matrix
for Z in Z_list:
Zc = Z[subsample] - Z[subsample].mean() # demeaned data
# default variance estimator (not guaranteed PSD)
var_est = Zc.dot(weights[np.ix_(subsample,subsample)].dot(Zc[:,None])) / n
# PSD variance estimator from the older draft (Leung, 2019)
if var_est <= 0:
PSD_failures.append(True)
if b < 0: b = round(APL/4) if exp_nbhd else round(APL**(1/3)) # rec bandwidth
b = max(K,b)
b_neighbors = dist_matrix <= b
row_sums = np.squeeze(b_neighbors.dot(np.ones(Z.size)[:,None]))
b_norm = b_neighbors / np.sqrt(row_sums)[:,None]
weights = b_norm.dot(b_norm.T)
var_est = Zc.dot(weights[np.ix_(subsample,subsample)].dot(Zc[:,None])) / n
else:
PSD_failures.append(False)
SEs.append(math.sqrt(var_est / n))
if disp:
if is_list:
return SEs, APL, b, PSD_failures
else:
return SEs[0], APL, b, PSD_failures
else:
if is_list:
return SEs
else:
return SEs[0]
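# A minimal usage sketch for network_SE; the graph and outcomes are synthetic, and the
# call assumes the networkx/scipy versions targeted by the code above.
def _network_SE_demo():
    A = nx.erdos_renyi_graph(n=50, p=0.1, seed=0)   # nodes labeled 0..n-1, as required
    Z = np.random.default_rng(0).normal(size=50)  # made-up outcome vector
    return network_SE(Z, A, K=1)                    # network-dependence robust SE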
|
[
"math.sqrt",
"numpy.ix_",
"numpy.ones",
"networkx.connected_components",
"scipy.sparse.csgraph.dijkstra",
"numpy.sqrt"
] |
[((1433, 1460), 'numpy.ones', 'np.ones', (['Y.size'], {'dtype': 'bool'}), '(Y.size, dtype=bool)\n', (1440, 1460), True, 'import numpy as np, networkx as nx, math\n'), ((1789, 1804), 'numpy.ones', 'np.ones', (['Y.size'], {}), '(Y.size)\n', (1796, 1804), True, 'import numpy as np, networkx as nx, math\n'), ((3827, 3862), 'numpy.ones', 'np.ones', (['Z_list[0].size'], {'dtype': 'bool'}), '(Z_list[0].size, dtype=bool)\n', (3834, 3862), True, 'import numpy as np, networkx as nx, math\n'), ((4299, 4351), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', ([], {'csgraph': 'G', 'directed': '(False)', 'unweighted': '(True)'}), '(csgraph=G, directed=False, unweighted=True)\n', (4307, 4351), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((5764, 5786), 'math.sqrt', 'math.sqrt', (['(var_est / n)'], {}), '(var_est / n)\n', (5773, 5786), False, 'import numpy as np, networkx as nx, math\n'), ((4404, 4430), 'networkx.connected_components', 'nx.connected_components', (['A'], {}), '(A)\n', (4427, 4430), True, 'import numpy as np, networkx as nx, math\n'), ((5515, 5532), 'numpy.sqrt', 'np.sqrt', (['row_sums'], {}), '(row_sums)\n', (5522, 5532), True, 'import numpy as np, networkx as nx, math\n'), ((4552, 4572), 'numpy.ix_', 'np.ix_', (['giant', 'giant'], {}), '(giant, giant)\n', (4558, 4572), True, 'import numpy as np, networkx as nx, math\n'), ((5450, 5465), 'numpy.ones', 'np.ones', (['Z.size'], {}), '(Z.size)\n', (5457, 5465), True, 'import numpy as np, networkx as nx, math\n'), ((5031, 5059), 'numpy.ix_', 'np.ix_', (['subsample', 'subsample'], {}), '(subsample, subsample)\n', (5037, 5059), True, 'import numpy as np, networkx as nx, math\n'), ((5629, 5657), 'numpy.ix_', 'np.ix_', (['subsample', 'subsample'], {}), '(subsample, subsample)\n', (5635, 5657), True, 'import numpy as np, networkx as nx, math\n')]
|
# THE FOLLOWING CODE CAN BE USED IN YOUR SAGEMAKER NOTEBOOK TO TEST AN UPLOADED IMAGE TO YOUR S3 BUCKET AGAINST YOUR MODEL
import os
import urllib.request
import boto3
from IPython.display import Image
import cv2
import json
import numpy as np
# input the S3 bucket you are using for this project and the file path for a folder and file that contains your uploaded test image
test_image_bucket = 'deeplens-sagemaker-socksortingeast'
test_image_name = 'testimages/image0.jpeg'
tmp_file_name = 'tmp-test-image-jpg'
resized_file_name = 'resized-test-image.jpg'
s3 = boto3.client('s3')
with open(tmp_file_name, 'wb') as f:
s3.download_fileobj(test_image_bucket, test_image_name, f)
# width
W = 500
oriimg = cv2.imread(tmp_file_name)
height, width, depth = oriimg.shape
# scale the image
imgScale = W/width
newX,newY = oriimg.shape[1]*imgScale, oriimg.shape[0]*imgScale
newimg = cv2.resize(oriimg, (int(newX),int(newY)))
cv2.imwrite(resized_file_name, newimg)
with open(resized_file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
result = json.loads(ic_classifier.predict(payload, initial_args={'ContentType': 'application/x-image'}))
# find the index of the class that matches the test image with the highest probability
index = np.argmax(result)
# input your own output categories
object_categories = ['BlueStripes', 'DarkGray', 'IronMan']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
print()
print(result)
print(ic._current_job_name)
Image(resized_file_name)
|
[
"boto3.client",
"numpy.argmax",
"cv2.imwrite",
"cv2.imread",
"IPython.display.Image"
] |
[((567, 585), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (579, 585), False, 'import boto3\n'), ((712, 737), 'cv2.imread', 'cv2.imread', (['tmp_file_name'], {}), '(tmp_file_name)\n', (722, 737), False, 'import cv2\n'), ((925, 963), 'cv2.imwrite', 'cv2.imwrite', (['resized_file_name', 'newimg'], {}), '(resized_file_name, newimg)\n', (936, 963), False, 'import cv2\n'), ((1263, 1280), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (1272, 1280), True, 'import numpy as np\n'), ((1520, 1544), 'IPython.display.Image', 'Image', (['resized_file_name'], {}), '(resized_file_name)\n', (1525, 1544), False, 'from IPython.display import Image\n')]
|
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from scripts.visualization import plot_roc_curve
from scripts.visualization import plot_precision_recall_curve
from scripts.visualization import plot_confusion_matrix
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
def get_cut_off_threshold_from_precision_recall(precision:list,
recall:list,
thresholds:list)->int:
try:
# convert to f score
fscore = (2 * precision * recall) / (precision + recall)
# locate the index of the largest f score
ix = np.argmax(fscore)
print('PR-curve threshold=%f, F-Score=%.3f' % (thresholds[ix], fscore[ix]))
return ix
except Exception as error:
raise Exception('Caught this error: ' + repr(error))
def get_cut_off_threshold_through_iteration(pos_probs:list, y_test:list)->float:
"""
    Extracts a cut-off threshold by iterating over candidate values between 0 and 1
    in steps of 0.0001. Returns the value that maximizes the macro F1 score.
"""
try:
# define thresholds
thresholds = np.arange(0, 1, 0.0001)
# evaluate each threshold
scores = [f1_score(y_test, to_labels(pos_probs, t), average='macro') for t in thresholds]
# get best threshold
ix = np.argmax(scores)
print('Threshold=%.3f, Best macro F1-Score=%.5f' % (thresholds[ix], scores[ix]))
return thresholds[ix]
except Exception as error:
raise Exception('Caught this error: ' + repr(error))
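# A minimal usage sketch with synthetic labels and probabilities (illustrative only;
# note the 0.0001 grid above makes this loop over 10,000 candidate thresholds).
def _threshold_demo():
    rng = np.random.RandomState(0)
    y_test = rng.randint(0, 2, size=200)
    pos_probs = np.clip(y_test * 0.6 + rng.rand(200) * 0.5, 0, 1)
    return get_cut_off_threshold_through_iteration(pos_probs, y_test)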
def get_evaluation_report(test_set:list,
prediction_proba:list,
labels:list,
threshold:float = None,
plot:str='precision-recall',
save_path:str = None)->dict:
"""
Args:
test_set:list -> original target values
        prediction_proba:list -> predicted probabilities for the positive class
labels:list -> target label names
threshold:float -> Probability cut off threshold
plot:str -> roc or precision-recall
save_path:str -> save directory
"""
try:
auc_score = 0
if plot=='roc':
fpr, tpr, _ = roc_curve(test_set, prediction_proba)
auc_score = roc_auc_score(test_set, prediction_proba)
plot_roc_curve(auc_score, fpr, tpr)
elif plot=='precision-recall':
precision, recall, thresholds = precision_recall_curve(test_set, prediction_proba)
auc_score = auc(recall, precision)
no_skill = np.sum(test_set==1)/test_set.shape
ix = get_cut_off_threshold_from_precision_recall(precision, recall, thresholds)
best_threshold_pos = (recall[ix], precision[ix])
plot_precision_recall_curve(auc_score,
recall,
precision,
best_threshold_pos,
round(no_skill[0], 2),
save_path)
#threshold = round(thresholds[ix], 3) if not threshold else None
if not threshold:
threshold = get_cut_off_threshold_through_iteration(prediction_proba, test_set)
predictions = prediction_proba>threshold
cr = classification_report(test_set, predictions, target_names=labels)
cm = confusion_matrix(test_set, predictions)
mcc = matthews_corrcoef(test_set, predictions)
print('\n',cr)
print('Matthews correlation coefficient: ', mcc)
plot_confusion_matrix(cm,
labels,
save_path=save_path)
return {'threshold':threshold,
'auc':auc_score,
'mcc':mcc,
'confusion_matrix': cm,
'classification_report':classification_report(test_set,
predictions,
target_names=labels,
output_dict=True)}
except Exception as error:
raise Exception('Caught this error: ' + repr(error))
|
[
"numpy.sum",
"scripts.visualization.plot_roc_curve",
"sklearn.metrics.roc_curve",
"numpy.argmax",
"scripts.visualization.plot_confusion_matrix",
"sklearn.metrics.classification_report",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.auc",
"sklearn.metrics.matthews_corrcoef",
"numpy.arange",
"sklearn.metrics.confusion_matrix"
] |
[((1023, 1040), 'numpy.argmax', 'np.argmax', (['fscore'], {}), '(fscore)\n', (1032, 1040), True, 'import numpy as np\n'), ((1546, 1569), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.0001)'], {}), '(0, 1, 0.0001)\n', (1555, 1569), True, 'import numpy as np\n'), ((1746, 1763), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1755, 1763), True, 'import numpy as np\n'), ((3875, 3940), 'sklearn.metrics.classification_report', 'classification_report', (['test_set', 'predictions'], {'target_names': 'labels'}), '(test_set, predictions, target_names=labels)\n', (3896, 3940), False, 'from sklearn.metrics import classification_report\n'), ((3954, 3993), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_set', 'predictions'], {}), '(test_set, predictions)\n', (3970, 3993), False, 'from sklearn.metrics import confusion_matrix\n'), ((4009, 4049), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['test_set', 'predictions'], {}), '(test_set, predictions)\n', (4026, 4049), False, 'from sklearn.metrics import matthews_corrcoef\n'), ((4149, 4203), 'scripts.visualization.plot_confusion_matrix', 'plot_confusion_matrix', (['cm', 'labels'], {'save_path': 'save_path'}), '(cm, labels, save_path=save_path)\n', (4170, 4203), False, 'from scripts.visualization import plot_confusion_matrix\n'), ((2725, 2762), 'sklearn.metrics.roc_curve', 'roc_curve', (['test_set', 'prediction_proba'], {}), '(test_set, prediction_proba)\n', (2734, 2762), False, 'from sklearn.metrics import roc_curve\n'), ((2787, 2828), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['test_set', 'prediction_proba'], {}), '(test_set, prediction_proba)\n', (2800, 2828), False, 'from sklearn.metrics import roc_auc_score\n'), ((2841, 2876), 'scripts.visualization.plot_roc_curve', 'plot_roc_curve', (['auc_score', 'fpr', 'tpr'], {}), '(auc_score, fpr, tpr)\n', (2855, 2876), False, 'from scripts.visualization import plot_roc_curve\n'), ((4456, 4543), 'sklearn.metrics.classification_report', 'classification_report', (['test_set', 'predictions'], {'target_names': 'labels', 'output_dict': '(True)'}), '(test_set, predictions, target_names=labels,\n output_dict=True)\n', (4477, 4543), False, 'from sklearn.metrics import classification_report\n'), ((2962, 3012), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['test_set', 'prediction_proba'], {}), '(test_set, prediction_proba)\n', (2984, 3012), False, 'from sklearn.metrics import precision_recall_curve\n'), ((3037, 3059), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (3040, 3059), False, 'from sklearn.metrics import auc\n'), ((3083, 3104), 'numpy.sum', 'np.sum', (['(test_set == 1)'], {}), '(test_set == 1)\n', (3089, 3104), True, 'import numpy as np\n')]
|
#####################################################################################################################
#####################################################################################################################
# See how TROPOMI NO2 responds to the Suez Canal blockage
# When downloading the data, look at a larger domain (Suez and its surrounding + Mediterranean Sea)
import os
import glob
import numpy as np
import pandas as pd
from netCDF4 import Dataset
import xarray as xr
'''
Note on this Suez Canal blockage
Blockage period: 23-29 March 2021
Data download period: 5 January - 26 April 2021
Domain (lon_min,lat_min,lon_max,lat_max): -20,5,60,50
Corresponding hour windows for data donwload: [6,7,8,9,10,11,12,13,14]
First test: sample weekly data before, during and after the blockage, get maps and a time series plot
Second test: get daily maps and combine with GeoViews
'''
#####################################################################################################################
# build a function to read oversampled TROPOMI NO2 as pandas dataframes
def read_oversampled_NO2(TROPOMI_oversampled_NO2_output_file):
'''read the output file for oversampled TROPOMI NO2'''
df = pd.read_csv(TROPOMI_oversampled_NO2_output_file,sep="\s+",header=None)
df = df.iloc[:,2:7]
df.columns = ['lat','lon','NO2','Count','NO2_uncertainty']
return df
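# Minimal illustration of the expected input (the two rows below are made up): the
# oversampled output is whitespace-separated and columns 3-7 are assumed to hold
# lat, lon, NO2, Count and NO2_uncertainty, matching the iloc[:,2:7] slice above.
from io import StringIO
_demo_rows = "1 1 30.025 32.575 1.25 12 0.30\n1 2 30.025 32.625 1.10 10 0.28"
print(read_oversampled_NO2(StringIO(_demo_rows)))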
#####################################################################################################################
# the spatial coverage may not be consistent on different days or during different weeks
# read all the data from the weekly results
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
Oversampled_NO2_files = sorted(glob.glob("Oversample_output_Suez_NO2_week*"), key=lambda x: int(x.split("_")[-2]))
print(*Oversampled_NO2_files,sep="\n")
oversampled_data = [read_oversampled_NO2(file) for file in Oversampled_NO2_files]
# use all the data ever sampled to decide the max dimension
lat_min = []
lat_max = []
lon_min = []
lon_max = []
for i in range(len(oversampled_data)):
lat_min.append(oversampled_data[i].lat.min())
lat_max.append(oversampled_data[i].lat.max())
lon_min.append(oversampled_data[i].lon.min())
lon_max.append(oversampled_data[i].lon.max())
lat_min = min(lat_min)
lat_max = max(lat_max)
lon_min = min(lon_min)
lon_max = max(lon_max)
# check the full dimension
print("lat_min:",lat_min)
print("lat_max:",lat_max)
print("lon_min:",lon_min)
print("lon_max:",lon_max)
# With the dimension above and the resolution, we can create a consistent domain ("the full grid")
# so that we can combine the data from different days/weeks together
# first list all the lats and lons: use (min,max+1/2 resolutions, resolution) to keep the max value in Python
# just round the floats created by Python to be safe
# as the "pd.merge" step later will require the values of "keys" to be excatly the same
Res = 0.05
domain_lat = np.arange(lat_min,lat_max+Res/2,Res,dtype=None)
domain_lon = np.arange(lon_min,lon_max+Res/2,Res,dtype=None)
domain_lat = np.round(domain_lat,3)
domain_lon = np.round(domain_lon,3)
# build a function to create a "full grid" by listing the full combinations of lats and lons in the domain
def expand_grid(lat,lon):
'''list all combinations of lats and lons using expand_grid(lat,lon)'''
test = [(A,B) for A in lat for B in lon]
test = np.array(test)
test_lat = test[:,0]
test_lon = test[:,1]
full_grid = pd.DataFrame({'lat': test_lat, 'lon': test_lon})
return full_grid
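# Quick illustration of expand_grid with made-up coordinates: every lat is paired with
# every lon, giving len(lat)*len(lon) rows -- the "full grid" the oversampled data is
# merged onto below.
_demo_grid = expand_grid(np.array([10.0, 10.05]), np.array([30.0, 30.05, 30.1]))
print(_demo_grid.shape)  # (6, 2)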
# create the "full grid"
domain_grid = expand_grid(domain_lat,domain_lon)
print(domain_grid)
################################################################################################
# Now we can read each single dataset and match it with the full grid
# Step 1> select the oversampled data
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
# change input time to read daily data or weekly data
time = 'week_1'
Oversampled_NO2_file = "Oversample_output_Suez_NO2_"+str(time)+"_0.05"
# check the selected data
print(Oversampled_NO2_file)
# Step 2> feed the oversampled data into this data cleaning routine
# read oversampled NO2 data
NO2_data = read_oversampled_NO2(Oversampled_NO2_file)
# combine the data with the full domain grids
NO2_data = pd.merge(domain_grid,NO2_data,how='left', on=['lat','lon'])
NO2_data = NO2_data.sort_values(by=['lat','lon'], ascending=[True, True])
# reshape the variables from 1D in the dataframe to the map dimension
NO2 = NO2_data['NO2'].values.reshape(len(domain_lat),len(domain_lon))
NO2_uncertainty = NO2_data['NO2_uncertainty'].values.reshape(len(domain_lat),len(domain_lon))
Count = NO2_data['Count'].values.reshape(len(domain_lat),len(domain_lon))
# convert to xarray for plotting
NO2_xarray = xr.DataArray(NO2, coords=[('lat', domain_lat),('lon', domain_lon)])
NO2_uncertainty_xarray = xr.DataArray(NO2_uncertainty, coords=[('lat', domain_lat),('lon', domain_lon)])
Count_xarray = xr.DataArray(Count, coords=[('lat', domain_lat),('lon', domain_lon)])
# but it is complicated to save out the results one by one for multiple days or weeks
################################################################################################
################################################################################################
# So here we use the list comprehensions to process multiple files
#################
# weekly data
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
# select the files and sort them numerically
Oversampled_NO2_files_weekly = sorted(glob.glob("Oversample_output_Suez_NO2_week*"), key=lambda x: int(x.split("_")[-2]))
print(*Oversampled_NO2_files_weekly,sep="\n")
# read oversampled data and match with the "full grid"
Oversampled_NO2_week = [read_oversampled_NO2(file) for file in Oversampled_NO2_files_weekly]
Oversampled_NO2_week = [pd.merge(domain_grid,data,how='left', on=['lat','lon']) for data in Oversampled_NO2_week]
Oversampled_NO2_week = [data.sort_values(by=['lat','lon'], ascending=[True, True]) for data in Oversampled_NO2_week]
# convert the data to the xarray format for plotting
NO2_week = [data['NO2'].values.reshape(len(domain_lat),len(domain_lon)) for data in Oversampled_NO2_week]
NO2_week_xr = [xr.DataArray(data, coords=[('lat', domain_lat),('lon', domain_lon)]) for data in NO2_week]
#################
# daily data
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
# select the files and sort them numerically
Oversampled_NO2_files_daily = sorted(glob.glob("Oversample_output_Suez_NO2_day*"), key=lambda x: int(x.split("_")[-2]))
print(*Oversampled_NO2_files_daily,sep="\n")
# read oversampled data and match with the "full grid"
Oversampled_NO2_day = [read_oversampled_NO2(file) for file in Oversampled_NO2_files_daily]
Oversampled_NO2_day = [pd.merge(domain_grid,data,how='left', on=['lat','lon']) for data in Oversampled_NO2_day]
Oversampled_NO2_day = [data.sort_values(by=['lat','lon'], ascending=[True, True]) for data in Oversampled_NO2_day]
# convert the data to the xarray format for plotting
NO2_day = [data['NO2'].values.reshape(len(domain_lat),len(domain_lon)) for data in Oversampled_NO2_day]
NO2_day_xr = [xr.DataArray(data, coords=[('lat', domain_lat),('lon', domain_lon)]) for data in NO2_day]
################################################################################################
# Start making maps to have a quick look at the results
# avoid setting "%matplotlib inline" as it is time consuming when we need to produce many figures
import matplotlib.pyplot as plt
import cartopy.crs as crs
import geopandas as gpd
# read shape file (Global high resolution shoreline database from NOAA: https://www.ngdc.noaa.gov/mgg/shorelines/)
# use "full reolution" here to avoid misrepresentation of land and water
os.chdir("/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/shapefiles/gshhg-shp-2.3.7/GSHHS_shp/f")
world_shore = gpd.read_file("GSHHS_f_L1.shp")
################################################################################################
# build a function to quickly generate maps without a legend to save space on a slide
def quick_plot(input_xr,plot_domain,var_min,var_max,output_figure_name):
'''
    Input an xarray data array, define the map domain, provide the min and max of the values on the map, and provide an output file name.
'''
# set the figure size, the aspect ratio is set to be 2:1 due to the sampling region
fig = plt.figure(figsize=[20,10])
# set the map projection and domain: https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html#cartopy-projection
    ax = plt.axes(projection=crs.PlateCarree())
ax.set_extent(plot_domain)
# plot the value on map
im = input_xr.plot(ax=ax,cmap='jet',vmin=var_min,vmax=var_max)
# add shapefile
    ax.add_geometries(world_shore.geometry, crs=crs.PlateCarree(),edgecolor='black',facecolor='none')
    # remove the colorbar and title
plt.delaxes(fig.axes[1])
ax.set_title('')
# save out
fig.savefig(output_figure_name, dpi=100,bbox_inches='tight')
# close the figure to avoid taking CPU memory
plt.close()
################################################################################################
# build a function to generatet the bar for the figures above
def plot_color_bar(input_xr,plot_domain,label,var_min,var_max,output_figure_name):
'''
Draw the figure in the same way as above, but remove the plot rather than the colorbar.
'''
fig = plt.figure(figsize=[20,10])
cbar_keys = {'shrink': 1, 'pad' : 0.05,'orientation':'horizontal','label':label}
# set the map projection: https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html#cartopy-projection
    ax = plt.axes(projection=crs.PlateCarree())
ax.set_extent(plot_domain)
    # plot the value on map
im = input_xr.plot(ax=ax,cmap='jet',cbar_kwargs=cbar_keys,vmin=var_min,vmax=var_max)
# set color bar label size
plt.rcParams.update({'font.size':25})
ax.xaxis.label.set_size(25)
# remove the plot
plt.delaxes(fig.axes[0])
# save out
fig.savefig(output_figure_name, dpi=100,bbox_inches='tight')
# close the figure to avoid taking CPU memory
plt.close()
################################################################################################
# check again the data for plotting
print("weekly data:",len(NO2_week_xr))
print("daily data:",len(NO2_day_xr))
# generate corresponding output file names
# weekly maps
Suez_weeks = list(range(1,17))
Suez_weeks = [str('Suez_NO2_map_week_') + str(week_number) for week_number in Suez_weeks]
print(*Suez_weeks,sep="\n")
# daily maps
Suez_days = list(range(1,29))
Suez_days = [str('Suez_NO2_map_day_') + str(date_number) for date_number in Suez_days]
print(*Suez_days,sep="\n")
################################################################################################
# output multiple plots together
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures')
# maps during the blockage
# week 12
# day 8-14
# plot weekly data
# plot over the big domain [lon_min,lon_max,lat_min,lat_max]
Suez_domain_big = [-20,60,5,50]
for i in range(len(NO2_week_xr)):
quick_plot(NO2_week_xr[i],Suez_domain_big,0,2,Suez_weeks[i]+str('_big'))
# plot over the small domain [lon_min,lon_max,lat_min,lat_max]
Suez_domain_small = [26,60,10,35]
for i in range(len(NO2_week_xr)):
quick_plot(NO2_week_xr[i],Suez_domain_small,0,2,Suez_weeks[i]+str('_small'))
# generate the color bar at the end
plot_color_bar(NO2_week_xr[0],Suez_domain_small,'NO$_2$ tropospheric column [$10^{15}$ molec. cm$^{-2}$]',0,2,"Suez_NO2_color_bar")
# plot daily data
# plot over the small domain [lon_min,lon_max,lat_min,lat_max]
Suez_domain_small = [26,60,10,35]
for i in range(len(NO2_day_xr)):
quick_plot(NO2_day_xr[i],Suez_domain_small,0,2,Suez_days[i]+str('_small'))
################################################################################################
################################################################################################
# Use GeoViews to combine the maps together in time series
# load GeoViews package
import geoviews as gv
import geoviews.feature as gf
import cartopy.crs as crs
# it is important to check your GeoViews version, as some commands may not work in other versions
# this script is written under version 1.9.1
print(gv.__version__)
# there are two backends ('bokeh', 'matplotlib') for the GeoViews
# later we will use "bokeh" for interactive plots
################################################################################################
# weekly maps
# list all the weeks
Suez_weeks = ['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16']
print(*Suez_weeks,sep="\n")
# combine the xarray data arrays from weekly results
# make a copy first
weekly_data = NO2_week_xr.copy()
# add the variable name
weekly_data = [data.rename('NO2') for data in weekly_data]
# add a time dimension to the data
for i in range(len(NO2_week_xr)):
NO2_week_xr[i] = NO2_week_xr[i].assign_coords(week=Suez_weeks[i])
NO2_week_xr[i] = NO2_week_xr[i].expand_dims('week')
# combine the data together
NO2_week_xr_combined = xr.concat(NO2_week_xr,'week')
# you can zoom in and change maps, so normally there is no need to make a small map
# but if you have to reduce the file size, you can subset over the small domain
# weekly_data = [data.sel(lat=slice(10,35),lon = slice(26,60)) for data in weekly_data]
# check the results
NO2_week_xr_combined
# output the plots
# first move to the output directory
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures')
# turn on "bokeh" backend to enable interactive map
gv.extension('bokeh')
# extract data from the combined xarray
gv_data = gv.Dataset(NO2_week_xr_combined,['lon','lat','week'],'NO2',crs=crs.PlateCarree())
# use the data to generate the geoviews image
gv_image = gv_data.to(gv.Image)
# decide features of the output figure
gv_image_out = gv_image.opts(cmap='jet', clim=(0,2), colorbar=True, width=800, height=500) * gf.coastline
# save out the interactive map
renderer = gv.renderer('bokeh')
renderer.save(gv_image_out, 'weekly_maps')
################################################################################################
# daily maps
# list all the dates
def list_dates_between(start_date,end_date):
    '''List all the dates between the start date ('yyyymmdd') and the end date ('yyyymmdd')'''
# list all the dates between the start and the end
from datetime import date, timedelta
start_date = date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
end_date = date(int(end_date[0:4]),int(end_date[4:6]),int(end_date[6:8]))
delta = end_date - start_date
sampling_dates = []
for i in range(delta.days + 1):
sampling_dates.append((start_date + timedelta(days=i)).strftime('%Y%m%d'))
# print out all the sampling dates
return sampling_dates
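# Quick check of list_dates_between with the blockage week itself (illustrative):
print(list_dates_between("20210323", "20210329"))  # seven dates, '20210323' ... '20210329'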
# list all the dates
Suez_days = list_dates_between("20210316","20210412")
print("number of days:",len(Suez_days))
print(*Suez_days,sep="\n")
# combine the xarray data arrays from daily results
# make a copy first
daily_data = NO2_day_xr.copy()
# add the variable name
daily_data = [data.rename('NO2') for data in daily_data]
# add a time dimension to the data
for i in range(len(NO2_day_xr)):
NO2_day_xr[i] = NO2_day_xr[i].assign_coords(date=Suez_days[i])
NO2_day_xr[i] = NO2_day_xr[i].expand_dims('date')
# combine the data together
NO2_day_xr_combined = xr.concat(NO2_day_xr,'date')
# check the results
NO2_day_xr_combined
# output the plots
# first move to the output directory
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures')
# turn on "bokeh" backend to enable interactive map
gv.extension('bokeh')
# extract data from the combined xarray
gv_data = gv.Dataset(NO2_day_xr_combined,['lon','lat','date'],'NO2',crs=crs.PlateCarree())
# use the data to generate the geoviews image
gv_image = gv_data.to(gv.Image)
# decide features of the output figure
gv_image_out = gv_image.opts(cmap='jet', clim=(0,2), colorbar=True, width=800, height=500) * gf.coastline
# save out the interactive map
renderer = gv.renderer('bokeh')
renderer.save(gv_image_out, 'daily_maps')
# For now, the default coastline from GeoViews is used
# If you can crop and create your own shapefile, you should be able to use high resolution shorelines from NOAA
# Think about how to do this with geopandas
#####################################################################################################################
#####################################################################################################################
|
[
"pandas.read_csv",
"geoviews.extension",
"matplotlib.pyplot.figure",
"numpy.arange",
"glob.glob",
"geoviews.renderer",
"numpy.round",
"os.chdir",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"pandas.merge",
"matplotlib.pyplot.rcParams.update",
"datetime.timedelta",
"geopandas.read_file",
"xarray.concat",
"matplotlib.pyplot.delaxes",
"numpy.array",
"xarray.DataArray",
"cartopy.crs.PlateCarree"
] |
[((1696, 1819), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output'\n )\n", (1704, 1819), False, 'import os\n'), ((3108, 3162), 'numpy.arange', 'np.arange', (['lat_min', '(lat_max + Res / 2)', 'Res'], {'dtype': 'None'}), '(lat_min, lat_max + Res / 2, Res, dtype=None)\n', (3117, 3162), True, 'import numpy as np\n'), ((3170, 3224), 'numpy.arange', 'np.arange', (['lon_min', '(lon_max + Res / 2)', 'Res'], {'dtype': 'None'}), '(lon_min, lon_max + Res / 2, Res, dtype=None)\n', (3179, 3224), True, 'import numpy as np\n'), ((3235, 3258), 'numpy.round', 'np.round', (['domain_lat', '(3)'], {}), '(domain_lat, 3)\n', (3243, 3258), True, 'import numpy as np\n'), ((3272, 3295), 'numpy.round', 'np.round', (['domain_lon', '(3)'], {}), '(domain_lon, 3)\n', (3280, 3295), True, 'import numpy as np\n'), ((4033, 4156), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output'\n )\n", (4041, 4156), False, 'import os\n'), ((4573, 4635), 'pandas.merge', 'pd.merge', (['domain_grid', 'NO2_data'], {'how': '"""left"""', 'on': "['lat', 'lon']"}), "(domain_grid, NO2_data, how='left', on=['lat', 'lon'])\n", (4581, 4635), True, 'import pandas as pd\n'), ((5072, 5140), 'xarray.DataArray', 'xr.DataArray', (['NO2'], {'coords': "[('lat', domain_lat), ('lon', domain_lon)]"}), "(NO2, coords=[('lat', domain_lat), ('lon', domain_lon)])\n", (5084, 5140), True, 'import xarray as xr\n'), ((5166, 5251), 'xarray.DataArray', 'xr.DataArray', (['NO2_uncertainty'], {'coords': "[('lat', domain_lat), ('lon', domain_lon)]"}), "(NO2_uncertainty, coords=[('lat', domain_lat), ('lon', domain_lon)]\n )\n", (5178, 5251), True, 'import xarray as xr\n'), ((5262, 5332), 'xarray.DataArray', 'xr.DataArray', (['Count'], {'coords': "[('lat', domain_lat), ('lon', domain_lon)]"}), "(Count, coords=[('lat', domain_lat), ('lon', domain_lon)])\n", (5274, 5332), True, 'import xarray as xr\n'), ((5723, 5846), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output'\n )\n", (5731, 5846), False, 'import os\n'), ((6746, 6869), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output'\n )\n", (6754, 6869), False, 'import os\n'), ((8257, 8380), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/shapefiles/gshhg-shp-2.3.7/GSHHS_shp/f"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/shapefiles/gshhg-shp-2.3.7/GSHHS_shp/f'\n )\n", (8265, 8380), False, 'import os\n'), ((8386, 8417), 'geopandas.read_file', 'gpd.read_file', (['"""GSHHS_f_L1.shp"""'], {}), "('GSHHS_f_L1.shp')\n", (8399, 8417), True, 'import geopandas as gpd\n'), ((11533, 11646), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures"""'], {}), "(\n 
'/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures'\n )\n", (11541, 11646), False, 'import os\n'), ((13932, 13962), 'xarray.concat', 'xr.concat', (['NO2_week_xr', '"""week"""'], {}), "(NO2_week_xr, 'week')\n", (13941, 13962), True, 'import xarray as xr\n'), ((14327, 14440), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures'\n )\n", (14335, 14440), False, 'import os\n'), ((14487, 14508), 'geoviews.extension', 'gv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (14499, 14508), True, 'import geoviews as gv\n'), ((14924, 14944), 'geoviews.renderer', 'gv.renderer', (['"""bokeh"""'], {}), "('bokeh')\n", (14935, 14944), True, 'import geoviews as gv\n'), ((16385, 16414), 'xarray.concat', 'xr.concat', (['NO2_day_xr', '"""date"""'], {}), "(NO2_day_xr, 'date')\n", (16394, 16414), True, 'import xarray as xr\n'), ((16521, 16634), 'os.chdir', 'os.chdir', (['"""/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures"""'], {}), "(\n '/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures'\n )\n", (16529, 16634), False, 'import os\n'), ((16681, 16702), 'geoviews.extension', 'gv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (16693, 16702), True, 'import geoviews as gv\n'), ((17117, 17137), 'geoviews.renderer', 'gv.renderer', (['"""bokeh"""'], {}), "('bokeh')\n", (17128, 17137), True, 'import geoviews as gv\n'), ((1262, 1335), 'pandas.read_csv', 'pd.read_csv', (['TROPOMI_oversampled_NO2_output_file'], {'sep': '"""\\\\s+"""', 'header': 'None'}), "(TROPOMI_oversampled_NO2_output_file, sep='\\\\s+', header=None)\n", (1273, 1335), True, 'import pandas as pd\n'), ((1842, 1887), 'glob.glob', 'glob.glob', (['"""Oversample_output_Suez_NO2_week*"""'], {}), "('Oversample_output_Suez_NO2_week*')\n", (1851, 1887), False, 'import glob\n'), ((3567, 3581), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (3575, 3581), True, 'import numpy as np\n'), ((3651, 3699), 'pandas.DataFrame', 'pd.DataFrame', (["{'lat': test_lat, 'lon': test_lon}"], {}), "({'lat': test_lat, 'lon': test_lon})\n", (3663, 3699), True, 'import pandas as pd\n'), ((5924, 5969), 'glob.glob', 'glob.glob', (['"""Oversample_output_Suez_NO2_week*"""'], {}), "('Oversample_output_Suez_NO2_week*')\n", (5933, 5969), False, 'import glob\n'), ((6232, 6290), 'pandas.merge', 'pd.merge', (['domain_grid', 'data'], {'how': '"""left"""', 'on': "['lat', 'lon']"}), "(domain_grid, data, how='left', on=['lat', 'lon'])\n", (6240, 6290), True, 'import pandas as pd\n'), ((6619, 6688), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('lat', domain_lat), ('lon', domain_lon)]"}), "(data, coords=[('lat', domain_lat), ('lon', domain_lon)])\n", (6631, 6688), True, 'import xarray as xr\n'), ((6946, 6990), 'glob.glob', 'glob.glob', (['"""Oversample_output_Suez_NO2_day*"""'], {}), "('Oversample_output_Suez_NO2_day*')\n", (6955, 6990), False, 'import glob\n'), ((7249, 7307), 'pandas.merge', 'pd.merge', (['domain_grid', 'data'], {'how': '"""left"""', 'on': "['lat', 'lon']"}), "(domain_grid, data, how='left', on=['lat', 'lon'])\n", (7257, 7307), True, 'import pandas as pd\n'), ((7630, 7699), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('lat', domain_lat), ('lon', domain_lon)]"}), "(data, coords=[('lat', domain_lat), ('lon', domain_lon)])\n", (7642, 7699), True, 
'import xarray as xr\n'), ((8927, 8955), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 10]'}), '(figsize=[20, 10])\n', (8937, 8955), True, 'import matplotlib.pyplot as plt\n'), ((9449, 9473), 'matplotlib.pyplot.delaxes', 'plt.delaxes', (['fig.axes[1]'], {}), '(fig.axes[1])\n', (9460, 9473), True, 'import matplotlib.pyplot as plt\n'), ((9638, 9649), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9647, 9649), True, 'import matplotlib.pyplot as plt\n'), ((10025, 10053), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 10]'}), '(figsize=[20, 10])\n', (10035, 10053), True, 'import matplotlib.pyplot as plt\n'), ((10509, 10547), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 25}"], {}), "({'font.size': 25})\n", (10528, 10547), True, 'import matplotlib.pyplot as plt\n'), ((10614, 10638), 'matplotlib.pyplot.delaxes', 'plt.delaxes', (['fig.axes[0]'], {}), '(fig.axes[0])\n', (10625, 10638), True, 'import matplotlib.pyplot as plt\n'), ((10789, 10800), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10798, 10800), True, 'import matplotlib.pyplot as plt\n'), ((14627, 14644), 'cartopy.crs.PlateCarree', 'crs.PlateCarree', ([], {}), '()\n', (14642, 14644), True, 'import cartopy.crs as crs\n'), ((16820, 16837), 'cartopy.crs.PlateCarree', 'crs.PlateCarree', ([], {}), '()\n', (16835, 16837), True, 'import cartopy.crs as crs\n'), ((15677, 15694), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (15686, 15694), False, 'from datetime import date, timedelta\n')]
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributions as dists
import numpy as np
import scipy.io
import foolbox
import input_data
import argparse
from tqdm import tqdm
import data_loader
import math
import os
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import CallableModelWrapper
from cleverhans.utils import AccuracyReport
from cleverhans.utils_pytorch import convert_pytorch_model_to_tf
parser = argparse.ArgumentParser()
parser.add_argument('--use_dropout', default=False, action='store_true')
parser.add_argument('--normalize', default=False, action='store_true')
parser.add_argument('--load', default=False, action='store_true')
parser.add_argument('--train_samples', type=int, default=1)
parser.add_argument('--n_samples', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--wd', type=float, default=0)
parser.add_argument('--lam', type=float, default=1e-7)
parser.add_argument('--n_hidden', type=int, default=100)
parser.add_argument('--n_hidden_hypernet', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--n_iter', type=int, default=100)
parser.add_argument('--randseed', type=int, default=9999)
args = parser.parse_args()
np.random.seed(args.randseed)
torch.manual_seed(args.randseed)
name = 'mlcdn'
if args.use_dropout:
name = 'dropout'
os.makedirs('./results/cifar', exist_ok=True)
os.makedirs('./models/cifar', exist_ok=True)
# Load training data
trainset, testset = data_loader.load_dataset('cifar10_pretrained')
class ProbHypernet(nn.Module):
def __init__(self, in_dim, out_dim, h_dim=100):
super(ProbHypernet, self).__init__()
self.in_dim = in_dim + 1
self.out_dim = out_dim
self.h_dim = h_dim
self.M = nn.Parameter(torch.randn(self.in_dim, out_dim))
self.fc_xh = nn.Linear(in_dim, h_dim)
nn.init.uniform_(self.fc_xh.weight, -0.0001, 0.0001)
self.fc_hmu = nn.Linear(h_dim, self.in_dim)
nn.init.uniform_(self.fc_hmu.weight, -0.0001, 0.0001)
self.fc_hlogvar_in = nn.Linear(h_dim, self.in_dim)
nn.init.uniform_(self.fc_hlogvar_in.weight, -0.0001, 0.0001)
self.fc_hlogvar_out = nn.Linear(h_dim, out_dim)
nn.init.uniform_(self.fc_hlogvar_out.weight, -0.0001, 0.0001)
def forward(self, x, output_weight_params=False):
m = x.shape[0]
r, c = self.in_dim, self.out_dim
h = self.fc_xh(x)
h = F.relu(h)
mu_scaling = self.fc_hmu(h)
logvar_r = self.fc_hlogvar_in(h)
logvar_c = self.fc_hlogvar_out(h)
M = self.M
M = mu_scaling.view(m, r, 1) * M # Broadcasted: M is (m, r, c)
var_r = torch.exp(logvar_r)
var_c = torch.exp(logvar_c)
E = torch.randn(m, r, c, device='cuda')
# Reparametrization trick
W = M + torch.sqrt(var_r).view(m, r, 1) * E * torch.sqrt(var_c).view(m, 1, c)
# KL divergence to prior MVN(0, I, I)
D_KL = torch.mean(
1/2 * (torch.sum(var_r, 1)*torch.sum(var_c, 1) \
+ torch.norm(M.view(m, -1), dim=1)**2 \
- r*c - c*torch.sum(logvar_r, 1) - r*torch.sum(logvar_c, 1))
)
x = torch.cat([x, torch.ones(m, 1, device='cuda')], 1)
h = torch.bmm(x.unsqueeze(1), W).squeeze()
if output_weight_params:
return h, D_KL, (M, var_r, var_c)
else:
return h, D_KL
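# A minimal CPU-only sketch (made-up shapes, not part of the training pipeline) of the
# matrix-variate reparametrization used in ProbHypernet.forward:
# W = M + sqrt(var_r) * E * sqrt(var_c), drawn per input row.
def _reparametrize_demo(m=4, r=3, c=2):
    M = torch.randn(m, r, c)            # per-sample mean weights
    var_r = torch.rand(m, r) + 0.1       # row variances
    var_c = torch.rand(m, c) + 0.1       # column variances
    E = torch.randn(m, r, c)            # standard normal noise
    W = M + torch.sqrt(var_r).view(m, r, 1) * E * torch.sqrt(var_c).view(m, 1, c)
    return W.shape                        # torch.Size([4, 3, 2])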
class Model(nn.Module):
def __init__(self, h_dim=100, h_dim_hypernet=50, use_dropout=False):
super(Model, self).__init__()
self.use_dropout = use_dropout
if not self.use_dropout:
self.fc_xh = ProbHypernet(1024, h_dim, h_dim_hypernet)
self.fc_hy = ProbHypernet(h_dim, 10, h_dim_hypernet)
else:
self.fc_xh = nn.Linear(1024, h_dim)
self.fc_hy = nn.Linear(h_dim, 10)
def forward(self, X):
X = X.squeeze()
if not self.use_dropout:
h, D_KL1 = self.fc_xh(X)
h = F.relu(h)
y, D_KL2 = self.fc_hy(h)
return (y, D_KL1+D_KL2) if self.training else y
else:
h = F.relu(self.fc_xh(X))
if self.use_dropout:
h = F.dropout(h, p=0.5, training=True)
y = self.fc_hy(h)
return y
def validate(m=args.batch_size):
model.eval()
val_acc = 0
total = 0
for x, y in testset:
x = x.cuda()
y_i = model.forward(x)
val_acc += np.sum(y_i.argmax(dim=1).cpu().numpy() == y.numpy())
total += x.shape[0]
model.train()
return val_acc/total
""" Training """
S = args.train_samples
m = args.batch_size
lr = args.lr
lam = args.lam
h_dim = args.n_hidden
h_dim_hypernet = args.n_hidden_hypernet
model = Model(h_dim, h_dim_hypernet, args.use_dropout).cuda()
print(f'Parameter count: {np.sum([value.numel() for value in model.parameters()])}')
if args.load:
model.load_state_dict(torch.load(f'models/cifar/model_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.bin'))
else:
opt = optim.Adam(model.parameters(), lr, weight_decay=args.wd)
pbar = tqdm(range(args.n_iter))
for i in pbar:
for x, y in trainset:
x = x.cuda()
y = y.cuda()
if not args.use_dropout:
log_p_y = []
D_KL = 0
for _ in range(S):
y_s, D_KL = model.forward(x)
log_p_y_s = dists.Categorical(logits=y_s).log_prob(y)
log_p_y.append(log_p_y_s)
loss = -torch.mean(torch.logsumexp(torch.stack(log_p_y), 0) - math.log(S))
loss += args.lam*D_KL
else:
out = model.forward(x)
loss = F.cross_entropy(out, y)
loss.backward()
nn.utils.clip_grad_value_(model.parameters(), 5)
opt.step()
opt.zero_grad()
val_acc = validate(m)
pbar.set_description(f'[Loss: {loss.data.item():.3f}; val acc: {val_acc:.3f}]')
# Save model
if not args.load:
torch.save(model.state_dict(), f'models/cifar/model_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.bin')
""" =============================== Validate ======================================= """
def test():
model.eval()
y = []
t = []
for x_test, y_test in testset:
x_test = x_test.cuda()
y_i = model.forward(x_test)
y.append(F.softmax(y_i, dim=1).cpu().data.numpy())
t.append(y_test)
y = np.concatenate(y, 0)
t = np.concatenate(t)
return y, t
y_val = 0
for _ in tqdm(range(args.n_samples)):
y_s, t = test()
y_val += 1/args.n_samples*y_s
# Print accuracy
acc = np.mean(y_val.argmax(1) == t)
print(f'Test accuracy on CIFAR-10: {acc:.3f}')
""" ======================= Adversarial examples experiments ======================= """
model.eval()
input_shape = (None, 3, 32, 32)
trainset, testset = data_loader.load_dataset('cifar10')
pretrained_model = torchvision.models.densenet121(pretrained=True).cuda()
pretrained_model = torch.nn.Sequential(*(list(pretrained_model.children())[:-1]))
pretrained_model.eval()
model = nn.Sequential(pretrained_model, model)
model.eval()
# We use tf for evaluation on adversarial data
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
x_op = tf.placeholder(tf.float32, shape=input_shape)
# Convert pytorch model to a tf_model and wrap it in cleverhans
tf_model_fn = convert_pytorch_model_to_tf(model, out_dims=10)
cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')
adv_accs = []
adv_ents = []
def test_tf(use_adv=True):
preds = []
y_test = []
total = 0
for x, y in testset:
x = x.permute(0, 3, 1, 2)
if use_adv:
pred = sess.run(adv_preds_op, feed_dict={x_op: x})
pred = F.softmax(torch.from_numpy(pred), 1).numpy()
else:
pred = model.forward(x.cuda())
pred = F.softmax(pred, 1).cpu().data.numpy()
preds.append(pred)
y_test.append(y)
total += x.shape[0]
if total >= 1000:
break
preds = np.concatenate(preds, 0)
y_test = np.concatenate(y_test, 0)
return np.nan_to_num(preds), y_test
adv_preds = 0
for _ in tqdm(range(args.n_samples)):
preds, y_test = test_tf(False)
adv_preds += 1/args.n_samples * preds
# Compute acc and entropy
acc = (np.argmax(adv_preds, axis=1) == y_test).mean()
ent = (-adv_preds*np.log(adv_preds+1e-8)).sum(1).mean()
adv_accs.append(acc)
adv_ents.append(ent)
print('Adv accuracy: {:.3f}'.format(acc))
print('Avg entropy: {:.3f}'.format(ent))
for eps in np.arange(0.1, 1.01, 0.1):
# Create an FGSM attack
fgsm_op = FastGradientMethod(cleverhans_model, sess=sess)
fgsm_params = {'eps': eps,
'clip_min': 0.,
'clip_max': 1.}
adv_x_op = fgsm_op.generate(x_op, **fgsm_params)
adv_preds_op = tf_model_fn(adv_x_op)
# Run an evaluation of our model against fgsm
# Use M data
adv_preds = 0
for _ in tqdm(range(args.n_samples)):
preds, y_test = test_tf()
adv_preds += 1/args.n_samples * preds
# Compute acc and entropy
acc = (np.argmax(adv_preds, axis=1) == y_test).mean()
ent = (-adv_preds*np.log(adv_preds+1e-8)).sum(1).mean()
adv_accs.append(acc)
adv_ents.append(ent)
print('Adv accuracy: {:.3f}'.format(acc))
print('Avg entropy: {:.3f}'.format(ent))
sess.close()
# Save data
np.save(f'results/cifar/accs_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy', adv_accs)
np.save(f'results/cifar/ents_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy', adv_ents)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"data_loader.load_dataset",
"numpy.nan_to_num",
"numpy.argmax",
"torch.sqrt",
"torch.nn.init.uniform_",
"cleverhans.attacks.FastGradientMethod",
"torch.nn.functional.dropout",
"torch.randn",
"tensorflow.ConfigProto",
"numpy.arange",
"cleverhans.utils_pytorch.convert_pytorch_model_to_tf",
"torch.ones",
"torch.load",
"tensorflow.placeholder",
"torch.exp",
"torch.nn.Linear",
"torch.nn.functional.relu",
"math.log",
"numpy.save",
"torch.manual_seed",
"tensorflow.Session",
"torch.nn.functional.cross_entropy",
"cleverhans.model.CallableModelWrapper",
"torch.sum",
"numpy.concatenate",
"torch.from_numpy",
"os.makedirs",
"numpy.log",
"torch.nn.Sequential",
"torch.stack",
"torchvision.models.densenet121",
"torch.nn.functional.softmax",
"torch.distributions.Categorical"
] |
[((542, 567), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (565, 567), False, 'import argparse\n'), ((1379, 1408), 'numpy.random.seed', 'np.random.seed', (['args.randseed'], {}), '(args.randseed)\n', (1393, 1408), True, 'import numpy as np\n'), ((1409, 1441), 'torch.manual_seed', 'torch.manual_seed', (['args.randseed'], {}), '(args.randseed)\n', (1426, 1441), False, 'import torch\n'), ((1502, 1547), 'os.makedirs', 'os.makedirs', (['"""./results/cifar"""'], {'exist_ok': '(True)'}), "('./results/cifar', exist_ok=True)\n", (1513, 1547), False, 'import os\n'), ((1548, 1592), 'os.makedirs', 'os.makedirs', (['"""./models/cifar"""'], {'exist_ok': '(True)'}), "('./models/cifar', exist_ok=True)\n", (1559, 1592), False, 'import os\n'), ((1635, 1681), 'data_loader.load_dataset', 'data_loader.load_dataset', (['"""cifar10_pretrained"""'], {}), "('cifar10_pretrained')\n", (1659, 1681), False, 'import data_loader\n'), ((7165, 7200), 'data_loader.load_dataset', 'data_loader.load_dataset', (['"""cifar10"""'], {}), "('cifar10')\n", (7189, 7200), False, 'import data_loader\n'), ((7389, 7427), 'torch.nn.Sequential', 'nn.Sequential', (['pretrained_model', 'model'], {}), '(pretrained_model, model)\n', (7402, 7427), True, 'import torch.nn as nn\n'), ((7499, 7515), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7513, 7515), True, 'import tensorflow as tf\n'), ((7562, 7587), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (7572, 7587), True, 'import tensorflow as tf\n'), ((7595, 7640), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'input_shape'}), '(tf.float32, shape=input_shape)\n', (7609, 7640), True, 'import tensorflow as tf\n'), ((7721, 7768), 'cleverhans.utils_pytorch.convert_pytorch_model_to_tf', 'convert_pytorch_model_to_tf', (['model'], {'out_dims': '(10)'}), '(model, out_dims=10)\n', (7748, 7768), False, 'from cleverhans.utils_pytorch import convert_pytorch_model_to_tf\n'), ((7788, 7844), 'cleverhans.model.CallableModelWrapper', 'CallableModelWrapper', (['tf_model_fn'], {'output_layer': '"""logits"""'}), "(tf_model_fn, output_layer='logits')\n", (7808, 7844), False, 'from cleverhans.model import CallableModelWrapper\n'), ((8923, 8948), 'numpy.arange', 'np.arange', (['(0.1)', '(1.01)', '(0.1)'], {}), '(0.1, 1.01, 0.1)\n', (8932, 8948), True, 'import numpy as np\n'), ((9766, 9890), 'numpy.save', 'np.save', (['f"""results/cifar/accs_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy"""', 'adv_accs'], {}), "(\n f'results/cifar/accs_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy'\n , adv_accs)\n", (9773, 9890), True, 'import numpy as np\n'), ((9881, 10005), 'numpy.save', 'np.save', (['f"""results/cifar/ents_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy"""', 'adv_ents'], {}), "(\n f'results/cifar/ents_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy'\n , adv_ents)\n", (9888, 10005), True, 'import numpy as np\n'), ((6736, 6756), 'numpy.concatenate', 'np.concatenate', (['y', '(0)'], {}), '(y, 0)\n', (6750, 6756), True, 'import numpy as np\n'), ((6765, 6782), 'numpy.concatenate', 'np.concatenate', (['t'], {}), '(t)\n', (6779, 6782), True, 'import numpy as np\n'), ((8409, 8433), 'numpy.concatenate', 'np.concatenate', (['preds', '(0)'], {}), '(preds, 0)\n', (8423, 8433), True, 'import numpy as np\n'), ((8447, 8472), 'numpy.concatenate', 'np.concatenate', (['y_test', '(0)'], {}), '(y_test, 0)\n', (8461, 8472), 
True, 'import numpy as np\n'), ((8993, 9040), 'cleverhans.attacks.FastGradientMethod', 'FastGradientMethod', (['cleverhans_model'], {'sess': 'sess'}), '(cleverhans_model, sess=sess)\n', (9011, 9040), False, 'from cleverhans.attacks import FastGradientMethod\n'), ((1993, 2017), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'h_dim'], {}), '(in_dim, h_dim)\n', (2002, 2017), True, 'import torch.nn as nn\n'), ((2026, 2078), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.fc_xh.weight', '(-0.0001)', '(0.0001)'], {}), '(self.fc_xh.weight, -0.0001, 0.0001)\n', (2042, 2078), True, 'import torch.nn as nn\n'), ((2102, 2131), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'self.in_dim'], {}), '(h_dim, self.in_dim)\n', (2111, 2131), True, 'import torch.nn as nn\n'), ((2140, 2193), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.fc_hmu.weight', '(-0.0001)', '(0.0001)'], {}), '(self.fc_hmu.weight, -0.0001, 0.0001)\n', (2156, 2193), True, 'import torch.nn as nn\n'), ((2224, 2253), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'self.in_dim'], {}), '(h_dim, self.in_dim)\n', (2233, 2253), True, 'import torch.nn as nn\n'), ((2262, 2322), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.fc_hlogvar_in.weight', '(-0.0001)', '(0.0001)'], {}), '(self.fc_hlogvar_in.weight, -0.0001, 0.0001)\n', (2278, 2322), True, 'import torch.nn as nn\n'), ((2354, 2379), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'out_dim'], {}), '(h_dim, out_dim)\n', (2363, 2379), True, 'import torch.nn as nn\n'), ((2388, 2449), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.fc_hlogvar_out.weight', '(-0.0001)', '(0.0001)'], {}), '(self.fc_hlogvar_out.weight, -0.0001, 0.0001)\n', (2404, 2449), True, 'import torch.nn as nn\n'), ((2608, 2617), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (2614, 2617), True, 'import torch.nn.functional as F\n'), ((2846, 2865), 'torch.exp', 'torch.exp', (['logvar_r'], {}), '(logvar_r)\n', (2855, 2865), False, 'import torch\n'), ((2882, 2901), 'torch.exp', 'torch.exp', (['logvar_c'], {}), '(logvar_c)\n', (2891, 2901), False, 'import torch\n'), ((2915, 2950), 'torch.randn', 'torch.randn', (['m', 'r', 'c'], {'device': '"""cuda"""'}), "(m, r, c, device='cuda')\n", (2926, 2950), False, 'import torch\n'), ((5133, 5246), 'torch.load', 'torch.load', (['f"""models/cifar/model_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.bin"""'], {}), "(\n f'models/cifar/model_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.bin'\n )\n", (5143, 5246), False, 'import torch\n'), ((7220, 7267), 'torchvision.models.densenet121', 'torchvision.models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (7250, 7267), False, 'import torchvision\n'), ((8485, 8505), 'numpy.nan_to_num', 'np.nan_to_num', (['preds'], {}), '(preds)\n', (8498, 8505), True, 'import numpy as np\n'), ((1936, 1969), 'torch.randn', 'torch.randn', (['self.in_dim', 'out_dim'], {}), '(self.in_dim, out_dim)\n', (1947, 1969), False, 'import torch\n'), ((3978, 4000), 'torch.nn.Linear', 'nn.Linear', (['(1024)', 'h_dim'], {}), '(1024, h_dim)\n', (3987, 4000), True, 'import torch.nn as nn\n'), ((4026, 4046), 'torch.nn.Linear', 'nn.Linear', (['h_dim', '(10)'], {}), '(h_dim, 10)\n', (4035, 4046), True, 'import torch.nn as nn\n'), ((4185, 4194), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (4191, 4194), True, 'import torch.nn.functional as F\n'), ((8680, 8708), 'numpy.argmax', 'np.argmax', (['adv_preds'], {'axis': '(1)'}), '(adv_preds, axis=1)\n', (8689, 8708), True, 'import numpy as np\n'), 
((3386, 3417), 'torch.ones', 'torch.ones', (['m', '(1)'], {'device': '"""cuda"""'}), "(m, 1, device='cuda')\n", (3396, 3417), False, 'import torch\n'), ((4399, 4433), 'torch.nn.functional.dropout', 'F.dropout', (['h'], {'p': '(0.5)', 'training': '(True)'}), '(h, p=0.5, training=True)\n', (4408, 4433), True, 'import torch.nn.functional as F\n'), ((5954, 5977), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['out', 'y'], {}), '(out, y)\n', (5969, 5977), True, 'import torch.nn.functional as F\n'), ((9489, 9517), 'numpy.argmax', 'np.argmax', (['adv_preds'], {'axis': '(1)'}), '(adv_preds, axis=1)\n', (9498, 9517), True, 'import numpy as np\n'), ((8745, 8770), 'numpy.log', 'np.log', (['(adv_preds + 1e-08)'], {}), '(adv_preds + 1e-08)\n', (8751, 8770), True, 'import numpy as np\n'), ((3040, 3057), 'torch.sqrt', 'torch.sqrt', (['var_c'], {}), '(var_c)\n', (3050, 3057), False, 'import torch\n'), ((3325, 3347), 'torch.sum', 'torch.sum', (['logvar_c', '(1)'], {}), '(logvar_c, 1)\n', (3334, 3347), False, 'import torch\n'), ((8121, 8143), 'torch.from_numpy', 'torch.from_numpy', (['pred'], {}), '(pred)\n', (8137, 8143), False, 'import torch\n'), ((9558, 9583), 'numpy.log', 'np.log', (['(adv_preds + 1e-08)'], {}), '(adv_preds + 1e-08)\n', (9564, 9583), True, 'import numpy as np\n'), ((3002, 3019), 'torch.sqrt', 'torch.sqrt', (['var_r'], {}), '(var_r)\n', (3012, 3019), False, 'import torch\n'), ((3298, 3320), 'torch.sum', 'torch.sum', (['logvar_r', '(1)'], {}), '(logvar_r, 1)\n', (3307, 3320), False, 'import torch\n'), ((5656, 5685), 'torch.distributions.Categorical', 'dists.Categorical', ([], {'logits': 'y_s'}), '(logits=y_s)\n', (5673, 5685), True, 'import torch.distributions as dists\n'), ((5823, 5834), 'math.log', 'math.log', (['S'], {}), '(S)\n', (5831, 5834), False, 'import math\n'), ((5796, 5816), 'torch.stack', 'torch.stack', (['log_p_y'], {}), '(log_p_y)\n', (5807, 5816), False, 'import torch\n'), ((6660, 6681), 'torch.nn.functional.softmax', 'F.softmax', (['y_i'], {'dim': '(1)'}), '(y_i, dim=1)\n', (6669, 6681), True, 'import torch.nn.functional as F\n'), ((8232, 8250), 'torch.nn.functional.softmax', 'F.softmax', (['pred', '(1)'], {}), '(pred, 1)\n', (8241, 8250), True, 'import torch.nn.functional as F\n'), ((3166, 3185), 'torch.sum', 'torch.sum', (['var_r', '(1)'], {}), '(var_r, 1)\n', (3175, 3185), False, 'import torch\n'), ((3186, 3205), 'torch.sum', 'torch.sum', (['var_c', '(1)'], {}), '(var_c, 1)\n', (3195, 3205), False, 'import torch\n')]
|
"""
From http://arxiv.org/pdf/1204.0375.pdf
"""
from numpy import dot, sum, tile, linalg, log, exp, pi
from numpy.linalg import inv
def kf_predict(X, P, A, Q, B, U):
"""
X: The mean state estimate of the previous step (k−1).
P: The state covariance of previous step (k−1).
A: The transition n × n matrix.
Q: The process noise covariance matrix.
B: The input effect matrix.
U: The control input.
"""
X = dot(A, X) + dot(B, U)
P = dot(A, dot(P, A.T)) + Q
return(X,P)
def kf_update(X, P, Y, H, R):
"""
K: the Kalman Gain matrix
IM: the Mean of predictive distribution of Y
IS: the Covariance or predictive mean of Y
LH: the Predictive probability (likelihood) of measurement which is computed using the Python function gauss_pdf.
"""
IM = dot(H, X)
IS = R + dot(H, dot(P, H.T))
K = dot(P, dot(H.T, inv(IS)))
X = X + dot(K, (Y-IM))
P = P - dot(K, dot(IS, K.T))
LH = gauss_pdf(Y, IM, IS)
return (X,P,K,IM,IS,LH)
def gauss_pdf(X, M, S):
    if M.shape[1] == 1:
        DX = X - tile(M, X.shape[1])
        E = 0.5 * sum(DX * (dot(inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(linalg.det(S))
        P = exp(-E)
    elif X.shape[1] == 1:
        DX = tile(X, M.shape[1]) - M
        E = 0.5 * sum(DX * (dot(inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(linalg.det(S))
        P = exp(-E)
    else:
        DX = X - M
        E = 0.5 * dot(DX.T, dot(inv(S), DX))
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(linalg.det(S))
        P = exp(-E)
return (P[0],E[0])
from numpy import *
from numpy.random import randn
from numpy.linalg import inv
#time step of mobile movement
dt = 0.1
# Initialization of state matrices
X = array([[0.0], [0.0], [0.1], [0.1]])
P = diag((0.01, 0.01, 0.01, 0.01))
A = array([[1, 0, dt , 0], [0, 1, 0, dt], [0, 0, 1, 0], [0, 0, 0, 1]])
Q = eye(X.shape[0])
B = eye(X.shape[0])
U = zeros((X.shape[0], 1))
# Measurement matrices
Y = array([[X[0,0] + abs(randn(1)[0])], [X[1,0] + abs(randn(1)[0])]])
H = array([[1, 0, 0, 0], [0, 1, 0, 0]])
R = eye(Y.shape[0])
# Number of iterations in Kalman Filter
N_iter = 50
# Applying the Kalman Filter
for i in arange(0, N_iter):
(X, P) = kf_predict(X, P, A, Q, B, U)
(X, P, K, IM, IS, LH) = kf_update(X, P, Y, H, R)
Y = array([[X[0,0] + abs(0.1 * randn(1)[0])],[X[1, 0] + abs(0.1 * randn(1)[0])]])
|
[
"numpy.dot",
"numpy.linalg.inv"
] |
[((811, 820), 'numpy.dot', 'dot', (['H', 'X'], {}), '(H, X)\n', (814, 820), False, 'from numpy import dot, sum, tile, linalg\n'), ((427, 436), 'numpy.dot', 'dot', (['A', 'X'], {}), '(A, X)\n', (430, 436), False, 'from numpy import dot, sum, tile, linalg\n'), ((439, 448), 'numpy.dot', 'dot', (['B', 'U'], {}), '(B, U)\n', (442, 448), False, 'from numpy import dot, sum, tile, linalg\n'), ((900, 914), 'numpy.dot', 'dot', (['K', '(Y - IM)'], {}), '(K, Y - IM)\n', (903, 914), False, 'from numpy import dot, sum, tile, linalg\n'), ((464, 475), 'numpy.dot', 'dot', (['P', 'A.T'], {}), '(P, A.T)\n', (467, 475), False, 'from numpy import dot, sum, tile, linalg\n'), ((841, 852), 'numpy.dot', 'dot', (['P', 'H.T'], {}), '(P, H.T)\n', (844, 852), False, 'from numpy import dot, sum, tile, linalg\n'), ((878, 885), 'numpy.linalg.inv', 'inv', (['IS'], {}), '(IS)\n', (881, 885), False, 'from numpy.linalg import inv\n'), ((934, 946), 'numpy.dot', 'dot', (['IS', 'K.T'], {}), '(IS, K.T)\n', (937, 946), False, 'from numpy import dot, sum, tile, linalg\n'), ((1128, 1134), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (1131, 1134), False, 'from numpy.linalg import inv\n'), ((1507, 1513), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (1510, 1513), False, 'from numpy.linalg import inv\n'), ((1337, 1343), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (1340, 1343), False, 'from numpy.linalg import inv\n')]
|
import numpy as np
def LoadData(FileName):
'''
Loads hollow data into structured numpy array of floats and returns a tuple
of column headers along with the structured array.
'''
data = np.genfromtxt(FileName, names=True, delimiter=',')
return data.dtype.names, data
def SegmentDataByAspect(FileName):
'''
Loads hollow data into structured numpy array of floats, and splits the
data into separate structured arrays by aspect band and returns a tuple
of column headers along with the structured arrays.
'''
Headers, A = LoadData(FileName)
NE = A[(A['Aspect'] >= 0) & (A['Aspect'] <= 85)]
SE = A[(A['Aspect'] > 85) & (A['Aspect'] <= 165)]
E = A[(A['Aspect'] >= 0) & (A['Aspect'] <= 165)]
W = A[(A['Aspect'] > 165)]
return Headers, NE, SE, E, W
def DataFilter(DataFile, Parameter, Value):
'''
    Split hollows around Value for the given Parameter. Returns Small and
    Large, two lists of IDs corresponding to hollows below Value and at or
    above Value, respectively.
'''
Headers, A = LoadData(DataFile)
Small = A[(A[Parameter] < Value)]['ID']
Large = A[(A[Parameter] >= Value)]['ID']
return Small, Large
def VegDataFilter(DataFile):
'''
    Split hollows by vegetation category. Returns 4 lists of IDs, one for
    each vegetation type (Veg codes 1-4).
'''
Headers, A = LoadData(DataFile)
a = A[(A['Veg'] == 1)]['ID']
b = A[(A['Veg'] == 2)]['ID']
c = A[(A['Veg'] == 3)]['ID']
d = A[(A['Veg'] == 4)]['ID']
return a, b, c, d
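# Hedged usage sketch -- the file name and the 'Area' column below are assumptions,
# not taken from this module:
if __name__ == '__main__':
    headers, data = LoadData('hollows.csv')
    print(headers)
    Small, Large = DataFilter('hollows.csv', 'Area', np.median(data['Area']))
    print(len(Small), 'hollows below the median area,', len(Large), 'at or above it')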
|
[
"numpy.genfromtxt"
] |
[((208, 258), 'numpy.genfromtxt', 'np.genfromtxt', (['FileName'], {'names': '(True)', 'delimiter': '""","""'}), "(FileName, names=True, delimiter=',')\n", (221, 258), True, 'import numpy as np\n')]
|
import os
from os.path import join
import cv2
import pickle
import torch
import numpy as np
import pandas as pd
import torch.utils.data as data
class InteriorNet(data.Dataset):
def __init__(self, root_dir, label_name='_raycastingV2',
pred_dir='pred', method_name='sharpnet_pred',
gt_dir='data', depth_ext='-depth-plane.png', normal_ext='-normal.png', im_ext='-rgb.png',
label_dir='label', label_ext='-order-pix.npy'):
super(InteriorNet, self).__init__()
self.root_dir = root_dir
self.label_name = label_name
self.method_name = method_name
self.im_ext = im_ext
self.gt_dir = gt_dir
self.label_dir = label_dir
self.pred_dir = pred_dir
self.depth_ext = depth_ext
self.normal_ext = normal_ext
self.label_ext = label_ext
self.df = pd.read_csv(join(root_dir, 'InteriorNet.txt'))
def __len__(self):
return len(self.df)
def __getitem__(self, index):
depth_gt, depth_pred, label, normal, img = self._fetch_data(index)
depth_gt = torch.from_numpy(np.ascontiguousarray(depth_gt)).float().unsqueeze(0)
depth_pred = torch.from_numpy(np.ascontiguousarray(depth_pred)).float().unsqueeze(0)
label = torch.from_numpy(np.ascontiguousarray(label)).float().permute(2, 0, 1)
normal = torch.from_numpy(np.ascontiguousarray(normal)).float().permute(2, 0, 1)
img = torch.from_numpy(np.ascontiguousarray(img)).float().permute(2, 0, 1)
return depth_gt, depth_pred, label, normal, img
def _fetch_data(self, index):
# fetch predicted depth map in meters
depth_pred_path = join(self.root_dir, self.pred_dir, self.df.iloc[index]['scene'],
self.method_name, 'data', '{}.pkl'.format(self.df.iloc[index]['image']))
with open(depth_pred_path, 'rb') as f:
depth_pred = pickle.load(f)
# fetch ground truth depth map in meters
depth_gt_path = join(self.root_dir, self.gt_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.depth_ext))
if not os.path.exists(depth_gt_path):
print(depth_gt_path)
depth_gt = cv2.imread(depth_gt_path, -1) / 1000
# fetch normal map in norm-1 vectors
normal_path = join(self.root_dir, self.gt_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.normal_ext))
normal = cv2.imread(normal_path, -1) / (2 ** 16 - 1) * 2 - 1
normal = normal[:, :, ::-1]
# fetch rgb image
image_path = join(self.root_dir, self.gt_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.im_ext))
img = cv2.imread(image_path, -1) / 255
img = img[:, :, ::-1]
# fetch occlusion orientation labels
label_path = join(self.root_dir, self.label_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.label_ext))
label = np.load(label_path)
return depth_gt, depth_pred, label, normal, img
if __name__ == "__main__":
root_dir = '/space_sdd/InteriorNet'
dataset = InteriorNet(root_dir)
print(len(dataset))
from tqdm import tqdm
from torch.utils.data import DataLoader
import sys
test_loader = DataLoader(dataset, batch_size=4, shuffle=False)
for i, data in tqdm(enumerate(test_loader)):
if i == 0:
print(data[0].shape, data[1].shape, data[2].shape, data[3].shape, data[4].shape)
sys.exit()
|
[
"numpy.load",
"torch.utils.data.DataLoader",
"numpy.ascontiguousarray",
"os.path.exists",
"cv2.imread",
"pickle.load",
"os.path.join",
"sys.exit"
] |
[((3710, 3758), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(4)', 'shuffle': '(False)'}), '(dataset, batch_size=4, shuffle=False)\n', (3720, 3758), False, 'from torch.utils.data import DataLoader\n'), ((3400, 3419), 'numpy.load', 'np.load', (['label_path'], {}), '(label_path)\n', (3407, 3419), True, 'import numpy as np\n'), ((892, 925), 'os.path.join', 'join', (['root_dir', '"""InteriorNet.txt"""'], {}), "(root_dir, 'InteriorNet.txt')\n", (896, 925), False, 'from os.path import join\n'), ((1936, 1950), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1947, 1950), False, 'import pickle\n'), ((2259, 2288), 'os.path.exists', 'os.path.exists', (['depth_gt_path'], {}), '(depth_gt_path)\n', (2273, 2288), False, 'import os\n'), ((2342, 2371), 'cv2.imread', 'cv2.imread', (['depth_gt_path', '(-1)'], {}), '(depth_gt_path, -1)\n', (2352, 2371), False, 'import cv2\n'), ((3038, 3064), 'cv2.imread', 'cv2.imread', (['image_path', '(-1)'], {}), '(image_path, -1)\n', (3048, 3064), False, 'import cv2\n'), ((3933, 3943), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3941, 3943), False, 'import sys\n'), ((2679, 2706), 'cv2.imread', 'cv2.imread', (['normal_path', '(-1)'], {}), '(normal_path, -1)\n', (2689, 2706), False, 'import cv2\n'), ((1126, 1156), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['depth_gt'], {}), '(depth_gt)\n', (1146, 1156), True, 'import numpy as np\n'), ((1217, 1249), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['depth_pred'], {}), '(depth_pred)\n', (1237, 1249), True, 'import numpy as np\n'), ((1305, 1332), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['label'], {}), '(label)\n', (1325, 1332), True, 'import numpy as np\n'), ((1393, 1421), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['normal'], {}), '(normal)\n', (1413, 1421), True, 'import numpy as np\n'), ((1479, 1504), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1499, 1504), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 15:47:36 2018
@author: akurnizk
"""
import flopy
import numpy as np
import sys,os
import matplotlib.pyplot as plt
# Location of BitBucket folder containing cgw folder
cgw_code_dir = r'E:\python'
sys.path.insert(0,cgw_code_dir)
from cgw.utils import general_utils as genu
from cgw.utils import feature_utils as shpu
from cgw.utils import raster_utils as rastu
# Assign name and create modflow model object
modelname = 'CheqModel1'
work_dir = r'E:\Herring'
mf = flopy.modflow.Modflow(modelname, exe_name='mf2005',model_ws=work_dir)
swt = flopy.seawat.Seawat(modelname, exe_name='swtv4')
print(swt.namefile)
mean_sea_level = 0.843 # in meters at closest NOAA station
#%%
# Example of making a MODFLOW-like grid from a shapefile
data_dir = r'E:\ArcGIS'
shp_fname = os.path.join(data_dir,'Chequesset_Model_Area_UTM.shp')
cell_spacing = 10. # model grid cell spacing in meters
# Define inputs for shp_to_grid function
shp_to_grid_dict = {'shp':shp_fname,'cell_spacing':cell_spacing}
grid_outputs = shpu.shp_to_grid(**shp_to_grid_dict)
# Pop out all of the outputs into individual variables
[X_nodes,Y_nodes],model_polygon,[out_proj,[xshift,yshift],min_angle] = grid_outputs
grid_transform = [out_proj,[xshift,yshift],min_angle] # make transform list
# Can calculate cell centers (where heads are calculated), in different coordinates
cc,cc_proj,cc_ll = shpu.nodes_to_cc([X_nodes,Y_nodes],grid_transform)
# Use model_polygon to define active cells in the model
ir,ic,_ = shpu.gridpts_in_shp(model_polygon,cc)
active_cells = genu.define_mask(cc,[ir,ic])
"""
Plot active cells
"""
#fig,ax = genu.plt.subplots(1,2)
#genu.quick_plot(active_cells.astype(int),ax=ax[0]) # in row, column space
#ax[0].set_xlabel('column #')
#ax[0].set_ylabel('row #')
#c1=ax[1].pcolormesh(cc[0],cc[1],active_cells.astype(int)) # in model coordinates
#genu.plt.colorbar(c1,ax=ax[1],orientation='horizontal')
#ax[1].set_xlabel('X [m]')
#ax[1].set_ylabel('Y [m]')
#%% Example of loading DEM data for that area
dem_fname = os.path.join(data_dir,'Cheq10mx10m_UTM.tif')
# Experimental part \/
dem_X,dem_Y,dem_da = rastu.load_geotif(dem_fname) # da is an xarray data array
dem_vals = dem_da.values.squeeze()
#dem_X, dem_Y, dem_vals = rastu.read_griddata(dem_fname)
# Know that dem is way higher resolution...can decimate it to save time
decimate_by_ncells = 1 # by every n cells
#dem_X = dem_X[::decimate_by_ncells,::decimate_by_ncells]
#dem_Y = dem_Y[::decimate_by_ncells,::decimate_by_ncells]
#dem_vals = dem_vals[::decimate_by_ncells,::decimate_by_ncells]
# Set no-data value to nan
dem_vals[dem_vals==dem_da.nodatavals[0]] = genu.np.nan
# Transform dem to model coordinates with linear interpolation
trans_dict = {'orig_xy':[dem_X,dem_Y],'orig_val':dem_vals,'active_method':'linear',
'new_xy':cc_proj} # if dem in same projection as model boundary shp
dem_trans = rastu.subsection_griddata(**trans_dict)
dem_trans[dem_trans<-1000] = genu.np.nan
genu.quick_plot(dem_trans)
#%% DEM model inputs
Lx = np.amax(dem_X)-np.amin(dem_X)
Ly = np.amax(dem_Y)-np.amin(dem_Y)
zbot = -100 # if bottom of model is horizontal, approx. bedrock (check Masterson)
nlay = 1 # 1 layer model
nrow, ncol = cc[0].shape # to use when cheq_griddev is implemented
delr = cell_spacing
delc = cell_spacing
delv = (dem_trans - zbot) / nlay
botm = zbot
# Tutorial 1 model domain and grid definition
#Lx = 1000.
#Ly = 1000.
#ztop = 0.
#zbot = -50.
#nlay = 1
#nrow = 10
#ncol = 10
#delr = Lx/ncol
#delc = Ly/nrow
#delv = (ztop - zbot) / nlay
#botm = np.linspace(ztop, zbot, nlay + 1)
#%%
"""
Time Stepping
"""
# Time step parameters
total_length = 10 # days
dt = 6 # stress period time step, hrs
perlen_days = dt/24. # stress period time step, days
nper = int(total_length/perlen_days) # the number of stress periods in the simulation
nstp_default = dt/0.5 # stress period time step divided by step time length (to better interpolate tidal changes, set to 0.5 hrs)
perlen = [perlen_days]*nper # length of a stress period; each item in the matrix is the amount
# of elapsed time since the previous point (need to change the first)
perlen[0] = 1 # set first step as steady state
steady = [False]*nper
steady[0] = True # first step steady state
nstp = [nstp_default]*nper # number of time steps in a stress period
nstp[0] = 1
#Tutorial 2 default time step parameters
#nper = 3
#perlen = [1, 100, 100]
#nstp = [1, 100, 100]
#steady = [True, False, False]
#%% # Create the discretization (DIS) object
dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
top=dem_trans, botm=botm)
# Tutorial 1 DIS object
#dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
#top=dem_vals, botm=botm[1:])
# Tutorial 2 DIS object when transient conditions are implemented
# dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
# top=ztop, botm=botm[1:],
# nper=nper, perlen=perlen, nstp=nstp, steady=steady)
#%% # Variables for the BAS (basic) package
# Added 5/28/19
"""
Active cells and the like are defined with the Basic package (BAS), which is required for every MODFLOW model. It contains the ibound array, which is used to specify which cells are active (value is positive), inactive (value is 0),
It contains the ibound array, which is used to specify which cells are active (value is positive), inactive (value is 0),
or fixed head (value is negative). The numpy package (aliased as np) can be used to quickly initialize the ibound array
with values of 1, and then set the ibound value for the first and last columns to −1. The numpy package (and Python, in general)
uses zero-based indexing and supports negative indexing so that row 1 and column 1, and row 1 and column 201, can be
referenced as [0,0], and [0,−1], respectively. Although this simulation is for steady flow, starting heads still need
to be specified. They are used as the head for fixed-head cells (where ibound is negative), and as a starting point to compute
the saturated thickness for cases of unconfined flow.
ibound = np.ones((1, 201))
ibound[0, 0] = ibound[0, -1] = -1
"""
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
ibound[:,~active_cells] = 0 # far offshore cells are inactive
ibound[0,dem_trans<mean_sea_level] = -1 # fixed head for everything less than msl
ibound[:,np.isnan(dem_trans)] = 0 # nan cells are inactive
genu.quick_plot(ibound) # plots boundary conditions: 1 = active (land above msl), -1 = fixed head (DEM below msl), 0 = inactive (NaN or outside the model area)
strt = np.ones((nlay, nrow, ncol), dtype=np.float32)
active_dem_heights = dem_trans[active_cells & ~np.isnan(dem_trans)]
strt[0, active_cells & ~np.isnan(dem_trans)] = active_dem_heights # start with freshwater at surface elevation
strt[0, dem_trans<mean_sea_level] = mean_sea_level # start with water at sea level
genu.quick_plot(strt) # plots starting condition
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
#%% # added 3/8/19 - creates matrix where hydraulic conductivities (hk = horiz, vk = vert) can be implemented
hk1 = np.ones((nlay,nrow,ncol), np.float)
hk1[:,:,:]=10. # everything set to 10 - use data? calculate?
vka1 = np.ones((nlay,nrow,ncol), np.float)
vka1[:,:,:]=10. # everything set to 10.
# Add LPF package to the MODFLOW model
lpf = flopy.modflow.ModflowLpf(mf, hk=hk1, vka=vka1, ipakcb=53)
#%%
"""
Transient General-Head Boundary Package
First, we will create the GHB object, which is of the following type:
flopy.modflow.ModflowGhb.
The key to creating Flopy transient boundary packages is recognizing that the
boundary data is stored in a dictionary with key values equal to the
zero-based stress period number and values equal to the boundary conditions
for that stress period. For a GHB the values can be a two-dimensional nested
list of [layer, row, column, stage, conductance]:
Datums for 8447435, Chatham, Lydia Cove MA
https://tidesandcurrents.noaa.gov/datums.html?units=1&epoch=0&id=8447435&name=Chatham%2C+Lydia+Cove&state=MA
"""
# Make list for stress period 1
# Using Mean Sea Level (MSL) in meters at closest NOAA station for stages
#stageleft = mean_sea_level
#stageright = mean_sea_level
#bound_sp1 = []
#for il in range(nlay):
# # Figure out looping through hk1 array to get hk values at each cell for changing conductance.
# condleft = hk1[0,0,0] * (stageleft - zbot) * delc
# condright = hk1[0,0,0] * (stageright - zbot) * delc
# for ir in range(nrow):
# bound_sp1.append([il, ir, 0, stageleft, condleft])
# bound_sp1.append([il, ir, ncol - 1, stageright, condright])
## Only 1 stress period for steady-state model
#print('Adding ', len(bound_sp1), 'GHBs for stress period 1.')
#
#stress_period_data = {0: bound_sp1}
#ghb = flopy.modflow.ModflowGhb(mf, stress_period_data=stress_period_data)
# using single conductance value (see drain for modification based on Masterson, 2004)
conductance = 1000. # (modify 1000 to actual conductance)
bound_sp1 = []
stress_period_data = {0: bound_sp1}
ghb = flopy.modflow.ModflowGhb(mf, stress_period_data=stress_period_data)
#%% # Add drain condition
#Darcy's law states that
#Q = -KA(h1 - h0)/(X1 - X0)
#Where Q is the flow (L3/T)
#K is the hydraulic conductivity (L/T)
#A is the area perpendicular to flow (L2)
#h is head (L)
#X is the position at which head is measured (L)
#Conductance combines the K, A and X terms so that Darcy's law can be expressed as
#Q = -C(h1 - h0)
#where C is the conductance (L2/T)
# https://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?drn.htm
# from Masterson, 2004
# C = KWL/M where
#C is hydraulic conductance of the seabed (ft2/d);
#K is vertical hydraulic conductivity of seabed deposits
#(ft/d);
#W is width of the model cell containing the seabed (ft);
#L is length of the model cell containing the seabed (ft);
#and
#M is thickness of seabed deposits (ft).
#The vertical hydraulic conductivity (K) of the seabed
#deposits in most of the study area was assumed to be 1 ft/d,
#which is consistent with model simulations of similar coastal
#discharge areas in other areas on Cape Cod (Masterson and
#others, 1998). In the area occupied by Salt Pond and Nauset
#Marsh, it was assumed that there were thick deposits of low-permeability
#material (<NAME>, U.S. Geological Survey,
#oral commun., 2002) and the vertical hydraulic conductivity
#was set to 0.1 ft/d. The thickness of the seabed deposits was
#assumed to be half the thickness of the model cell containing the
#boundary.
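# Hedged sketch of the Masterson (2004) relation C = K*W*L/M described above.
# It is not used by the drain below (which keeps a single conductance value),
# and the seabed K here is an assumed illustrative number, not from the report.
seabed_K = 0.3                # assumed vertical hydraulic conductivity of the seabed [m/d]
seabed_M = 0.5 * delv         # seabed thickness taken as half the cell thickness
seabed_C = seabed_K * delc * delr / seabed_M   # per-cell conductance [m2/d]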
# still using simple conductance
land_cells = active_cells & ~np.isnan(dem_trans) & (dem_trans>mean_sea_level)
landrows, landcols = land_cells.nonzero()
lrcec = {0:np.column_stack([np.zeros_like(landrows),landrows,landcols,dem_trans[land_cells],conductance*np.ones_like(landrows)])} # this drain will be applied to all stress periods
drn = flopy.modflow.ModflowDrn(mf, stress_period_data=lrcec)
#%% # Add recharge condition
# steady state, units in [m/day]?
rch = flopy.modflow.ModflowRch(mf, nrchop=3, rech=1.4e-3) # from https://pubs.usgs.gov/wsp/2447/report.pdf
#%% # Add OC package to the MODFLOW model
spd = {(0, 0): ['print head', 'print budget', 'save head', 'save budget']}
oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, compact=True)
#%% # Add PCG package to the MODFLOW model
pcg = flopy.modflow.ModflowPcg(mf)
#%% # Write the MODFLOW model input files
mf.write_input()
#%% # Run the MODFLOW model
success, buff = mf.run_model()
#%%
"""
Post-Processing the Results
Now that we have successfully built and run our MODFLOW model, we can look at the results.
MODFLOW writes the simulated heads to a binary data output file.
We cannot look at these heads with a text editor, but flopy has a binary utility that can be used to read the heads.
The following statements will read the binary head file and create a plot of simulated heads for layer 1:
"""
import flopy.utils.binaryfile as bf
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.subplot(1,1,1,aspect='equal')
hds = bf.HeadFile(os.path.join(work_dir,modelname+'.hds'))
head = hds.get_data(totim=hds.get_times()[-1])
head[head<-100] = np.nan
#levels = np.arange(1,10,1)
extent = (delr/2., Lx - delr/2., Ly - delc/2., delc/2.)
# headplot = plt.contour(head[0, :, :], levels=levels, extent=extent) #
headplot = plt.contour(head[0, :, :], extent=extent)
plt.xlabel('Lx')
plt.ylabel('Ly')
plt.colorbar(headplot) # plots heads as contours
#plt.colorbar.set_label('heads')
plt.savefig('CheqModel1a.png')
genu.quick_plot(head) # plots heads with color gradient
genu.quick_plot(dem_trans) # plots elevations
#%%
"""
Flopy also has some pre-canned plotting capabilities that can be accessed using the ModelMap class.
The following code shows how to use the modelmap class to plot boundary conditions (IBOUND),
plot the grid, plot head contours, and plot vectors:
"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
hds = bf.HeadFile(modelname+'.hds')
times = hds.get_times()
head = hds.get_data(totim=times[-1])
levels = np.linspace(0, 10, 11)
cbb = bf.CellBudgetFile(modelname+'.cbc')
kstpkper_list = cbb.get_kstpkper()
frf = cbb.get_data(text='FLOW RIGHT FACE', totim=times[-1])[0]
fff = cbb.get_data(text='FLOW FRONT FACE', totim=times[-1])[0]
#%%
"""
The pre-canned plotting doesn't seem to be able to allow averaging to reduce nrow and ncol
on the plot, making it difficult to plot a large grid. The commented section below uses the
modelmap class from Tutorial 1, followed by use of the plotting from the Henry Problem.
"""
#modelmap = flopy.plot.ModelMap(model=mf, layer=0)
#qm = modelmap.plot_ibound()
#lc = modelmap.plot_grid() # Need to fix grid to have fewer rows and columns
#cs = modelmap.contour_array(head, levels=levels)
#quiver = modelmap.plot_discharge(frf, fff, head=head)
#plt.savefig('CheqModel1b.png')
"""
# Load data (when implementing SEAWAT)
ucnobj = bf.UcnFile('MT3D001.UCN', model=swt)
times = ucnobj.get_times()
concentration = ucnobj.get_data(totim=times[-1])
"""
# Average flows to cell centers
qx_avg = np.empty(frf.shape, dtype=frf.dtype)
qx_avg[:, :, 1:] = 0.5 * (frf[:, :, 0:ncol-1] + frf[:, :, 1:ncol])
qx_avg[:, :, 0] = 0.5 * frf[:, :, 0]
qy_avg = np.empty(fff.shape, dtype=fff.dtype)
qy_avg[1:, :, :] = 0.5 * (fff[0:nlay-1, :, :] + fff[1:nlay, :, :])
qy_avg[0, :, :] = 0.5 * fff[0, :, :]
# Make the plot
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
#ax.imshow(concentration[:, 0, :], interpolation='nearest',
# extent=(0, Lx, 0, Ly))
y, x, z = dis.get_node_coordinates()
X, Y = np.meshgrid(x, y)
iskip = 3
ax.quiver(X[::iskip, ::iskip], Y[::iskip, ::iskip],
qx_avg[::iskip, 0, ::iskip], -qy_avg[::iskip, 0, ::iskip],
color='k', scale=5, headwidth=3, headlength=2,
headaxislength=2, width=0.0025)
plt.savefig('CheqModel1b.png')
plt.show()
#%%
"""
Post-Processing the Results
Once again, we can read heads from the MODFLOW binary output file, using the flopy.utils.binaryfile module. Included with the HeadFile object are several methods that we will use here: * get_times() will return a list of times contained in the binary head file * get_data() will return a three-dimensional head array for the specified time * get_ts() will return a time series array [ntimes, headval] for the specified cell
Using these methods, we can create head plots and hydrographs from the model results.:
"""
# headfile and budget file objects already created
# Setup contour parameters (levels already set)
extent = (delr/2., Lx - delr/2., delc/2., Ly - delc/2.)
print('Levels: ', levels)
print('Extent: ', extent)
# Make the plots
#Print statistics
print('Head statistics')
print(' min: ', head.min())
print(' max: ', head.max())
print(' std: ', head.std())
"""
Again, commented out section using modelmap
"""
## Flow right face and flow front face already extracted
##%%
##Create the plot
#f = plt.figure()
#plt.subplot(1, 1, 1, aspect='equal')
#
#
#modelmap = flopy.plot.ModelMap(model=mf, layer=0)
#qm = modelmap.plot_ibound()
##
## lc = modelmap.plot_grid()
#qm = modelmap.plot_bc('GHB', alpha=0.5)
#cs = modelmap.contour_array(head, levels=levels)
#plt.clabel(cs, inline=1, fontsize=10, fmt='%1.1f', zorder=11)
#quiver = modelmap.plot_discharge(frf, fff, head=head)
#
#mfc='black'
#plt.plot(lw=0, marker='o', markersize=8,
# markeredgewidth=0.5,
# markeredgecolor='black', markerfacecolor=mfc, zorder=9)
#plt.savefig('CheqModel2-{}.png')
"""
From <NAME>
"""
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
im = ax.imshow(head[:, 0, :], interpolation='nearest',
extent=(0, Lx, 0, Ly))
ax.set_title('Simulated Heads')
|
[
"flopy.modflow.ModflowOc",
"numpy.amin",
"cgw.utils.raster_utils.subsection_griddata",
"numpy.empty",
"numpy.ones",
"numpy.isnan",
"cgw.utils.general_utils.quick_plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"cgw.utils.feature_utils.nodes_to_cc",
"os.path.join",
"numpy.meshgrid",
"numpy.zeros_like",
"flopy.modflow.ModflowPcg",
"flopy.utils.binaryfile.HeadFile",
"flopy.modflow.ModflowGhb",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"flopy.modflow.ModflowDrn",
"cgw.utils.feature_utils.gridpts_in_shp",
"flopy.modflow.Modflow",
"flopy.modflow.ModflowRch",
"matplotlib.pyplot.show",
"numpy.ones_like",
"cgw.utils.feature_utils.shp_to_grid",
"flopy.utils.binaryfile.CellBudgetFile",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"cgw.utils.raster_utils.load_geotif",
"flopy.modflow.ModflowDis",
"sys.path.insert",
"flopy.modflow.ModflowLpf",
"numpy.amax",
"flopy.modflow.ModflowBas",
"flopy.seawat.Seawat",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"cgw.utils.general_utils.define_mask"
] |
[((261, 293), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cgw_code_dir'], {}), '(0, cgw_code_dir)\n', (276, 293), False, 'import sys, os\n'), ((537, 607), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', (['modelname'], {'exe_name': '"""mf2005"""', 'model_ws': 'work_dir'}), "(modelname, exe_name='mf2005', model_ws=work_dir)\n", (558, 607), False, 'import flopy\n'), ((614, 662), 'flopy.seawat.Seawat', 'flopy.seawat.Seawat', (['modelname'], {'exe_name': '"""swtv4"""'}), "(modelname, exe_name='swtv4')\n", (633, 662), False, 'import flopy\n'), ((849, 904), 'os.path.join', 'os.path.join', (['data_dir', '"""Chequesset_Model_Area_UTM.shp"""'], {}), "(data_dir, 'Chequesset_Model_Area_UTM.shp')\n", (861, 904), False, 'import sys, os\n'), ((1088, 1124), 'cgw.utils.feature_utils.shp_to_grid', 'shpu.shp_to_grid', ([], {}), '(**shp_to_grid_dict)\n', (1104, 1124), True, 'from cgw.utils import feature_utils as shpu\n'), ((1454, 1506), 'cgw.utils.feature_utils.nodes_to_cc', 'shpu.nodes_to_cc', (['[X_nodes, Y_nodes]', 'grid_transform'], {}), '([X_nodes, Y_nodes], grid_transform)\n', (1470, 1506), True, 'from cgw.utils import feature_utils as shpu\n'), ((1575, 1613), 'cgw.utils.feature_utils.gridpts_in_shp', 'shpu.gridpts_in_shp', (['model_polygon', 'cc'], {}), '(model_polygon, cc)\n', (1594, 1613), True, 'from cgw.utils import feature_utils as shpu\n'), ((1629, 1659), 'cgw.utils.general_utils.define_mask', 'genu.define_mask', (['cc', '[ir, ic]'], {}), '(cc, [ir, ic])\n', (1645, 1659), True, 'from cgw.utils import general_utils as genu\n'), ((2117, 2162), 'os.path.join', 'os.path.join', (['data_dir', '"""Cheq10mx10m_UTM.tif"""'], {}), "(data_dir, 'Cheq10mx10m_UTM.tif')\n", (2129, 2162), False, 'import sys, os\n'), ((2210, 2238), 'cgw.utils.raster_utils.load_geotif', 'rastu.load_geotif', (['dem_fname'], {}), '(dem_fname)\n', (2227, 2238), True, 'from cgw.utils import raster_utils as rastu\n'), ((2996, 3035), 'cgw.utils.raster_utils.subsection_griddata', 'rastu.subsection_griddata', ([], {}), '(**trans_dict)\n', (3021, 3035), True, 'from cgw.utils import raster_utils as rastu\n'), ((3081, 3107), 'cgw.utils.general_utils.quick_plot', 'genu.quick_plot', (['dem_trans'], {}), '(dem_trans)\n', (3096, 3107), True, 'from cgw.utils import general_utils as genu\n'), ((4694, 4793), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['mf', 'nlay', 'nrow', 'ncol'], {'delr': 'delr', 'delc': 'delc', 'top': 'dem_trans', 'botm': 'botm'}), '(mf, nlay, nrow, ncol, delr=delr, delc=delc, top=\n dem_trans, botm=botm)\n', (4718, 4793), False, 'import flopy\n'), ((6408, 6451), 'numpy.ones', 'np.ones', (['(nlay, nrow, ncol)'], {'dtype': 'np.int32'}), '((nlay, nrow, ncol), dtype=np.int32)\n', (6415, 6451), True, 'import numpy as np\n'), ((6661, 6684), 'cgw.utils.general_utils.quick_plot', 'genu.quick_plot', (['ibound'], {}), '(ibound)\n', (6676, 6684), True, 'from cgw.utils import general_utils as genu\n'), ((6783, 6828), 'numpy.ones', 'np.ones', (['(nlay, nrow, ncol)'], {'dtype': 'np.float32'}), '((nlay, nrow, ncol), dtype=np.float32)\n', (6790, 6828), True, 'import numpy as np\n'), ((7097, 7118), 'cgw.utils.general_utils.quick_plot', 'genu.quick_plot', (['strt'], {}), '(strt)\n', (7112, 7118), True, 'from cgw.utils import general_utils as genu\n'), ((7155, 7209), 'flopy.modflow.ModflowBas', 'flopy.modflow.ModflowBas', (['mf'], {'ibound': 'ibound', 'strt': 'strt'}), '(mf, ibound=ibound, strt=strt)\n', (7179, 7209), False, 'import flopy\n'), ((7330, 7367), 'numpy.ones', 'np.ones', (['(nlay, nrow, ncol)', 'np.float'], {}), 
'((nlay, nrow, ncol), np.float)\n', (7337, 7367), True, 'import numpy as np\n'), ((7436, 7473), 'numpy.ones', 'np.ones', (['(nlay, nrow, ncol)', 'np.float'], {}), '((nlay, nrow, ncol), np.float)\n', (7443, 7473), True, 'import numpy as np\n'), ((7562, 7619), 'flopy.modflow.ModflowLpf', 'flopy.modflow.ModflowLpf', (['mf'], {'hk': 'hk1', 'vka': 'vka1', 'ipakcb': '(53)'}), '(mf, hk=hk1, vka=vka1, ipakcb=53)\n', (7586, 7619), False, 'import flopy\n'), ((9327, 9394), 'flopy.modflow.ModflowGhb', 'flopy.modflow.ModflowGhb', (['mf'], {'stress_period_data': 'stress_period_data'}), '(mf, stress_period_data=stress_period_data)\n', (9351, 9394), False, 'import flopy\n'), ((11186, 11240), 'flopy.modflow.ModflowDrn', 'flopy.modflow.ModflowDrn', (['mf'], {'stress_period_data': 'lrcec'}), '(mf, stress_period_data=lrcec)\n', (11210, 11240), False, 'import flopy\n'), ((11450, 11501), 'flopy.modflow.ModflowRch', 'flopy.modflow.ModflowRch', (['mf'], {'nrchop': '(3)', 'rech': '(0.0014)'}), '(mf, nrchop=3, rech=0.0014)\n', (11474, 11501), False, 'import flopy\n'), ((11678, 11743), 'flopy.modflow.ModflowOc', 'flopy.modflow.ModflowOc', (['mf'], {'stress_period_data': 'spd', 'compact': '(True)'}), '(mf, stress_period_data=spd, compact=True)\n', (11701, 11743), False, 'import flopy\n'), ((11797, 11825), 'flopy.modflow.ModflowPcg', 'flopy.modflow.ModflowPcg', (['mf'], {}), '(mf)\n', (11821, 11825), False, 'import flopy\n'), ((12479, 12515), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {'aspect': '"""equal"""'}), "(1, 1, 1, aspect='equal')\n", (12490, 12515), True, 'import matplotlib.pyplot as plt\n'), ((12819, 12860), 'matplotlib.pyplot.contour', 'plt.contour', (['head[0, :, :]'], {'extent': 'extent'}), '(head[0, :, :], extent=extent)\n', (12830, 12860), True, 'import matplotlib.pyplot as plt\n'), ((12862, 12878), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lx"""'], {}), "('Lx')\n", (12872, 12878), True, 'import matplotlib.pyplot as plt\n'), ((12880, 12896), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ly"""'], {}), "('Ly')\n", (12890, 12896), True, 'import matplotlib.pyplot as plt\n'), ((12898, 12920), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['headplot'], {}), '(headplot)\n', (12910, 12920), True, 'import matplotlib.pyplot as plt\n'), ((12982, 13012), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""CheqModel1a.png"""'], {}), "('CheqModel1a.png')\n", (12993, 13012), True, 'import matplotlib.pyplot as plt\n'), ((13014, 13035), 'cgw.utils.general_utils.quick_plot', 'genu.quick_plot', (['head'], {}), '(head)\n', (13029, 13035), True, 'from cgw.utils import general_utils as genu\n'), ((13071, 13097), 'cgw.utils.general_utils.quick_plot', 'genu.quick_plot', (['dem_trans'], {}), '(dem_trans)\n', (13086, 13097), True, 'from cgw.utils import general_utils as genu\n'), ((13391, 13419), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13401, 13419), True, 'import matplotlib.pyplot as plt\n'), ((13475, 13506), 'flopy.utils.binaryfile.HeadFile', 'bf.HeadFile', (["(modelname + '.hds')"], {}), "(modelname + '.hds')\n", (13486, 13506), True, 'import flopy.utils.binaryfile as bf\n'), ((13578, 13600), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (13589, 13600), True, 'import numpy as np\n'), ((13610, 13647), 'flopy.utils.binaryfile.CellBudgetFile', 'bf.CellBudgetFile', (["(modelname + '.cbc')"], {}), "(modelname + '.cbc')\n", (13627, 13647), True, 'import flopy.utils.binaryfile as bf\n'), ((14623, 14659), 
'numpy.empty', 'np.empty', (['frf.shape'], {'dtype': 'frf.dtype'}), '(frf.shape, dtype=frf.dtype)\n', (14631, 14659), True, 'import numpy as np\n'), ((14776, 14812), 'numpy.empty', 'np.empty', (['fff.shape'], {'dtype': 'fff.dtype'}), '(fff.shape, dtype=fff.dtype)\n', (14784, 14812), True, 'import numpy as np\n'), ((14945, 14973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (14955, 14973), True, 'import matplotlib.pyplot as plt\n'), ((15166, 15183), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (15177, 15183), True, 'import numpy as np\n'), ((15423, 15453), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""CheqModel1b.png"""'], {}), "('CheqModel1b.png')\n", (15434, 15453), True, 'import matplotlib.pyplot as plt\n'), ((15455, 15465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15463, 15465), True, 'import matplotlib.pyplot as plt\n'), ((17164, 17192), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (17174, 17192), True, 'import matplotlib.pyplot as plt\n'), ((3138, 3152), 'numpy.amax', 'np.amax', (['dem_X'], {}), '(dem_X)\n', (3145, 3152), True, 'import numpy as np\n'), ((3153, 3167), 'numpy.amin', 'np.amin', (['dem_X'], {}), '(dem_X)\n', (3160, 3167), True, 'import numpy as np\n'), ((3174, 3188), 'numpy.amax', 'np.amax', (['dem_Y'], {}), '(dem_Y)\n', (3181, 3188), True, 'import numpy as np\n'), ((3189, 3203), 'numpy.amin', 'np.amin', (['dem_Y'], {}), '(dem_Y)\n', (3196, 3203), True, 'import numpy as np\n'), ((12532, 12574), 'os.path.join', 'os.path.join', (['work_dir', "(modelname + '.hds')"], {}), "(work_dir, modelname + '.hds')\n", (12544, 12574), False, 'import sys, os\n'), ((6608, 6627), 'numpy.isnan', 'np.isnan', (['dem_trans'], {}), '(dem_trans)\n', (6616, 6627), True, 'import numpy as np\n'), ((6877, 6896), 'numpy.isnan', 'np.isnan', (['dem_trans'], {}), '(dem_trans)\n', (6885, 6896), True, 'import numpy as np\n'), ((10905, 10924), 'numpy.isnan', 'np.isnan', (['dem_trans'], {}), '(dem_trans)\n', (10913, 10924), True, 'import numpy as np\n'), ((11026, 11049), 'numpy.zeros_like', 'np.zeros_like', (['landrows'], {}), '(landrows)\n', (11039, 11049), True, 'import numpy as np\n'), ((6923, 6942), 'numpy.isnan', 'np.isnan', (['dem_trans'], {}), '(dem_trans)\n', (6931, 6942), True, 'import numpy as np\n'), ((11102, 11124), 'numpy.ones_like', 'np.ones_like', (['landrows'], {}), '(landrows)\n', (11114, 11124), True, 'import numpy as np\n')]
|
# Functions of img processing.
from functools import total_ordering
import config
import numpy as np
import copy
import torch
import cv2
from skimage.color import rgb2gray
from XCSLBP import XCSLBP
def extractPixelBlock(originalImg, labels):
'''
input_param:
        originalImg: original pixel matrix of the input img, squeezed to 2 dimensions. np.ndarray
        labels: label matrix of the input img. np.ndarray
    output_param:
        pixelBlockList: a list containing one pixel block per label, each grouping the pixels that share that label.
'''
# Copy a new labels due to max() function alters dimentions of its parameter
newLabels = copy.deepcopy(labels)
maxLabel = max(newLabels)
pixelBlockList = []
labels = labels.reshape(-1,1)
blankBlock = np.array([255, 255, 255])
for i in range(maxLabel + 1):
# Uncomment line24 and comment line25 to visualize pixelBlock.
# pixelBlock = [pixel if label == i else blankBlock for pixel, label in zip(originalImg, labels)]
pixelBlock = [pixel if label == i else config.blankBlock for pixel, label in zip(originalImg, labels)]
pixelBlock = np.array(pixelBlock)
pixelBlock = pixelBlock.reshape(config.imgSize[0], config.imgSize[1], -1)
pixelBlockList.append(pixelBlock)
return pixelBlockList
def extractFeature(pixelBlockList):
'''
input_param:
        pixelBlockList: a list containing all pixel-block elements.
    output_param:
        featureList: a list with one feature vector per element; each feature holds the three color channels' mean values plus the mean (x, y) position.
'''
featureList = []
for i in range(len(pixelBlockList)):
pixelList = []
locationList = []
        for y in range(config.imgSize[0]):      # iterate over rows
            for x in range(config.imgSize[1]):  # iterate over columns
if (pixelBlockList[i][y][x] != config.blankBlock).any():
pixelList.append(list(pixelBlockList[i][y][x]))
locationList.append((x,y))
colorFeature = np.mean(np.array(pixelList), axis=0)
locationFeature = np.mean(np.array(locationList), axis=0)
features = np.append(colorFeature, locationFeature)
featureList.append(features)
featureList = np.array(featureList)
return featureList
# Optimized version
def regionColorFeatures(img, labels):
'''
input_param:
img: img matrix. torch.tensor
labels: Kmeans clustering labels. torch.tensor
output_param:
        colorFeatureList: a list with one feature per region, holding the mean red, green, blue and gray values.
'''
numlab = max(labels)
rlabels = labels.view(config.imgSize)
colorFeatureList = []
grayFrame = torch.tensor(rgb2gray(img))
redFrame = img[:, :, 0]
greenFrame = img[:, :, 1]
blueFrame = img[:, :, 2]
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
graySpLocal = torch.mean(grayFrame[f].float())
redSpLocal = torch.mean(redFrame[f].float())
greenSpLocal = torch.mean(greenFrame[f].float())
blueSpLocal = torch.mean(blueFrame[f].float())
colorFeature = [redSpLocal, greenSpLocal, blueSpLocal, graySpLocal]
colorFeatureList.append(colorFeature)
colorFeatureList = torch.tensor(colorFeatureList)
return colorFeatureList
def regionTextureFeatures(img, labels):
'''
input_param:
img: CV2.imread
labels
'''
numlab = max(labels)
rlabels = labels.view(config.imgSize)
# I = rgb2gray(img)
XCS = XCSLBP(img)
XCS = XCS * (255/ 16)
XCSframe = torch.tensor(XCS)
textureFeatureList = []
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
XCSSpLocal = torch.mean(XCSframe[f].float())
textureFeatureList.append(XCSSpLocal)
textureFeatureList = torch.tensor(textureFeatureList)
textureFeatureList = textureFeatureList.unsqueeze(1)
return textureFeatureList
def regionEdgeFeatures(img, labels):
'''
input_param:
img: CV2.imread
labels
'''
numlab = max(labels)
rlabels = labels.view(config.imgSize)
# frame = rgb2gray(img)
Gx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
Gy = cv2.Sobel(img, cv2.CV_64F, 0, 1)
Gmag = np.sqrt(Gx**2.0 + Gy**2.0)
Gdir = np.arctan2(Gy, Gx) * (180 / np.pi)
Gx, Gy, Gmag, Gdir = torch.tensor(Gx), torch.tensor(Gy), torch.tensor(Gmag), torch.tensor(Gdir)
edgeFeatureList = []
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
GxSpLocal = torch.mean(Gx[f].float())
GySpLocal = torch.mean(Gy[f].float())
GmagSpLocal = torch.mean(Gmag[f].float())
GdirSpLocal = torch.mean(Gdir[f].float())
edgeFeature = [GxSpLocal, GySpLocal, GmagSpLocal, GdirSpLocal]
edgeFeatureList.append(edgeFeature)
edgeFeatureList = torch.tensor(edgeFeatureList)
return edgeFeatureList
def regionSpatialFeatures(labels):
numlab = max(labels)
rlabels = labels.view(config.imgSize)
col, row = config.imgSize
x = range(1, col + 1)
y = range(1, row + 1)
Sx, Sy = np.meshgrid(y, x)
Sx, Sy = torch.tensor(Sx), torch.tensor(Sy)
spatialFeatureList = []
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
SxSpLocal = torch.mean(Sx[f].float())
SySpLocal = torch.mean(Sy[f].float())
spatialFeature = [SxSpLocal, SySpLocal]
spatialFeatureList.append(spatialFeature)
spatialFeatureList = torch.tensor(spatialFeatureList)
return spatialFeatureList
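
# Taken together, the four region descriptors above give one row per superpixel and can be
# concatenated into a single feature matrix (shapes shown for numlab + 1 regions; this is a
# sketch, the file itself does not do the concatenation):
#   regionColorFeatures   -> (numlab + 1, 4)   mean R, G, B, grey
#   regionTextureFeatures -> (numlab + 1, 1)   mean XCS-LBP response
#   regionEdgeFeatures    -> (numlab + 1, 4)   mean Gx, Gy, magnitude, direction
#   regionSpatialFeatures -> (numlab + 1, 2)   mean Sx, Sy
#   torch.cat([...], dim=1) -> (numlab + 1, 11)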
|
[
"torch.eq",
"copy.deepcopy",
"numpy.meshgrid",
"skimage.color.rgb2gray",
"numpy.arctan2",
"numpy.append",
"numpy.array",
"numpy.sqrt",
"cv2.Sobel",
"torch.tensor",
"XCSLBP.XCSLBP"
] |
[((637, 658), 'copy.deepcopy', 'copy.deepcopy', (['labels'], {}), '(labels)\n', (650, 658), False, 'import copy\n'), ((766, 791), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (774, 791), True, 'import numpy as np\n'), ((2226, 2247), 'numpy.array', 'np.array', (['featureList'], {}), '(featureList)\n', (2234, 2247), True, 'import numpy as np\n'), ((3242, 3272), 'torch.tensor', 'torch.tensor', (['colorFeatureList'], {}), '(colorFeatureList)\n', (3254, 3272), False, 'import torch\n'), ((3521, 3532), 'XCSLBP.XCSLBP', 'XCSLBP', (['img'], {}), '(img)\n', (3527, 3532), False, 'from XCSLBP import XCSLBP\n'), ((3575, 3592), 'torch.tensor', 'torch.tensor', (['XCS'], {}), '(XCS)\n', (3587, 3592), False, 'import torch\n'), ((3816, 3848), 'torch.tensor', 'torch.tensor', (['textureFeatureList'], {}), '(textureFeatureList)\n', (3828, 3848), False, 'import torch\n'), ((4156, 4188), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {}), '(img, cv2.CV_64F, 1, 0)\n', (4165, 4188), False, 'import cv2\n'), ((4198, 4230), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {}), '(img, cv2.CV_64F, 0, 1)\n', (4207, 4230), False, 'import cv2\n'), ((4243, 4273), 'numpy.sqrt', 'np.sqrt', (['(Gx ** 2.0 + Gy ** 2.0)'], {}), '(Gx ** 2.0 + Gy ** 2.0)\n', (4250, 4273), True, 'import numpy as np\n'), ((4839, 4868), 'torch.tensor', 'torch.tensor', (['edgeFeatureList'], {}), '(edgeFeatureList)\n', (4851, 4868), False, 'import torch\n'), ((5096, 5113), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (5107, 5113), True, 'import numpy as np\n'), ((5473, 5505), 'torch.tensor', 'torch.tensor', (['spatialFeatureList'], {}), '(spatialFeatureList)\n', (5485, 5505), False, 'import torch\n'), ((1135, 1155), 'numpy.array', 'np.array', (['pixelBlock'], {}), '(pixelBlock)\n', (1143, 1155), True, 'import numpy as np\n'), ((2124, 2164), 'numpy.append', 'np.append', (['colorFeature', 'locationFeature'], {}), '(colorFeature, locationFeature)\n', (2133, 2164), True, 'import numpy as np\n'), ((2709, 2722), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (2717, 2722), False, 'from skimage.color import rgb2gray\n'), ((2855, 2875), 'torch.eq', 'torch.eq', (['rlabels', 'i'], {}), '(rlabels, i)\n', (2863, 2875), False, 'import torch\n'), ((3670, 3690), 'torch.eq', 'torch.eq', (['rlabels', 'i'], {}), '(rlabels, i)\n', (3678, 3690), False, 'import torch\n'), ((4281, 4299), 'numpy.arctan2', 'np.arctan2', (['Gy', 'Gx'], {}), '(Gy, Gx)\n', (4291, 4299), True, 'import numpy as np\n'), ((4342, 4358), 'torch.tensor', 'torch.tensor', (['Gx'], {}), '(Gx)\n', (4354, 4358), False, 'import torch\n'), ((4360, 4376), 'torch.tensor', 'torch.tensor', (['Gy'], {}), '(Gy)\n', (4372, 4376), False, 'import torch\n'), ((4378, 4396), 'torch.tensor', 'torch.tensor', (['Gmag'], {}), '(Gmag)\n', (4390, 4396), False, 'import torch\n'), ((4398, 4416), 'torch.tensor', 'torch.tensor', (['Gdir'], {}), '(Gdir)\n', (4410, 4416), False, 'import torch\n'), ((4488, 4508), 'torch.eq', 'torch.eq', (['rlabels', 'i'], {}), '(rlabels, i)\n', (4496, 4508), False, 'import torch\n'), ((5127, 5143), 'torch.tensor', 'torch.tensor', (['Sx'], {}), '(Sx)\n', (5139, 5143), False, 'import torch\n'), ((5145, 5161), 'torch.tensor', 'torch.tensor', (['Sy'], {}), '(Sy)\n', (5157, 5161), False, 'import torch\n'), ((5236, 5256), 'torch.eq', 'torch.eq', (['rlabels', 'i'], {}), '(rlabels, i)\n', (5244, 5256), False, 'import torch\n'), ((2010, 2029), 'numpy.array', 'np.array', (['pixelList'], {}), '(pixelList)\n', 
(2018, 2029), True, 'import numpy as np\n'), ((2073, 2095), 'numpy.array', 'np.array', (['locationList'], {}), '(locationList)\n', (2081, 2095), True, 'import numpy as np\n')]
|
#fuzzytest.py
#<NAME>
#<NAME>
#fuzzy clustering for testFun.dat
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import skfuzzy as fuzz
colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen']
# First run on the provided test data; our own data can be swapped in later.
# Collect Test Data
with open("testFun.dat") as textFile:
y = [line.split() for line in textFile]
y = np.array(y)
X = np.zeros(shape=(200,2))
# stores test data as number in array X (converts from strings)
for i in range(0,len(y)): # num rows
for j in range(0,len(y[0])): # num columns
X[i,j] = float(y[i,j])
xpts = np.zeros(len(y))
ypts = np.zeros(len(y))
labels = np.zeros(len(y)) # no labels
# xpts = x[all rows][0]
for i in range (0, len(y)):
xpts[i] = X[i][0]
# ypts = x[all rows][1]
for i in range (0, len(y)):
ypts[i] = X[i][1]
# Visualize the test data
fig0, ax0 = plt.subplots()
for label in range(2):  # would need 2 different label values; we only have 1 because the points are unlabelled
ax0.plot(xpts[labels == label], ypts[labels == label], '.',
color=colors[label])
ax0.set_title('Test data: 200 points x2 clusters.')
plt.show()
# Set up the loop and plot
fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures
alldata = np.vstack((xpts, ypts))
fpcs = []
for ncenters, ax in enumerate(axes1.reshape(-1), 2):
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(
alldata, ncenters, 2, error=0.005, maxiter=1000, init=None)
print("Centers = ", str(ncenters), "\n") # u0 is the array of the memberiship functions
for i in range (len(y)): # columns
print ("Data point: ",xpts[i], ",", ypts[i]) #data point
print("Membership: ")
for j in range(ncenters): #number of clusters
print("Cluster: ", j, "\n", u0[j][i]) #membership for cluster
print()
# Store fpc values for later
fpcs.append(fpc)
# Plot assigned clusters, for each data point in training set
cluster_membership = np.argmax(u, axis=0)
for j in range(ncenters):
ax.plot(xpts[cluster_membership == j],
ypts[cluster_membership == j], '.', color=colors[j])
# Mark the center of each fuzzy cluster
for pt in cntr:
ax.plot(pt[0], pt[1], 'rs')
ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc))
ax.axis('off')
fig1.tight_layout()
plt.show()
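
# Optional follow-up (a sketch, not in the original script): the fuzzy partition coefficients
# collected in `fpcs` indicate how cleanly each number of centers describes the data, so plotting
# them helps pick the cluster count.
fig2, ax2 = plt.subplots()
ax2.plot(np.r_[2:len(fpcs) + 2], fpcs)
ax2.set_xlabel("Number of centers")
ax2.set_ylabel("Fuzzy partition coefficient")
plt.show()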
|
[
"matplotlib.pyplot.show",
"numpy.argmax",
"numpy.zeros",
"skfuzzy.cluster.cmeans",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.vstack"
] |
[((430, 441), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (438, 441), True, 'import numpy as np\n'), ((446, 470), 'numpy.zeros', 'np.zeros', ([], {'shape': '(200, 2)'}), '(shape=(200, 2))\n', (454, 470), True, 'import numpy as np\n'), ((925, 939), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (937, 939), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1195, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1274), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 8)'}), '(2, 1, figsize=(8, 8))\n', (1252, 1274), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1327), 'numpy.vstack', 'np.vstack', (['(xpts, ypts)'], {}), '((xpts, ypts))\n', (1313, 1327), True, 'import numpy as np\n'), ((2400, 2410), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2408, 2410), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1504), 'skfuzzy.cluster.cmeans', 'fuzz.cluster.cmeans', (['alldata', 'ncenters', '(2)'], {'error': '(0.005)', 'maxiter': '(1000)', 'init': 'None'}), '(alldata, ncenters, 2, error=0.005, maxiter=1000, init=None)\n', (1444, 1504), True, 'import skfuzzy as fuzz\n'), ((2020, 2040), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (2029, 2040), True, 'import numpy as np\n')]
|
from sklearn.neighbors import NearestNeighbors
import Sv
import logging
import pandas as pd
import numpy as np
import functools
import os
import math
logger = logging.getLogger('marin')
logger.setLevel(logging.DEBUG)
def point_processing(tracks_data):
"""
input: tracking data matrix
    output: column of distances to nearest neighbors in meters
"""
tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each tracks
    tracks['long_m'] = tracks.y_gps * (
            40075000 * np.cos(np.radians(tracks.x_gps)) / 360)  # equivalent of the longitude in meters (x_gps is in degrees, hence the radians conversion)
tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get the equivalent of the latitude in meters
array = np.vstack(
[tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors algorithm
array = np.transpose(array)
nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm
distances, indices = nbrs.kneighbors(array)
return distances[:, 1]
def conjunction(*conditions):
"""Multiple conditions filter for panda"""
return functools.reduce(np.logical_and, conditions)
def calc_distance_lat(lat1, lat2):
"""Returns a distance between 2 latitudes"""
dlat = lat2 - lat1
dist = dlat * 60 * 1852
return dist
def calc_distance_long(lat, lon1, lon2):
"""Returns a distance between 2 longitudes for a given latitude"""
dlon = lon2 - lon1
    dist = dlon * (40075000 * math.cos(math.radians(lat)) / 360)  # lat is in degrees, so convert before taking the cosine
return dist
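
# Quick sanity check for the two helpers above (positions assumed in decimal degrees):
# one degree of latitude is 60 nautical miles = 60 * 1852 m = 111,120 m, while one degree of
# longitude shrinks with cos(latitude) - roughly 78.7 km at 45 degrees North.
#   calc_distance_lat(45.0, 46.0)         -> 111120.0
#   calc_distance_long(45.0, 3.0, 4.0)    -> about 78715 m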
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient):
"""
Process the pickle file from pymovies tracking and returns several key parameters for each track.
input:
- path_pickle: path to a pickle file, output of movies TS analysis
- path_output: path to store output csv
- transducer; name of the used transducer
- freq_TS: reference frequence for TS extraction
- TS_parameters: parameter for the TS detection and tracks selection
- hac_info: complementary info on the different runs, same for all tracks of each run
- orient: orientation ('H' or 'V')
outputs: multiple csv
- tracks: matrix of tracks with:
            - track, target: relative and absolute index for each track
- TSrange: mean distance in m to transducer
- TSalong, TSarthwart: mean angle in the transducer beam
- x, y, z, x_gps, y_gps, z_gps: relative and absolute position
- TScomp_mean, TScomp: mean TS of all frequencies or for the closest frequency from reference frequency
            - nb_target: number of targets per track
- timeInt and Time: mean time in ns since 1970 and in string formats
- k_dist: distance in m to the nearest neighbour
- State, Abrv, tailleMoyenne: variables from the hac info file
- b20: b20 value
- Nv: Nv value
- dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m following different axis
- tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and relative) in degrees (according to orientation)
- vit_x, vit_y, vit_z, vit_range: speed following different axis
- sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement and angle
- sd_tot: sum of standard deviation
- targets: matrix of all targets
- freq: mean TScomp for each frequency
"""
if path_pickle[-7:] != ".pickle": # Check the pickle file
logger.error("Not a pickle file !")
return
name_transect = os.path.basename(path_pickle)[:-18]
logger.info("reading...")
if os.path.getsize(path_pickle) > 0:
result = pd.read_pickle(path_pickle) # read the pickle file
    else:
        logger.error("File empty !")  # the pickle file is empty
        return
logger.info("done !")
for i in range(len(result[10])): # get index for the sounder and transducer according to given transducer
for j in range(len(result[10][i])):
if result[10][i][j] == transducer:
indexSounder = i
indexTransducer = j
logger.info("creating tables...") # Extract the pickle data in several panda tables.
nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder and transducer
if nb_target > 0: # check if any targets
nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target)
index_targets = []
for i in range(nb_target):
index_targets += [i for j in range(nb_freq)]
targets = pd.DataFrame( # individual target data
{
"track": np.array(result[8][indexSounder][indexTransducer]),
"target": range(nb_target),
"timeTarget": np.array(result[0][indexSounder][indexTransducer]),
"TSrange": np.array(result[1][indexSounder][indexTransducer]),
"TSalong": np.array(result[4][indexSounder][indexTransducer]),
"TSathwart": np.array(result[5][indexSounder][indexTransducer]),
},
index=range(nb_target)
)
freq = pd.DataFrame( # TS and frequency data
{
"target": index_targets,
"TScomp": np.array(result[2][indexSounder][indexTransducer]),
"TSucomp": np.array(result[3][indexSounder][indexTransducer]),
"TSfreq": np.array(result[9][indexSounder][indexTransducer]),
},
index=range(nb_freq * nb_target)
)
# get the position of each targets (relative and absolute)
position = pd.DataFrame(result[6][indexSounder][indexTransducer],
index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z'])
positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer],
index=range(0, len(result[0][indexSounder][indexTransducer])),
columns=['x_gps', 'y_gps', 'z_gps'])
TS_means = freq.groupby(by="target").mean() # get the TScomp_mean: mean TScomp for all frequencies
TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'})
freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency from the reference
# frequency freq_TS
TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp for the given reference frequency
TS_freq.index = range(len(TS_freq))
logger.info("done !")
targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']],
axis=1) # merge of all the data
tracks = targets.groupby(by="track").target.agg('count') # get number of target per tracks
tracks_len = pd.DataFrame(
{'track': tracks.index,
'nb_target': tracks.values},
index=range(len(tracks.index))
)
targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the track length to the target data
targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length
targets_data = targets_selected.sort_values('track')
targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns, 1970)
logger.info("targets ready !")
##### Tracks grouping and analysis
logger.info('Gathering tracks data...')
tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep each parameters as mean
tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime
tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor
for index, row in hac_info.iterrows(): # add the hac_info columns (same for each run)
if row.Name == name_transect:
for header in hac_info.columns[1:]:
tracks_data[header] = row[header]
tracks_data['b20'] = tracks_data['TScomp'] - (
20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille moyenne
# get the Nv value for each track
path_Nv = path_output + '/' + name_transect + "_Nv.csv"
if os.path.exists(path_Nv):
Nv = pd.read_csv(path_Nv)
tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv)
else:
tracks_data['Nv'] = -999 # No Nv data provided
# tracks movement analysis
tracks_id = list(targets_data.groupby('track').groups)
scores = []
for i in tracks_id: # for each track
track_i = targets_data.loc[
targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']]
track_i = track_i.sort_values('timeTarget') # Sort by time
deltas = [[], [], [], [], [], [], [], [], []]
for j in range(1, len(track_i)):
deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x axis
deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y axis
deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z axis
deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range
deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j],
track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes
deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j],
track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes
if orient == 'H': #Horizontal echo sounder
if track_i.x.iloc[
j] > 0: # check if x is coherent (beam is oriented on starboard), corrects direction
# accordingly
cap_rel = abs(math.degrees(
math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading relative to the boat
else:
cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1])))
cap_abs = math.degrees(
math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical) heading
if cap_abs < 0:
cap_abs = 360 + cap_abs # correct to have 0-360° headings
tilt_angle = (math.degrees(
math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2),
deltas[2][j - 1])) - 90) # tilt angle of the track
deltas[6].append(tilt_angle)
deltas[7].append(cap_rel)
deltas[8].append(cap_abs)
else: #vertical echo sounder
tilt_angle = (math.degrees(
math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2),
deltas[2][j - 1])) - 90) # tilt angle of the track
deltas[6].append(tilt_angle)
deltas[7].append(999) # relative and absolute heading is irrelevant on vertical echo sounder
deltas[8].append(999)
delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0]
delta_t = delta_t.total_seconds() # time length of the track (s)
dist_x = np.sum(deltas[4]) # dist is the length of the track on several dimensions
dist_y = np.sum(deltas[5])
dist_z = np.sum(deltas[2])
dist_range = np.sum(deltas[3])
dist_tot = dist_x + dist_y + dist_z
tilt_angle = np.mean(deltas[6]) # mean tilt angle of the track
cap_rel = np.mean(deltas[7]) # mean relative heading of the track
cap_abs = np.mean(deltas[8]) # mean absolute heading of the track
vit_x = dist_x / delta_t # speed
vit_y = dist_y / delta_t
vit_z = dist_z / delta_t
vit_range = dist_range / delta_t
sd_x = np.std(deltas[4]) # standard deviation
sd_y = np.std(deltas[5])
sd_z = np.std(deltas[2])
sd_range = np.std(deltas[3])
sd_ta = np.std(deltas[6])
sd_cr = np.std(deltas[7])
sd_ca = np.std(deltas[8])
sd_tot = sd_x + sd_y + sd_z
scores.append(
[i, dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot,
tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot,
sd_ta, sd_cr, sd_ca]
)
dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data frame
columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot',
'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range',
'sd_x',
'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca'])
tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main data frame
logger.info("Done !")
logger.debug('Tracks summary :')
logger.debug(str(tracks_data.describe()))
# Storing 2 different data frames as csv:
# - targets, with individual targets of each points
# - tracks, with the run track data
filename_1 = path_output + "/" + name_transect + "_tracks.csv"
filename_2 = path_output + "/" + name_transect + "_targets.csv"
tracks_data.to_csv(filename_1, index=False)
targets_data.to_csv(filename_2, index=False)
logger.info("files saved !")
freq_data = freq.groupby('TSfreq').mean()
freq_data['freq'] = freq_data.index
filename_3 = path_output + "/" + name_transect + "_freq.csv"
freq_data.to_csv(filename_3, index=False)
else:
logger.error("No targets !!!")
|
[
"numpy.sum",
"math.atan2",
"pandas.read_csv",
"numpy.mean",
"numpy.linalg.norm",
"numpy.std",
"pandas.merge",
"numpy.transpose",
"os.path.exists",
"sklearn.neighbors.NearestNeighbors",
"math.cos",
"numpy.log10",
"pandas.concat",
"math.sqrt",
"os.path.basename",
"os.path.getsize",
"functools.reduce",
"pandas.to_datetime",
"numpy.cos",
"numpy.vstack",
"Sv.get_nv",
"numpy.array",
"pandas.read_pickle",
"logging.getLogger"
] |
[((160, 186), 'logging.getLogger', 'logging.getLogger', (['"""marin"""'], {}), "('marin')\n", (177, 186), False, 'import logging\n'), ((705, 759), 'numpy.vstack', 'np.vstack', (['[tracks.lat_m, tracks.long_m, tracks.z_gps]'], {}), '([tracks.lat_m, tracks.long_m, tracks.z_gps])\n', (714, 759), True, 'import numpy as np\n'), ((836, 855), 'numpy.transpose', 'np.transpose', (['array'], {}), '(array)\n', (848, 855), True, 'import numpy as np\n'), ((1129, 1173), 'functools.reduce', 'functools.reduce', (['np.logical_and', 'conditions'], {}), '(np.logical_and, conditions)\n', (1145, 1173), False, 'import functools\n'), ((1630, 1652), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (1644, 1652), True, 'import numpy as np\n'), ((3811, 3840), 'os.path.basename', 'os.path.basename', (['path_pickle'], {}), '(path_pickle)\n', (3827, 3840), False, 'import os\n'), ((3885, 3913), 'os.path.getsize', 'os.path.getsize', (['path_pickle'], {}), '(path_pickle)\n', (3900, 3913), False, 'import os\n'), ((3936, 3963), 'pandas.read_pickle', 'pd.read_pickle', (['path_pickle'], {}), '(path_pickle)\n', (3950, 3963), True, 'import pandas as pd\n'), ((6835, 6935), 'pandas.concat', 'pd.concat', (["[targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']]"], {'axis': '(1)'}), "([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq\n ['TScomp']], axis=1)\n", (6844, 6935), True, 'import pandas as pd\n'), ((7270, 7324), 'pandas.merge', 'pd.merge', (['targets', 'tracks_len'], {'how': '"""inner"""', 'on': '"""track"""'}), "(targets, tracks_len, how='inner', on='track')\n", (7278, 7324), True, 'import pandas as pd\n'), ((7950, 7988), 'pandas.to_datetime', 'pd.to_datetime', (["tracks_data['timeInt']"], {}), "(tracks_data['timeInt'])\n", (7964, 7988), True, 'import pandas as pd\n'), ((8628, 8651), 'os.path.exists', 'os.path.exists', (['path_Nv'], {}), '(path_Nv)\n', (8642, 8651), False, 'import os\n'), ((13745, 13804), 'pandas.merge', 'pd.merge', (['tracks_data', 'dist_scores'], {'how': '"""inner"""', 'on': '"""track"""'}), "(tracks_data, dist_scores, how='inner', on='track')\n", (13753, 13804), True, 'import pandas as pd\n'), ((867, 921), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(2)', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=2, algorithm='ball_tree')\n", (883, 921), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((8670, 8690), 'pandas.read_csv', 'pd.read_csv', (['path_Nv'], {}), '(path_Nv)\n', (8681, 8690), True, 'import pandas as pd\n'), ((8723, 8749), 'Sv.get_nv', 'Sv.get_nv', (['tracks_data', 'Nv'], {}), '(tracks_data, Nv)\n', (8732, 8749), False, 'import Sv\n'), ((11946, 11963), 'numpy.sum', 'np.sum', (['deltas[4]'], {}), '(deltas[4])\n', (11952, 11963), True, 'import numpy as np\n'), ((12042, 12059), 'numpy.sum', 'np.sum', (['deltas[5]'], {}), '(deltas[5])\n', (12048, 12059), True, 'import numpy as np\n'), ((12081, 12098), 'numpy.sum', 'np.sum', (['deltas[2]'], {}), '(deltas[2])\n', (12087, 12098), True, 'import numpy as np\n'), ((12124, 12141), 'numpy.sum', 'np.sum', (['deltas[3]'], {}), '(deltas[3])\n', (12130, 12141), True, 'import numpy as np\n'), ((12215, 12233), 'numpy.mean', 'np.mean', (['deltas[6]'], {}), '(deltas[6])\n', (12222, 12233), True, 'import numpy as np\n'), ((12288, 12306), 'numpy.mean', 'np.mean', (['deltas[7]'], {}), '(deltas[7])\n', (12295, 12306), True, 'import numpy as np\n'), ((12367, 12385), 'numpy.mean', 'np.mean', (['deltas[8]'], {}), '(deltas[8])\n', (12374, 12385), True, 'import numpy 
as np\n'), ((12608, 12625), 'numpy.std', 'np.std', (['deltas[4]'], {}), '(deltas[4])\n', (12614, 12625), True, 'import numpy as np\n'), ((12667, 12684), 'numpy.std', 'np.std', (['deltas[5]'], {}), '(deltas[5])\n', (12673, 12684), True, 'import numpy as np\n'), ((12704, 12721), 'numpy.std', 'np.std', (['deltas[2]'], {}), '(deltas[2])\n', (12710, 12721), True, 'import numpy as np\n'), ((12745, 12762), 'numpy.std', 'np.std', (['deltas[3]'], {}), '(deltas[3])\n', (12751, 12762), True, 'import numpy as np\n'), ((12783, 12800), 'numpy.std', 'np.std', (['deltas[6]'], {}), '(deltas[6])\n', (12789, 12800), True, 'import numpy as np\n'), ((12821, 12838), 'numpy.std', 'np.std', (['deltas[7]'], {}), '(deltas[7])\n', (12827, 12838), True, 'import numpy as np\n'), ((12859, 12876), 'numpy.std', 'np.std', (['deltas[8]'], {}), '(deltas[8])\n', (12865, 12876), True, 'import numpy as np\n'), ((520, 540), 'numpy.cos', 'np.cos', (['tracks.x_gps'], {}), '(tracks.x_gps)\n', (526, 540), True, 'import numpy as np\n'), ((1494, 1507), 'math.cos', 'math.cos', (['lat'], {}), '(lat)\n', (1502, 1507), False, 'import math\n'), ((4919, 4969), 'numpy.array', 'np.array', (['result[8][indexSounder][indexTransducer]'], {}), '(result[8][indexSounder][indexTransducer])\n', (4927, 4969), True, 'import numpy as np\n'), ((5045, 5095), 'numpy.array', 'np.array', (['result[0][indexSounder][indexTransducer]'], {}), '(result[0][indexSounder][indexTransducer])\n', (5053, 5095), True, 'import numpy as np\n'), ((5124, 5174), 'numpy.array', 'np.array', (['result[1][indexSounder][indexTransducer]'], {}), '(result[1][indexSounder][indexTransducer])\n', (5132, 5174), True, 'import numpy as np\n'), ((5203, 5253), 'numpy.array', 'np.array', (['result[4][indexSounder][indexTransducer]'], {}), '(result[4][indexSounder][indexTransducer])\n', (5211, 5253), True, 'import numpy as np\n'), ((5284, 5334), 'numpy.array', 'np.array', (['result[5][indexSounder][indexTransducer]'], {}), '(result[5][indexSounder][indexTransducer])\n', (5292, 5334), True, 'import numpy as np\n'), ((5532, 5582), 'numpy.array', 'np.array', (['result[2][indexSounder][indexTransducer]'], {}), '(result[2][indexSounder][indexTransducer])\n', (5540, 5582), True, 'import numpy as np\n'), ((5611, 5661), 'numpy.array', 'np.array', (['result[3][indexSounder][indexTransducer]'], {}), '(result[3][indexSounder][indexTransducer])\n', (5619, 5661), True, 'import numpy as np\n'), ((5689, 5739), 'numpy.array', 'np.array', (['result[9][indexSounder][indexTransducer]'], {}), '(result[9][indexSounder][indexTransducer])\n', (5697, 5739), True, 'import numpy as np\n'), ((8424, 8462), 'numpy.log10', 'np.log10', (["tracks_data['tailleMoyenne']"], {}), "(tracks_data['tailleMoyenne'])\n", (8432, 8462), True, 'import numpy as np\n'), ((10705, 10751), 'math.atan2', 'math.atan2', (['deltas[5][j - 1]', 'deltas[4][j - 1]'], {}), '(deltas[5][j - 1], deltas[4][j - 1])\n', (10715, 10751), False, 'import math\n'), ((10428, 10475), 'math.atan2', 'math.atan2', (['deltas[1][j - 1]', '(-deltas[0][j - 1])'], {}), '(deltas[1][j - 1], -deltas[0][j - 1])\n', (10438, 10475), False, 'import math\n'), ((10588, 10634), 'math.atan2', 'math.atan2', (['deltas[1][j - 1]', 'deltas[0][j - 1]'], {}), '(deltas[1][j - 1], deltas[0][j - 1])\n', (10598, 10634), False, 'import math\n'), ((10990, 11046), 'math.sqrt', 'math.sqrt', (['(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2)'], {}), '(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2)\n', (10999, 11046), False, 'import math\n'), ((11404, 11460), 'math.sqrt', 'math.sqrt', 
(['(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2)'], {}), '(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2)\n', (11413, 11460), False, 'import math\n')]
|
# This is sample baseline for CIKM Personalization Cup 2016
# by <NAME> & <NAME>
import numpy as np
import pandas as pd
import datetime
start_time = datetime.datetime.now()
print("Running baseline. Now it's", start_time.isoformat())
# Loading queries (assuming the data is placed in dataset-train/)
queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']]
print('Total queries', len(queries))
# Leaving only test queries (the ones which items we have to sort)
queries = queries[queries['is.test'] == True][['queryId', 'items']]
print('Test queries', len(queries))
queries.reset_index(inplace=True)
queries.drop(['index'], axis=1, inplace=True)
# Loading item views; taking itemId column
item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']]
print('Item views', len(item_views))
# Loading clicks; taking itemId column
clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']]
print('Clicks', len(clicks))
# Loading purchases; taking itemId column
purchases = pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']]
print('Purchases', len(purchases))
# Calculating popularity as [amount of views] * 1 + [amount of clicks] * 2 + [amount of purchases] * 3
print('Scoring popularity for each item ...')
prod_pop = {}
for cost, container in enumerate([item_views, clicks, purchases], start=1):  # start=1 so that views get weight 1 (enumerating from 0 would give them no weight at all)
for prod in container.values:
product = str(prod[0])
if product not in prod_pop:
prod_pop[product] = cost
else:
prod_pop[product] += cost
print('Popularity scored for', len(prod_pop), 'products')
# For each query:
# parse items (comma-separated values in last column)
# sort them by score;
# write them to the submission file.
# This is the longest part; it usually takes around 5 minutes.
print('Sorting items per query by popularity...')
answers = []
step = int(len(queries) / 20)
with open('submission.txt', 'w+') as submission:
for i, q in enumerate(queries.values):
# Fancy progressbar
if i % step == 0:
print(5 * i / step, '%...')
# Splitting last column which contains comma-separated items
items = q[-1].split(',')
# Getting scores for each item. Also, inverting scores here, so we can use argsort
items_scores = list(map(lambda x: -prod_pop.get(x, 0), items))
# Sorting items using items_scores order permutation
sorted_items = np.array(items)[np.array(items_scores).argsort()]
# Squashing items together
s = ','.join(sorted_items)
# and writing them to submission
submission.write(str(q[0]) + " " + s + "\n")
end_time = datetime.datetime.now()
print("Done. Now it's ", end_time.isoformat())
print("Calculated baseline in ", (end_time - start_time).seconds, " seconds")
|
[
"pandas.read_csv",
"datetime.datetime.now",
"numpy.array"
] |
[((151, 174), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (172, 174), False, 'import datetime\n'), ((2686, 2709), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2707, 2709), False, 'import datetime\n'), ((306, 361), 'pandas.read_csv', 'pd.read_csv', (['"""dataset-train/train-queries.csv"""'], {'sep': '""";"""'}), "('dataset-train/train-queries.csv', sep=';')\n", (317, 361), True, 'import pandas as pd\n'), ((741, 799), 'pandas.read_csv', 'pd.read_csv', (['"""dataset-train/train-item-views.csv"""'], {'sep': '""";"""'}), "('dataset-train/train-item-views.csv', sep=';')\n", (752, 799), True, 'import pandas as pd\n'), ((898, 952), 'pandas.read_csv', 'pd.read_csv', (['"""dataset-train/train-clicks.csv"""'], {'sep': '""";"""'}), "('dataset-train/train-clicks.csv', sep=';')\n", (909, 952), True, 'import pandas as pd\n'), ((1049, 1106), 'pandas.read_csv', 'pd.read_csv', (['"""dataset-train/train-purchases.csv"""'], {'sep': '""";"""'}), "('dataset-train/train-purchases.csv', sep=';')\n", (1060, 1106), True, 'import pandas as pd\n'), ((2460, 2475), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (2468, 2475), True, 'import numpy as np\n'), ((2476, 2498), 'numpy.array', 'np.array', (['items_scores'], {}), '(items_scores)\n', (2484, 2498), True, 'import numpy as np\n')]
|
# python
import warnings
# Third party imports
import numpy as np
# grAdapt
from .base import Initial
from grAdapt.utils.sampling import sample_corner_bounds
class VerticesForceRandom(Initial):
"""
Samples all vertices if n_evals >= 2 ** len(bounds).
Else, a subset of vertices is sampled.
"""
def __init__(self, sampling_method):
"""
Parameters
----------
sampling_method : grAdapt.sampling.equidistributed Object
Sample low discrepancy sequences when initial point method is not feasible
"""
super().__init__(sampling_method)
def sample(self, bounds, n_evals):
"""Returns a numpy array of sampled points.
Does not include corner points of the hypercube/search space.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n_evals : int
number of initial points sampled by method
Returns
-------
(self.n_evals, len(self.bounds)) numpy array
"""
super().sample(bounds, n_evals)
if 2 ** len(self.bounds) >= self.n_evals:
# sample corner points first which fits in n_evals
d_tilde = int(np.floor(np.log2(self.n_evals)))
corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array
n_tilde = 2 ** d_tilde
# sample random fixed corner points
random_binary_array = np.random.randint(2, size=(len(self.bounds),))
remainder_bounds = self.bounds[d_tilde:]
fix_corners = np.zeros((1, len(remainder_bounds)))
for i in range(len(remainder_bounds)):
if random_binary_array[i] == 0:
fix_corners[0][i] = remainder_bounds[i][0]
else:
fix_corners[0][i] = remainder_bounds[i][1]
fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde)
# corner points with fixed rest dimensions
corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d))
# because 2 ** n_tilde <= n, sample n - n_tilde
if self.n_evals - n_tilde > 0:
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - n_tilde),
x_history=corner_points_fixed)
return np.vstack((corner_points_fixed, random_points))
else:
return corner_points_fixed
else:
corner_points = sample_corner_bounds(self.bounds)
num_corner_points = corner_points.shape[0]
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - num_corner_points),
x_history=corner_points)
return np.vstack((corner_points, random_points))
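
# Worked example of the subsampling case in sample() above (made-up numbers): with len(bounds) = 5
# and n_evals = 10, 2 ** 5 = 32 >= 10, so d_tilde = floor(log2(10)) = 3. The 2 ** 3 = 8 corner
# combinations of the first three bounds are enumerated, the remaining two coordinates are frozen
# at a randomly chosen corner, and the last 10 - 8 = 2 points come from the low-discrepancy sampler.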
|
[
"numpy.log2",
"numpy.hstack",
"grAdapt.utils.sampling.sample_corner_bounds",
"numpy.tile",
"numpy.vstack"
] |
[((1438, 1481), 'grAdapt.utils.sampling.sample_corner_bounds', 'sample_corner_bounds', (['self.bounds[:d_tilde]'], {}), '(self.bounds[:d_tilde])\n', (1458, 1481), False, 'from grAdapt.utils.sampling import sample_corner_bounds\n'), ((2071, 2105), 'numpy.tile', 'np.tile', (['fix_corners', '(n_tilde, 1)'], {}), '(fix_corners, (n_tilde, 1))\n', (2078, 2105), True, 'import numpy as np\n'), ((2214, 2258), 'numpy.hstack', 'np.hstack', (['(corners_d_tilde, fix_corners_2d)'], {}), '((corners_d_tilde, fix_corners_2d))\n', (2223, 2258), True, 'import numpy as np\n'), ((2797, 2830), 'grAdapt.utils.sampling.sample_corner_bounds', 'sample_corner_bounds', (['self.bounds'], {}), '(self.bounds)\n', (2817, 2830), False, 'from grAdapt.utils.sampling import sample_corner_bounds\n'), ((3157, 3198), 'numpy.vstack', 'np.vstack', (['(corner_points, random_points)'], {}), '((corner_points, random_points))\n', (3166, 3198), True, 'import numpy as np\n'), ((2645, 2692), 'numpy.vstack', 'np.vstack', (['(corner_points_fixed, random_points)'], {}), '((corner_points_fixed, random_points))\n', (2654, 2692), True, 'import numpy as np\n'), ((1384, 1405), 'numpy.log2', 'np.log2', (['self.n_evals'], {}), '(self.n_evals)\n', (1391, 1405), True, 'import numpy as np\n')]
|
import numpy as np
import pickle
class onehot:
def __init__(self, sentences):
self.__sentences = sentences
self.__data = {}
self.__count = {}
self.__build()
def __build(self):
self.__word_num = 1
for sentence in self.__sentences:
for word in sentence:
if word in self.__data:
self.__count[word] += 1
else:
self.__count[word] = 1
self.__data[word] = self.__word_num
self.__word_num += 1
def __getitem__(self, word):
if word not in self.__data:
            print("Error! The word is not in its map!")
else:
ret = np.zeros((self.__word_num - 1, 1))
ret[self.__data[word] - 1] = 1
return ret
def get_voca_size(self):
return self.__word_num - 1
def get_word_frequency(self, word):
if word not in self.__data:
            print("Error! The word is not in its map!")
else:
return self.__count[word]
def get_index_of_word(self, word):
if word not in self.__data:
            print("Error! The word is not in its map!")
else:
return self.__data[word] - 1
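
# Minimal usage sketch (made-up toy sentences):
if __name__ == '__main__':
    corpus = [['the', 'cat', 'sat'], ['the', 'dog', 'sat']]
    encoder = onehot(corpus)
    print(encoder.get_voca_size())             # 4 distinct words
    print(encoder.get_word_frequency('the'))   # 2
    print(encoder['cat'].T)                    # one-hot column vector, transposed for readability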
|
[
"numpy.zeros"
] |
[((748, 782), 'numpy.zeros', 'np.zeros', (['(self.__word_num - 1, 1)'], {}), '((self.__word_num - 1, 1))\n', (756, 782), True, 'import numpy as np\n')]
|
import torch
import torch.nn.functional as F
import gym
import gym.spaces
import numpy as np
def autocrop_observations(observations, cell_size, output_size=None):
shape = observations.size()[3:]
if output_size is None:
new_shape = tuple(map(lambda x: (x // cell_size) * cell_size, shape))
else:
new_shape = tuple(map(lambda x: x * cell_size, output_size))
margin3_top = (shape[0] - new_shape[0]) // 2
margin3_bottom = -(shape[0] - new_shape[0] - margin3_top)
margin4_top = (shape[1] - new_shape[1]) // 2
margin4_bottom = -(shape[1] - new_shape[1] - margin4_top)
if margin3_bottom == 0:
margin3_bottom = None
if margin4_bottom == 0:
margin4_bottom = None
return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom]
def pixel_control_reward(observations, cell_size=4, output_size=None):
'''
Args:
observations: A tensor of shape `[B,T+1,C,H,W]`, where
* `T` is the sequence length, `B` is the batch size.
* `H` is height, `W` is width.
* `C...` is at least one channel dimension (e.g., colour, stack).
* `T` and `B` can be statically unknown.
cell_size: The size of each cell.
Returns:
shape (B, T, 1, H / cell_size, W / cell_size)
'''
with torch.no_grad():
observations = autocrop_observations(observations, cell_size, output_size=output_size)
abs_observation_diff = observations[:, 1:] - observations[:, :-1]
abs_observation_diff.abs_()
obs_shape = abs_observation_diff.size()
abs_diff = abs_observation_diff.view(-1, *obs_shape[2:])
avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size)
avg_abs_diff = avg_abs_diff.mean(1, keepdim=True)
return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:])
def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4):
action_value_shape = action_values.size()
batch_shape = actions.size()[:2]
with torch.no_grad():
T = observations.size()[1] - 1
pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:])
last_rewards = action_values[:, -1].max(1, keepdim=True)[0]
for i in reversed(range(T)):
previous_rewards = last_rewards if i + 1 == T else pseudo_rewards[:, i + 1]
pseudo_rewards[:, i].add_(gamma, previous_rewards)
q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4])
q_actions = torch.gather(action_values[:, :-1], 2, q_actions)
loss = F.mse_loss(pseudo_rewards, q_actions)
return loss
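
# Shape-level usage sketch with random tensors (sizes are illustrative): observations hold T+1
# frames, actions and action_values cover the same rollout, and the auxiliary Q-map has one cell
# per 4x4 pixel block. Note that pseudo_rewards[:, i].add_(gamma, previous_rewards) above uses the
# old add_(scalar, tensor) overload, which newer PyTorch releases deprecate (and may reject) in
# favour of add_(tensor, alpha=scalar).
if __name__ == '__main__':
    B, T, C, H, W, n_actions = 2, 4, 3, 84, 84, 6
    obs = torch.rand(B, T + 1, C, H, W)
    acts = torch.randint(0, n_actions, (B, T))
    q_maps = torch.rand(B, T + 1, n_actions, H // 4, W // 4)
    print(pixel_control_loss(obs, acts, q_maps, gamma=0.9, cell_size=4))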
def reward_prediction_loss(predictions, rewards):
with torch.no_grad():
target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device)
target[:, 0] = rewards == 0
target[:, 1] = rewards > 0
target[:, 2] = rewards < 0
return F.binary_cross_entropy_with_logits(predictions, target)
def discounted_commulative_reward(rewards, base_value, gamma):
cummulative_reward = rewards.clone()
max_t = cummulative_reward.size()[1]
for i in reversed(range(max_t)):
next_values = base_value if i + 1 == max_t else cummulative_reward[:, i + 1]
cummulative_reward[:, i].add_(gamma, next_values)
return cummulative_reward
def value_loss(values, rewards, gamma):
base_value = values[:, -1]
with torch.no_grad():
cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma)
return F.mse_loss(values[:, :-1], cummulative_reward)
class UnrealEnvBaseWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.last_action_reward = None
self.observation_space = gym.spaces.Tuple((
env.observation_space,
gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32)
))
def reset(self):
self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32)
return self.observation(self.env.reset())
def step(self, action):
observation, reward, done, stats = self.env.step(action)
self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32)
self.last_action_reward[action] = 1.0
self.last_action_reward[-1] = np.clip(reward, -1, 1)
return self.observation(observation), reward, done, stats
def observation(self, observation):
return (observation, self.last_action_reward)
|
[
"torch.gather",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.mse_loss",
"numpy.zeros",
"torch.nn.functional.binary_cross_entropy_with_logits",
"numpy.clip",
"gym.spaces.Box",
"torch.no_grad"
] |
[((2582, 2631), 'torch.gather', 'torch.gather', (['action_values[:, :-1]', '(2)', 'q_actions'], {}), '(action_values[:, :-1], 2, q_actions)\n', (2594, 2631), False, 'import torch\n'), ((2644, 2681), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pseudo_rewards', 'q_actions'], {}), '(pseudo_rewards, q_actions)\n', (2654, 2681), True, 'import torch.nn.functional as F\n'), ((2991, 3046), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['predictions', 'target'], {}), '(predictions, target)\n', (3025, 3046), True, 'import torch.nn.functional as F\n'), ((3602, 3648), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['values[:, :-1]', 'cummulative_reward'], {}), '(values[:, :-1], cummulative_reward)\n', (3612, 3648), True, 'import torch.nn.functional as F\n'), ((1302, 1317), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1315, 1317), False, 'import torch\n'), ((1661, 1712), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['abs_diff', 'cell_size'], {'stride': 'cell_size'}), '(abs_diff, cell_size, stride=cell_size)\n', (1673, 1712), True, 'import torch.nn.functional as F\n'), ((2026, 2041), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2039, 2041), False, 'import torch\n'), ((2759, 2774), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2772, 2774), False, 'import torch\n'), ((3487, 3502), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3500, 3502), False, 'import torch\n'), ((4026, 4077), 'numpy.zeros', 'np.zeros', (['(self.action_space.n + 1)'], {'dtype': 'np.float32'}), '(self.action_space.n + 1, dtype=np.float32)\n', (4034, 4077), True, 'import numpy as np\n'), ((4256, 4307), 'numpy.zeros', 'np.zeros', (['(self.action_space.n + 1)'], {'dtype': 'np.float32'}), '(self.action_space.n + 1, dtype=np.float32)\n', (4264, 4307), True, 'import numpy as np\n'), ((4392, 4414), 'numpy.clip', 'np.clip', (['reward', '(-1)', '(1)'], {}), '(reward, -1, 1)\n', (4399, 4414), True, 'import numpy as np\n'), ((3889, 3958), 'gym.spaces.Box', 'gym.spaces.Box', (['(0.0)', '(1.0)', '(env.action_space.n + 1,)'], {'dtype': 'np.float32'}), '(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32)\n', (3903, 3958), False, 'import gym\n')]
|
import numpy as np
from scipy.spatial.transform import Rotation
import numpy as np
import pybullet as p
def todegree(w):
return w*180/np.pi
def torad(w):
return w*np.pi/180
def angle(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def add_one(index):
if index+1 == 3:
index_out = 0
else:
index_out = index+1
return index_out
def to_H(R, T=np.zeros(3)):
H = np.eye(4)
H[:-1,:-1] = R
H[:-1,-1] = T
return H
def closest_axis_2_userdefined(H, vec):
#print (H)
#print (np.linalg.inv(H[:-1,:-1]))
min_angle = 190
x_des = np.array(vec)
index = 0
sign = 0
reverse = False
for i in range(3):
x = H[:-1, i]
theta = todegree(angle(x, x_des))
#print (theta)
if theta > 90:
theta = theta - 180
if theta ==0:
reverse = True
if min_angle > np.abs(theta):
min_angle = np.abs(theta)
index = i
if theta == 0.:
if reverse:
sign = -1
else:
sign = 1
else:
sign = np.sign(theta)
return min_angle, index, sign
def R_2vect(vector_orig, vector_fin):
"""Calculate the rotation matrix required to rotate from one vector to another.
    For the rotation of one vector to another, there is an infinite set of rotation matrices
    possible. Due to axial symmetry, the rotation axis can be any vector lying in the symmetry
plane between the two vectors. Hence the axis-angle convention will be used to construct the
matrix with the rotation axis defined as the cross product of the two vectors. The rotation
angle is the arccosine of the dot product of the two unit vectors.
Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
the rotation matrix R is::
| 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |
R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |
| -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |
@param R: The 3x3 rotation matrix to update.
@type R: 3x3 numpy array
@param vector_orig: The unrotated vector defined in the reference frame.
@type vector_orig: numpy array, len 3
@param vector_fin: The rotated vector defined in the reference frame.
@type vector_fin: numpy array, len 3
"""
# Convert the vectors to unit vectors.
vector_orig = vector_orig / np.linalg.norm(vector_orig)
vector_fin = vector_fin / np.linalg.norm(vector_fin)
# The rotation axis (normalised).
axis = np.cross(vector_orig, vector_fin)
axis_len = np.linalg.norm(axis)
if axis_len != 0.0:
axis = axis / axis_len
# Alias the axis coordinates.
x = axis[0]
y = axis[1]
z = axis[2]
if x==0 and y==0 and z==0:
z=1
# The rotation angle.
angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1))
# Trig functions (only need to do this maths once!).
ca = np.cos(angle)
sa = np.sin(angle)
R = np.eye(4)
# Calculate the rotation matrix elements.
R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)
R[0, 1] = -z * sa + (1.0 - ca) * x * y
R[0, 2] = y * sa + (1.0 - ca) * x * z
R[1, 0] = z * sa + (1.0 - ca) * x * y
R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)
R[1, 2] = -x * sa + (1.0 - ca) * y * z
R[2, 0] = -y * sa + (1.0 - ca) * x * z
R[2, 1] = x * sa + (1.0 - ca) * y * z
R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)
return R, axis, angle
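
# Quick self-check sketch for R_2vect: applying the upper-left 3x3 block of the returned
# homogeneous matrix to vector_orig should reproduce the unit vector of vector_fin.
if __name__ == '__main__':
    v_from = np.array([1.0, 0.0, 0.0])
    v_to = np.array([0.0, 0.0, 1.0])
    R, axis, ang = R_2vect(v_from, v_to)
    print(np.round(R[:3, :3] @ v_from, 6))    # -> [0. 0. 1.]
    print(axis, todegree(ang))               # axis (0, -1, 0), angle 90 degrees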
class RotationPrimitives():
def __init__(self, H0, Hg):
self.H0 = H0
self.Hg = Hg
def set_goal(self, Hg):
self.Hg = Hg
def set_current_pose(self,H0):
self.H0 = H0
def get_control_seq(self, ax=None):
## Control Sequence will provide rotation vector and desired rotation to achieve target ##
################## Goal to Viapoint 2 ###################################
theta, index, sign = self.closest_axis_2_normal(self.Hg)
des_vec = np.array([0, 0, sign * 1])
R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec)
H_via2 = np.matmul(R, self.Hg)
H_via2[:-1,-1] = self.Hg[:-1,-1]
H_via2[2, -1] = 0.
r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw)
c2g = [r_vec_via2g, -ang_via, r_vec_via2gw]
#########################################################################
############# From Floor to Viapoint 1 ####################
index_H0, sign_H0 = self.find_index_z(self.H0)
#theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0)
# print (index_H0, sign_H0, index, sign)
# input ("WAIT")
rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign)
if rot_index is not None:
r_vec_floor = np.zeros(3)
r_vec_floor[rot_index] = 1
rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor)
R_floor_1 = rotation_floor.as_matrix()
R_floor_1 = to_H(R=R_floor_1)
H_via1 = np.matmul(self.H0, R_floor_1)
#H_via1[1,-1] = 0.3
else:
r_vec_floor = np.zeros(3)
r_vec_floor[index] = 1
ang_floor = 0.
H_via1 = self.H0
r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor)
c01 = [r_vec_floor, ang_floor, r_vec_floor_w]
####################################################
############ From Viapoint 1 to Viapoint 2 ################
if index == 0:
vec_1 = H_via1[:-1, 1]
vec_2 = H_via2[:-1, 1]
else:
vec_1 = H_via1[:-1, 0]
vec_2 = H_via2[:-1, 0]
R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2)
r_vec_via12w = np.zeros(3)
r_vec_via12w[2] = np.sign(r_vec_via12_p[2])
r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w)
c12 = [r_vec_via12, ang_via12, r_vec_via12w]
###########################################################
##### COMPUTE SHORTCUT: ########
rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix())
rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix())
rot1g = np.matmul(rot_12,rot2g)
if np.allclose(rot1g, np.eye(4)):
c1g = [np.array([0,0,1]), 0.]
else:
rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec()
c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)]
##### Compute rotation from start to Via-2 ##
R_via2 = H_via2[:-1,:-1]
R_init = self.H0[:-1,:-1]
R_to_2 = np.matmul(R_init.T, R_via2)
if np.allclose(R_to_2, np.eye(3)):
c_to_2 = [np.array([0, 0, 1]), 0.]
else:
rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec()
c_to_2 = [rot_to_2 / np.linalg.norm(rot_to_2, ord=2), np.linalg.norm(rot_to_2, ord=2)]
##### Compute rotation from start to Goal ###
R_g = self.Hg[:-1, :-1]
R_init = self.H0[:-1, :-1]
R_to_g = np.matmul(R_init.T, R_g)
if np.allclose(R_to_g, np.eye(3)):
c_to_g = [np.array([0, 0, 1]), 0.]
else:
rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec()
c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)]
command_seq = [c01, c12,c2g]
return command_seq, [c1g], [c_to_2, c_to_g]
def find_index_z(self, H):
big_Z = 0.0
index = 0
for i in range(3):
z = H[2, i]
# print(z)
if np.abs(z) > big_Z:
big_Z = np.abs(z)
sign = np.sign(z)
index = i
return index, sign
def find_rot_z(self, index_H0, sign_H0, index, sign):
if index == index_H0:
if sign == sign_H0:
return None, None
else:
angle = np.pi
if index == 0:
rot_over = 1
else:
rot_over = 0
return rot_over, angle
else:
rot_over = 0
while (rot_over == index or rot_over == index_H0):
rot_over += 1
if sign == sign_H0:
angle = -np.pi / 2
if add_one(rot_over) != index_H0:
angle = -angle
else:
angle = np.pi / 2
if add_one(rot_over) != index_H0:
angle = -angle
return rot_over, angle
def closest_axis_2_normal(self, H):
# print (H)
# print (np.linalg.inv(H[:-1,:-1]))
min_angle = 190
x_des = np.array([0, 0, 1])
index = 0
sign = 0
reverse = False
for i in range(3):
x = H[:-1, i]
theta = todegree(angle(x, x_des))
# print (theta)
if theta > 90:
theta = theta - 180
if theta ==0:
reverse = True
if min_angle > np.abs(theta):
min_angle = np.abs(theta)
index = i
if theta == 0.:
if reverse:
sign = -1
else:
sign = 1
else:
sign = np.sign(theta)
return min_angle, index, sign
def R_2vect(self, vector_orig, vector_fin):
"""Calculate the rotation matrix required to rotate from one vector to another.
        For the rotation of one vector to another, there is an infinite set of rotation matrices
        possible. Due to axial symmetry, the rotation axis can be any vector lying in the symmetry
plane between the two vectors. Hence the axis-angle convention will be used to construct the
matrix with the rotation axis defined as the cross product of the two vectors. The rotation
angle is the arccosine of the dot product of the two unit vectors.
Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
the rotation matrix R is::
| 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |
R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |
| -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |
@param R: The 3x3 rotation matrix to update.
@type R: 3x3 numpy array
@param vector_orig: The unrotated vector defined in the reference frame.
@type vector_orig: numpy array, len 3
@param vector_fin: The rotated vector defined in the reference frame.
@type vector_fin: numpy array, len 3
"""
# Convert the vectors to unit vectors.
vector_orig = vector_orig / np.linalg.norm(vector_orig)
vector_fin = vector_fin / np.linalg.norm(vector_fin)
# The rotation axis (normalised).
axis = np.cross(vector_orig, vector_fin)
axis_len = np.linalg.norm(axis)
if axis_len != 0.0:
axis = axis / axis_len
# Alias the axis coordinates.
x = axis[0]
y = axis[1]
z = axis[2]
if x==0 and y==0 and z==0:
z=1
# The rotation angle.
angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1))
# Trig functions (only need to do this maths once!).
ca = np.cos(angle)
sa = np.sin(angle)
R = np.eye(4)
# Calculate the rotation matrix elements.
R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)
R[0, 1] = -z * sa + (1.0 - ca) * x * y
R[0, 2] = y * sa + (1.0 - ca) * x * z
R[1, 0] = z * sa + (1.0 - ca) * x * y
R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)
R[1, 2] = -x * sa + (1.0 - ca) * y * z
R[2, 0] = -y * sa + (1.0 - ca) * x * z
R[2, 1] = x * sa + (1.0 - ca) * y * z
R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)
return R, axis, angle
def calculate_mutltiple_goals(init_information, obs):
goal_orients = []
    # This first calculation step computes a via-point, i.e. it yields a goal orientation for some axis alignment
init_orient = np.zeros(3)
init_orient[:2] = np.asarray(init_information[:2])
init_orient = init_orient / np.linalg.norm(init_orient)
current_orient = np.asarray(p.getMatrixFromQuaternion(obs["object_orientation"])).reshape(3, 3)
theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient)
des_vec = sign * np.array(init_orient)
Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)
first_goal = np.matmul(Rot1[:-1, :-1], current_orient)
first_goal = Rotation.from_matrix(first_goal)
    goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]])  # small noise added to the quaternion; exact values cause problems further down the pipeline
    # This second calculation applies the relative transformation desired, based on the current observation.
# now take into account the desired rotation from the target information:
des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3)
init_orient = np.asarray([1,0,0])
theta, index, sign = closest_axis_2_userdefined(
to_H(current_orient), init_orient)
des_vec = sign * np.array(init_orient)
Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)
second_goal = np.matmul(Rot1[:-1, :-1], current_orient)
# now apply rotation:
second_goal = np.matmul(des_rotation, second_goal)
# now rotate back to orientation that we are now at:
second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal)
second_goal = Rotation.from_matrix(second_goal)
    goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]])  # small noise added to the quaternion; exact values cause problems further down the pipeline
return goal_orients
|
[
"numpy.abs",
"numpy.dot",
"numpy.asarray",
"pybullet.getMatrixFromQuaternion",
"numpy.cross",
"numpy.zeros",
"numpy.sign",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"numpy.matmul",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.eye",
"scipy.spatial.transform.Rotation.from_rotvec"
] |
[((583, 594), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (591, 594), True, 'import numpy as np\n'), ((605, 614), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (611, 614), True, 'import numpy as np\n'), ((792, 805), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (800, 805), True, 'import numpy as np\n'), ((2950, 2983), 'numpy.cross', 'np.cross', (['vector_orig', 'vector_fin'], {}), '(vector_orig, vector_fin)\n', (2958, 2983), True, 'import numpy as np\n'), ((2999, 3019), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (3013, 3019), True, 'import numpy as np\n'), ((3364, 3377), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3370, 3377), True, 'import numpy as np\n'), ((3387, 3400), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3393, 3400), True, 'import numpy as np\n'), ((3410, 3419), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3416, 3419), True, 'import numpy as np\n'), ((12756, 12767), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12764, 12767), True, 'import numpy as np\n'), ((12790, 12822), 'numpy.asarray', 'np.asarray', (['init_information[:2]'], {}), '(init_information[:2])\n', (12800, 12822), True, 'import numpy as np\n'), ((13207, 13248), 'numpy.matmul', 'np.matmul', (['Rot1[:-1, :-1]', 'current_orient'], {}), '(Rot1[:-1, :-1], current_orient)\n', (13216, 13248), True, 'import numpy as np\n'), ((13266, 13298), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['first_goal'], {}), '(first_goal)\n', (13286, 13298), False, 'from scipy.spatial.transform import Rotation\n'), ((13758, 13779), 'numpy.asarray', 'np.asarray', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (13768, 13779), True, 'import numpy as np\n'), ((14012, 14053), 'numpy.matmul', 'np.matmul', (['Rot1[:-1, :-1]', 'current_orient'], {}), '(Rot1[:-1, :-1], current_orient)\n', (14021, 14053), True, 'import numpy as np\n'), ((14098, 14134), 'numpy.matmul', 'np.matmul', (['des_rotation', 'second_goal'], {}), '(des_rotation, second_goal)\n', (14107, 14134), True, 'import numpy as np\n'), ((14210, 14250), 'numpy.matmul', 'np.matmul', (['Rot1[:-1, :-1].T', 'second_goal'], {}), '(Rot1[:-1, :-1].T, second_goal)\n', (14219, 14250), True, 'import numpy as np\n'), ((14269, 14302), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['second_goal'], {}), '(second_goal)\n', (14289, 14302), False, 'from scipy.spatial.transform import Rotation\n'), ((420, 442), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (434, 442), True, 'import numpy as np\n'), ((2815, 2842), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_orig'], {}), '(vector_orig)\n', (2829, 2842), True, 'import numpy as np\n'), ((2873, 2899), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_fin'], {}), '(vector_fin)\n', (2887, 2899), True, 'import numpy as np\n'), ((4410, 4436), 'numpy.array', 'np.array', (['[0, 0, sign * 1]'], {}), '([0, 0, sign * 1])\n', (4418, 4436), True, 'import numpy as np\n'), ((4532, 4553), 'numpy.matmul', 'np.matmul', (['R', 'self.Hg'], {}), '(R, self.Hg)\n', (4541, 4553), True, 'import numpy as np\n'), ((4644, 4687), 'numpy.matmul', 'np.matmul', (['H_via2[:-1, :-1].T', 'r_vec_via2gw'], {}), '(H_via2[:-1, :-1].T, r_vec_via2gw)\n', (4653, 4687), True, 'import numpy as np\n'), ((5702, 5743), 'numpy.matmul', 'np.matmul', (['self.H0[:-1, :-1]', 'r_vec_floor'], {}), '(self.H0[:-1, :-1], r_vec_floor)\n', (5711, 5743), True, 'import numpy as np\n'), ((6194, 6205), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6202, 6205), True, 'import 
numpy as np\n'), ((6232, 6257), 'numpy.sign', 'np.sign', (['r_vec_via12_p[2]'], {}), '(r_vec_via12_p[2])\n', (6239, 6257), True, 'import numpy as np\n'), ((6280, 6323), 'numpy.matmul', 'np.matmul', (['H_via1[:-1, :-1].T', 'r_vec_via12w'], {}), '(H_via1[:-1, :-1].T, r_vec_via12w)\n', (6289, 6323), True, 'import numpy as np\n'), ((6643, 6667), 'numpy.matmul', 'np.matmul', (['rot_12', 'rot2g'], {}), '(rot_12, rot2g)\n', (6652, 6667), True, 'import numpy as np\n'), ((7058, 7085), 'numpy.matmul', 'np.matmul', (['R_init.T', 'R_via2'], {}), '(R_init.T, R_via2)\n', (7067, 7085), True, 'import numpy as np\n'), ((7492, 7516), 'numpy.matmul', 'np.matmul', (['R_init.T', 'R_g'], {}), '(R_init.T, R_g)\n', (7501, 7516), True, 'import numpy as np\n'), ((9135, 9154), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9143, 9154), True, 'import numpy as np\n'), ((11491, 11524), 'numpy.cross', 'np.cross', (['vector_orig', 'vector_fin'], {}), '(vector_orig, vector_fin)\n', (11499, 11524), True, 'import numpy as np\n'), ((11544, 11564), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (11558, 11564), True, 'import numpy as np\n'), ((11957, 11970), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (11963, 11970), True, 'import numpy as np\n'), ((11984, 11997), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (11990, 11997), True, 'import numpy as np\n'), ((12011, 12020), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12017, 12020), True, 'import numpy as np\n'), ((12855, 12882), 'numpy.linalg.norm', 'np.linalg.norm', (['init_orient'], {}), '(init_orient)\n', (12869, 12882), True, 'import numpy as np\n'), ((13091, 13112), 'numpy.array', 'np.array', (['init_orient'], {}), '(init_orient)\n', (13099, 13112), True, 'import numpy as np\n'), ((13895, 13916), 'numpy.array', 'np.array', (['init_orient'], {}), '(init_orient)\n', (13903, 13916), True, 'import numpy as np\n'), ((289, 307), 'numpy.dot', 'np.dot', (['v1_u', 'v2_u'], {}), '(v1_u, v2_u)\n', (295, 307), True, 'import numpy as np\n'), ((1098, 1111), 'numpy.abs', 'np.abs', (['theta'], {}), '(theta)\n', (1104, 1111), True, 'import numpy as np\n'), ((1137, 1150), 'numpy.abs', 'np.abs', (['theta'], {}), '(theta)\n', (1143, 1150), True, 'import numpy as np\n'), ((3258, 3289), 'numpy.dot', 'np.dot', (['vector_orig', 'vector_fin'], {}), '(vector_orig, vector_fin)\n', (3264, 3289), True, 'import numpy as np\n'), ((5233, 5244), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5241, 5244), True, 'import numpy as np\n'), ((5313, 5358), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', (['(ang_floor * r_vec_floor)'], {}), '(ang_floor * r_vec_floor)\n', (5333, 5358), False, 'from scipy.spatial.transform import Rotation\n'), ((5473, 5502), 'numpy.matmul', 'np.matmul', (['self.H0', 'R_floor_1'], {}), '(self.H0, R_floor_1)\n', (5482, 5502), True, 'import numpy as np\n'), ((5575, 5586), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5583, 5586), True, 'import numpy as np\n'), ((6697, 6706), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6703, 6706), True, 'import numpy as np\n'), ((7117, 7126), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7123, 7126), True, 'import numpy as np\n'), ((7548, 7557), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7554, 7557), True, 'import numpy as np\n'), ((11344, 11371), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_orig'], {}), '(vector_orig)\n', (11358, 11371), True, 'import numpy as np\n'), ((11406, 11432), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_fin'], {}), 
'(vector_fin)\n', (11420, 11432), True, 'import numpy as np\n'), ((1351, 1365), 'numpy.sign', 'np.sign', (['theta'], {}), '(theta)\n', (1358, 1365), True, 'import numpy as np\n'), ((6728, 6747), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6736, 6747), True, 'import numpy as np\n'), ((6890, 6918), 'numpy.linalg.norm', 'np.linalg.norm', (['rot1g'], {'ord': '(2)'}), '(rot1g, ord=2)\n', (6904, 6918), True, 'import numpy as np\n'), ((7151, 7170), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (7159, 7170), True, 'import numpy as np\n'), ((7320, 7351), 'numpy.linalg.norm', 'np.linalg.norm', (['rot_to_2'], {'ord': '(2)'}), '(rot_to_2, ord=2)\n', (7334, 7351), True, 'import numpy as np\n'), ((7582, 7601), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (7590, 7601), True, 'import numpy as np\n'), ((7751, 7782), 'numpy.linalg.norm', 'np.linalg.norm', (['rot_to_g'], {'ord': '(2)'}), '(rot_to_g, ord=2)\n', (7765, 7782), True, 'import numpy as np\n'), ((8033, 8042), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (8039, 8042), True, 'import numpy as np\n'), ((8076, 8085), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (8082, 8085), True, 'import numpy as np\n'), ((8109, 8119), 'numpy.sign', 'np.sign', (['z'], {}), '(z)\n', (8116, 8119), True, 'import numpy as np\n'), ((9496, 9509), 'numpy.abs', 'np.abs', (['theta'], {}), '(theta)\n', (9502, 9509), True, 'import numpy as np\n'), ((9539, 9552), 'numpy.abs', 'np.abs', (['theta'], {}), '(theta)\n', (9545, 9552), True, 'import numpy as np\n'), ((11843, 11874), 'numpy.dot', 'np.dot', (['vector_orig', 'vector_fin'], {}), '(vector_orig, vector_fin)\n', (11849, 11874), True, 'import numpy as np\n'), ((12915, 12967), 'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (["obs['object_orientation']"], {}), "(obs['object_orientation'])\n", (12940, 12967), True, 'import pybullet as p\n'), ((13674, 13724), 'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (['init_information[10:14]'], {}), '(init_information[10:14])\n', (13699, 13724), True, 'import pybullet as p\n'), ((6508, 6545), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', (['(c12[0] * c12[1])'], {}), '(c12[0] * c12[1])\n', (6528, 6545), False, 'from scipy.spatial.transform import Rotation\n'), ((6578, 6615), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', (['(c2g[0] * c2g[1])'], {}), '(c2g[0] * c2g[1])\n', (6598, 6615), False, 'from scipy.spatial.transform import Rotation\n'), ((6785, 6822), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['rot1g[:-1, :-1]'], {}), '(rot1g[:-1, :-1])\n', (6805, 6822), False, 'from scipy.spatial.transform import Rotation\n'), ((6861, 6889), 'numpy.linalg.norm', 'np.linalg.norm', (['rot1g'], {'ord': '(2)'}), '(rot1g, ord=2)\n', (6875, 6889), True, 'import numpy as np\n'), ((7213, 7241), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['R_to_2'], {}), '(R_to_2)\n', (7233, 7241), False, 'from scipy.spatial.transform import Rotation\n'), ((7287, 7318), 'numpy.linalg.norm', 'np.linalg.norm', (['rot_to_2'], {'ord': '(2)'}), '(rot_to_2, ord=2)\n', (7301, 7318), True, 'import numpy as np\n'), ((7644, 7672), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['R_to_g'], {}), '(R_to_g)\n', (7664, 7672), False, 'from scipy.spatial.transform import Rotation\n'), ((7718, 7749), 'numpy.linalg.norm', 'np.linalg.norm', (['rot_to_g'], {'ord': '(2)'}), '(rot_to_g, ord=2)\n', (7732, 7749), True, 
'import numpy as np\n'), ((9785, 9799), 'numpy.sign', 'np.sign', (['theta'], {}), '(theta)\n', (9792, 9799), True, 'import numpy as np\n')]
|
from collections.abc import Iterable
import numpy as np
import pandas as pd
import param
import xarray as xr
from matplotlib.colors import LinearSegmentedColormap, rgb2hex
from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS
from .util import is_str
class Easing(param.Parameterized):
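    """Interpolate values between base states to produce eased in-between frames."""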
interp = param.ClassSelector(
default=None,
class_=Iterable,
doc=f"Interpolation method; {INTERPS}",
precedence=PRECEDENCES["interp"],
)
ease = param.ClassSelector(
default="in_out",
class_=Iterable,
doc=f"Type of easing; {EASES}",
precedence=PRECEDENCES["interp"],
)
frames = param.Integer(
default=None,
bounds=(1, None),
doc="Number of frames between each base state",
precedence=PRECEDENCES["interp"],
)
revert = param.ObjectSelector(
default=None,
objects=REVERTS,
doc="Method for reverting to the initial state; "
"boomerang finds the shortest path to the initial state, "
"traceback backtracks the original path to the initial state, and "
"rollback is like traceback, but disregards the "
"original's path durations",
precedence=PRECEDENCES["interp"],
)
num_states = param.Integer(doc="Number of states", **DEFAULTS["num_kwds"])
num_steps = param.Integer(
doc="Number of frames between each base state", **DEFAULTS["num_kwds"]
)
def __init__(self, **kwds):
super().__init__(**kwds)
def interpolate(self, da, name=""):
interp = self.interp or "cubic"
ease = self.ease
da_origin = da.copy()
is_xarray = isinstance(da, xr.DataArray)
is_bar = False
if is_xarray:
if "state" not in da.dims:
return da_origin
(
da,
name,
dims,
coords,
interp,
ease,
is_bar,
is_errorbar_morph,
) = self._prep_xarray(da)
array = self._prep_array(da)
num_items, num_states, num_steps, num_result = self._calc_shapes(array)
if (num_steps == 1 or num_states == 1) and self.revert is None:
return da_origin
steps = np.linspace(0, 1, num_steps)
interp_args = (steps, interp, ease, num_states, num_steps, num_items)
array_dtype = array.dtype
if name in ["duration", "remark", "xerr", "yerr"] and not is_errorbar_morph:
result = self._interp_first(
array, num_states, num_steps, num_items, num_result, name
)
elif interp == "fill" or name.endswith(
("zoom", "discrete_trail", "morph_trail", "tick_label", "bar_label")
):
result = self._interp_fill(array, num_states, num_steps, name)
elif np.issubdtype(array_dtype, np.datetime64):
result = self._interp_time(array, pd.to_datetime, *interp_args)
elif np.issubdtype(array_dtype, np.timedelta64):
result = self._interp_time(array, pd.to_timedelta, *interp_args)
elif np.issubdtype(array_dtype, np.number) and not is_bar:
if name == "central_longitude":
interp = "linear"
result = self._interp_numeric(array, *interp_args)
elif name in "c": # must be after number
result = self._interp_color(array, num_result)
elif is_bar:
result = self._interp_fill(array, num_states, num_steps, name)
else: # str
result = self._interp_text(array, num_states, num_steps, num_result)
if self.revert in ["traceback", "rollback"]:
result = self._apply_revert(result, name)
if is_xarray:
result = self._rebuild_da(result, da, dims, coords)
return result
def _prep_xarray(self, da):
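        # Pull interp/ease hints from the attrs and reshape to (item, state) so
        # the numeric routines can operate on a plain 2-D array.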
name = da.name
interp = da.attrs.get("interp")
ease = da.attrs.get("ease")
for item_dim in da.dims:
if "item" in item_dim:
if "batch" in da.dims:
da = da.transpose(item_dim, "batch", "state", ...)
else:
da = da.transpose(item_dim, "state", ...)
break
dims = da.dims
if da.ndim > 2: # more than (item, state)
if "grid_item" in dims:
da = da.stack({"stacked": ["grid_item", "grid_y", "grid_x"]})
elif "batch" in dims:
da = da.stack({"stacked": [item_dim, "batch"]})
da = da.transpose("stacked", "state")
coords = da.drop_vars("state", errors="ignore").coords
is_bar = da.attrs.get("is_bar")
is_errorbar_morph = da.attrs.get("is_errorbar_morph")
return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph
def _prep_array(self, da):
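        # Ensure a 2-D (item, state) array; boomerang appends the first state
        # again so the sequence loops back to where it started.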
array = np.array(da)
if array.ndim == 1:
array = array[np.newaxis, :]
if self.revert == "boomerang":
array = np.hstack([array, array[:, :1]])
return array
def _calc_shapes(self, array):
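        # Derive the number of in-between frames per transition, either from
        # self.frames or from a heuristic based on the number of states.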
num_items, num_states = array.shape
if self.frames is None:
if num_states < 10:
num_steps = int(np.ceil(60 / num_states))
else:
num_steps = int(np.ceil(100 / num_states))
else:
num_steps = self.frames
with param.edit_constant(self):
self.num_steps = num_steps
num_result = (num_states - 1) * num_steps
return num_items, num_states, num_steps, num_result
def _apply_revert(self, result, name):
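        # Append the reversed frames so the sequence retraces its path back to
        # the start; rollback swaps in uniform durations for the way back.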
if result.ndim == 1:
result_back = result[::-1]
else:
result_back = result[:, ::-1]
if name == "duration" and self.revert == "rollback":
result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :]
result = np.hstack([result, result_back])
return result
def _rebuild_da(self, result, da, dims, coords):
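        # Wrap the interpolated array back into a DataArray with the original
        # dims, coords, and attrs.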
if len(dims) == 1:
result = result.squeeze()
result = xr.DataArray(
result,
dims=da.dims,
coords=coords,
name=da.name,
attrs=da.attrs,
)
if "stacked" in result.dims:
result = result.unstack().transpose(*dims)
return result
def _interp_first(self, array, num_states, num_steps, num_items, num_result, name):
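        # No blending: place each base value at the frame where its state
        # begins and leave the in-between frames empty/zero.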
if is_str(array):
fill = ""
            dtype = object  # builtin object; the np.object alias is deprecated
else:
fill = 0.0
dtype = None
result = np.full((num_items, num_result), fill, dtype=dtype)
indices = np.arange(num_states) * num_steps
indices[-1] -= 1
result[:, indices] = array # (1, num_states)
return result
def _interp_fill(self, array, num_states, num_steps, name):
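        # Forward-fill each base value across its in-between frames rather than
        # blending (discrete trails keep the gaps).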
indices = np.arange(num_states * num_steps - num_steps)
result = (
pd.DataFrame(
array,
columns=np.arange(0, num_states * num_steps, num_steps),
)
.T.reindex(indices)
.T
)
if not name.endswith("discrete_trail"):
result = result.ffill(axis=1).fillna("").values
result[:, -1] = array[:, -1]
else:
result = result.values
return result
def _interp_color(self, array, num_result):
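        # Build a colormap from each item's colors and sample it to obtain a
        # smooth sequence of hex colors across the output frames.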
results = []
for colors in array: # item, state
cmap = LinearSegmentedColormap.from_list("eased", colors, N=num_result)
results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))])
result = np.array(results)
return result
def _interp_text(self, array, num_states, num_steps, num_result):
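        # Strings cannot be blended; repeat each label across frames and shift
        # so the change lands between base states.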
result = np.repeat(array, num_steps, axis=-1)
num_roll = -int(np.ceil(num_steps / num_states * 2))
if num_states > 2:
result = np.roll(result, num_roll, axis=-1)
result = result[:, :num_result]
else:
half_way = int(num_result / 2)
result = result[:, half_way:-half_way]
if num_steps % 2 != 0:
result = result[:, :-1]
return result
def _interp_time(
self, array, conversion, steps, interp, ease, num_states, num_steps, num_items
):
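        # Convert datetimes/timedeltas to floats, interpolate numerically, then
        # convert back with pandas.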
array = array.astype(float)
result = self._interp_numeric(
array, steps, interp, ease, num_states, num_steps, num_items
)
result = conversion(result.ravel()).values
result = result.reshape(num_items, -1)
return result
def _interp_numeric(
self, array, steps, interp, ease, num_states, num_steps, num_items
):
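        # Core easing: blend consecutive states using weights from the selected
        # easing curve, preserving NaNs present in either endpoint.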
init = np.repeat(array[:, :-1], num_steps, axis=-1)
init_nans = np.isnan(init)
init[init_nans] = 0 # temporarily fill the nans
stop = np.repeat(array[:, 1:], num_steps, axis=-1)
stop_nans = np.isnan(stop)
tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape(
num_items, -1
)
weights = getattr(self, f"_{interp.lower()}")(tiled_steps, ease)
result = stop * weights + init * (1 - weights)
result[init_nans | stop_nans] = np.nan # replace nans
return result
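    # The methods below are standard easing curves: each maps normalized steps
    # in [0, 1] to eased interpolation weights.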
def _linear(self, ts, ease):
return ts
def _quadratic(self, ts, ease):
if ease == "in":
ts = ts * ts
elif ease == "out":
ts = -(ts * (ts - 2))
elif ease == "in_out":
index = ts < 0.5
ts[index] = 2 * ts[index] * ts[index]
ts[~index] = (-2 * ts[~index] * ts[~index]) + (4 * ts[~index]) - 1
return ts
def _cubic(self, ts, ease):
if ease == "in":
ts = ts * ts * ts
elif ease == "out":
ts = (ts - 1) * (ts - 1) * (ts - 1) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = 4 * ts[index] * ts[index] * ts[index]
ts[~index] = 2 * ts[~index] - 2
ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index] + 1
return ts
def _quartic(self, ts, ease):
if ease == "in":
ts = ts * ts * ts * ts
elif ease == "out":
ts = (ts - 1) * (ts - 1) * (ts - 1) * (1 - ts) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = 8 * ts[index] * ts[index] * ts[index] * ts[index]
ts[~index] = ts[~index] - 1
ts[~index] = -8 * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1
return ts
def _quintic(self, ts, ease):
if ease == "in":
ts = ts * ts * ts * ts * ts
elif ease == "out":
ts = (ts - 1) * (ts - 1) * (ts - 1) * (ts - 1) * (ts - 1) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = 16 * ts[index] * ts[index] * ts[index] * ts[index] * ts[index]
ts[~index] = (2 * ts[~index]) - 2
ts[~index] = (
0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1
)
return ts
def _sine(self, ts, ease):
if ease == "in":
ts = np.sin((ts - 1) * np.pi / 2) + 1
elif ease == "out":
ts = np.sin(ts * np.pi / 2)
elif ease == "in_out":
ts = 0.5 * (1 - np.cos(ts * np.pi))
return ts
def _circular(self, ts, ease):
if ease == "in":
ts = 1 - np.sqrt(1 - (ts * ts))
elif ease == "out":
ts = np.sqrt((2 - ts) * ts)
elif ease == "in_out":
index = ts < 0.5
ts[index] = 0.5 * (1 - np.sqrt(1 - 4 * (ts[index] * ts[index])))
ts[~index] = 0.5 * (
np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index]) - 1)) + 1
)
return ts
def _exponential(self, ts, ease):
if ease == "in":
index = ts != 0
ts[~index] = 0
ts[index] = np.power(2, 10 * (ts[index] - 1))
elif ease == "out":
index = ts != 1
ts[~index] = 1
ts[index] = 1 - np.power(2, -10 * ts[index])
elif ease == "in_out":
index0 = (ts != 0) & (ts < 0.5) & (ts != 1)
index1 = (ts != 0) & (ts >= 0.5) & (ts != 1)
ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) - 10)
ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) + 10) + 1
return ts
def _elastic(self, ts, ease):
if ease == "in":
ts = np.sin(13 * np.pi / 2 * ts) * np.power(2, 10 * (ts - 1))
elif ease == "out":
ts = np.sin(-13 * np.pi / 2 * (ts + 1)) * np.power(2, -10 * ts) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = (
0.5
* np.sin(13 * np.pi / 2 * (2 * ts[index]))
* np.power(2, 10 * ((2 * ts[index]) - 1))
)
ts[~index] = 0.5 * (
np.sin(-13 * np.pi / 2 * ((2 * ts[~index] - 1) + 1))
* np.power(2, -10 * (2 * ts[~index] - 1))
+ 2
)
return ts
def _back(self, ts, ease):
if ease == "in":
ts = ts * ts * ts - ts * np.sin(ts * np.pi)
elif ease == "out":
ts = 1 - ts
ts = 1 - (ts * ts * ts - ts * np.sin(ts * np.pi))
elif ease == "in_out":
index = ts < 0.5
ts[index] = 2 * ts[index]
ts[index] = 0.5 * (
ts[index] * ts[index] * ts[index]
- ts[index] * np.sin(ts[index] * np.pi)
)
ts[~index] = 1 - (2 * ts[~index] - 1)
ts[~index] = (
0.5
* (
1
- (
ts[~index] * ts[~index] * ts[~index]
- ts[~index] * np.sin(ts[~index] * np.pi)
)
)
+ 0.5
)
return ts
def _bounce(self, ts, ease):
index = ts < 0.5
if ease == "in":
ts = 1 - ts
elif ease == "in_out":
ts[index] = 1 - (ts[index] * 2)
ts[~index] = ts[~index] * 2 - 1
index0 = ts < 4 / 11
index1 = (ts < 8 / 11) & ~index0
index2 = (ts < 9 / 10) & ~index1 & ~index0
index3 = ts >= 9 / 10
ts[index0] = 121 * ts[index0] * ts[index0] / 16
ts[index1] = (
(363 / 40.0 * ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1]) + 17 / 5.0
)
ts[index2] = (
(4356 / 361.0 * ts[index2] * ts[index2])
- (35442 / 1805.0 * ts[index2])
+ 16061 / 1805.0
)
ts[index3] = (
(54 / 5.0 * ts[index3] * ts[index3])
- (513 / 25.0 * ts[index3])
+ 268 / 25.0
)
if ease == "in":
ts = 1 - ts
elif ease == "out":
pass
elif ease == "in_out":
ts[index] = 0.5 * (1 - ts[index])
ts[~index] = 0.5 * ts[~index] + 0.5
return ts
|
[
"numpy.isnan",
"numpy.sin",
"numpy.arange",
"numpy.tile",
"matplotlib.colors.rgb2hex",
"param.Integer",
"numpy.full",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.power",
"param.edit_constant",
"numpy.linspace",
"numpy.repeat",
"numpy.ceil",
"numpy.roll",
"numpy.hstack",
"numpy.cos",
"numpy.issubdtype",
"numpy.array",
"xarray.DataArray",
"param.ClassSelector",
"param.ObjectSelector",
"numpy.sqrt"
] |
[((324, 453), 'param.ClassSelector', 'param.ClassSelector', ([], {'default': 'None', 'class_': 'Iterable', 'doc': 'f"""Interpolation method; {INTERPS}"""', 'precedence': "PRECEDENCES['interp']"}), "(default=None, class_=Iterable, doc=\n f'Interpolation method; {INTERPS}', precedence=PRECEDENCES['interp'])\n", (343, 453), False, 'import param\n'), ((499, 624), 'param.ClassSelector', 'param.ClassSelector', ([], {'default': '"""in_out"""', 'class_': 'Iterable', 'doc': 'f"""Type of easing; {EASES}"""', 'precedence': "PRECEDENCES['interp']"}), "(default='in_out', class_=Iterable, doc=\n f'Type of easing; {EASES}', precedence=PRECEDENCES['interp'])\n", (518, 624), False, 'import param\n'), ((672, 809), 'param.Integer', 'param.Integer', ([], {'default': 'None', 'bounds': '(1, None)', 'doc': '"""Number of frames between each base state"""', 'precedence': "PRECEDENCES['interp']"}), "(default=None, bounds=(1, None), doc=\n 'Number of frames between each base state', precedence=PRECEDENCES[\n 'interp'])\n", (685, 809), False, 'import param\n'), ((852, 1191), 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': 'None', 'objects': 'REVERTS', 'doc': '"""Method for reverting to the initial state; boomerang finds the shortest path to the initial state, traceback backtracks the original path to the initial state, and rollback is like traceback, but disregards the original\'s path durations"""', 'precedence': "PRECEDENCES['interp']"}), '(default=None, objects=REVERTS, doc=\n "Method for reverting to the initial state; boomerang finds the shortest path to the initial state, traceback backtracks the original path to the initial state, and rollback is like traceback, but disregards the original\'s path durations"\n , precedence=PRECEDENCES[\'interp\'])\n', (872, 1191), False, 'import param\n'), ((1283, 1344), 'param.Integer', 'param.Integer', ([], {'doc': '"""Number of states"""'}), "(doc='Number of states', **DEFAULTS['num_kwds'])\n", (1296, 1344), False, 'import param\n'), ((1361, 1451), 'param.Integer', 'param.Integer', ([], {'doc': '"""Number of frames between each base state"""'}), "(doc='Number of frames between each base state', **DEFAULTS[\n 'num_kwds'])\n", (1374, 1451), False, 'import param\n'), ((2313, 2341), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_steps'], {}), '(0, 1, num_steps)\n', (2324, 2341), True, 'import numpy as np\n'), ((4928, 4940), 'numpy.array', 'np.array', (['da'], {}), '(da)\n', (4936, 4940), True, 'import numpy as np\n'), ((5975, 6007), 'numpy.hstack', 'np.hstack', (['[result, result_back]'], {}), '([result, result_back])\n', (5984, 6007), True, 'import numpy as np\n'), ((6166, 6245), 'xarray.DataArray', 'xr.DataArray', (['result'], {'dims': 'da.dims', 'coords': 'coords', 'name': 'da.name', 'attrs': 'da.attrs'}), '(result, dims=da.dims, coords=coords, name=da.name, attrs=da.attrs)\n', (6178, 6245), True, 'import xarray as xr\n'), ((6677, 6728), 'numpy.full', 'np.full', (['(num_items, num_result)', 'fill'], {'dtype': 'dtype'}), '((num_items, num_result), fill, dtype=dtype)\n', (6684, 6728), True, 'import numpy as np\n'), ((6965, 7010), 'numpy.arange', 'np.arange', (['(num_states * num_steps - num_steps)'], {}), '(num_states * num_steps - num_steps)\n', (6974, 7010), True, 'import numpy as np\n'), ((7740, 7757), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (7748, 7757), True, 'import numpy as np\n'), ((7868, 7904), 'numpy.repeat', 'np.repeat', (['array', 'num_steps'], {'axis': '(-1)'}), '(array, num_steps, axis=-1)\n', (7877, 7904), True, 
'import numpy as np\n'), ((8816, 8860), 'numpy.repeat', 'np.repeat', (['array[:, :-1]', 'num_steps'], {'axis': '(-1)'}), '(array[:, :-1], num_steps, axis=-1)\n', (8825, 8860), True, 'import numpy as np\n'), ((8881, 8895), 'numpy.isnan', 'np.isnan', (['init'], {}), '(init)\n', (8889, 8895), True, 'import numpy as np\n'), ((8968, 9011), 'numpy.repeat', 'np.repeat', (['array[:, 1:]', 'num_steps'], {'axis': '(-1)'}), '(array[:, 1:], num_steps, axis=-1)\n', (8977, 9011), True, 'import numpy as np\n'), ((9032, 9046), 'numpy.isnan', 'np.isnan', (['stop'], {}), '(stop)\n', (9040, 9046), True, 'import numpy as np\n'), ((5071, 5103), 'numpy.hstack', 'np.hstack', (['[array, array[:, :1]]'], {}), '([array, array[:, :1]])\n', (5080, 5103), True, 'import numpy as np\n'), ((5470, 5495), 'param.edit_constant', 'param.edit_constant', (['self'], {}), '(self)\n', (5489, 5495), False, 'import param\n'), ((6747, 6768), 'numpy.arange', 'np.arange', (['num_states'], {}), '(num_states)\n', (6756, 6768), True, 'import numpy as np\n'), ((7576, 7640), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""eased"""', 'colors'], {'N': 'num_result'}), "('eased', colors, N=num_result)\n", (7609, 7640), False, 'from matplotlib.colors import LinearSegmentedColormap, rgb2hex\n'), ((8014, 8048), 'numpy.roll', 'np.roll', (['result', 'num_roll'], {'axis': '(-1)'}), '(result, num_roll, axis=-1)\n', (8021, 8048), True, 'import numpy as np\n'), ((12077, 12110), 'numpy.power', 'np.power', (['(2)', '(10 * (ts[index] - 1))'], {}), '(2, 10 * (ts[index] - 1))\n', (12085, 12110), True, 'import numpy as np\n'), ((2896, 2937), 'numpy.issubdtype', 'np.issubdtype', (['array_dtype', 'np.datetime64'], {}), '(array_dtype, np.datetime64)\n', (2909, 2937), True, 'import numpy as np\n'), ((5902, 5942), 'numpy.repeat', 'np.repeat', (['(1 / 60)', 'result_back.shape[-1]'], {}), '(1 / 60, result_back.shape[-1])\n', (5911, 5942), True, 'import numpy as np\n'), ((7929, 7964), 'numpy.ceil', 'np.ceil', (['(num_steps / num_states * 2)'], {}), '(num_steps / num_states * 2)\n', (7936, 7964), True, 'import numpy as np\n'), ((9069, 9113), 'numpy.tile', 'np.tile', (['steps', '((num_states - 1) * num_items)'], {}), '(steps, (num_states - 1) * num_items)\n', (9076, 9113), True, 'import numpy as np\n'), ((11283, 11311), 'numpy.sin', 'np.sin', (['((ts - 1) * np.pi / 2)'], {}), '((ts - 1) * np.pi / 2)\n', (11289, 11311), True, 'import numpy as np\n'), ((11361, 11383), 'numpy.sin', 'np.sin', (['(ts * np.pi / 2)'], {}), '(ts * np.pi / 2)\n', (11367, 11383), True, 'import numpy as np\n'), ((11563, 11583), 'numpy.sqrt', 'np.sqrt', (['(1 - ts * ts)'], {}), '(1 - ts * ts)\n', (11570, 11583), True, 'import numpy as np\n'), ((11631, 11653), 'numpy.sqrt', 'np.sqrt', (['((2 - ts) * ts)'], {}), '((2 - ts) * ts)\n', (11638, 11653), True, 'import numpy as np\n'), ((12630, 12657), 'numpy.sin', 'np.sin', (['(13 * np.pi / 2 * ts)'], {}), '(13 * np.pi / 2 * ts)\n', (12636, 12657), True, 'import numpy as np\n'), ((12660, 12686), 'numpy.power', 'np.power', (['(2)', '(10 * (ts - 1))'], {}), '(2, 10 * (ts - 1))\n', (12668, 12686), True, 'import numpy as np\n'), ((3028, 3070), 'numpy.issubdtype', 'np.issubdtype', (['array_dtype', 'np.timedelta64'], {}), '(array_dtype, np.timedelta64)\n', (3041, 3070), True, 'import numpy as np\n'), ((5303, 5327), 'numpy.ceil', 'np.ceil', (['(60 / num_states)'], {}), '(60 / num_states)\n', (5310, 5327), True, 'import numpy as np\n'), ((5379, 5404), 'numpy.ceil', 'np.ceil', (['(100 / num_states)'], {}), '(100 / 
num_states)\n', (5386, 5404), True, 'import numpy as np\n'), ((7669, 7681), 'matplotlib.colors.rgb2hex', 'rgb2hex', (['rgb'], {}), '(rgb)\n', (7676, 7681), False, 'from matplotlib.colors import LinearSegmentedColormap, rgb2hex\n'), ((12222, 12250), 'numpy.power', 'np.power', (['(2)', '(-10 * ts[index])'], {}), '(2, -10 * ts[index])\n', (12230, 12250), True, 'import numpy as np\n'), ((13338, 13356), 'numpy.sin', 'np.sin', (['(ts * np.pi)'], {}), '(ts * np.pi)\n', (13344, 13356), True, 'import numpy as np\n'), ((12426, 12459), 'numpy.power', 'np.power', (['(2)', '(20 * ts[index0] - 10)'], {}), '(2, 20 * ts[index0] - 10)\n', (12434, 12459), True, 'import numpy as np\n'), ((12732, 12766), 'numpy.sin', 'np.sin', (['(-13 * np.pi / 2 * (ts + 1))'], {}), '(-13 * np.pi / 2 * (ts + 1))\n', (12738, 12766), True, 'import numpy as np\n'), ((12769, 12790), 'numpy.power', 'np.power', (['(2)', '(-10 * ts)'], {}), '(2, -10 * ts)\n', (12777, 12790), True, 'import numpy as np\n'), ((12978, 13015), 'numpy.power', 'np.power', (['(2)', '(10 * (2 * ts[index] - 1))'], {}), '(2, 10 * (2 * ts[index] - 1))\n', (12986, 13015), True, 'import numpy as np\n'), ((3162, 3199), 'numpy.issubdtype', 'np.issubdtype', (['array_dtype', 'np.number'], {}), '(array_dtype, np.number)\n', (3175, 3199), True, 'import numpy as np\n'), ((7698, 7719), 'numpy.arange', 'np.arange', (['num_result'], {}), '(num_result)\n', (7707, 7719), True, 'import numpy as np\n'), ((11443, 11461), 'numpy.cos', 'np.cos', (['(ts * np.pi)'], {}), '(ts * np.pi)\n', (11449, 11461), True, 'import numpy as np\n'), ((11749, 11789), 'numpy.sqrt', 'np.sqrt', (['(1 - 4 * (ts[index] * ts[index]))'], {}), '(1 - 4 * (ts[index] * ts[index]))\n', (11756, 11789), True, 'import numpy as np\n'), ((11840, 11893), 'numpy.sqrt', 'np.sqrt', (['(-(2 * ts[~index] - 3) * (2 * ts[~index] - 1))'], {}), '(-(2 * ts[~index] - 3) * (2 * ts[~index] - 1))\n', (11847, 11893), True, 'import numpy as np\n'), ((12494, 12528), 'numpy.power', 'np.power', (['(2)', '(-20 * ts[index1] + 10)'], {}), '(2, -20 * ts[index1] + 10)\n', (12502, 12528), True, 'import numpy as np\n'), ((12919, 12959), 'numpy.sin', 'np.sin', (['(13 * np.pi / 2 * (2 * ts[index]))'], {}), '(13 * np.pi / 2 * (2 * ts[index]))\n', (12925, 12959), True, 'import numpy as np\n'), ((13451, 13469), 'numpy.sin', 'np.sin', (['(ts * np.pi)'], {}), '(ts * np.pi)\n', (13457, 13469), True, 'import numpy as np\n'), ((7103, 7150), 'numpy.arange', 'np.arange', (['(0)', '(num_states * num_steps)', 'num_steps'], {}), '(0, num_states * num_steps, num_steps)\n', (7112, 7150), True, 'import numpy as np\n'), ((13081, 13131), 'numpy.sin', 'np.sin', (['(-13 * np.pi / 2 * (2 * ts[~index] - 1 + 1))'], {}), '(-13 * np.pi / 2 * (2 * ts[~index] - 1 + 1))\n', (13087, 13131), True, 'import numpy as np\n'), ((13152, 13191), 'numpy.power', 'np.power', (['(2)', '(-10 * (2 * ts[~index] - 1))'], {}), '(2, -10 * (2 * ts[~index] - 1))\n', (13160, 13191), True, 'import numpy as np\n'), ((13681, 13706), 'numpy.sin', 'np.sin', (['(ts[index] * np.pi)'], {}), '(ts[index] * np.pi)\n', (13687, 13706), True, 'import numpy as np\n'), ((13984, 14010), 'numpy.sin', 'np.sin', (['(ts[~index] * np.pi)'], {}), '(ts[~index] * np.pi)\n', (13990, 14010), True, 'import numpy as np\n')]
|
from mmdnn.conversion.rewriter.rewriter import UnitRewriterBase
import numpy as np
import re
class LSTMRewriter(UnitRewriterBase):
def __init__(self, graph, weights_dict):
return super(LSTMRewriter, self).__init__(graph, weights_dict)
def process_lstm_cell(self, match_result):
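        # Recover num_units and input_size from the matched cell kernel weights
        # and attach them as kwargs on the pattern's top node.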
if 'lstm_cell' not in match_result._pattern_to_op.keys():
return
kwargs = dict()
top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']]
w_e = match_result.get_op("cell_kernel")
w = self._weights_dict[w_e.name.replace('/read', '')]
num_units = w.shape[1]//4
[wx, wh] = np.split(w, [-1 * num_units])
input_size = wx.shape[0]
kwargs['num_units'] = num_units
kwargs['input_size'] = input_size
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def process_rnn_h_zero(self, match_result):
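        # Record the constant fill size and value of the matched zero-state as
        # kwargs on the pattern's top node.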
if 'h_zero' not in match_result._name_to_pattern.keys():
return
kwargs = dict()
top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']]
fill_size = match_result.get_op('fill_size')
fill_value = match_result.get_op('fill_value')
kwargs['fill_size'] = fill_size.get_attr('value').int_val[0]
kwargs['fill_value'] = fill_value.get_attr('value').float_val[0]
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def process_match_result(self, match_result, pattern_name):
if pattern_name == 'lstm_cell':
self.process_lstm_cell(match_result)
elif pattern_name == 'h_zero':
if self.check_match_scope(match_result, 'LSTMCellZeroState'):
self.process_rnn_h_zero(match_result)
    '''For short patterns, check the enclosing scope to avoid matching other patterns'''
def check_match_scope(self, match_result, scope_name):
ops = match_result._pattern_to_op.values()
for op in ops:
op_name_splits = op.name.split('/')
if len(op_name_splits) < 2:
return False
if re.sub(r'(_\d+)*$', '', op_name_splits[-2]) != scope_name:
if len(op_name_splits) > 2:
if re.sub(r'(_\d+)*$', '', op_name_splits[-3]) != scope_name:
return False
else:
return False
return True
def run(self):
return super(LSTMRewriter, self).run(['lstm_cell', 'h_zero'], 'tensorflow')
|
[
"re.sub",
"numpy.split"
] |
[((666, 695), 'numpy.split', 'np.split', (['w', '[-1 * num_units]'], {}), '(w, [-1 * num_units])\n', (674, 695), True, 'import numpy as np\n'), ((2259, 2302), 're.sub', 're.sub', (['"""(_\\\\d+)*$"""', '""""""', 'op_name_splits[-2]'], {}), "('(_\\\\d+)*$', '', op_name_splits[-2])\n", (2265, 2302), False, 'import re\n'), ((2385, 2428), 're.sub', 're.sub', (['"""(_\\\\d+)*$"""', '""""""', 'op_name_splits[-3]'], {}), "('(_\\\\d+)*$', '', op_name_splits[-3])\n", (2391, 2428), False, 'import re\n')]
|
from spefit.pdf.base import PDFParameter, PDF
from spefit.common.stats import normal_pdf
import numpy as np
from numpy.testing import assert_allclose
import pytest
def test_pdf_parameter():
initial = 1
limits = (0, 4)
fixed = True
multi = True
param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi)
assert param.initial == initial
assert param.limits == limits
assert param.fixed is fixed
assert param.multi is multi
param = PDFParameter(initial=initial, limits=limits)
assert param.initial == initial
assert param.limits == limits
assert param.fixed is False
assert param.multi is False
def test_pdf_class():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(1, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 1
assert len(pdf.parameters) == 2
assert pdf.parameters["sigma"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 1]]))
pdf = PDF(2, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 2
assert len(pdf.parameters) == 2
assert pdf.parameters["sigma"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]]))
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 2
assert len(pdf.parameters) == 3
assert pdf.parameters["sigma0"].initial == 0.1
assert pdf.parameters["sigma1"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]]))
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 2
assert len(pdf.parameters) == 4
assert pdf.parameters["sigma0"].initial == 0.1
assert pdf.parameters["sigma1"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]]))
key_array = np.array(list(pdf.parameters.keys()))
assert np.array_equal(key_array[pdf._lookup[0]], ["mean0", "sigma0"])
assert np.array_equal(key_array[pdf._lookup[1]], ["mean1", "sigma1"])
def test_lookup_parameters():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
pdf.update_parameters_initial(sigma1=0.3)
initial = np.array(list(pdf.initial.values()))
assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1]))
assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3]))
def test_call():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
x = np.linspace(-1, 6, 100)
assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1))
assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2))
with pytest.raises(IndexError):
pdf(x, np.array([0, 0.1, 0.2]), 2)
with pytest.raises(IndexError):
pdf(x, np.array([0, 0.1]), 1)
with pytest.raises(TypeError):
# noinspection PyTypeChecker
pdf(x, [0, 0.1, 0.2], 1)
def test_update_parameters_initial():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.parameters["mean"].initial == 0
assert pdf.parameters["sigma"].initial == 0.1
pdf.update_parameters_initial(mean=2, sigma=0.4)
assert pdf.parameters["mean"].initial == 2
assert pdf.parameters["sigma"].initial == 0.4
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.parameters["mean"].initial == 0
assert pdf.parameters["sigma0"].initial == 0.1
assert pdf.parameters["sigma1"].initial == 0.1
pdf.update_parameters_initial(mean=2, sigma=0.4)
assert pdf.parameters["mean"].initial == 2
assert pdf.parameters["sigma0"].initial == 0.4
assert pdf.parameters["sigma1"].initial == 0.4
pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5)
assert pdf.parameters["mean"].initial == 2
assert pdf.parameters["sigma0"].initial == 0.4
assert pdf.parameters["sigma1"].initial == 0.5
with pytest.raises(ValueError):
pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5)
with pytest.raises(ValueError):
pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5)
def test_update_parameters_limits():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(1, normal_pdf, parameters)
assert pdf.parameters["mean"].limits == (-2, 2)
assert pdf.parameters["sigma"].limits == (0, 2)
pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4))
assert pdf.parameters["mean"].limits == (-3, 3)
assert pdf.parameters["sigma"].limits == (0, 4)
# Test mutable
limit = [2, 3]
# noinspection PyTypeChecker
pdf.update_parameters_limits(mean=limit)
assert tuple(pdf.parameters["mean"].limits) == (2, 3)
limit[0] = 1
assert tuple(pdf.parameters["mean"].limits) == (2, 3)
def test_update_parameters_fixed():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(1, normal_pdf, parameters)
assert pdf.parameters["mean"].fixed is False
assert pdf.parameters["sigma"].fixed is False
pdf.update_parameters_fixed(mean=True, sigma=True)
assert pdf.parameters["mean"].fixed is True
assert pdf.parameters["sigma"].fixed is True
# noinspection DuplicatedCode
def test_prepare_multi_illumination_parameters():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
results = PDF._prepare_parameters(parameters, 1)
parameters, is_multi, lookup = results
assert len(parameters) == 2
assert len(is_multi) == 2
assert len(lookup) == 1
assert len(lookup[0]) == 2
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
results = PDF._prepare_parameters(parameters, 1)
parameters, is_multi, lookup = results
assert len(parameters) == 2
assert len(is_multi) == 2
assert len(lookup) == 1
assert len(lookup[0]) == 2
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
results = PDF._prepare_parameters(parameters, 2)
parameters, is_multi, lookup = results
assert len(parameters) == 4
assert len(is_multi) == 2
assert len(lookup) == 2
assert len(lookup[0]) == 2
def test_initial():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
pdf.update_parameters_initial(sigma1=0.2)
assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2)
def test_n_free_parameters():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.n_free_parameters == 3
pdf.update_parameters_fixed(sigma1=True)
assert pdf.n_free_parameters == 2
def test_parameter_names():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.parameter_names == ["mean", "sigma0", "sigma1"]
def test_iminuit_kwargs():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
pdf.update_parameters_initial(sigma1=0.2)
pdf.update_parameters_limits(sigma1=(1, 2))
pdf.update_parameters_fixed(sigma1=True)
iminuit_kwargs = pdf.iminuit_kwargs
assert len(iminuit_kwargs) == 9
assert iminuit_kwargs["mean"] == 0
assert iminuit_kwargs["sigma0"] == 0.1
assert iminuit_kwargs["sigma1"] == 0.2
assert iminuit_kwargs["limit_mean"] == (-2, 2)
assert iminuit_kwargs["limit_sigma0"] == (0, 2)
assert iminuit_kwargs["limit_sigma1"] == (1, 2)
assert iminuit_kwargs["fix_mean"] is False
assert iminuit_kwargs["fix_sigma0"] is False
assert iminuit_kwargs["fix_sigma1"] is True
# noinspection PyPep8Naming,PyArgumentList
@pytest.mark.parametrize("PDFSubclass", PDF.__subclasses__())
def test_pdf_subclasses(PDFSubclass):
pdf = PDFSubclass(n_illuminations=1)
x = np.linspace(-5, 100, 1000)
y = pdf(x, np.array(list(pdf.initial.values())), 0)
np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3)
# noinspection PyPep8Naming,PyArgumentList
@pytest.mark.parametrize("PDFSubclass", PDF.__subclasses__())
def test_disable_pedestal(PDFSubclass):
pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True)
x = np.linspace(-5, 100, 1000)
y = pdf(x, np.array(list(pdf.initial.values())), 0)
lambda_ = pdf.initial["lambda_0"]
pedestal_contribution = np.exp(-lambda_)
np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3)
def test_from_name():
pdf = PDF.from_name("SiPMGentile", n_illuminations=1)
assert pdf.__class__.__name__ == "SiPMGentile"
with pytest.raises(ValueError):
PDF.from_name("NULL", n_illuminations=1)
|
[
"spefit.pdf.base.PDF.__subclasses__",
"numpy.trapz",
"spefit.pdf.base.PDF",
"spefit.pdf.base.PDF._prepare_parameters",
"spefit.pdf.base.PDFParameter",
"spefit.pdf.base.PDF.from_name",
"pytest.raises",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"numpy.array_equal"
] |
[((275, 345), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': 'initial', 'limits': 'limits', 'fixed': 'fixed', 'multi': 'multi'}), '(initial=initial, limits=limits, fixed=fixed, multi=multi)\n', (287, 345), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((493, 537), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': 'initial', 'limits': 'limits'}), '(initial=initial, limits=limits)\n', (505, 537), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((845, 875), 'spefit.pdf.base.PDF', 'PDF', (['(1)', 'normal_pdf', 'parameters'], {}), '(1, normal_pdf, parameters)\n', (848, 875), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1106, 1136), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (1109, 1136), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1526, 1556), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (1529, 1556), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((2010, 2040), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (2013, 2040), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((2385, 2447), 'numpy.array_equal', 'np.array_equal', (['key_array[pdf._lookup[0]]', "['mean0', 'sigma0']"], {}), "(key_array[pdf._lookup[0]], ['mean0', 'sigma0'])\n", (2399, 2447), True, 'import numpy as np\n'), ((2459, 2521), 'numpy.array_equal', 'np.array_equal', (['key_array[pdf._lookup[1]]', "['mean1', 'sigma1']"], {}), "(key_array[pdf._lookup[1]], ['mean1', 'sigma1'])\n", (2473, 2521), True, 'import numpy as np\n'), ((2715, 2745), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (2718, 2745), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((3187, 3217), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (3190, 3217), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((3227, 3250), 'numpy.linspace', 'np.linspace', (['(-1)', '(6)', '(100)'], {}), '(-1, 6, 100)\n', (3238, 3250), True, 'import numpy as np\n'), ((3865, 3895), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (3868, 3895), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((4305, 4335), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (4308, 4335), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((5317, 5347), 'spefit.pdf.base.PDF', 'PDF', (['(1)', 'normal_pdf', 'parameters'], {}), '(1, normal_pdf, parameters)\n', (5320, 5347), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6054, 6084), 'spefit.pdf.base.PDF', 'PDF', (['(1)', 'normal_pdf', 'parameters'], {}), '(1, normal_pdf, parameters)\n', (6057, 6084), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6571, 6609), 'spefit.pdf.base.PDF._prepare_parameters', 'PDF._prepare_parameters', (['parameters', '(1)'], {}), '(parameters, 1)\n', (6594, 6609), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6952, 6990), 'spefit.pdf.base.PDF._prepare_parameters', 'PDF._prepare_parameters', (['parameters', '(1)'], {}), '(parameters, 1)\n', (6975, 6990), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7333, 7371), 'spefit.pdf.base.PDF._prepare_parameters', 'PDF._prepare_parameters', (['parameters', '(2)'], {}), '(parameters, 
2)\n', (7356, 7371), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7719, 7749), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (7722, 7749), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8052, 8082), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (8055, 8082), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8395, 8425), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (8398, 8425), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8679, 8709), 'spefit.pdf.base.PDF', 'PDF', (['(2)', 'normal_pdf', 'parameters'], {}), '(2, normal_pdf, parameters)\n', (8682, 8709), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((9543, 9569), 'numpy.linspace', 'np.linspace', (['(-5)', '(100)', '(1000)'], {}), '(-5, 100, 1000)\n', (9554, 9569), True, 'import numpy as np\n'), ((9434, 9454), 'spefit.pdf.base.PDF.__subclasses__', 'PDF.__subclasses__', ([], {}), '()\n', (9452, 9454), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((9906, 9932), 'numpy.linspace', 'np.linspace', (['(-5)', '(100)', '(1000)'], {}), '(-5, 100, 1000)\n', (9917, 9932), True, 'import numpy as np\n'), ((10055, 10071), 'numpy.exp', 'np.exp', (['(-lambda_)'], {}), '(-lambda_)\n', (10061, 10071), True, 'import numpy as np\n'), ((9772, 9792), 'spefit.pdf.base.PDF.__subclasses__', 'PDF.__subclasses__', ([], {}), '()\n', (9790, 9792), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((10191, 10238), 'spefit.pdf.base.PDF.from_name', 'PDF.from_name', (['"""SiPMGentile"""'], {'n_illuminations': '(1)'}), "('SiPMGentile', n_illuminations=1)\n", (10204, 10238), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1075, 1093), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (1083, 1093), True, 'import numpy as np\n'), ((1336, 1362), 'numpy.array', 'np.array', (['[[0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1]])\n', (1344, 1362), True, 'import numpy as np\n'), ((1808, 1834), 'numpy.array', 'np.array', (['[[0, 1], [0, 2]]'], {}), '([[0, 1], [0, 2]])\n', (1816, 1834), True, 'import numpy as np\n'), ((2292, 2318), 'numpy.array', 'np.array', (['[[0, 2], [1, 3]]'], {}), '([[0, 2], [1, 3]])\n', (2300, 2318), True, 'import numpy as np\n'), ((2905, 2923), 'numpy.array', 'np.array', (['[0, 0.1]'], {}), '([0, 0.1])\n', (2913, 2923), True, 'import numpy as np\n'), ((2987, 3005), 'numpy.array', 'np.array', (['[0, 0.3]'], {}), '([0, 0.3])\n', (2995, 3005), True, 'import numpy as np\n'), ((3425, 3450), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (3438, 3450), False, 'import pytest\n'), ((3505, 3530), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (3518, 3530), False, 'import pytest\n'), ((3580, 3604), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3593, 3604), False, 'import pytest\n'), ((4912, 4937), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4925, 4937), False, 'import pytest\n'), ((5020, 5045), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5033, 5045), False, 'import pytest\n'), ((9657, 9671), 'numpy.trapz', 'np.trapz', (['y', 'x'], {}), '(y, x)\n', (9665, 9671), True, 'import numpy as np\n'), ((10103, 10117), 'numpy.trapz', 'np.trapz', (['y', 'x'], {}), '(y, x)\n', (10111, 10117), True, 'import numpy as np\n'), ((10300, 10325), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10313, 10325), False, 'import pytest\n'), ((10335, 10375), 'spefit.pdf.base.PDF.from_name', 'PDF.from_name', (['"""NULL"""'], {'n_illuminations': '(1)'}), "('NULL', n_illuminations=1)\n", (10348, 10375), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((732, 771), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (744, 771), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((787, 827), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)'}), '(initial=0.1, limits=(0, 2))\n', (799, 827), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1401, 1440), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (1413, 1440), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1456, 1508), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (1468, 1508), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1873, 1924), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)', 'multi': '(True)'}), '(initial=0, limits=(-2, 2), multi=True)\n', (1885, 1924), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((1940, 1992), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (1952, 1992), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((2590, 2629), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (2602, 2629), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((2645, 2697), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (2657, 2697), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((3062, 3101), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (3074, 3101), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((3117, 3169), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (3129, 3169), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((3278, 3301), 'numpy.array', 'np.array', (['[0, 0.1, 0.2]'], {}), '([0, 0.1, 0.2])\n', (3286, 3301), True, 'import numpy as np\n'), ((3360, 3383), 'numpy.array', 'np.array', (['[0, 0.1, 0.2]'], {}), '([0, 0.1, 0.2])\n', (3368, 3383), True, 'import numpy as np\n'), ((3467, 3490), 'numpy.array', 'np.array', (['[0, 0.1, 0.2]'], {}), '([0, 0.1, 0.2])\n', (3475, 3490), True, 'import numpy as np\n'), ((3547, 3565), 'numpy.array', 'np.array', (['[0, 0.1]'], {}), '([0, 0.1])\n', (3555, 3565), True, 'import numpy as np\n'), ((3752, 3791), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (3764, 3791), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((3807, 3847), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)'}), '(initial=0.1, limits=(0, 2))\n', (3819, 3847), False, 'from 
spefit.pdf.base import PDFParameter, PDF\n'), ((4180, 4219), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (4192, 4219), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((4235, 4287), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (4247, 4287), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((5204, 5243), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (5216, 5243), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((5259, 5299), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)'}), '(initial=0.1, limits=(0, 2))\n', (5271, 5299), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((5941, 5980), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (5953, 5980), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((5996, 6036), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)'}), '(initial=0.1, limits=(0, 2))\n', (6008, 6036), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6454, 6493), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (6466, 6493), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6509, 6549), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)'}), '(initial=0.1, limits=(0, 2))\n', (6521, 6549), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6811, 6862), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)', 'multi': '(True)'}), '(initial=0, limits=(-2, 2), multi=True)\n', (6823, 6862), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((6878, 6930), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (6890, 6930), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7192, 7243), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)', 'multi': '(True)'}), '(initial=0, limits=(-2, 2), multi=True)\n', (7204, 7243), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7259, 7311), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (7271, 7311), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7594, 7633), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (7606, 7633), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7649, 7701), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (7661, 7701), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7927, 7966), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (7939, 7966), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((7982, 8034), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], 
{'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (7994, 8034), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8270, 8309), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (8282, 8309), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8325, 8377), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (8337, 8377), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8554, 8593), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0)', 'limits': '(-2, 2)'}), '(initial=0, limits=(-2, 2))\n', (8566, 8593), False, 'from spefit.pdf.base import PDFParameter, PDF\n'), ((8609, 8661), 'spefit.pdf.base.PDFParameter', 'PDFParameter', ([], {'initial': '(0.1)', 'limits': '(0, 2)', 'multi': '(True)'}), '(initial=0.1, limits=(0, 2), multi=True)\n', (8621, 8661), False, 'from spefit.pdf.base import PDFParameter, PDF\n')]
|
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_euler.python.euler_ops import base
from tf_euler.python.euler_ops import type_ops
import numpy as np
gen_pair = base._LIB_OP.gen_pair
_random_walk = base._LIB_OP.random_walk
def random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):
'''
Random walk from a list of nodes.
Args:
nodes: start node ids, 1-d Tensor
edge_types: list of 1-d Tensor of edge types
    p: back probability
    q: forward probability
    default_node: default node id used to fill the returned walks
'''
if base.nebula_ops['random_walk']:
return nebula_random_walk(nodes, edge_types, p, q, default_node)
edge_types = [type_ops.get_edge_type_id(edge_type)
for edge_type in edge_types]
return _random_walk(nodes, edge_types, p, q, default_node)
def nebula_random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):
result = tf.py_func(
_nebula_random_walk,
[nodes, edge_types, p, q, default_node],
[tf.int64],
True,
'NebulaRandomWalk'
)
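  # tf.py_func drops static shape information, so the walk shape
  # (one row per start node, start node plus one hop per edge-type step) is restored below.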
result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1))
return result[0]
def _nebula_random_walk(nodes, edge_types, p, q, default_node):
paths = []
uniq_nodes = {}.fromkeys(nodes).keys()
nql = 'USE {}; randomwalk {} from {} over {} where p=={} and q=={}'.format(
base.nebula_space,
len(edge_types),
', '.join(str(x) for x in uniq_nodes),
', '.join(str('e_' + x) for x in edge_types[0]),
p,
q
)
path_cache = {}
resp = base.nebula_client.execute_query(nql)
if resp.rows is not None:
for row in resp.rows:
path = row.columns[0].get_str()
path_nodes = map(lambda x: long(x if x != '-1' else default_node), path.split('#'))
path_cache[path_nodes[0]] = path_nodes
for node in nodes:
paths.append(path_cache[node])
return np.asarray(paths, np.int64)
|
[
"numpy.asarray",
"tf_euler.python.euler_ops.type_ops.get_edge_type_id",
"tf_euler.python.euler_ops.base.nebula_client.execute_query",
"tensorflow.py_func"
] |
[((1686, 1801), 'tensorflow.py_func', 'tf.py_func', (['_nebula_random_walk', '[nodes, edge_types, p, q, default_node]', '[tf.int64]', '(True)', '"""NebulaRandomWalk"""'], {}), "(_nebula_random_walk, [nodes, edge_types, p, q, default_node], [\n tf.int64], True, 'NebulaRandomWalk')\n", (1696, 1801), True, 'import tensorflow as tf\n'), ((2358, 2395), 'tf_euler.python.euler_ops.base.nebula_client.execute_query', 'base.nebula_client.execute_query', (['nql'], {}), '(nql)\n', (2390, 2395), False, 'from tf_euler.python.euler_ops import base\n'), ((2720, 2747), 'numpy.asarray', 'np.asarray', (['paths', 'np.int64'], {}), '(paths, np.int64)\n', (2730, 2747), True, 'import numpy as np\n'), ((1451, 1487), 'tf_euler.python.euler_ops.type_ops.get_edge_type_id', 'type_ops.get_edge_type_id', (['edge_type'], {}), '(edge_type)\n', (1476, 1487), False, 'from tf_euler.python.euler_ops import type_ops\n')]
|
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch
from torch.autograd import Variable
from load_memmap import *
# os and matplotlib.pyplot are used below (os.path.*, plt.imread); import them explicitly
# rather than relying on the star import above to re-export them.
import os
import matplotlib.pyplot as plt
class AxonDataset(Dataset):
"""" Inherits pytorch Dataset class to load Axon Dataset """
def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'):
"""
:param data_name (string)- data name to load/ save
:param folder- location of dataset
:param type - train or test dataset
"""
self.data_name = data_name
self.read = read
self.transform = transform
self.resize = resize
self.normalise = normalise
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
if self.read == 'npy':
self.x_data, self.y_data, _ = load_dataset(type, folder, data_name)
self.len_data = len(self.x_data)
elif self.read == 'image':
self.folder = os.path.join(__location__,self.data_name,'train')
images_original = [img for img in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original"))]
images_mask = [img for img in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask"))]
self.images_mask = images_mask
self.images_original = images_original
self.images_mask.sort()
self.images_original.sort()
self.len_data = len(images_original)
def __len__(self):
""" get length of data
example: len(data) """
return self.len_data
def __getitem__(self, idx):
"""gets samples from data according to idx
:param idx- index to take
example: data[10] -to get the 10th data sample"""
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
if self.read == 'npy':
if self.resize:
sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize))
sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize))
else:
sample_x_data = self.x_data[idx]
sample_y_data = self.y_data[idx]
elif self.read == 'image':
data_path = self.images_original[idx]
mask_path = self.images_mask[idx]
sample_x_data = plt.imread(
os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original", data_path))
sample_y_data = (plt.imread(
os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask", mask_path))).astype(
float)
sample_x_data = torch.Tensor(sample_x_data)
sample_y_data = torch.Tensor(sample_y_data)
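        # ensure a channel dimension (C=1) so image and mask are shaped (C, H, W)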
if len(sample_x_data.shape) == 2:
sample_x_data.unsqueeze_(0)
if len(sample_y_data.shape) == 2:
sample_y_data.unsqueeze_(0)
# normalise between [-1,1]
if self.normalise:
sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1
data = [sample_x_data, sample_y_data]
return data
class SyntheticDataset(Dataset):
"""" Inherits pytorch Dataset class to load Synthetic Axon Dataset """
def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None):
"""
:param num - number of data to generate
:param data_name (string)- data name to load/ save
:param type - train or test dataset
"""
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy')
name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type + '.npy')
name_y_points = os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type + '.npy')
try:
self.x_data = np.load(name_x, mmap_mode='r')
self.y_data = np.load(name_y, mmap_mode='r')
self.y_data_points = np.load(name_y_points)
except:
# if no dataset currently created, generate a new synthetic dataset with parameters args
print('no dataset with the name')
self.data_name = data_name
self.transform = transform
self.resize = resize
def read_tensor_dataset(self):
""" converts dataset to tensors """
tt = ToTensor()
x_data = tt(self.x_data)
y_data = tt(self.y_data)
return x_data, y_data
def __len__(self):
""" get length of data
example: len(data) """
return (len(self.x_data))
def __getitem__(self, idx):
"""gets samples from data according to idx
:param idx- index to take
example: data[10] -to get the 10th data sample"""
if self.resize:
sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize))
sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize))
else:
sample_x_data = self.x_data[idx]
sample_y_data = self.y_data[idx]
sample_x_data = np.expand_dims(sample_x_data, axis=0)
sample_y_data = np.expand_dims(sample_y_data, axis=0)
sample_x_data = torch.Tensor(sample_x_data)
sample_y_data = torch.Tensor(sample_y_data)
data = [sample_x_data, sample_y_data]
return data
class ToTensor:
"""Convert ndarrays in data to Tensors."""
@staticmethod
def __call__(data):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
#data = data.transpose((1, 0))
data = np.array([data])
data = torch.Tensor(data)
if torch.cuda.is_available():
data = data.cuda()
return data
@staticmethod
def data_to_tensor(x_data, y_data):
"""takes data and splits into a list of tensors- of which each list contains
tensors of several samples (i.e. one id)
:param x_data - the data
:param y_data - the labels
"""
tt = ToTensor()
x_train_temp = tt(x_data)
y_train_temp = tt(y_data)
data = [x_train_temp, y_train_temp]
return data
@staticmethod
def data_ids_to_tensor_list(x_data, y_data, ids):
"""takes data and splits into a list of tensors- of which each list contains
tensors of several samples (i.e. one id)
:param x_data - the data
:param y_data - the labels
:param ids - the ids corresponding to each sample
"""
tt = ToTensor()
unique_ids = np.unique(ids)
data = [None] * unique_ids.size
len = np.zeros(unique_ids.size).astype(int)
for i in np.arange(unique_ids.size):
ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int)
len[i] = int(ind_id.size)
x_train_temp = tt(x_data[ind_id])
y_train_temp = tt(y_data[ind_id])
data[i] = [x_train_temp[0], y_train_temp[0], len[i]]
max_len = int(np.max(len))
return data, max_len
@staticmethod
def create_variable(tensor):
"""creates a Variable tensor with gpu if available
:param tensor - the tensor to wrap with Variable """
# Do cuda() before wrapping with variable
if torch.cuda.is_available():
return Variable(tensor.cuda())
else:
return Variable(tensor)
|
[
"numpy.load",
"torch.autograd.Variable",
"numpy.zeros",
"numpy.expand_dims",
"numpy.nonzero",
"numpy.max",
"torch.Tensor",
"numpy.array",
"numpy.arange",
"torch.cuda.is_available",
"torch.max",
"torch.min",
"numpy.unique"
] |
[((2945, 2972), 'torch.Tensor', 'torch.Tensor', (['sample_x_data'], {}), '(sample_x_data)\n', (2957, 2972), False, 'import torch\n'), ((2997, 3024), 'torch.Tensor', 'torch.Tensor', (['sample_y_data'], {}), '(sample_y_data)\n', (3009, 3024), False, 'import torch\n'), ((5674, 5701), 'torch.Tensor', 'torch.Tensor', (['sample_x_data'], {}), '(sample_x_data)\n', (5686, 5701), False, 'import torch\n'), ((5726, 5753), 'torch.Tensor', 'torch.Tensor', (['sample_y_data'], {}), '(sample_y_data)\n', (5738, 5753), False, 'import torch\n'), ((6085, 6101), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (6093, 6101), True, 'import numpy as np\n'), ((6117, 6135), 'torch.Tensor', 'torch.Tensor', (['data'], {}), '(data)\n', (6129, 6135), False, 'import torch\n'), ((6147, 6172), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6170, 6172), False, 'import torch\n'), ((7048, 7062), 'numpy.unique', 'np.unique', (['ids'], {}), '(ids)\n', (7057, 7062), True, 'import numpy as np\n'), ((7172, 7198), 'numpy.arange', 'np.arange', (['unique_ids.size'], {}), '(unique_ids.size)\n', (7181, 7198), True, 'import numpy as np\n'), ((7762, 7787), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7785, 7787), False, 'import torch\n'), ((4289, 4319), 'numpy.load', 'np.load', (['name_x'], {'mmap_mode': '"""r"""'}), "(name_x, mmap_mode='r')\n", (4296, 4319), True, 'import numpy as np\n'), ((4346, 4376), 'numpy.load', 'np.load', (['name_y'], {'mmap_mode': '"""r"""'}), "(name_y, mmap_mode='r')\n", (4353, 4376), True, 'import numpy as np\n'), ((4410, 4432), 'numpy.load', 'np.load', (['name_y_points'], {}), '(name_y_points)\n', (4417, 4432), True, 'import numpy as np\n'), ((5545, 5582), 'numpy.expand_dims', 'np.expand_dims', (['sample_x_data'], {'axis': '(0)'}), '(sample_x_data, axis=0)\n', (5559, 5582), True, 'import numpy as np\n'), ((5611, 5648), 'numpy.expand_dims', 'np.expand_dims', (['sample_y_data'], {'axis': '(0)'}), '(sample_y_data, axis=0)\n', (5625, 5648), True, 'import numpy as np\n'), ((7486, 7497), 'numpy.max', 'np.max', (['len'], {}), '(len)\n', (7492, 7497), True, 'import numpy as np\n'), ((7865, 7881), 'torch.autograd.Variable', 'Variable', (['tensor'], {}), '(tensor)\n', (7873, 7881), False, 'from torch.autograd import Variable\n'), ((5255, 5283), 'numpy.array', 'np.array', (['[self.x_data[idx]]'], {}), '([self.x_data[idx]])\n', (5263, 5283), True, 'import numpy as np\n'), ((5353, 5381), 'numpy.array', 'np.array', (['[self.y_data[idx]]'], {}), '([self.y_data[idx]])\n', (5361, 5381), True, 'import numpy as np\n'), ((7117, 7142), 'numpy.zeros', 'np.zeros', (['unique_ids.size'], {}), '(unique_ids.size)\n', (7125, 7142), True, 'import numpy as np\n'), ((2183, 2211), 'numpy.array', 'np.array', (['[self.x_data[idx]]'], {}), '([self.x_data[idx]])\n', (2191, 2211), True, 'import numpy as np\n'), ((2285, 2313), 'numpy.array', 'np.array', (['[self.y_data[idx]]'], {}), '([self.y_data[idx]])\n', (2293, 2313), True, 'import numpy as np\n'), ((7221, 7253), 'numpy.nonzero', 'np.nonzero', (['(unique_ids[i] == ids)'], {}), '(unique_ids[i] == ids)\n', (7231, 7253), True, 'import numpy as np\n'), ((3301, 3325), 'torch.min', 'torch.min', (['sample_x_data'], {}), '(sample_x_data)\n', (3310, 3325), False, 'import torch\n'), ((3329, 3353), 'torch.max', 'torch.max', (['sample_x_data'], {}), '(sample_x_data)\n', (3338, 3353), False, 'import torch\n'), ((3356, 3380), 'torch.min', 'torch.min', (['sample_x_data'], {}), '(sample_x_data)\n', (3365, 3380), False, 'import torch\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Truthcoin's consensus functions.
Verifies that the consensus algorithm works as expected.
Check test_answers.txt for expected results.
"""
from __future__ import division, unicode_literals, absolute_import
import os
import sys
import platform
import json
import numpy as np
import numpy.ma as ma
if platform.python_version() < "2.7":
unittest = __import__("unittest2")
else:
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir))
import consensus
def prp(o):
    """Pretty-print a result dictionary."""
    print(json.dumps(o, indent=3, sort_keys=True))
class TestConsensus(unittest.TestCase):
def setUp(self):
self.votes_unmasked = np.array([
[1, 1, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 1, 1],
])
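        # vote matrix: one row per voter, one column per binary decision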
self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked))
def test_Factory(self):
outcome = consensus.Factory(self.votes)
self.assertAlmostEquals(outcome["Certainty"], 0.228237569613, places=11)
def test_Factory_scaled(self):
scalar_decision_params = [
{"scaled": True, "min": 0.1, "max": 0.5},
{"scaled": True, "min": 0.2, "max": 0.7},
{"scaled": False, "min": 0, "max": 1},
{"scaled": False, "min": 0, "max": 1},
]
outcome = consensus.Factory(self.votes, Scales=scalar_decision_params)
self.assertAlmostEquals(outcome["Certainty"], 0.618113325804, places=11)
def tearDown(self):
del self.votes_unmasked
del self.votes
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestConsensus)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"platform.python_version",
"unittest.TextTestRunner",
"os.path.realpath",
"json.dumps",
"numpy.isnan",
"numpy.array",
"unittest.TestLoader",
"os.path.join",
"consensus.Factory"
] |
[((360, 385), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (383, 385), False, 'import platform\n'), ((484, 510), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (500, 510), False, 'import os\n'), ((531, 560), 'os.path.join', 'os.path.join', (['HERE', 'os.pardir'], {}), '(HERE, os.pardir)\n', (543, 560), False, 'import os\n'), ((603, 648), 'json.dumps', 'json.dumps', (['outcome'], {'indent': '(3)', 'sort_keys': '(True)'}), '(outcome, indent=3, sort_keys=True)\n', (613, 648), False, 'import json\n'), ((743, 841), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 1, 1], [0, \n 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 1,\n 1], [0, 0, 1, 1]])\n', (751, 841), True, 'import numpy as np\n'), ((1057, 1086), 'consensus.Factory', 'consensus.Factory', (['self.votes'], {}), '(self.votes)\n', (1074, 1086), False, 'import consensus\n'), ((1477, 1537), 'consensus.Factory', 'consensus.Factory', (['self.votes'], {'Scales': 'scalar_decision_params'}), '(self.votes, Scales=scalar_decision_params)\n', (1494, 1537), False, 'import consensus\n'), ((979, 1008), 'numpy.isnan', 'np.isnan', (['self.votes_unmasked'], {}), '(self.votes_unmasked)\n', (987, 1008), True, 'import numpy as np\n'), ((1739, 1760), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1758, 1760), False, 'import unittest\n'), ((1802, 1838), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1825, 1838), False, 'import unittest\n')]
|
"""
Comparing different kernels using cv2.filter2D()
"""
# Import required packages:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def show_with_matplotlib(color_img, title, pos):
"""Shows an image using matplotlib capabilities"""
# Convert BGR image to RGB
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(3, 4, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
# Create the dimensions of the figure and set title:
plt.figure(figsize=(12, 6))
plt.suptitle("Comparing different kernels using cv2.filter2D()", fontsize=14, fontweight='bold')
# Load the original image:
image = cv2.imread('cat-face.png')
# We try different kernels
# Identify kernel (does not modify the image)
kernel_identity = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
# Try different kernels for edge detection:
kernel_edge_detection_1 = np.array([[1, 0, -1],
[0, 0, 0],
[-1, 0, 1]])
kernel_edge_detection_2 = np.array([[0, 1, 0],
[1, -4, 1],
[0, 1, 0]])
kernel_edge_detection_3 = np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
# Try different kernels for sharpening:
kernel_sharpen = np.array([[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]])
kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6, 4, 1],
[4, 16, 24, 16, 4],
[6, 24, -476, 24, 6],
[4, 16, 24, 16, 4],
[1, 4, 6, 4, 1]])
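# Note: this kernel works out to 2*identity - Gaussian(5x5), i.e. original + (original - blurred),
# which is unsharp masking with amount 1 (the -476 centre becomes +476/256 after the -1/256 scale).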
# Try different kernels for smoothing:
kernel_blur = 1 / 9 * np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
gaussian_blur = 1 / 16 * np.array([[1, 2, 1],
[2, 4, 2],
[1, 2, 1]])
# Try a kernel for embossing:
kernel_emboss = np.array([[-2, -1, 0],
[-1, 1, 1],
[0, 1, 2]])
# Try different kernels for edge detection:
sobel_x_kernel = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
sobel_y_kernel = np.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
outline_kernel = np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
# Apply all the kernels:
original_image = cv2.filter2D(image, -1, kernel_identity)
edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1)
edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2)
edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3)
sharpen_image = cv2.filter2D(image, -1, kernel_sharpen)
unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking)
blur_image = cv2.filter2D(image, -1, kernel_blur)
gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur)
emboss_image = cv2.filter2D(image, -1, kernel_emboss)
sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel)
sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel)
outline_image = cv2.filter2D(image, -1, outline_kernel)
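# Note: cv2.filter2D computes correlation (the kernel is not flipped), and ddepth=-1 keeps
# the output at the same bit depth as the source image.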
# Show all the images:
show_with_matplotlib(original_image, "identity kernel", 1)
show_with_matplotlib(edge_image_1, "edge detection 1", 2)
show_with_matplotlib(edge_image_2, "edge detection 2", 3)
show_with_matplotlib(edge_image_3, "edge detection 3", 4)
show_with_matplotlib(sharpen_image, "sharpen", 5)
show_with_matplotlib(unsharp_masking_image, "unsharp masking", 6)
show_with_matplotlib(blur_image, "blur image", 7)
show_with_matplotlib(gaussian_blur_image, "gaussian blur image", 8)
show_with_matplotlib(emboss_image, "emboss image", 9)
show_with_matplotlib(sobel_x_image, "sobel x image", 10)
show_with_matplotlib(sobel_y_image, "sobel y image", 11)
show_with_matplotlib(outline_image, "outline image", 12)
# Show the Figure:
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"cv2.filter2D",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"cv2.imread",
"matplotlib.pyplot.figure",
"numpy.array"
] |
[((475, 502), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (485, 502), True, 'import matplotlib.pyplot as plt\n'), ((503, 604), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Comparing different kernels using cv2.filter2D()"""'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Comparing different kernels using cv2.filter2D()', fontsize=\n 14, fontweight='bold')\n", (515, 604), True, 'import matplotlib.pyplot as plt\n'), ((636, 662), 'cv2.imread', 'cv2.imread', (['"""cat-face.png"""'], {}), "('cat-face.png')\n", (646, 662), False, 'import cv2\n'), ((755, 798), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n', (763, 798), True, 'import numpy as np\n'), ((926, 971), 'numpy.array', 'np.array', (['[[1, 0, -1], [0, 0, 0], [-1, 0, 1]]'], {}), '([[1, 0, -1], [0, 0, 0], [-1, 0, 1]])\n', (934, 971), True, 'import numpy as np\n'), ((1071, 1115), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, -4, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, -4, 1], [0, 1, 0]])\n', (1079, 1115), True, 'import numpy as np\n'), ((1215, 1266), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])\n', (1223, 1266), True, 'import numpy as np\n'), ((1397, 1444), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n', (1405, 1444), True, 'import numpy as np\n'), ((2184, 2230), 'numpy.array', 'np.array', (['[[-2, -1, 0], [-1, 1, 1], [0, 1, 2]]'], {}), '([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])\n', (2192, 2230), True, 'import numpy as np\n'), ((2345, 2391), 'numpy.array', 'np.array', (['[[1, 0, -1], [2, 0, -2], [1, 0, -1]]'], {}), '([[1, 0, -1], [2, 0, -2], [1, 0, -1]])\n', (2353, 2391), True, 'import numpy as np\n'), ((2464, 2510), 'numpy.array', 'np.array', (['[[1, 2, 1], [0, 0, 0], [-1, -2, -1]]'], {}), '([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])\n', (2472, 2510), True, 'import numpy as np\n'), ((2583, 2634), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])\n', (2591, 2634), True, 'import numpy as np\n'), ((2732, 2772), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_identity'], {}), '(image, -1, kernel_identity)\n', (2744, 2772), False, 'import cv2\n'), ((2788, 2836), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_edge_detection_1'], {}), '(image, -1, kernel_edge_detection_1)\n', (2800, 2836), False, 'import cv2\n'), ((2852, 2900), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_edge_detection_2'], {}), '(image, -1, kernel_edge_detection_2)\n', (2864, 2900), False, 'import cv2\n'), ((2916, 2964), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_edge_detection_3'], {}), '(image, -1, kernel_edge_detection_3)\n', (2928, 2964), False, 'import cv2\n'), ((2981, 3020), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_sharpen'], {}), '(image, -1, kernel_sharpen)\n', (2993, 3020), False, 'import cv2\n'), ((3045, 3092), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_unsharp_masking'], {}), '(image, -1, kernel_unsharp_masking)\n', (3057, 3092), False, 'import cv2\n'), ((3106, 3142), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_blur'], {}), '(image, -1, kernel_blur)\n', (3118, 3142), False, 'import cv2\n'), ((3165, 3203), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'gaussian_blur'], {}), '(image, -1, gaussian_blur)\n', (3177, 3203), 
False, 'import cv2\n'), ((3219, 3257), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel_emboss'], {}), '(image, -1, kernel_emboss)\n', (3231, 3257), False, 'import cv2\n'), ((3274, 3313), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'sobel_x_kernel'], {}), '(image, -1, sobel_x_kernel)\n', (3286, 3313), False, 'import cv2\n'), ((3330, 3369), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'sobel_y_kernel'], {}), '(image, -1, sobel_y_kernel)\n', (3342, 3369), False, 'import cv2\n'), ((3386, 3425), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'outline_kernel'], {}), '(image, -1, outline_kernel)\n', (3398, 3425), False, 'import cv2\n'), ((4162, 4172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4170, 4172), True, 'import matplotlib.pyplot as plt\n'), ((332, 354), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'pos'], {}), '(3, 4, pos)\n', (343, 354), True, 'import matplotlib.pyplot as plt\n'), ((359, 378), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_RGB'], {}), '(img_RGB)\n', (369, 378), True, 'import matplotlib.pyplot as plt\n'), ((383, 399), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (392, 399), True, 'import matplotlib.pyplot as plt\n'), ((404, 419), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (412, 419), True, 'import matplotlib.pyplot as plt\n'), ((1536, 1646), 'numpy.array', 'np.array', (['[[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, -476, 24, 6], [4, 16, 24, 16,\n 4], [1, 4, 6, 4, 1]]'], {}), '([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, -476, 24, 6], [4, 16,\n 24, 16, 4], [1, 4, 6, 4, 1]])\n', (1544, 1646), True, 'import numpy as np\n'), ((1889, 1932), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n', (1897, 1932), True, 'import numpy as np\n'), ((2023, 2066), 'numpy.array', 'np.array', (['[[1, 2, 1], [2, 4, 2], [1, 2, 1]]'], {}), '([[1, 2, 1], [2, 4, 2], [1, 2, 1]])\n', (2031, 2066), True, 'import numpy as np\n')]
|
import os
import re
import sys
import argparse
import json
import numpy as np
from glob import glob
import cv2
from utils.plot_utils import RandomColor
def parse_args():
parser = argparse.ArgumentParser(
description='Monocular 3D Tracking Visualizer',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('set', choices=['gta', 'kitti'])
parser.add_argument('split', choices=['train', 'val', 'test'],
help='Which data split to use in testing')
parser.add_argument('--session', default='623',
help='Name of the session, to separate exp')
parser.add_argument('--epoch', default='100',
help='How many epochs you used to separate exp')
parser.add_argument('--flag', default='kf3doccdeep_age15_aff0.1_hit0_80m_pd',
help='Flags for running evaluation code')
parser.add_argument('--save_vid', action='store_true', default=False,
help='Flags for saving video')
parser.add_argument('--save_txt', action='store_true', default=False,
help='Flags for saving txt')
parser.add_argument('--dry_run', action='store_true', default=False,
help='Show command without running')
parser.add_argument('--overwrite', action='store_true', default=False,
help='Overwrite the output files')
args = parser.parse_args()
return args
print(' '.join(sys.argv))
args = parse_args()
if args.set == 'kitti':
IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'})
re_pattern = re.compile('[0-9]{4}')
else:
IMAGE_PATH = 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT': args.split, 'SEQ': '{}'})
re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])')
SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT': args.split})
out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SETTING': args.flag})
FONT = cv2.FONT_HERSHEY_SIMPLEX
FOURCC = cv2.VideoWriter_fourcc(*'mp4v')
fps = 15
np.random.seed(777)
rm_color = RandomColor(30)
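# colour assigned per track id (filled lazily below), so a track keeps the same colour in every frame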
tid2color = {}
def mkdir(path):
if not os.path.isdir(path):
print("Making directory {}".format(path))
os.makedirs(path) # Use with care
def gen_result(out_path, out_name, save_vid=False, save_txt=True,
dry_run=False, overwrite=False):
print("Reading meta data...")
info = json.load(open('{}{}.json'.format(out_path, out_name), 'r'))
if not dry_run: mkdir('{}{}/data/'.format(out_path, out_name))
for seqid in range(len(info)):
file_seq = re_pattern.search(info[seqid]['filename']).group(0)
print('Reading {} from {}{}...'.format(file_seq, out_path, out_name))
if dry_run:
continue
seqout = []
vid_name = '{}{}/data/{}.mp4'.format(out_path, out_name, file_seq)
txt_name = '{}{}/data/{}.txt'.format(out_path, out_name, file_seq)
if not overwrite:
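            # re-run only when a requested output (txt and/or video) is still missing; otherwise skip this sequence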
if not os.path.isfile(txt_name) and save_txt:
pass
elif not os.path.isfile(vid_name) and save_vid:
pass
else:
print("SKIP running. Generated file {} Found".format(txt_name))
continue
if save_vid:
images = sorted(glob(IMAGE_PATH.format(file_seq)))
img = cv2.imread(images[0])
vidsize = (img.shape[1], img.shape[0]) # height, width
out = cv2.VideoWriter(vid_name, FOURCC, fps, vidsize)
demoinfo = info[seqid]['frames']
for idx, frame in enumerate(demoinfo):
if save_vid:
img = cv2.imread(images[idx])
img = cv2.putText(img, str(idx), (20, 30),
cv2.FONT_HERSHEY_COMPLEX, 1,
(180, 180, 180), 2)
for trk in frame['hypotheses']:
x1, y1, x2, y2, conf = trk['det_box']
xc, yc = trk['xc'], trk['yc']
if save_vid:
if trk['id'] not in tid2color:
tid2color[trk['id']] = rm_color.get_random_color(scale=255)
img = cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)),
tid2color[trk['id']], 2)
img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
tid2color[trk['id']], 4)
img = cv2.putText(img, str(int(trk['id'])), (int(x1), int(y1)),
cv2.FONT_HERSHEY_COMPLEX, 1,
tid2color[trk['id']], 2)
img = cv2.putText(img, str(int(trk['depth'])), (int(x2)-14, int(y2)),
cv2.FONT_HERSHEY_COMPLEX, 0.8,
tid2color[trk['id']], 2)
if save_txt:
'''
submit_txt = ' '.join([
str(idx),
str(int(trk['id'])),
'Car',
'-1 -1',
trk['alpha'],
str(x1), str(y1), str(x2), str(y2),
trk['dim'],
trk['loc'],
trk['rot'],
str(conf)])
'''
submit_txt = ' '.join([
str(idx),
str(int(trk['id'])),
'Car',
'-1 -1 -10',
str(x1), str(y1), str(x2), str(y2),
'-1 -1 -1',
'-1000 -1000 -1000 -10',
str(conf)])
#'''
submit_txt += '\n'
seqout.append(submit_txt)
if save_vid: out.write(img)
if save_txt:
print("{} saved.".format(txt_name))
with open(txt_name, 'w') as f:
f.writelines(seqout)
if save_vid:
print("{} saved.".format(vid_name))
out.release()
if __name__ == '__main__':
# Not using out_name, too slow
output_list = [os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')]
my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep']
for dir_name in output_list:
print(dir_name)
save_vid = args.save_vid
if save_vid:
is_in = False
for ml in my_list:
is_in = is_in or (ml in dir_name)
save_vid = is_in
gen_result(SAVE_PATH,
dir_name,
save_vid=save_vid,
save_txt=args.save_txt,
dry_run=args.dry_run,
overwrite=args.overwrite
)
|
[
"numpy.random.seed",
"cv2.VideoWriter_fourcc",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"utils.plot_utils.RandomColor",
"cv2.imread",
"os.path.isfile",
"os.path.splitext",
"cv2.VideoWriter",
"os.listdir",
"re.compile"
] |
[((2253, 2284), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (2275, 2284), False, 'import cv2\n'), ((2295, 2314), 'numpy.random.seed', 'np.random.seed', (['(777)'], {}), '(777)\n', (2309, 2314), True, 'import numpy as np\n'), ((2326, 2341), 'utils.plot_utils.RandomColor', 'RandomColor', (['(30)'], {}), '(30)\n', (2337, 2341), False, 'from utils.plot_utils import RandomColor\n'), ((186, 317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Monocular 3D Tracking Visualizer"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Monocular 3D Tracking Visualizer',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (209, 317), False, 'import argparse\n'), ((1721, 1743), 're.compile', 're.compile', (['"""[0-9]{4}"""'], {}), "('[0-9]{4}')\n", (1731, 1743), False, 'import re\n'), ((1876, 1926), 're.compile', 're.compile', (['"""rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])"""'], {}), "('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])')\n", (1886, 1926), False, 'import re\n'), ((2386, 2405), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2399, 2405), False, 'import os\n'), ((2465, 2482), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2476, 2482), False, 'import os\n'), ((3620, 3641), 'cv2.imread', 'cv2.imread', (['images[0]'], {}), '(images[0])\n', (3630, 3641), False, 'import cv2\n'), ((3727, 3774), 'cv2.VideoWriter', 'cv2.VideoWriter', (['vid_name', 'FOURCC', 'fps', 'vidsize'], {}), '(vid_name, FOURCC, fps, vidsize)\n', (3742, 3774), False, 'import cv2\n'), ((6804, 6826), 'os.path.splitext', 'os.path.splitext', (['item'], {}), '(item)\n', (6820, 6826), False, 'import os\n'), ((6842, 6863), 'os.listdir', 'os.listdir', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (6852, 6863), False, 'import os\n'), ((3912, 3935), 'cv2.imread', 'cv2.imread', (['images[idx]'], {}), '(images[idx])\n', (3922, 3935), False, 'import cv2\n'), ((3253, 3277), 'os.path.isfile', 'os.path.isfile', (['txt_name'], {}), '(txt_name)\n', (3267, 3277), False, 'import os\n'), ((3334, 3358), 'os.path.isfile', 'os.path.isfile', (['vid_name'], {}), '(vid_name)\n', (3348, 3358), False, 'import os\n')]
|
# --------------
#Importing header files
import pandas as pd
from sklearn.model_selection import train_test_split as tts
# Code starts here
data= pd.read_csv(path)
X = data.drop(['customer.id', 'paid.back.loan'], axis=1)
y=data['paid.back.loan']
X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
import pandas as pd
from sklearn.model_selection import train_test_split as tts
# Code starts here
fully_paid = y_train.value_counts()
plt.figure()
fully_paid.plot(kind='bar')
# Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
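# 'int.rate' comes in as strings such as '11.5%'; strip the '%' sign and rescale to a 0-1 fraction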
X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float)
X_train['int.rate'] = X_train['int.rate']/100
X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float)
X_test['int.rate'] = X_test['int.rate']/100
num_df = X_train.select_dtypes(include = np.number)
cat_df = X_train.select_dtypes(exclude = np.number)
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
# Code ends
cols = list(num_df)
fig, axes = plt.subplots(nrows =9, ncols= 1)
for i in range(1,9):
sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])
# --------------
# Code starts here
# Code ends here
cols= list(cat_df)
fig, axes = plt.subplots(nrows = 2, ncols= 2)
for i in range (0,2):
for j in range(0,2):
sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j])
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
# Code starts here
for i in list(cat_df):
X_train[i].fillna('NA')
le = LabelEncoder()
X_train[i] = le.fit_transform(X_train[i])
X_test[i].fillna('NA')
le = LabelEncoder()
X_test[i] = le.fit_transform(X_test[i])
#y_test = y_test.str.replace('No',0)
y_train.replace({'No':0,'Yes':1},inplace=True)
y_test.replace({'No':0,'Yes':1},inplace=True)
# Code ends here
from sklearn.metrics import accuracy_score
model = DecisionTreeClassifier(random_state = 0)
model.fit(X_train, y_train)
y_preds = model.predict(X_test)
acc= accuracy_score(y_test, y_preds)
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = DecisionTreeClassifier(random_state =0)
p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)
p_tree.fit(X_train,y_train)
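# 5-fold grid search over max_depth and min_samples_leaf; with scikit-learn's default refit=True,
# p_tree.best_estimator_ is refit on the full training set and is what predicts below.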
# Code ends here
ypreds2 = p_tree.predict(X_test)
acc_2 = accuracy_score(y_test, ypreds2)
acc_2
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big=pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here
|
[
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.axis",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.export_graphviz",
"sklearn.preprocessing.LabelEncoder",
"pydotplus.graph_from_dot_data",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"numpy.arange",
"seaborn.countplot",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots"
] |
[((150, 167), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (161, 167), True, 'import pandas as pd\n'), ((279, 319), 'sklearn.model_selection.train_test_split', 'tts', (['X', 'y'], {'random_state': '(0)', 'test_size': '(0.3)'}), '(X, y, random_state=0, test_size=0.3)\n', (282, 319), True, 'from sklearn.model_selection import train_test_split as tts\n'), ((569, 581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (579, 581), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1288), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(9)', 'ncols': '(1)'}), '(nrows=9, ncols=1)\n', (1270, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1489), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (1471, 1489), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2223), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2207, 2223), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2291, 2322), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_preds'], {}), '(y_test, y_preds)\n', (2305, 2322), False, 'from sklearn.metrics import accuracy_score\n'), ((2547, 2585), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2569, 2585), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2596, 2660), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model_2', 'param_grid': 'parameter_grid', 'cv': '(5)'}), '(estimator=model_2, param_grid=parameter_grid, cv=5)\n', (2608, 2660), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2748, 2779), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'ypreds2'], {}), '(y_test, ypreds2)\n', (2762, 2779), False, 'from sklearn.metrics import accuracy_score\n'), ((3032, 3203), 'sklearn.tree.export_graphviz', 'export_graphviz', ([], {'decision_tree': 'p_tree.best_estimator_', 'out_file': 'None', 'feature_names': 'X.columns', 'filled': '(True)', 'class_names': "['loan_paid_back_yes', 'loan_paid_back_no']"}), "(decision_tree=p_tree.best_estimator_, out_file=None,\n feature_names=X.columns, filled=True, class_names=['loan_paid_back_yes',\n 'loan_paid_back_no'])\n", (3047, 3203), False, 'from sklearn.tree import export_graphviz\n'), ((3208, 3247), 'pydotplus.graph_from_dot_data', 'pydotplus.graph_from_dot_data', (['dot_data'], {}), '(dot_data)\n', (3237, 3247), False, 'import pydotplus\n'), ((3380, 3408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (3390, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3441, 3456), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3449, 3456), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3465, 3467), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1369), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'y_train', 'y': 'num_df[cols[i]]', 'ax': 'axes[i]'}), '(x=y_train, y=num_df[cols[i]], ax=axes[i])\n', (1327, 1369), True, 'import seaborn as sns\n'), ((1828, 1842), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1840, 1842), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1926, 1940), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1938, 1940), False, 'from sklearn.preprocessing import LabelEncoder\n'), 
((2463, 2479), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (2472, 2479), True, 'import numpy as np\n'), ((3419, 3439), 'matplotlib.pyplot.imread', 'plt.imread', (['img_path'], {}), '(img_path)\n', (3429, 3439), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1612), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'X_train[cols[i * 2 + j]]', 'hue': 'y_train', 'ax': 'axes[i, j]'}), '(x=X_train[cols[i * 2 + j]], hue=y_train, ax=axes[i, j])\n', (1556, 1612), True, 'import seaborn as sns\n')]
|
# -*- coding: utf-8 -*-
# @Time : 19-11-19 22:25
# @Author : <NAME>
# @Reference : None
# @File : cut_twist_join.py
# @IDE : PyCharm Community Edition
"""
Cut the front and back faces of an ID card out of the original image.
Required arguments:
    1. The directory containing the images.
Output:
    The cropped front and back face images of the ID card.
"""
import os
import cv2
import numpy as np
def point_judge(center, bbox):
"""
    Arrange the corners of a bounding rectangle into a fixed order.
    :param center: coordinates of the rectangle centre [x, y]
    :param bbox: rectangle corner coordinates [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
    :return: corner coordinates, in the order bottom-left, bottom-right, top-left, top-right
"""
left = []
right = []
for i in range(4):
        if bbox[i][0] > center[0]:  # any point whose x coordinate exceeds the centre's must be on the right side
right.append(bbox[i])
else:
left.append(bbox[i])
    if right[0][1] > right[1][1]:  # if the y coordinate is larger, it is the top-right point
right_down = right[1]
right_up = right[0]
else:
right_down = right[0]
right_up = right[1]
    if left[0][1] > left[1][1]:  # if the y coordinate is larger, it is the top-left point
left_down = left[1]
left_up = left[0]
else:
left_down = left[0]
left_up = left[1]
return left_down, right_down, left_up, right_up
def gray_and_fliter(img, image_name='1.jpg', save_path='./'):  # convert to grayscale and filter; the last two parameters are for debugging
"""
    Convert the image to grayscale and filter (sharpen) it.
    :param img: input RGB image
    :param image_name: input image name, used for testing
    :param save_path: path for saving the filtered result, used for testing
    :return: grayscale, filtered image
"""
    # img = cv2.imread(image_path + image_name)  # read the image
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray)  # save for inspection
img_blurred = cv2.filter2D(img_gray, -1,
                               kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))  # filter the image; this is a sharpening kernel
img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
    # cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred)  # sharpened; the convolution kernel can be changed here
return img_blurred
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are for debugging
"""
    Compute the gradient and binarize.
    :param img_blurred: filtered image
    :param image_name: image name, for testing
    :param save_path: save path, for testing
    :return: binarized image
"""
gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel operator to compute the gradient; the Canny operator could be used instead
    # changed to adaptive thresholding here; it does not seem to help much
img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarization; the threshold is not well tuned yet
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion followed by dilation
    # after shrinking the kernel and increasing the erosion/dilation iterations, the failure rate drops sharply
return img_closed
def find_bbox(img, img_closed):  # locate the front and back regions of the ID card
"""
    Locate and crop the front and back regions of the ID card from the binarized result.
    :param img: original RGB image
    :param img_closed: binarized image
    :return: the front and back ID-card regions
"""
    (contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # find the candidate boxes
    # if this line errors because of your OpenCV version (4.0 and above vs. 3.x), change (contours, _) to (_, contours, _)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)  # sort by contour area
countours_res = []
for i in range(0, len(contours)):
        area = cv2.contourArea(contours[i])  # compute the area
if (area <= 0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0] * img.shape[1]):
            # heuristic: an ID-card face should cover no more than 0.4 and no less than 0.05 of the whole image (these ratios were chosen loosely)
            rect = cv2.minAreaRect(contours[i])  # minimum-area bounding rectangle: returns centre point, (width, height) and rotation angle
box = cv2.boxPoints(rect)
left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box)
            src = np.float32([left_down, right_down, left_up, right_up])  # note: this point order must match dst below
dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))],
[int(max(rect[1][0], rect[1][1])),
                               int(min(rect[1][0], rect[1][1]))]])  # the width/height order in rect is unclear, but an ID card is always wider than tall, hence the max/min checks
            m = cv2.getPerspectiveTransform(src, dst)  # perspective transform matrix
result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))),
                                         flags=cv2.INTER_CUBIC)  # apply the perspective warp
countours_res.append(result)
    return countours_res  # return the ID-card regions
def find_cut_line(img_closed_original):  # when front and back are stuck together, use the row with the fewest white pixels as the dividing line
"""
    Forcefully split stuck-together regions according to a heuristic.
    :param img_closed_original: binarized image
    :return: processed binarized image
"""
img_closed = img_closed_original.copy()
img_closed = img_closed // 250
#print(img_closed.shape)
    width_sum = img_closed.sum(axis=1)  # sum along the width to count white pixels in each row
    start_region_flag = 0
    start_region_index = 0  # row where the ID-card region starts
    end_region_index = 0  # row where the ID-card region ends
    for i in range(img_closed_original.shape[0]):  # 1000 was the original image height; img_closed_original.shape[0] is used here instead
        if start_region_flag == 0 and width_sum[i] > 330:
            start_region_flag = 1
            start_region_index = i  # the first row with more than 330 white pixels marks the start of the ID-card region
        if width_sum[i] > 330:
            end_region_index = i  # any row with more than 330 white pixels counts as ID-card region, so keep updating the end row
    # the row with the fewest white pixels inside the region is taken as the boundary between front and back
    # argsort only looks at the slice between the start and end rows, so the start row is added back to the result
min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
img_closed_original[min_line_position][:] = 0
    for i in range(1, 11):  # tunable parameter: also consider the next 10 lowest-count rows
temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
        if abs(temp_line_position - min_line_position) < 30:  # restrict to rows within [-30, 30] of the minimum row
            img_closed_original[temp_line_position][:] = 0  # force the row to 0
return img_closed_original
def cut_part_img(img, cut_percent):
"""
    # Trim the ID-card border along both the width and the height directions.
    :param img: the ID-card region
    :param cut_percent: fraction of the image to trim away
    :return: the trimmed ID-card region
"""
height, width, _ = img.shape
    height_num = int(height * cut_percent)  # number of rows to trim
    h_start = 0 + height_num // 2  # trim equally on both sides
    h_end = height - height_num // 2 - 1
    width_num = int(width * cut_percent)  # number of columns to trim
w_start = 0 + width_num // 2
w_end = width - width_num // 2 - 1
    return img[h_start:h_end, w_start:w_end]  # return the cropped image
def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'):  # process a single image
"""
    Crop the front and back ID-card regions out of one image.
    :param img_path: directory containing the image
    :param img_name: image file name
    :param save_path: directory for saving results, used for testing
    :param problem_path: directory for saving intermediate results of failed images, used for testing
    :return: front and back ID-card images
"""
img_path_name = os.path.join(img_path, img_name)
    if not os.path.exists(img_path_name):  # check that the image exists
        print('img {name} does not exist'.format(name=img_path_name))
        return 1, []  # image missing: return immediately and count it as one error
    img = cv2.imread(img_path_name)  # read the image
    img_blurred = gray_and_fliter(img, img_name)  # grayscale and filter
img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
    # sharpen the image
    img_binary = gradient_and_binary(img_blurred)  # binarize
    res_bbox = find_bbox(img_t, img_binary)  # split into front and back
    if len(res_bbox) != 2:  # fallback handling when the split fails
print('Error happened when cut img {name}, try exception cut program '.format(name=img_path_name))
# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred)
# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary)
        # cv2.imwrite(os.path.join(problem_path, img_name), img)  # for debugging: save intermediate results
        img_binary = find_cut_line(img_binary)  # force-split the front and back
        res_bbox = find_bbox(img_t, img_binary)
        if len(res_bbox) != 2:  # recovery failed
print('Failed to cut img {name}, exception program end'.format(name=img_path_name))
return 1, None
        else:  # recovery succeeded
print('Correctly cut img {name}, exception program end'.format(name=img_path_name))
return 0, res_bbox
    else:  # normal path: the cut succeeded
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img)
return 0, res_bbox
def process_img(img_path, save_path, problem_path):
"""
    Cut every image in a directory.
    :param img_path: directory containing the images
    :param save_path: directory for the results
    :param problem_path: directory for problem images
    :return: None
"""
    if not os.path.exists(img_path):  # check that the image directory exists
        print('img path {name} does not exist, program break.'.format(name=img_path))
        return
    if not os.path.exists(save_path):  # create the output directory if it does not exist
        os.makedirs(save_path)
    if not os.path.exists(problem_path):  # create the problem directory if it does not exist
        os.makedirs(problem_path)
img_names = os.listdir(img_path)
error_count = 0
error_names = []
for img_name in img_names:
error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path, problem_path)
error_count += error_temp
if error_temp == 0:
cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
else:
error_names.append(img_name)
print('total error number is: ', error_count)
    print('error image names:')
for error_img_name in error_names:
print(error_img_name)
return
if __name__ == '__main__':
origin_img_path = './problem_imgs/'
cutted_save_path = './res_imgs/'
cut_problem_path = './temp_imgs/'
#process_img(img_path=origin_img_path, save_path=cutted_save_path, problem_path=cut_problem_path)
|
[
"cv2.getPerspectiveTransform",
"cv2.adaptiveThreshold",
"numpy.argsort",
"cv2.boxPoints",
"cv2.minAreaRect",
"cv2.erode",
"os.path.join",
"cv2.contourArea",
"cv2.subtract",
"cv2.dilate",
"cv2.cvtColor",
"os.path.exists",
"cv2.convertScaleAbs",
"cv2.morphologyEx",
"os.listdir",
"cv2.Sobel",
"os.makedirs",
"cv2.getStructuringElement",
"numpy.float32",
"cv2.imread",
"numpy.array"
] |
[((1470, 1507), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1482, 1507), False, 'import cv2\n'), ((2259, 2312), 'cv2.Sobel', 'cv2.Sobel', (['img_blurred'], {'ddepth': 'cv2.CV_32F', 'dx': '(1)', 'dy': '(0)'}), '(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)\n', (2268, 2312), False, 'import cv2\n'), ((2326, 2379), 'cv2.Sobel', 'cv2.Sobel', (['img_blurred'], {'ddepth': 'cv2.CV_32F', 'dx': '(0)', 'dy': '(1)'}), '(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)\n', (2335, 2379), False, 'import cv2\n'), ((2400, 2426), 'cv2.subtract', 'cv2.subtract', (['gradX', 'gradY'], {}), '(gradX, gradY)\n', (2412, 2426), False, 'import cv2\n'), ((2447, 2480), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['img_gradient'], {}), '(img_gradient)\n', (2466, 2480), False, 'import cv2\n'), ((2555, 2654), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['img_gradient', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', '(3)', '(-3)'], {}), '(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 3, -3)\n', (2576, 2654), False, 'import cv2\n'), ((2762, 2814), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (2787, 2814), False, 'import cv2\n'), ((2833, 2886), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_thresh', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(img_thresh, cv2.MORPH_CLOSE, kernel)\n', (2849, 2886), False, 'import cv2\n'), ((2905, 2957), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_closed', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img_closed, cv2.MORPH_OPEN, kernel)\n', (2921, 2957), False, 'import cv2\n'), ((2976, 3017), 'cv2.erode', 'cv2.erode', (['img_closed', 'None'], {'iterations': '(9)'}), '(img_closed, None, iterations=9)\n', (2985, 3017), False, 'import cv2\n'), ((3036, 3078), 'cv2.dilate', 'cv2.dilate', (['img_closed', 'None'], {'iterations': '(9)'}), '(img_closed, None, iterations=9)\n', (3046, 3078), False, 'import cv2\n'), ((7141, 7173), 'os.path.join', 'os.path.join', (['img_path', 'img_name'], {}), '(img_path, img_name)\n', (7153, 7173), False, 'import os\n'), ((7350, 7375), 'cv2.imread', 'cv2.imread', (['img_path_name'], {}), '(img_path_name)\n', (7360, 7375), False, 'import cv2\n'), ((9477, 9497), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (9487, 9497), False, 'import os\n'), ((3674, 3702), 'cv2.contourArea', 'cv2.contourArea', (['contours[i]'], {}), '(contours[i])\n', (3689, 3702), False, 'import cv2\n'), ((7186, 7215), 'os.path.exists', 'os.path.exists', (['img_path_name'], {}), '(img_path_name)\n', (7200, 7215), False, 'import os\n'), ((9135, 9159), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (9149, 9159), False, 'import os\n'), ((9288, 9313), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (9302, 9313), False, 'import os\n'), ((9341, 9363), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (9352, 9363), False, 'import os\n'), ((9376, 9404), 'os.path.exists', 'os.path.exists', (['problem_path'], {}), '(problem_path)\n', (9390, 9404), False, 'import os\n'), ((9432, 9457), 'os.makedirs', 'os.makedirs', (['problem_path'], {}), '(problem_path)\n', (9443, 9457), False, 'import os\n'), ((1674, 1733), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]', 'np.float32'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n', (1682, 1733), True, 'import numpy as np\n'), ((1808, 1867), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, 
-1], [0, -1, 0]]', 'np.float32'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n', (1816, 1867), True, 'import numpy as np\n'), ((3904, 3932), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contours[i]'], {}), '(contours[i])\n', (3919, 3932), False, 'import cv2\n'), ((3985, 4004), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3998, 4004), False, 'import cv2\n'), ((4133, 4187), 'numpy.float32', 'np.float32', (['[left_down, right_down, left_up, right_up]'], {}), '([left_down, right_down, left_up, right_up])\n', (4143, 4187), True, 'import numpy as np\n'), ((4514, 4551), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (4541, 4551), False, 'import cv2\n'), ((5798, 5856), 'numpy.argsort', 'np.argsort', (['width_sum[start_region_index:end_region_index]'], {}), '(width_sum[start_region_index:end_region_index])\n', (5808, 5856), True, 'import numpy as np\n'), ((7486, 7545), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]', 'np.float32'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n', (7494, 7545), True, 'import numpy as np\n'), ((6005, 6063), 'numpy.argsort', 'np.argsort', (['width_sum[start_region_index:end_region_index]'], {}), '(width_sum[start_region_index:end_region_index])\n', (6015, 6063), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Challenge 4
#
# In this challenge we will practise hypothesis testing. We will use the [2016 Olympics in Rio de Janeiro](https://www.kaggle.com/rio2016/olympic-games/) _data set_, which contains data about the athletes of the 2016 Olympic Games in Rio de Janeiro.
#
# This _data set_ holds general information on 11538 athletes, such as name, nationality, height, weight and sport. We are especially interested in the numerical variables height (`height`) and weight (`weight`). The analyses done here are part of an Exploratory Data Analysis (EDA).
#
# > Note: please do not change the names of the answer functions.
# ## General _setup_
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
# In[2]:
#%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
# In[3]:
athletes = pd.read_csv("athletes.csv")
# In[4]:
athletes.info()
# In[5]:
athletes.head()
# In[6]:
athletes[['height','weight']].describe()
# In[7]:
athletes[['height','weight']].hist()
# In[8]:
def get_sample(df, col_name, n=100, seed=42):
"""Get a sample from a column of a dataframe.
It drops any numpy.nan entries before sampling. The sampling
is performed without replacement.
    Example of numpydoc, for those who haven't seen it yet.
Parameters
----------
df : pandas.DataFrame
Source dataframe.
col_name : str
Name of the column to be sampled.
n : int
Sample size. Default is 100.
seed : int
Random seed. Default is 42.
Returns
-------
pandas.Series
Sample of size n from dataframe's column.
"""
np.random.seed(seed)
    random_idx = np.random.choice(df[col_name].dropna().index, size=n, replace=False) # returns an array with the sampled row indexes
    return df.loc[random_idx, col_name] # returns a Series with the index and value of the column
# ## Start your analysis from here
# In[9]:
# Your analysis starts here.
# ## Question 1
#
# Considering a sample of size 3000 from the `height` column obtained with the `get_sample()` function, run the Shapiro-Wilk normality test with the `scipy.stats.shapiro()` function. Based on this test, can we state that the heights are normally distributed (at the 5% significance level)? Answer with a boolean (`True` or `False`).
# In[10]:
def q1():
amostra_q1 = get_sample(athletes,'height', n=3000, seed=42)
stat, p = sct.shapiro(amostra_q1)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[11]:
q1()
# __Food for thought__:
#
# * Plot the histogram of this variable (with, for example, `bins=25`). Are the shape of the plot and the test result consistent? Why?
# * Plot the qq-plot for this variable and analyse it.
# * Is there any reasonable significance level that would give us a different test result? (Do not do this in practice. This is called _p-value hacking_, and it is not cool.)
# In[12]:
amostra_q1 = get_sample(athletes,'height', n=3000, seed=42)
# In[13]:
sns.distplot(amostra_q1, bins=25, hist_kws={"density": True})
plt.show ()
# In[14]:
sm.qqplot(amostra_q1, fit=True, line="45")
plt.show ()
# In[15]:
amostra_q1 = get_sample(athletes,'height', n=3000, seed=42)
stat, p = sct.shapiro(amostra_q1)
p > 0.0000001
# ## Question 2
#
# Repeat the same procedure as above, but now using the Jarque-Bera normality test through the `scipy.stats.jarque_bera()` function. Can we now state that the heights are normally distributed (at the 5% significance level)? Answer with a boolean (`True` or `False`).
# In[16]:
def q2():
amostra_q2 = get_sample(athletes,'height', n=3000, seed=42)
stat, p = sct.jarque_bera(amostra_q2)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[17]:
q2()
# __Food for thought__:
#
# * Does this result make sense?
# In[18]:
amostra_q2 = get_sample(athletes,'height', n=3000, seed=42)
sm.qqplot(amostra_q2, fit=True, line="45")
plt.show ()
# ## Question 3
#
# Now considering a sample of size 3000 from the `weight` column obtained with the `get_sample()` function, run the D'Agostino-Pearson normality test using the `scipy.stats.normaltest()` function. Can we state that the weights come from a normal distribution at the 5% significance level? Answer with a boolean (`True` or `False`).
# In[19]:
def q3():
amostra_q3 = get_sample(athletes,'weight', n=3000, seed=42)
stat, p = sct.normaltest(amostra_q3)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[20]:
q3()
# __Food for thought__:
#
# * Plot the histogram of this variable (with, for example, `bins=25`). Are the shape of the plot and the test result consistent? Why?
# * A _box plot_ could also help to understand the answer.
# In[21]:
amostra_q3 = get_sample(athletes,'weight', n=3000, seed=42)
sns.distplot(amostra_q3, bins=25, hist_kws={"density": True})
plt.show ()
# In[22]:
sns.boxplot(data = amostra_q3)
# ## Question 4
#
# Apply a logarithmic transformation to the `weight` sample from question 3 and repeat the same procedure. Can we state that the transformed variable is normal at the 5% significance level? Answer with a boolean (`True` or `False`).
# In[23]:
def q4():
amostra_q4 = get_sample(athletes,'weight', n=3000, seed=42)
amostra_q4_transformada = np.log(amostra_q4)
stat, p = sct.normaltest(amostra_q4_transformada)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[24]:
q4()
# __Food for thought__:
#
# * Plot the histogram of this variable (with, for example, `bins=25`). Are the shape of the plot and the test result consistent? Why?
# * Did you expect a different result now?
# In[25]:
amostra_q4 = get_sample(athletes,'weight', n=3000, seed=42)
amostra_q4_transformada = np.log(amostra_q4)
sns.distplot(amostra_q4_transformada, bins=25, hist_kws={"density": True})
plt.show ()
# In[26]:
sns.boxplot(data = amostra_q4_transformada)
# > __For questions 5, 6 and 7 below, consider all tests performed at the 5% significance level__.
# ## Question 5
#
# Get all Brazilian, US and Canadian athletes into `DataFrame`s named `bra`, `usa` and `can`, respectively. Run a hypothesis test for the comparison of the mean heights (`height`) for independent samples with different variances, using the `scipy.stats.ttest_ind()` function, between `bra` and `usa`. Can we state that the means are statistically equal? Answer with a boolean (`True` or `False`).
# In[27]:
athletes.columns
# In[45]:
athletes[(athletes.nationality == 'BRA') | (athletes.nationality == 'USA') | (athletes.nationality == 'CAN')]
# In[28]:
bra = athletes[athletes.nationality == 'BRA']
usa = athletes[athletes.nationality == 'USA']
can = athletes[athletes.nationality == 'CAN']
# In[29]:
bra['height'].describe()
# In[30]:
bra.isna().sum()
# In[31]:
usa['height'].describe()
# In[32]:
usa.isna().sum()
# In[46]:
can['height'].describe()
# In[47]:
can.isna().sum()
# In[33]:
def q5():
    stat, p = sct.ttest_ind(bra['height'], usa['height'], equal_var = False, nan_policy = 'omit') # equal_var=False runs Welch's t-test, which does not assume equal population variances
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[34]:
q5()
# In[35]:
sns.distplot(bra['height'], bins=25, hist=False, rug=True, label='BRA')
sns.distplot(usa['height'], bins=25, hist=False, rug=True, label='USA')
# ## Question 6
#
# Repeat the procedure of question 5, but now between the heights of `bra` and `can`. Can we now state that the means are statistically equal? Answer with a boolean (`True` or `False`).
# In[48]:
def q6():
    stat, p = sct.ttest_ind(bra['height'], can['height'], equal_var = False, nan_policy = 'omit') # equal_var=False runs Welch's t-test, which does not assume equal population variances
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[49]:
q6()
# In[50]:
sns.distplot(bra['height'], bins=25, hist=False, rug=True, label='BRA')
sns.distplot(can['height'], bins=25, hist=False, rug=True, label='CAN')
# ## Question 7
#
# Repeat the procedure of question 6, but now between the heights of `usa` and `can`. What is the returned p-value? Answer as a single scalar rounded to eight decimal places.
# In[87]:
def q7():
    stat, p = sct.ttest_ind(usa['height'], can['height'], equal_var = False, nan_policy = 'omit') # equal_var=False runs Welch's t-test, which does not assume equal population variances
print('stat= {}, p={}'.format(stat,p))
if p > 0.05:
print('Probably the same distribution')
else:
print('Probably different distributions')
return float(np.round(p, 8))
# In[88]:
q7()
# __Food for thought__:
#
# * Does the result make sense?
# * Can you interpret this p-value?
# * Can you reach this p-value starting from the test statistic?
# In[72]:
stat, p = sct.ttest_ind(usa['height'], can['height'], equal_var = True, nan_policy = 'omit')
print('stat= {}, p={}'.format(stat,p))
# In[69]:
# degrees of freedom for the independent t-test with similar variances: df = n1 + n2 - 2
gl = len(usa) + len(can) - 2
print(f"Degrees of freedom: {gl}")
q7_sf = sct.t.sf(stat, gl)*2  # two-tailed hypothesis
print(q7_sf)
# In[77]:
sns.distplot(usa['height'], bins=25, hist=False, rug=True, label='USA')
sns.distplot(can['height'], bins=25, hist=False, rug=True, label='CAN')
|
[
"matplotlib.pyplot.show",
"numpy.log",
"numpy.random.seed",
"scipy.stats.shapiro",
"pandas.read_csv",
"scipy.stats.normaltest",
"scipy.stats.ttest_ind",
"IPython.core.pylabtools.figsize",
"seaborn.boxplot",
"seaborn.distplot",
"statsmodels.api.qqplot",
"scipy.stats.t.sf",
"scipy.stats.jarque_bera",
"numpy.round",
"seaborn.set"
] |
[((944, 958), 'IPython.core.pylabtools.figsize', 'figsize', (['(12)', '(8)'], {}), '(12, 8)\n', (951, 958), False, 'from IPython.core.pylabtools import figsize\n'), ((960, 969), 'seaborn.set', 'sns.set', ([], {}), '()\n', (967, 969), True, 'import seaborn as sns\n'), ((994, 1021), 'pandas.read_csv', 'pd.read_csv', (['"""athletes.csv"""'], {}), "('athletes.csv')\n", (1005, 1021), True, 'import pandas as pd\n'), ((3187, 3248), 'seaborn.distplot', 'sns.distplot', (['amostra_q1'], {'bins': '(25)', 'hist_kws': "{'density': True}"}), "(amostra_q1, bins=25, hist_kws={'density': True})\n", (3199, 3248), True, 'import seaborn as sns\n'), ((3249, 3259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3257, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3275, 3317), 'statsmodels.api.qqplot', 'sm.qqplot', (['amostra_q1'], {'fit': '(True)', 'line': '"""45"""'}), "(amostra_q1, fit=True, line='45')\n", (3284, 3317), True, 'import statsmodels.api as sm\n'), ((3318, 3328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3326, 3328), True, 'import matplotlib.pyplot as plt\n'), ((3414, 3437), 'scipy.stats.shapiro', 'sct.shapiro', (['amostra_q1'], {}), '(amostra_q1)\n', (3425, 3437), True, 'import scipy.stats as sct\n'), ((4108, 4150), 'statsmodels.api.qqplot', 'sm.qqplot', (['amostra_q2'], {'fit': '(True)', 'line': '"""45"""'}), "(amostra_q2, fit=True, line='45')\n", (4117, 4150), True, 'import statsmodels.api as sm\n'), ((4151, 4161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4159, 4161), True, 'import matplotlib.pyplot as plt\n'), ((5037, 5098), 'seaborn.distplot', 'sns.distplot', (['amostra_q3'], {'bins': '(25)', 'hist_kws': "{'density': True}"}), "(amostra_q3, bins=25, hist_kws={'density': True})\n", (5049, 5098), True, 'import seaborn as sns\n'), ((5099, 5109), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5107, 5109), True, 'import matplotlib.pyplot as plt\n'), ((5125, 5153), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'amostra_q3'}), '(data=amostra_q3)\n', (5136, 5153), True, 'import seaborn as sns\n'), ((6010, 6028), 'numpy.log', 'np.log', (['amostra_q4'], {}), '(amostra_q4)\n', (6016, 6028), True, 'import numpy as np\n'), ((6029, 6103), 'seaborn.distplot', 'sns.distplot', (['amostra_q4_transformada'], {'bins': '(25)', 'hist_kws': "{'density': True}"}), "(amostra_q4_transformada, bins=25, hist_kws={'density': True})\n", (6041, 6103), True, 'import seaborn as sns\n'), ((6104, 6114), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6112, 6114), True, 'import matplotlib.pyplot as plt\n'), ((6130, 6171), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'amostra_q4_transformada'}), '(data=amostra_q4_transformada)\n', (6141, 6171), True, 'import seaborn as sns\n'), ((7556, 7627), 'seaborn.distplot', 'sns.distplot', (["bra['height']"], {'bins': '(25)', 'hist': '(False)', 'rug': '(True)', 'label': '"""BRA"""'}), "(bra['height'], bins=25, hist=False, rug=True, label='BRA')\n", (7568, 7627), True, 'import seaborn as sns\n'), ((7628, 7699), 'seaborn.distplot', 'sns.distplot', (["usa['height']"], {'bins': '(25)', 'hist': '(False)', 'rug': '(True)', 'label': '"""USA"""'}), "(usa['height'], bins=25, hist=False, rug=True, label='USA')\n", (7640, 7699), True, 'import seaborn as sns\n'), ((8222, 8293), 'seaborn.distplot', 'sns.distplot', (["bra['height']"], {'bins': '(25)', 'hist': '(False)', 'rug': '(True)', 'label': '"""BRA"""'}), "(bra['height'], bins=25, hist=False, rug=True, label='BRA')\n", (8234, 8293), True, 'import seaborn as sns\n'), ((8294, 
8365), 'seaborn.distplot', 'sns.distplot', (["can['height']"], {'bins': '(25)', 'hist': '(False)', 'rug': '(True)', 'label': '"""CAN"""'}), "(can['height'], bins=25, hist=False, rug=True, label='CAN')\n", (8306, 8365), True, 'import seaborn as sns\n'), ((9212, 9290), 'scipy.stats.ttest_ind', 'sct.ttest_ind', (["usa['height']", "can['height']"], {'equal_var': '(True)', 'nan_policy': '"""omit"""'}), "(usa['height'], can['height'], equal_var=True, nan_policy='omit')\n", (9225, 9290), True, 'import scipy.stats as sct\n'), ((9585, 9656), 'seaborn.distplot', 'sns.distplot', (["usa['height']"], {'bins': '(25)', 'hist': '(False)', 'rug': '(True)', 'label': '"""USA"""'}), "(usa['height'], bins=25, hist=False, rug=True, label='USA')\n", (9597, 9656), True, 'import seaborn as sns\n'), ((9657, 9728), 'seaborn.distplot', 'sns.distplot', (["can['height']"], {'bins': '(25)', 'hist': '(False)', 'rug': '(True)', 'label': '"""CAN"""'}), "(can['height'], bins=25, hist=False, rug=True, label='CAN')\n", (9669, 9728), True, 'import seaborn as sns\n'), ((1819, 1839), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1833, 1839), True, 'import numpy as np\n'), ((2611, 2634), 'scipy.stats.shapiro', 'sct.shapiro', (['amostra_q1'], {}), '(amostra_q1)\n', (2622, 2634), True, 'import scipy.stats as sct\n'), ((3862, 3889), 'scipy.stats.jarque_bera', 'sct.jarque_bera', (['amostra_q2'], {}), '(amostra_q2)\n', (3877, 3889), True, 'import scipy.stats as sct\n'), ((4624, 4650), 'scipy.stats.normaltest', 'sct.normaltest', (['amostra_q3'], {}), '(amostra_q3)\n', (4638, 4650), True, 'import scipy.stats as sct\n'), ((5536, 5554), 'numpy.log', 'np.log', (['amostra_q4'], {}), '(amostra_q4)\n', (5542, 5554), True, 'import numpy as np\n'), ((5569, 5608), 'scipy.stats.normaltest', 'sct.normaltest', (['amostra_q4_transformada'], {}), '(amostra_q4_transformada)\n', (5583, 5608), True, 'import scipy.stats as sct\n'), ((7282, 7361), 'scipy.stats.ttest_ind', 'sct.ttest_ind', (["bra['height']", "usa['height']"], {'equal_var': '(False)', 'nan_policy': '"""omit"""'}), "(bra['height'], usa['height'], equal_var=False, nan_policy='omit')\n", (7295, 7361), True, 'import scipy.stats as sct\n'), ((7948, 8027), 'scipy.stats.ttest_ind', 'sct.ttest_ind', (["bra['height']", "can['height']"], {'equal_var': '(False)', 'nan_policy': '"""omit"""'}), "(bra['height'], can['height'], equal_var=False, nan_policy='omit')\n", (7961, 8027), True, 'import scipy.stats as sct\n'), ((8610, 8689), 'scipy.stats.ttest_ind', 'sct.ttest_ind', (["usa['height']", "can['height']"], {'equal_var': '(False)', 'nan_policy': '"""omit"""'}), "(usa['height'], can['height'], equal_var=False, nan_policy='omit')\n", (8623, 8689), True, 'import scipy.stats as sct\n'), ((9513, 9531), 'scipy.stats.t.sf', 'sct.t.sf', (['stat', 'gl'], {}), '(stat, gl)\n', (9521, 9531), True, 'import scipy.stats as sct\n'), ((8968, 8982), 'numpy.round', 'np.round', (['p', '(8)'], {}), '(p, 8)\n', (8976, 8982), True, 'import numpy as np\n')]
|
import numpy as np
from tensorflow import keras
import pandas as pd
import os
class DcmDataGenerator(keras.utils.Sequence):
"""Generates data for Keras
Sequence based data generator. Suitable for building data generator for training and prediction.
"""
def __init__(self, images_path, dim=(15, 512, 512), window=None):
"""Initialization
:param images_path: path to images location
:param dim: tuple indicating image dimension in format CHW
"""
self.list_IDs = os.listdir(images_path)
self.images_path = images_path
self.dim = dim
self.indexes = np.arange(len(self.list_IDs))
self.on_epoch_end()
self.window = window
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return len(self.list_IDs)
def on_epoch_end(self):
"""Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.list_IDs))
def flow(self, seed):
np.random.seed(seed)
i = int(np.random.randint(0, self.__len__(), size=(1,)))
while True:
yield self.__getitem__(i % self.__len__())
i += 1
def __getitem__(self, index):
"""Generate one patient's data
:param index: index of the patient
:return: X_dcm
"""
# Find list of IDs
patient_ID = self.list_IDs[index]
# Generate data
X_dcm = self._generate_X(patient_ID)
return X_dcm, np.array([1, ])
def _generate_X(self, patient_ID):
"""Generates data containing patient's images
:param patient_ID: ID of the patient
:return: patient's images
"""
# Initialization
X_dcm = np.empty((1, *self.dim), dtype=np.float32)
patient_path = os.path.join(self.images_path, patient_ID)
dcm_names = np.array([dcm_name[:-4] for dcm_name in os.listdir(patient_path)], dtype=int)
dcm_names = sorted(list(dcm_names))
patient_dcm_paths = [f'{self.images_path}/{patient_ID}/{dcm_num}.npy' for dcm_num in dcm_names]
# Generate data
for j, dcm_path in enumerate(patient_dcm_paths):
X_dcm[0, j] = self._load_dcm(dcm_path)
X_dcm = np.moveaxis(X_dcm, 1, -1)
return X_dcm
def _load_dcm(self, image_path):
"""Load grayscale image
:param image_path: path to image to load
:return: loaded image
"""
img = np.load(image_path, allow_pickle=True)
if self.window:
lb = self.window[0]
ub = self.window[1]
img[img < lb] = lb
img[img > ub] = ub
img = (img - lb) / (ub - lb)
return img
class CsvDataGenerator(keras.utils.Sequence):
"""Generates data for Keras
Sequence based data generator. Suitable for building data generator for training and prediction.
"""
def __init__(self, csv_path, to_fit=True, to_normalize=True):
"""Initialization
:param to_normalize: True to normalize, False otherwise
:param csv_path: path to csv file location
:param to_fit: True to return X and y, False to return X only
"""
self.to_normalize = to_normalize
self.list_IDs = os.listdir(csv_path[:-4])
self.csv_path = csv_path
self.to_fit = to_fit
self.indexes = np.arange(len(self.list_IDs))
self.on_epoch_end()
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return len(self.list_IDs)
def on_epoch_end(self):
"""Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.list_IDs))
def flow(self, seed):
np.random.seed(seed)
i = int(np.random.randint(0, self.__len__(), size=(1,)))
while True:
yield self.__getitem__(i % self.__len__())
i += 1
def __getitem__(self, index):
"""Generate one patient's data
:param index: index of the patient
:return: X
"""
# Find list of IDs
patient_ID = self.list_IDs[index]
# Generate data
X = self._generate_X(patient_ID)
if self.to_fit:
y = self._generate_y(patient_ID)
return X, y
else:
return X
def _generate_X(self, patient_ID):
"""Generates data containing patient's first csv record
:param patient_ID: ID of the patient
:return: patient's first csv record
"""
X = np.empty(shape=(1, 7), dtype=np.float32)
# Generate data
X[0] = self._load_X(self.csv_path, patient_ID)
return X
def _load_X(self, csv_path, patient_ID):
"""Load csv with patient's weeks and corresponding FVC
:param csv_path: path to csv file with weeks and FVC file to load
:return: loaded csv file with weeks and FVC file to load
"""
patients_df = pd.read_csv(csv_path)
patient = patients_df[patients_df['Patient'] == patient_ID]
patient.reset_index(inplace=True)
X_columns = ['Weeks', 'FVC', 'Age', 'Ex-smoker', 'Never smoked', 'Currently smokes', 'Sex_n']
X_patient = patient.loc[0, X_columns]
if self.to_normalize:
X_patient['Age'] = (X_patient['Age'] - 67.18850871530019) / 7.055116199848975
X_patient['FVC'] = (X_patient['FVC'] - 2690.479018721756) / 832.5021066817238
X_patient['Weeks'] = (X_patient['Weeks'] - 31.861846352485475) / 23.265510111399017
X_patient = X_patient.to_numpy()
return X_patient
def _generate_y(self, patient_ID):
"""Generates data containing patient's [1:] csv records
:param patient_ID: ID of the patient
:return: patient's [1:] csv records
"""
y = np.empty(shape=(1, 146, 2), dtype=np.float32)
# Generate data
y[0] = self._load_y(self.csv_path, patient_ID)
return y
def _load_y(self, csv_path, patient_ID):
"""Load csv with patient's weeks and corresponding FVC
:param csv_path: path to csv file with weeks and FVC file to load
:return: loaded csv file with weeks and FVC file to load
"""
patients_df = pd.read_csv(csv_path)
patient = patients_df[patients_df['Patient'] == patient_ID]
patient.reset_index(inplace=True)
weeks_FVC = patient.loc[1:, ['Weeks', 'FVC']]
weeks_FVC = weeks_FVC[~weeks_FVC.duplicated(['Weeks'])]
weeks_FVC = self.pad_y(weeks_FVC)
weeks_FVC = weeks_FVC.to_numpy()
return weeks_FVC
def pad_y(self, csv_df):
csv_df['isRecord'] = 1
for i in range(-12, 134):
if not np.any(csv_df['Weeks'] == i):
csv_df = csv_df.append({'Weeks': i, 'FVC': 0, 'isRecord': 0}, ignore_index=True)
csv_df.sort_values('Weeks', inplace=True)
csv_df.drop(columns='Weeks', inplace=True)
if self.to_normalize:
csv_df.loc[:, 'FVC'] = (csv_df.loc[:, 'FVC'] - 2690.479018721756) / 832.5021066817238
csv_df.reset_index(drop=True, inplace=True)
return csv_df
# ==================================#
# Creating datagen
def _merge_datagens(csv_gen, dcm_gen, shuffle=True, is_patient_record=True):
seed = 0
while True:
csv_flow = csv_gen.flow(seed)
dcm_flow = dcm_gen.flow(seed)
patient_num = 1
while True:
csv_data = next(csv_flow)
dcm_data = next(dcm_flow)
csv_X = csv_data[0]
dcm_X_img = dcm_data[0]
csv_y = csv_data[1][:, :, 0]
csv_is_patient_record = csv_data[1][:, :, 1]
if is_patient_record:
yield [csv_X, dcm_X_img], csv_y, csv_is_patient_record
else:
yield [csv_X, dcm_X_img], csv_y
patient_num += 1
if patient_num > 175:
break
if shuffle:
seed += 1
def create_datagen(shuffle=True, window=None, is_patient_record=True):
"""Returns generator that yields [csv_X, dcm_X_img], csv_y, csv_is_patient_record"""
csv_datagen = CsvDataGenerator('../../data/processed/train.csv', to_normalize=True)
dcm_datagen = DcmDataGenerator('../../data/processed/train', window=window)
merged_gen = _merge_datagens(csv_datagen, dcm_datagen, shuffle=shuffle, is_patient_record=is_patient_record)
return merged_gen
# def gen_train_test_split(datagen):
# datagen.
# gen = create_datagen(shuffle=True)
# x1, y1, is_p_r1 = next(gen)
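# --- hedged usage sketch (added for clarity; not part of the original module) ---
# Assumes the processed csv/dcm folders referenced in create_datagen() exist;
# the window values below are only illustrative.
if __name__ == "__main__":
    demo_gen = create_datagen(shuffle=True, window=(-1000, 400), is_patient_record=True)
    (csv_X, dcm_X_img), csv_y, csv_is_patient_record = next(demo_gen)
    # csv_X: (1, 7) tabular features; dcm_X_img: (1, 512, 512, 15) CT stack
    # csv_y: (1, 146) padded FVC targets; csv_is_patient_record: (1, 146) mask
    print(csv_X.shape, dcm_X_img.shape, csv_y.shape, csv_is_patient_record.shape)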
|
[
"numpy.moveaxis",
"numpy.load",
"numpy.random.seed",
"pandas.read_csv",
"numpy.empty",
"numpy.any",
"numpy.array",
"os.path.join",
"os.listdir"
] |
[((519, 542), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (529, 542), False, 'import os\n'), ((1054, 1074), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1068, 1074), True, 'import numpy as np\n'), ((1791, 1833), 'numpy.empty', 'np.empty', (['(1, *self.dim)'], {'dtype': 'np.float32'}), '((1, *self.dim), dtype=np.float32)\n', (1799, 1833), True, 'import numpy as np\n'), ((1858, 1900), 'os.path.join', 'os.path.join', (['self.images_path', 'patient_ID'], {}), '(self.images_path, patient_ID)\n', (1870, 1900), False, 'import os\n'), ((2298, 2323), 'numpy.moveaxis', 'np.moveaxis', (['X_dcm', '(1)', '(-1)'], {}), '(X_dcm, 1, -1)\n', (2309, 2323), True, 'import numpy as np\n'), ((2521, 2559), 'numpy.load', 'np.load', (['image_path'], {'allow_pickle': '(True)'}), '(image_path, allow_pickle=True)\n', (2528, 2559), True, 'import numpy as np\n'), ((3316, 3341), 'os.listdir', 'os.listdir', (['csv_path[:-4]'], {}), '(csv_path[:-4])\n', (3326, 3341), False, 'import os\n'), ((3824, 3844), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3838, 3844), True, 'import numpy as np\n'), ((4634, 4674), 'numpy.empty', 'np.empty', ([], {'shape': '(1, 7)', 'dtype': 'np.float32'}), '(shape=(1, 7), dtype=np.float32)\n', (4642, 4674), True, 'import numpy as np\n'), ((5054, 5075), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (5065, 5075), True, 'import pandas as pd\n'), ((5926, 5971), 'numpy.empty', 'np.empty', ([], {'shape': '(1, 146, 2)', 'dtype': 'np.float32'}), '(shape=(1, 146, 2), dtype=np.float32)\n', (5934, 5971), True, 'import numpy as np\n'), ((6351, 6372), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (6362, 6372), True, 'import pandas as pd\n'), ((1549, 1562), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1557, 1562), True, 'import numpy as np\n'), ((6826, 6854), 'numpy.any', 'np.any', (["(csv_df['Weeks'] == i)"], {}), "(csv_df['Weeks'] == i)\n", (6832, 6854), True, 'import numpy as np\n'), ((1961, 1985), 'os.listdir', 'os.listdir', (['patient_path'], {}), '(patient_path)\n', (1971, 1985), False, 'import os\n')]
|
"""
generate periodic boundary condition (PBC).
Two methods to detect and partition the surface-nodes:
1. graph-method: (recommended, can deal with arbitrary deformed shape):
use dictionary-data-structure to map facet-nodes to element-number,
where the surface-facet is shared by only one element.
Construct the node-linking graph of surface, and the node-linking graph of the outlines.
Using outlines as boundaries,
partition the graph into different faces (left-, right-, down-, up-, back-, front- surfaces) by union-find algorithm.
2. method of xMin, xMax, yMin, yMax, zMin, zMax:
detect the surface simply by coordinates of all nodes.
This method can only be applied to the object with cuboid shape.
Two methods match nodes on opposites of the surface:
1. BFS method to match the nodes (time complexity of O(V + E), V and E are number of nodes and edges respectively):
Matching nodes during traversing of surface-node-graphs of opposite faces.
Given a matched node-pair, use similar vectors (pointed from current node to neighbors) to match their neighbors.
    2. nearest-coordinates method: can be very slow when there are very many nodes on a surface (time complexity of O(V^2)).
       (A minimal sketch of the facet-dictionary idea from method 1 is given right after the imports below.)
"""
import torch as tch
import numpy as np
from elementsBody import *
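# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# a minimal, self-contained version of the facet-dictionary idea described in
# the module docstring above, assuming 8-node hexahedral connectivity lists
# with the usual bottom-face (0-3) / top-face (4-7) node ordering.
# Facets referenced by exactly one element lie on the outer surface.
def _surface_facets_sketch(elements):
    from collections import defaultdict
    # local node ids of the 6 facets of a hexahedron
    facet_local_ids = [(0, 1, 2, 3), (4, 5, 6, 7), (0, 1, 5, 4),
                       (3, 2, 6, 7), (0, 3, 7, 4), (1, 2, 6, 5)]
    facet2ele = defaultdict(list)
    for iele, ele in enumerate(elements):
        for ids in facet_local_ids:
            key = tuple(sorted(ele[i] for i in ids))
            facet2ele[key].append(iele)
    return [facet for facet, eles in facet2ele.items() if len(eles) == 1]
# ---------------------------------------------------------------------------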
def write_PBC_equation(file, obj, instance):
"""
    write the PBC for the 8 outer vertices, the 12 edges, and the 6 faces, with three steps:
    1. make the 8 outer vertexes form a parallelepiped (parallel hexahedron)
2. make 12 edges to satisfy PBC
3. make the inside nodes of face-pair to coincide
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
if not hasattr(obj, 'v_x0y0z0'):
obj.getEdgeVertexForPBC()
    ## 1.1 make the y0face a parallelogram
    file.write('************************** make the y0face to be a parallelogram \n')
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.v_x1y0z0, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.v_x0y0z0, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.v_x1y0z1, dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.v_x0y0z1, dm))
## 1.2 make vertexes of ylines to form parallel hexahedron
file.write('************************** make vertexes of 4 ylines to coincide \n')
for yline in obj.ylines[1:]:
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, yline['end'], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, yline['beg'], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.ylines[0]['end'], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.ylines[0]['beg'], dm))
# 2. make all outer edges to coincide
file.write('************************** make all outer edges to coincide \n')
xyzEdges = [obj.xlines, obj.ylines, obj.zlines]
for edges in xyzEdges:
for edge in edges[1:]:
for node in range(len(edge['inside'])):
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, edge['inside'][node], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, edge['beg'], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, edges[0]['inside'][node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, edges[0]['beg'], dm))
# 3. make all corresponding face-pairs to coincide
file.write('************************** make all corresponding face-pairs to coincide \n')
edgeNodes = set()
for edges in [obj.xlines, obj.ylines, obj.zlines]:
for edge in edges:
edgeNodes |= ({edge['beg']} | {edge['end']} | set(edge['inside']))
for iface, face in enumerate(obj.faceMatch):
for node in face:
for dm in [1, 2, 3]:
if node not in edgeNodes:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, node, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.baseNodes[iface][0], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, face[node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.baseNodes[iface][1], dm))
def write_PBC_equation_byGraph(file, obj, instance):
"""
use graph-method to get the PBC info,
    write the PBC for the 8 outer vertices, the 12 edges, and the 6 faces, with three steps:
    1. make the 8 outer vertexes form a parallelepiped (parallel hexahedron)
2. make 12 edges to satisfy PBC
3. make the inside nodes of face-pair to coincide
the node-number of megaElement
(composed of vertex of outer surface) is shown as follows,
v3------v7
/| /|
v0------v4|
| | | |
| v2----|-v6
y ^ |/ |/
| v1------v5
--->
/ x
z
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
obj.getFaceForPBC_byGraph()
obj.getEdgeForPBC_byGraph()
    ## 1.1 make the y0face a parallelogram
    file.write('************************** make the y0face to be a parallelogram \n')
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[6], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[2], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[5], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[1], dm))
## 1.2 make vertexes of ylines to form parallel hexahedron
file.write('************************** make vertexes of 4 ylines to coincide \n')
for i, j in [[7, 6], [3, 2], [0, 1]]:
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[i], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[j], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[4], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[5], dm))
# 2. make all outer edges to coincide
file.write('************************** make all outer edges to coincide \n')
edgeId = [
[[0, 4], [3, 7], [2, 6], [1, 5]], # xEdges
[[1, 0], [5, 4], [6, 7], [2, 3]], # yEdges
[[2, 1], [6, 5], [7, 4], [3, 0]] # zEdges
]
for edges in edgeId: # edges = xEdges or yEdges or zEdges
edge0 = (obj.megaElement[edges[0][0]], obj.megaElement[edges[0][1]])
if edge0 in obj.outlines:
for edge in edges[1:]:
edge1 = (obj.megaElement[edge[0]], obj.megaElement[edge[1]])
for node in range(len(obj.outlines[edge0])):
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.outlines[edge1][node], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, edge1[0], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.outlines[edge0][node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, edge0[0], dm))
# 3. make all corresponding face-pairs to coincide
file.write('************************** make all corresponding face-pairs to coincide \n')
for twoFacets in obj.faceMatch:
faceMatch = obj.faceMatch[twoFacets]
for node in faceMatch:
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, node, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, twoFacets[0], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, faceMatch[node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, twoFacets[4], dm))
def write_PBC_Nset(file, obj):
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
if not hasattr(obj, 'faceNode'):
obj.getFaceNode()
for node in obj.getFaceNode():
file.write('*Nset, nset=N{} \n'.format(node))
file.write('{}, \n'.format(node))
def write_nodes(file, obj):
nodes = obj.nodes
for node in nodes:
file.write(' {}, {}, {}, {} \n'.format(
node, nodes[node][0], nodes[node][1], nodes[node][2]
))
def adjustCoordinatesForPBC_byGraph(obj):
"""
use graph method to get the node-relation,
adjust the nodal coordiantes for periodic boundary condition (PBC)
make the nodes at face-pair to be strictly coincide at initial state
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
obj.getFaceForPBC_byGraph()
obj.getEdgeForPBC_byGraph()
makenp = False
for node in obj.nodes:
if type(obj.nodes[node]) == type([]):
makenp = True
break
if makenp:
for node in obj.nodes:
obj.nodes[node] = np.array(obj.nodes[node])
    ## 1.1 make the y0face a parallelogram
obj.nodes[obj.megaElement[6]] = \
obj.nodes[obj.megaElement[2]] + \
(obj.nodes[obj.megaElement[5]] - obj.nodes[obj.megaElement[1]])
## 1.2 make vertexes of ylines to form parallel hexahedron
for i, j in [[7, 6], [3, 2], [0, 1]]:
obj.nodes[obj.megaElement[i]] = \
obj.nodes[obj.megaElement[j]] + \
obj.nodes[obj.megaElement[4]] - obj.nodes[obj.megaElement[5]]
# 2. make all outer edges to coincide
edgeId = [
[[0, 4], [3, 7], [2, 6], [1, 5]], # xEdges
[[1, 0], [5, 4], [6, 7], [2, 3]], # yEdges
[[2, 1], [6, 5], [7, 4], [3, 0]] # zEdges
]
for edges in edgeId: # edges = xEdges or yEdges or zEdges
edge0 = (obj.megaElement[edges[0][0]], obj.megaElement[edges[0][1]])
if edge0 in obj.outlines:
for edge in edges[1:]:
edge1 = (obj.megaElement[edge[0]], obj.megaElement[edge[1]])
for node in range(len(obj.outlines[edge0])):
obj.nodes[obj.outlines[edge1][node]] = \
obj.nodes[edge1[0]] + \
obj.nodes[obj.outlines[edge0][node]] - obj.nodes[edge0[0]]
# 3. make all corresponding face-pairs to coincide
for twoFacets in obj.faceMatch:
faceMatch = obj.faceMatch[twoFacets]
for node in faceMatch:
obj.nodes[faceMatch[node]] = \
obj.nodes[twoFacets[4]] + \
obj.nodes[node] - obj.nodes[twoFacets[0]]
obj.nodesAdjusted = True
def adjustCoordinatesForPBC(obj):
"""
    adjust the nodal coordinates for periodic boundary condition (PBC)
make the nodes at face-pair to be strictly coincide at initial state
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
if not hasattr(obj, 'v_x0y0z0'):
obj.getEdgeVertexForPBC()
makenp = False
for node in obj.nodes:
if type(obj.nodes[node]) == type([]):
makenp = True
break
if makenp:
for node in obj.nodes:
obj.nodes[node] = np.array(obj.nodes[node])
    ## 1.1 make the y0face a parallelogram
obj.nodes[obj.v_x1y0z0] = \
obj.nodes[obj.v_x0y0z0] + \
(obj.nodes[obj.v_x1y0z1] - obj.nodes[obj.v_x0y0z1])
## 1.2 make vertexes of ylines to form parallel hexahedron
for yline in obj.ylines[1:]:
obj.nodes[yline['end']] = \
obj.nodes[yline['beg']] + \
obj.nodes[obj.ylines[0]['end']] - obj.nodes[obj.ylines[0]['beg']]
# 2. make all outer edges to coincide
xyzEdges = [obj.xlines, obj.ylines, obj.zlines]
for edges in xyzEdges:
for edge in edges[1:]:
for node in range(len(edge['inside'])):
obj.nodes[edge['inside'][node]] = \
obj.nodes[edge['beg']] + \
obj.nodes[edges[0]['inside'][node]] - obj.nodes[edges[0]['beg']]
# 3. make all corresponding face-pairs to coincide
edgeNodes = set()
for edges in [obj.xlines, obj.ylines, obj.zlines]:
for edge in edges:
edgeNodes |= ({edge['beg']} | {edge['end']} | set(edge['inside']))
for iface, face in enumerate(obj.faceMatch):
for node in face:
if node not in edgeNodes:
obj.nodes[node] = \
obj.nodes[obj.baseNodes[iface][0]] + \
obj.nodes[face[node]] - obj.nodes[obj.baseNodes[iface][1]]
obj.nodesAdjusted = True
if __name__ == "__main__":
testState = False
# get the inp file and the object
inpFile = input("\033[0;33;40m{}\033[0m".format("please insert the .inp file name (include the path): "))
job = inpFile.split("/")[-1].split(".inp")[0] if "/" in inpFile else inpFile.split("\\")[-1].split(".inp")[0]
path = inpFile.split(job + ".inp")[0]
obj = ElementsBody(*readInp(inpFile))
key = input("\033[35;1m{}\033[0m".format(
"which method do you want to use? \n"
"1: graph-method (recomended); \n"
"2: xMin, xMax, yMin, yMax, zMin, zMax; \n(insert 1 or 2): "
))
if key == "1":
getFaceForPBC = obj.getFaceForPBC_byGraph
writeEquations = write_PBC_equation_byGraph
adjustCoordinate = adjustCoordinatesForPBC_byGraph
elif key == "2":
getFaceForPBC = obj.getFaceForPBC
writeEquations = write_PBC_equation
adjustCoordinate = adjustCoordinatesForPBC
getFaceForPBC()
adjustCoor = input("do you want to adjust the coordinates for PBC? "
"(not recommended)\n\033[33m{}\033[0m".format('(y/n): '))
while adjustCoor not in ['y', 'n']:
adjustCoor = input('\033[33m{}\033[0m'.format('please insert "y" or "n": '))
if adjustCoor == 'y':
adjustCoordinate(obj)
if testState:
del obj.faceMatch
getFaceForPBC()
# find the instance name
instance = 'Part-1'
with open(inpFile, 'r') as file:
for line in file:
if '*Instance' in line and 'name=' in line:
instance = line.split(',')
instance = instance[1].split('=')
instance = instance[-1]
print('instance =', instance)
break
writeInp = input(
'ok to write the .inp file with PBC inside the file ? \033[36m{}\033[0m'.format('(y/n): ')
)
while writeInp not in ['y', 'n']:
writeInp = input('\033[31m{}\033[0m'.format(
'please insert "y" or "n": '
))
if writeInp == 'y':
newFileName = path + job + "_PBC.inp"
with open(newFileName, 'w') as newFile, open(inpFile, 'r') as oldFile:
clone = True
for line in oldFile:
if "Section:" in line and "**" in line:
write_PBC_Nset(newFile, obj)
elif '*End Assembly' in line:
writeEquations(newFile, obj, instance)
if clone == False and '*' in line:
clone = True
if clone:
newFile.write(line) # write the line from old file to new file
if "*Node\n" in line:
if hasattr(obj, 'nodesAdjusted'):
clone = False
print("\033[35;1m{}\033[0m".format("write new nodes for obj"))
write_nodes(newFile, obj)
print("\033[40;36;1m {} {} \033[35;1m {} \033[0m".format(
"file", newFileName, "has been written. "
))
elif input(
"\033[32;1m write nset- and equations- files for PBC? (y/n): \033[0m"
) in ["y", ""]:
# write the Nset
with open(path + '{}_nset.txt'.format(job), 'w') as file:
for node in obj.getFaceNode():
file.write('*Nset, nset=N{} \n'.format(node))
file.write('{}, \n'.format(node))
print("\033[40;36;1m {} {} \033[35;1m {} \033[0m".format(
"file", path + '{}_nset.txt'.format(job), "has been written. "
))
# write the equation for PBC
with open(path + '{}_equation.txt'.format(job), 'w') as file:
writeEquations(file, obj, instance)
print("\033[40;36;1m {} {} \033[35;1m {} \033[0m".format(
"file", path + '{}_equation.txt'.format(job), "has been written. "
))
|
[
"numpy.array"
] |
[((9765, 9790), 'numpy.array', 'np.array', (['obj.nodes[node]'], {}), '(obj.nodes[node])\n', (9773, 9790), True, 'import numpy as np\n'), ((11978, 12003), 'numpy.array', 'np.array', (['obj.nodes[node]'], {}), '(obj.nodes[node])\n', (11986, 12003), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from torchvision import datasets, transforms
DEFAULT_DATA_DIR = "/is/rg/al/Projects/prob-models/data/"
class ReconstructionDataset(torch.utils.data.Dataset):
def __init__(
self, name, split="train", flatten=True, train_split=0.8, data_dir=None
):
assert split in ("train", "val", "test")
if data_dir is None:
data_dir = DEFAULT_DATA_DIR
load_train = split == "train" or split == "val"
if name == "mnist":
dataset = datasets.MNIST(
data_dir,
train=load_train,
download=True,
transform=transforms.ToTensor(),
)
elif name == "fashion-mnist":
dataset = datasets.FashionMNIST(
data_dir,
train=load_train,
download=True,
transform=transforms.ToTensor(),
)
else:
raise ValueError("Unknown dataset name {name}")
self.images = torch.stack([x[0] for x in dataset], axis=0)
if split == "train" or split == "val":
train_samples = int(train_split * len(self.images))
rng = np.random.RandomState(45)
idxs = rng.permutation(len(self.images))
if split == "train":
train_idxs = idxs[:train_samples]
self.images = self.images[train_idxs]
else:
val_idxs = idxs[train_samples:]
self.images = self.images[val_idxs]
self._shape = self.images.shape[1:]
if flatten:
self.images = self.images.reshape(len(self.images), -1)
example = self[0]
if flatten:
self.input_dim = example[0].shape[0]
self.target_dim = example[1].shape[0]
else:
self.input_dim = example[0]
self.target_dim = example[1]
@property
def shape(self):
return self._shape
def to_tensors(self):
return self.images, self.images
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img = self.images[idx]
return img, img
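# --- hedged usage sketch (added for clarity; not part of the original file) ---
if __name__ == "__main__":
    # Downloads MNIST into DEFAULT_DATA_DIR on first use.
    ds = ReconstructionDataset("mnist", split="train", flatten=True)
    loader = torch.utils.data.DataLoader(ds, batch_size=128, shuffle=True)
    x, y = next(iter(loader))
    # For a reconstruction task the input and the target are the same image.
    print(x.shape, y.shape, ds.input_dim, ds.target_dim)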
|
[
"torch.stack",
"numpy.random.RandomState",
"torchvision.transforms.ToTensor"
] |
[((1027, 1071), 'torch.stack', 'torch.stack', (['[x[0] for x in dataset]'], {'axis': '(0)'}), '([x[0] for x in dataset], axis=0)\n', (1038, 1071), False, 'import torch\n'), ((1201, 1226), 'numpy.random.RandomState', 'np.random.RandomState', (['(45)'], {}), '(45)\n', (1222, 1226), True, 'import numpy as np\n'), ((656, 677), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (675, 677), False, 'from torchvision import datasets, transforms\n'), ((893, 914), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (912, 914), False, 'from torchvision import datasets, transforms\n')]
|
## l2_attack.py -- attack a network optimizing for l_2 distance
##
## Copyright (C) 2016, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
## Modified by <NAME> 2017
import tensorflow as tf
import numpy as np
BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary search
MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent
ABORT_EARLY = True # if we stop improving, abort gradient descent early
LEARNING_RATE = 1e-2 # larger values converge faster to less accurate results, default 1e-2
TARGETED = False # should we target one specific class? or just be wrong?
CONFIDENCE = 0 # how strong the adversarial example should be
INITIAL_CONST = 1e-3 # the initial constant c to pick as a first guess
class CarliniL2:
def __init__(self, sess, models, batch_size=1, confidence = CONFIDENCE,
targeted = TARGETED, learning_rate = LEARNING_RATE,
binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,
abort_early = ABORT_EARLY,
initial_const = INITIAL_CONST,
boxmin = -0.5, boxmax = 0.5):
"""
The L_2 optimized attack.
This attack is the most efficient and should be used as the primary
attack to evaluate potential defenses.
Returns adversarial examples for the supplied model.
confidence: Confidence of adversarial examples: higher produces examples
that are farther away, but more strongly classified as adversarial.
batch_size: Number of attacks to run simultaneously.
        targeted: True if we should perform a targeted attack, False otherwise.
learning_rate: The learning rate for the attack algorithm. Smaller values
produce better results but are slower to converge.
binary_search_steps: The number of times we perform binary search to
find the optimal tradeoff-constant between distance and confidence.
max_iterations: The maximum number of iterations. Larger values are more
accurate; setting too small will require a large learning rate and will
produce poor results.
abort_early: If true, allows early aborts if gradient descent gets stuck.
initial_const: The initial tradeoff-constant to use to tune the relative
importance of distance and confidence. If binary_search_steps is large,
the initial constant is not important.
boxmin: Minimum pixel value (default -0.5).
boxmax: Maximum pixel value (default 0.5).
"""
image_size, num_channels, num_labels = models[0].image_size, models[0].num_channels, models[0].num_labels
self.sess = sess
self.TARGETED = targeted
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.ABORT_EARLY = abort_early
self.CONFIDENCE = confidence
self.initial_const = initial_const
self.batch_size = batch_size
self.num_models = len(models)
self.num_labels = num_labels
shape = (batch_size,image_size,image_size,num_channels)
# the variable we're going to optimize over
modifier = tf.Variable(np.zeros(shape,dtype=np.float32))
# these are variables to be more efficient in sending data to tf
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
self.weights = tf.Variable(np.zeros(self.num_models), dtype=tf.float32)
# and here's what we use to assign them
self.assign_timg = tf.placeholder(tf.float32, shape)
self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels))
self.assign_const = tf.placeholder(tf.float32, [batch_size])
self.assign_weights = tf.placeholder(tf.float32, [self.num_models])
# the resulting image, tanh'd to keep bounded from boxmin to boxmax
self.boxmul = (boxmax - boxmin) / 2.
self.boxplus = (boxmin + boxmax) / 2.
self.newimg = tf.tanh(modifier + self.timg) * self.boxmul + self.boxplus
# prediction BEFORE-SOFTMAX of the model
self.outputs = [model.predict(self.newimg) for model in models]
# distance to the input data
self.l2dist = tf.reduce_sum(tf.square(self.newimg-(tf.tanh(self.timg) * self.boxmul + self.boxplus)),[1,2,3])
# compute the probability of the label class versus the maximum other
reals = []
others = []
        for i in range(self.num_models):
real = tf.reduce_sum((self.tlab) * self.outputs[i], 1)
other = tf.reduce_max((1 - self.tlab)*self.outputs[i] - (self.tlab*10000), 1)
reals.append(real)
others.append(other)
self.reals, self.others = reals, others
loss1list = []
if self.TARGETED:
            # if targeted, optimize for making the other class most likely
            for i in range(self.num_models):
loss1list.append(tf.maximum(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE)))
else:
# if untargeted, optimize for making this class least likely.
            for i in range(self.num_models):
loss1list.append(tf.maximum(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE)))
self.loss1list = loss1list # TODO: remove
# sum up the losses
self.loss2 = tf.reduce_sum(self.l2dist)
self.loss1 = tf.reduce_sum(self.const * tf.add_n(self.loss1list))
self.loss = self.loss1 + self.loss2
self.reals = reals
self.others = others
# Setup the adam optimizer and keep track of variables we're creating
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
self.train = optimizer.minimize(self.loss, var_list=[modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.setup.append(self.weights.assign(self.assign_weights))
self.init = tf.variables_initializer(var_list=[modifier]+new_vars)
def attack(self, imgs, targets, weights):
"""
Perform the L_2 attack on the given images for the given targets.
If self.targeted is true, then the targets represents the target labels.
If self.targeted is false, then targets are the original class labels.
"""
r = []
# print('go up to',len(imgs))
for i in range(0,len(imgs),self.batch_size):
# print('tick',i)
r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size], weights))
return np.array(r)
def attack_batch(self, imgs, labs, weights):
"""
Run the attack on a batch of images and labels.
"""
def compareLoss(x, y):
"""
x is an np array of shape num_models x num_classes
y is the true label or target label of the class
returns a number in [0,1] indicating the expected loss of the learner
"""
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
for v in x: # update the target scores for each individual prediction
if self.TARGETED:
v[y] -= self.CONFIDENCE
else:
v[y] += self.CONFIDENCE
x = np.argmax(x, 1) # these are the predictions of each hypothesis
if self.TARGETED:
return np.dot(x == y, weights)
else:
return np.dot(x != y, weights)
batch_size = self.batch_size
# convert to tanh-space
imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size)*self.initial_const
upper_bound = np.ones(batch_size)*1e10
# the best l2, score, and image attack
o_bestl2 = [1e10]*batch_size
o_bestscore = [-1]*batch_size
o_bestattack = [np.zeros(imgs[0].shape)]*batch_size
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10]*batch_size
bestscore = [0.0]*batch_size
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST,
self.assign_weights: weights})
# print "Outer Step ", outer_step, "Current C ", CONST, lower_bound, upper_bound
prev = 1e10 # used to be e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([self.train, self.loss,
self.l2dist, self.outputs,
self.newimg])
scores = np.array(scores).reshape(self.batch_size, self.num_models, self.num_labels)
# if iteration % 200 == 0:
# print(iteration, self.sess.run((self.loss, self.loss1, self.loss2)))
# check if we should abort search if we're getting nowhere. (check every 10%)
if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS * .10) == 0:
if l > prev*.9999:
break
prev = l
for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)):
currLoss = compareLoss(sc, np.argmax(batchlab[e])) # expected loss of the learner
if currLoss > bestscore[e]: # we've found a clear improvement for this value of c
bestl2[e] = l2
bestscore[e] = currLoss
if currLoss == bestscore[e] and l2 < bestl2[e]:
bestl2[e] = l2
if currLoss > o_bestscore[e]:
o_bestl2[e] = l2
o_bestscore[e] = currLoss
o_bestattack[e] = ii
if currLoss == o_bestscore[e] and l2 < o_bestl2[e]:
o_bestl2[e] = l2
o_bestattack[e] = ii
# finished trying out the adam optimizer for a particular c, now need to decide on the next value
# adjust the constant as needed
for e in range(batch_size):
if bestscore[e] == 1.0:
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e])/2
else:
lower_bound[e] = max(lower_bound[e],CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e])/2
else:
CONST[e] *= 100
# return the best solution found
return o_bestattack
|
[
"numpy.arctanh",
"tensorflow.reduce_sum",
"tensorflow.add_n",
"numpy.copy",
"numpy.argmax",
"tensorflow.maximum",
"tensorflow.variables_initializer",
"numpy.zeros",
"numpy.ones",
"tensorflow.placeholder",
"tensorflow.global_variables",
"numpy.array",
"tensorflow.tanh",
"numpy.dot",
"tensorflow.reduce_max",
"tensorflow.train.AdamOptimizer"
] |
[((3887, 3920), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'shape'], {}), '(tf.float32, shape)\n', (3901, 3920), True, 'import tensorflow as tf\n'), ((3948, 4000), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(batch_size, num_labels)'], {}), '(tf.float32, (batch_size, num_labels))\n', (3962, 4000), True, 'import tensorflow as tf\n'), ((4029, 4069), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size]'], {}), '(tf.float32, [batch_size])\n', (4043, 4069), True, 'import tensorflow as tf\n'), ((4100, 4145), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.num_models]'], {}), '(tf.float32, [self.num_models])\n', (4114, 4145), True, 'import tensorflow as tf\n'), ((5756, 5782), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.l2dist'], {}), '(self.l2dist)\n', (5769, 5782), True, 'import tensorflow as tf\n'), ((6120, 6162), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.LEARNING_RATE'], {}), '(self.LEARNING_RATE)\n', (6142, 6162), True, 'import tensorflow as tf\n'), ((6254, 6275), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6273, 6275), True, 'import tensorflow as tf\n'), ((6707, 6763), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': '([modifier] + new_vars)'}), '(var_list=[modifier] + new_vars)\n', (6731, 6763), True, 'import tensorflow as tf\n'), ((7325, 7336), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (7333, 7336), True, 'import numpy as np\n'), ((8384, 8442), 'numpy.arctanh', 'np.arctanh', (['((imgs - self.boxplus) / self.boxmul * 0.999999)'], {}), '((imgs - self.boxplus) / self.boxmul * 0.999999)\n', (8394, 8442), True, 'import numpy as np\n'), ((8519, 8539), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (8527, 8539), True, 'import numpy as np\n'), ((3398, 3431), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (3406, 3431), True, 'import numpy as np\n'), ((3538, 3553), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (3546, 3553), True, 'import numpy as np\n'), ((3605, 3639), 'numpy.zeros', 'np.zeros', (['(batch_size, num_labels)'], {}), '((batch_size, num_labels))\n', (3613, 3639), True, 'import numpy as np\n'), ((3691, 3711), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (3699, 3711), True, 'import numpy as np\n'), ((3766, 3791), 'numpy.zeros', 'np.zeros', (['self.num_models'], {}), '(self.num_models)\n', (3774, 3791), True, 'import numpy as np\n'), ((4876, 4921), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.tlab * self.outputs[i])', '(1)'], {}), '(self.tlab * self.outputs[i], 1)\n', (4889, 4921), True, 'import tensorflow as tf\n'), ((4944, 5015), 'tensorflow.reduce_max', 'tf.reduce_max', (['((1 - self.tlab) * self.outputs[i] - self.tlab * 10000)', '(1)'], {}), '((1 - self.tlab) * self.outputs[i] - self.tlab * 10000, 1)\n', (4957, 5015), True, 'import tensorflow as tf\n'), ((8556, 8575), 'numpy.ones', 'np.ones', (['batch_size'], {}), '(batch_size)\n', (8563, 8575), True, 'import numpy as np\n'), ((8617, 8636), 'numpy.ones', 'np.ones', (['batch_size'], {}), '(batch_size)\n', (8624, 8636), True, 'import numpy as np\n'), ((4336, 4365), 'tensorflow.tanh', 'tf.tanh', (['(modifier + self.timg)'], {}), '(modifier + self.timg)\n', (4343, 4365), True, 'import tensorflow as tf\n'), ((5831, 5855), 'tensorflow.add_n', 'tf.add_n', (['self.loss1list'], {}), '(self.loss1list)\n', (5839, 5855), True, 'import tensorflow as tf\n'), ((7815, 
7825), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (7822, 7825), True, 'import numpy as np\n'), ((8092, 8107), 'numpy.argmax', 'np.argmax', (['x', '(1)'], {}), '(x, 1)\n', (8101, 8107), True, 'import numpy as np\n'), ((8209, 8232), 'numpy.dot', 'np.dot', (['(x == y)', 'weights'], {}), '(x == y, weights)\n', (8215, 8232), True, 'import numpy as np\n'), ((8274, 8297), 'numpy.dot', 'np.dot', (['(x != y)', 'weights'], {}), '(x != y, weights)\n', (8280, 8297), True, 'import numpy as np\n'), ((8789, 8812), 'numpy.zeros', 'np.zeros', (['imgs[0].shape'], {}), '(imgs[0].shape)\n', (8797, 8812), True, 'import numpy as np\n'), ((5332, 5407), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))'], {}), '(0.0, self.weights[i] * (others[i] - reals[i] + self.CONFIDENCE))\n', (5342, 5407), True, 'import tensorflow as tf\n'), ((5577, 5652), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))'], {}), '(0.0, self.weights[i] * (reals[i] - others[i] + self.CONFIDENCE))\n', (5587, 5652), True, 'import tensorflow as tf\n'), ((6077, 6098), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6096, 6098), True, 'import tensorflow as tf\n'), ((9988, 10004), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (9996, 10004), True, 'import numpy as np\n'), ((10595, 10617), 'numpy.argmax', 'np.argmax', (['batchlab[e]'], {}), '(batchlab[e])\n', (10604, 10617), True, 'import numpy as np\n'), ((4630, 4648), 'tensorflow.tanh', 'tf.tanh', (['self.timg'], {}), '(self.timg)\n', (4637, 4648), True, 'import tensorflow as tf\n')]
|
# -*- encoding:utf-8 -*-
"""
Example buy-timing factor: dynamic adaptive double moving average strategy
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import numpy as np
from .ABuFactorBuyBase import AbuFactorBuyXD, BuyCallMixin
from ..IndicatorBu.ABuNDMa import calc_ma_from_prices
from ..CoreBu.ABuPdHelper import pd_resample
from ..TLineBu.ABuTL import AbuTLine
__author__ = '阿布'
__weixin__ = 'abu_quant'
# noinspection PyAttributeOutsideInit
class AbuDoubleMaBuy(AbuFactorBuyXD, BuyCallMixin):
"""示例买入动态自适应双均线策略"""
def _init_self(self, **kwargs):
"""
        Optional kwargs: fast: fast moving-average window; if not set, an adaptive dynamic fast window is used
        Optional kwargs: slow: slow moving-average window; if not set, an adaptive dynamic slow window is used
        Optional kwargs: resample_max: maximum resampling period for the dynamic slow window, default 100, i.e. the dynamic slow window is at most 100
        Optional kwargs: resample_min: minimum resampling period for the dynamic slow window, default 10, i.e. the dynamic slow window is at least 10
        Optional kwargs: change_threshold: selection threshold for the dynamic slow window, default 0.12
        (a short usage sketch is appended right after this method)
"""
        # fast moving-average window, defaults to a 5-day MA
self.ma_fast = kwargs.pop('fast', -1)
self.dynamic_fast = False
if self.ma_fast == -1:
self.ma_fast = 5
self.dynamic_fast = True
        # slow moving-average window, defaults to a 60-day MA
self.ma_slow = kwargs.pop('slow', -1)
self.dynamic_slow = False
if self.ma_slow == -1:
self.ma_slow = 60
self.dynamic_slow = True
        # maximum resampling period the dynamic slow line may use, default 100
self.resample_max = kwargs.pop('resample_max', 100)
        # minimum resampling period the dynamic slow line may use, default 10
self.resample_min = kwargs.pop('resample_min', 10)
        # selection threshold used to pick the dynamic slow line, default 0.12
self.change_threshold = kwargs.pop('change_threshold', 0.12)
if self.ma_fast >= self.ma_slow:
            # the slow period must be greater than the fast period
raise ValueError('ma_fast >= self.ma_slow !')
        # the xd window must be one day longer than ma_slow so that both today's and yesterday's ma are available to detect golden/death crosses
kwargs['xd'] = self.ma_slow + 1
        # once xd is set, the base class initialization for xd can be reused directly
super(AbuDoubleMaBuy, self)._init_self(**kwargs)
        # name displayed in the generated orders_pd output
self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow)
def _dynamic_calc_fast(self, today):
"""
        Dynamically decide the fast line value from how choppy the benchmark has been over the last month. Rules:
        if the benchmark's last month of movement can be expressed by:
        a 1st-degree fit: fast=slow * 0.05 eg: slow=60->fast=60*0.05=3
        a 2nd-degree fit: fast=slow * 0.15 eg: slow=60->fast=60*0.15=9
        a 3rd-degree fit: fast=slow * 0.3 eg: slow=60->fast=60*0.3=18
        a 4th-or-higher-degree fit: fast=slow * 0.5 eg: slow=60->fast=60*0.5=30
"""
        # the strategy holds self.benchmark, the trading benchmark object (an AbuBenchmark instance); benchmark.kl_pd is the corresponding market kline data
benchmark_df = self.benchmark.kl_pd
        # pick out today's benchmark bar
benchmark_today = benchmark_df[benchmark_df.date == today.date]
if benchmark_today.empty:
            # default value: 0.15 of the slow period
return math.ceil(self.ma_slow * 0.15)
        # prepare the start/end keys for slicing the benchmark's last month of data
end_key = int(benchmark_today.iloc[0].key)
start_key = end_key - 20
if start_key < 0:
            # default value: 0.15 of the slow period
return math.ceil(self.ma_slow * 0.15)
        # slice out the 20 trading days leading up to today
benchmark_month = benchmark_df[start_key:end_key + 1]
        # build an AbuTLine object from the benchmark's close prices of the last month
benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line')
        # compute the minimum polynomial degree needed to represent this month's trend curve
least = benchmark_month_line.show_least_valid_poly(show=False)
if least == 1:
            # a 1st-degree fit suffices: fast=slow * 0.05 eg: slow=60->fast=60*0.05=3
return math.ceil(self.ma_slow * 0.05)
elif least == 2:
            # a 2nd-degree fit suffices: fast=slow * 0.15 eg: slow=60->fast=60*0.15=9
return math.ceil(self.ma_slow * 0.15)
elif least == 3:
            # a 3rd-degree fit suffices: fast=slow * 0.3 eg: slow=60->fast=60*0.3=18
return math.ceil(self.ma_slow * 0.3)
else:
            # a 4th-or-higher-degree fit is needed: fast=slow * 0.5 eg: slow=60->fast=60*0.5=30
return math.ceil(self.ma_slow * 0.5)
def _dynamic_calc_slow(self, today):
"""
        Dynamically decide the slow line value. Rules:
        slice the recent financial time series and resample it with a candidate period,
        apply pct_change to the resampled result and take the absolute values,
        then average that series, which gives the mean change amplitude within the resampling period;
        the candidate period iterates over 10, 15, 20, 25, ... and the first period whose
        mean change amplitude is > 0.12 is taken as the slow value
"""
last_kl = self.past_today_kl(today, self.resample_max)
if last_kl.empty:
            # return the default slow value of 60
return 60
for slow in np.arange(self.resample_min, self.resample_max, 5):
rule = '{}D'.format(slow)
change = abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean()
"""
eg: pd_resample(last_kl.close, rule, how='mean')
2014-07-23 249.0728
2014-09-03 258.3640
2014-10-15 240.8663
2014-11-26 220.1552
2015-01-07 206.0070
2015-02-18 198.0932
2015-04-01 217.9791
2015-05-13 251.3640
2015-06-24 266.4511
2015-08-05 244.3334
2015-09-16 236.2250
2015-10-28 222.0441
2015-12-09 222.0574
2016-01-20 177.2303
2016-03-02 226.8766
2016-04-13 230.6000
2016-05-25 216.7596
2016-07-06 222.6420
abs(pd_resample(last_kl.close, rule, how='mean').pct_change())
2014-09-03 0.037
2014-10-15 0.068
2014-11-26 0.086
2015-01-07 0.064
2015-02-18 0.038
2015-04-01 0.100
2015-05-13 0.153
2015-06-24 0.060
2015-08-05 0.083
2015-09-16 0.033
2015-10-28 0.060
2015-12-09 0.000
2016-01-20 0.202
2016-03-02 0.280
2016-04-13 0.016
2016-05-25 0.060
2016-07-06 0.027
abs(pd_resample(last_kl.close, rule, how='mean').pct_change()).mean():
0.080
"""
if change > self.change_threshold:
"""
                return the first slow whose change exceeds change_threshold;
                change_threshold defaults to 0.12: breakout-style strategies usually need at least 0.08, and 0.12 leaves arbitrage room for the fast line
"""
return slow
        # if no value in np.arange(min, max, 5) qualifies, return max
return self.resample_max
def fit_month(self, today):
        # fit_month is executed once per month during the backtest
if self.dynamic_slow:
            # ma_slow must be recomputed first, because the dynamic fast value depends on slow
self.ma_slow = self._dynamic_calc_slow(today)
if self.dynamic_fast:
            # dynamically compute the fast line
self.ma_fast = self._dynamic_calc_fast(today)
        # after the dynamic recalculation, update the name displayed in the generated orders_pd output
self.factor_name = '{}:fast={},slow={}'.format(self.__class__.__name__, self.ma_fast, self.ma_slow)
# import logging
# logging.debug('{}:{}-fast={}|slow={}'.format(self.kl_pd.name, today.date, self.ma_fast, self.ma_slow))
def fit_day(self, today):
"""双均线买入择时因子,信号快线上穿慢行形成金叉做为买入信号"""
        # compute the fast line
fast_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_fast), min_periods=1)
        # compute the slow line
slow_line = calc_ma_from_prices(self.xd_kl.close, int(self.ma_slow), min_periods=1)
if len(fast_line) >= 2 and len(slow_line) >= 2:
            # today's fast line value
fast_today = fast_line[-1]
            # yesterday's fast line value
fast_yesterday = fast_line[-2]
            # today's slow line value
slow_today = slow_line[-1]
            # yesterday's slow line value
slow_yesterday = slow_line[-2]
if slow_yesterday >= fast_yesterday and fast_today > slow_today:
                # the fast line crosses above the slow line, forming a buy golden cross; today's close is used, buy tomorrow
return self.buy_tomorrow()
"""可以选择是否覆盖AbuFactorBuyXD中的buy_tomorrow来增大交易频率,默认基类中self.skip_days = self.xd降低了频率"""
# def buy_tomorrow(self):
# return self.make_buy_order(self.today_ind)
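# A hedged usage sketch, not part of the factor module itself: how a buy factor like this is
# commonly wired into abupy's backtest driver. The capital, symbol and fold count below are
# illustrative assumptions only.
# if __name__ == '__main__':
#     from abupy import abu
#     buy_factors = [{'class': AbuDoubleMaBuy}]  # all defaults -> fully dynamic fast/slow lines
#     result_tuple, kl_pd_manager = abu.run_loop_back(read_cash=1000000,
#                                                     buy_factors=buy_factors,
#                                                     sell_factors=[],
#                                                     choice_symbols=['usTSLA'],
#                                                     n_folds=2)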
|
[
"numpy.arange",
"math.ceil"
] |
[((4463, 4513), 'numpy.arange', 'np.arange', (['self.resample_min', 'self.resample_max', '(5)'], {}), '(self.resample_min, self.resample_max, 5)\n', (4472, 4513), True, 'import numpy as np\n'), ((2821, 2851), 'math.ceil', 'math.ceil', (['(self.ma_slow * 0.15)'], {}), '(self.ma_slow * 0.15)\n', (2830, 2851), False, 'import math\n'), ((3046, 3076), 'math.ceil', 'math.ceil', (['(self.ma_slow * 0.15)'], {}), '(self.ma_slow * 0.15)\n', (3055, 3076), False, 'import math\n'), ((3510, 3540), 'math.ceil', 'math.ceil', (['(self.ma_slow * 0.05)'], {}), '(self.ma_slow * 0.05)\n', (3519, 3540), False, 'import math\n'), ((3653, 3683), 'math.ceil', 'math.ceil', (['(self.ma_slow * 0.15)'], {}), '(self.ma_slow * 0.15)\n', (3662, 3683), False, 'import math\n'), ((3795, 3824), 'math.ceil', 'math.ceil', (['(self.ma_slow * 0.3)'], {}), '(self.ma_slow * 0.3)\n', (3804, 3824), False, 'import math\n'), ((3928, 3957), 'math.ceil', 'math.ceil', (['(self.ma_slow * 0.5)'], {}), '(self.ma_slow * 0.5)\n', (3937, 3957), False, 'import math\n')]
|
import math
import click
import os.path
import shutil
import atoms_simulator
import numpy
import matplotlib.pyplot as plt
def get_project_path():
return os.path.dirname(atoms_simulator.__file__)
def get_path(path):
i = 1
while True:
if not os.path.lexists(f"{path}{i}"):
return f"{path}{i}"
i += 1
@click.group()
def ats():
"""Allows to perform detailed tests using atoms_simulator module."""
pass
@ats.command()
def init():
"""Creates a settings_ats.toml file in the current directory."""
if not os.path.isfile("settings_ats.toml"):
source = os.path.join(get_project_path(), "assets/settings_source.toml")
target = os.path.join(os.getcwd(), "settings_ats.toml")
shutil.copy(source, target)
click.echo("Settings file generated successfully.")
else:
click.echo("Settings file already exists. Please delete it in order to generate a new configuration file.")
@ats.command()
@click.option("-g", "--graphics", "graphics", help="Turn on pygame simulation", is_flag=True)
@click.option("--no-save", "no_save", help="Disable saving the results of the test.", is_flag=True)
def test(graphics, no_save):
"""Performs a series of tests based on the data in the settings_ats.toml file."""
settings_ats = atoms_simulator.Settings("settings_ats.toml")
if not settings_ats.load():
click.echo("No settings file detected. Generate the file first.")
return
if settings_ats["N_min"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["N_step"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["N_number"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["R"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
click.echo("Starting simulation...")
n_stop = settings_ats["N_min"] + settings_ats["N_step"] * (settings_ats["N_number"] - 1)
# size = max([settings_ats['h'], settings_ats['w'], math.ceil((4 * (n_stop + 1)) ** 0.5)])
# settings_ats['h'] = size
# settings_ats['w'] = size
test_cases = [
[i for _ in range(settings_ats['R'])] for i in range(settings_ats["N_min"], n_stop + 1, settings_ats["N_step"])
]
bounce = numpy.empty((len(test_cases), settings_ats['R']), dtype=int)
bounce_results = numpy.empty(len(test_cases), dtype=int)
cop = numpy.empty((len(test_cases), settings_ats['R']), dtype=float)
cop_results = numpy.empty(len(test_cases), dtype=float)
settings_ats.new('N', settings_ats["N_min"])
with click.progressbar(
range(len(test_cases) * settings_ats['R'] - 1, -1, -1), label="Performing simulations:", show_eta=False
) as progress:
for i in progress:
settings_ats['N'] = test_cases[i // settings_ats['R']][i % settings_ats['R']]
try:
bounce[i // settings_ats['R']][i % settings_ats['R']], \
cop[i // settings_ats['R']][i % settings_ats['R']] = atoms_simulator.simulate(settings_ats, graphics)
except ValueError as error:
click.echo(f"\n{error} Please generate a new settings file.")
return
if i % settings_ats['R'] == 0:
bounce_results[i // settings_ats['R']] = int(bounce[i // settings_ats['R']].mean())
cop_results[i // settings_ats['R']] = cop[i // settings_ats['R']].mean()
if not no_save:
if not os.path.isdir(results_path := os.path.join(os.getcwd(), "ats_results")):
os.mkdir(results_path)
target_path = get_path(os.path.join(results_path, "data_batch"))
os.mkdir(target_path)
numpy.savetxt(os.path.join(target_path, "bounces.csv"), bounce_results)
numpy.savetxt(os.path.join(target_path, "change_of_position.csv"), cop_results)
settings_ats.save(target=os.path.join(target_path, "used.toml"))
@ats.command()
@click.option("-b", "--data_batch", "data_batch", prompt=True, help="Name of the previously generated data batch.")
def plot(data_batch):
"""Plots the previously generated data."""
if not os.path.isdir(results_path := os.path.join(os.getcwd(), "ats_results")):
click.echo(
"The ats_results catalog doesn't exist within the current working directory. Generate some data first."
)
return
if not os.path.isdir(path := os.path.join(os.getcwd(), "ats_results", data_batch)):
click.echo(
f"The ats_results/{data_batch} catalog doesn't exist within the current working directory."
)
return
target_path = get_path(os.path.join(results_path, "figures_batch"))
os.mkdir(target_path)
settings_ats = atoms_simulator.Settings(os.path.join(path, "used.toml"))
if not (settings_ats.load() and os.path.isfile(os.path.join(path, "bounces.csv"))
and os.path.isfile(os.path.join(path, "change_of_position.csv"))):
click.echo("This data batch is corrupted.")
return
n_stop = settings_ats["N_min"] + settings_ats["N_step"] * (settings_ats["N_number"] - 1)
x = numpy.arange(settings_ats["N_min"], n_stop + 1, settings_ats["N_step"])
bounce = numpy.loadtxt(os.path.join(path, "bounces.csv"))
plt.plot(x, bounce, marker='o')
plt.title(f"Zależność liczby zderzeń od ilości atomów, M = {settings_ats['M']}")
plt.xlabel("Liczba atomów w pojemniku")
plt.ylabel("Liczba odbić atomu czerownego")
plt.grid(True)
plt.savefig(os.path.join(target_path, "bounces.png"))
plt.clf()
cop = numpy.loadtxt(os.path.join(path, "change_of_position.csv"))
plt.plot(x, cop, marker='o')
plt.title(f"Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats['M']}")
plt.xlabel("Liczba atomów w pojemniku")
plt.ylabel("Średnia droga swobodna atomu czerwonego")
plt.grid(True)
plt.savefig(os.path.join(target_path, "change_of_position.png"))
plt.clf()
settings_ats.save(os.path.join(target_path, "used.toml"))
click.echo("Figures created successfullly.")
|
[
"matplotlib.pyplot.title",
"atoms_simulator.Settings",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"atoms_simulator.simulate",
"click.option",
"click.echo",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"click.group",
"matplotlib.pyplot.grid",
"shutil.copy",
"matplotlib.pyplot.xlabel"
] |
[((345, 358), 'click.group', 'click.group', ([], {}), '()\n', (356, 358), False, 'import click\n'), ((983, 1080), 'click.option', 'click.option', (['"""-g"""', '"""--graphics"""', '"""graphics"""'], {'help': '"""Turn on pygame simulation"""', 'is_flag': '(True)'}), "('-g', '--graphics', 'graphics', help=\n 'Turn on pygame simulation', is_flag=True)\n", (995, 1080), False, 'import click\n'), ((1077, 1180), 'click.option', 'click.option', (['"""--no-save"""', '"""no_save"""'], {'help': '"""Disable saving the results of the test."""', 'is_flag': '(True)'}), "('--no-save', 'no_save', help=\n 'Disable saving the results of the test.', is_flag=True)\n", (1089, 1180), False, 'import click\n'), ((4172, 4291), 'click.option', 'click.option', (['"""-b"""', '"""--data_batch"""', '"""data_batch"""'], {'prompt': '(True)', 'help': '"""Name of the previously generated data batch."""'}), "('-b', '--data_batch', 'data_batch', prompt=True, help=\n 'Name of the previously generated data batch.')\n", (4184, 4291), False, 'import click\n'), ((1310, 1355), 'atoms_simulator.Settings', 'atoms_simulator.Settings', (['"""settings_ats.toml"""'], {}), "('settings_ats.toml')\n", (1334, 1355), False, 'import atoms_simulator\n'), ((2057, 2093), 'click.echo', 'click.echo', (['"""Starting simulation..."""'], {}), "('Starting simulation...')\n", (2067, 2093), False, 'import click\n'), ((5346, 5417), 'numpy.arange', 'numpy.arange', (["settings_ats['N_min']", '(n_stop + 1)', "settings_ats['N_step']"], {}), "(settings_ats['N_min'], n_stop + 1, settings_ats['N_step'])\n", (5358, 5417), False, 'import numpy\n'), ((5484, 5515), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'bounce'], {'marker': '"""o"""'}), "(x, bounce, marker='o')\n", (5492, 5515), True, 'import matplotlib.pyplot as plt\n'), ((5520, 5605), 'matplotlib.pyplot.title', 'plt.title', (['f"""Zależność liczby zderzeń od ilości atomów, M = {settings_ats[\'M\']}"""'], {}), '(f"Zależność liczby zderzeń od ilości atomów, M = {settings_ats[\'M\']}"\n )\n', (5529, 5605), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Liczba atomów w pojemniku"""'], {}), "('Liczba atomów w pojemniku')\n", (5615, 5644), True, 'import matplotlib.pyplot as plt\n'), ((5649, 5692), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Liczba odbić atomu czerownego"""'], {}), "('Liczba odbić atomu czerownego')\n", (5659, 5692), True, 'import matplotlib.pyplot as plt\n'), ((5697, 5711), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5705, 5711), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5783), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5781, 5783), True, 'import matplotlib.pyplot as plt\n'), ((5859, 5887), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cop'], {'marker': '"""o"""'}), "(x, cop, marker='o')\n", (5867, 5887), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5992), 'matplotlib.pyplot.title', 'plt.title', (['f"""Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats[\'M\']}"""'], {}), '(\n f"Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats[\'M\']}"\n )\n', (5901, 5992), True, 'import matplotlib.pyplot as plt\n'), ((5987, 6026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Liczba atomów w pojemniku"""'], {}), "('Liczba atomów w pojemniku')\n", (5997, 6026), True, 'import matplotlib.pyplot as plt\n'), ((6031, 6084), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Średnia droga swobodna atomu czerwonego"""'], {}), "('Średnia droga swobodna atomu 
czerwonego')\n", (6041, 6084), True, 'import matplotlib.pyplot as plt\n'), ((6089, 6103), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6097, 6103), True, 'import matplotlib.pyplot as plt\n'), ((6177, 6186), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6184, 6186), True, 'import matplotlib.pyplot as plt\n'), ((6254, 6298), 'click.echo', 'click.echo', (['"""Figures created successfullly."""'], {}), "('Figures created successfullly.')\n", (6264, 6298), False, 'import click\n'), ((751, 778), 'shutil.copy', 'shutil.copy', (['source', 'target'], {}), '(source, target)\n', (762, 778), False, 'import shutil\n'), ((787, 838), 'click.echo', 'click.echo', (['"""Settings file generated successfully."""'], {}), "('Settings file generated successfully.')\n", (797, 838), False, 'import click\n'), ((857, 974), 'click.echo', 'click.echo', (['"""Settings file already exists. Please delete it in order to generate a new configuration file."""'], {}), "(\n 'Settings file already exists. Please delete it in order to generate a new configuration file.'\n )\n", (867, 974), False, 'import click\n'), ((1396, 1461), 'click.echo', 'click.echo', (['"""No settings file detected. Generate the file first."""'], {}), "('No settings file detected. Generate the file first.')\n", (1406, 1461), False, 'import click\n'), ((1523, 1610), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1533, 1610), False, 'import click\n'), ((1668, 1755), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1678, 1755), False, 'import click\n'), ((1815, 1902), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1825, 1902), False, 'import click\n'), ((1955, 2042), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1965, 2042), False, 'import click\n'), ((4448, 4573), 'click.echo', 'click.echo', (['"""The ats_results catalog doesn\'t exist within the current working directory. Generate some data first."""'], {}), '(\n "The ats_results catalog doesn\'t exist within the current working directory. Generate some data first."\n )\n', (4458, 4573), False, 'import click\n'), ((4697, 4810), 'click.echo', 'click.echo', (['f"""The ats_results/{data_batch} catalog doesn\'t exist within the current working directory."""'], {}), '(\n f"The ats_results/{data_batch} catalog doesn\'t exist within the current working directory."\n )\n', (4707, 4810), False, 'import click\n'), ((5186, 5229), 'click.echo', 'click.echo', (['"""This data batch is corrupted."""'], {}), "('This data batch is corrupted.')\n", (5196, 5229), False, 'import click\n'), ((3245, 3293), 'atoms_simulator.simulate', 'atoms_simulator.simulate', (['settings_ats', 'graphics'], {}), '(settings_ats, graphics)\n', (3269, 3293), False, 'import atoms_simulator\n'), ((3350, 3414), 'click.echo', 'click.echo', (['f"""\n{error} Please generate a new settings file."""'], {}), '(f"""\n{error} Please generate a new settings file.""")\n', (3360, 3414), False, 'import click\n')]
|
import pandas as pd
import numpy as np
import torch
def min_max_x(x):
for index, col in enumerate(x.T):
min_col = np.min(col)
max_col = np.max(col)
if min_col != max_col:
x.T[index] = (x.T[index] - min_col)/(max_col - min_col)
else:
x.T[index] = x.T[index] - min_col
return x
def load_dataset(path='./processed_dataset/data.csv', split=0.8, shuffle=True, seed=0):
np.random.seed(seed)
df = pd.read_csv(path)
df = df.values
if shuffle:
np.random.shuffle(df)
train = df[:int(df.shape[0]*split)]
validation = df[int(df.shape[0]*split):]
train_x, train_y = train.T[:12].T, train.T[12:].T
validation_x, validation_y = validation.T[:12].T, validation.T[12:].T
train_x, validation_x = min_max_x(train_x), min_max_x(validation_x)
train_x, train_y, validation_x, validation_y = train_x.astype(np.float32), train_y.astype(np.float32), validation_x.astype(np.float32), validation_y.astype(np.float32)
train_x, train_y, validation_x, validation_y = torch.from_numpy(train_x), torch.from_numpy(train_y), torch.from_numpy(validation_x), torch.from_numpy(validation_y)
return train_x, train_y, validation_x, validation_y
if __name__ == '__main__':
train_x, train_y, validation_x, validation_y = load_dataset()
print(train_x.shape, train_y.shape, validation_x.shape, validation_y.shape)
|
[
"numpy.random.seed",
"pandas.read_csv",
"numpy.min",
"numpy.max",
"numpy.random.shuffle",
"torch.from_numpy"
] |
[((384, 404), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (398, 404), True, 'import numpy as np\n'), ((411, 428), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (422, 428), True, 'import pandas as pd\n'), ((118, 129), 'numpy.min', 'np.min', (['col'], {}), '(col)\n', (124, 129), True, 'import numpy as np\n'), ((142, 153), 'numpy.max', 'np.max', (['col'], {}), '(col)\n', (148, 153), True, 'import numpy as np\n'), ((460, 481), 'numpy.random.shuffle', 'np.random.shuffle', (['df'], {}), '(df)\n', (477, 481), True, 'import numpy as np\n'), ((973, 998), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (989, 998), False, 'import torch\n'), ((1000, 1025), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (1016, 1025), False, 'import torch\n'), ((1027, 1057), 'torch.from_numpy', 'torch.from_numpy', (['validation_x'], {}), '(validation_x)\n', (1043, 1057), False, 'import torch\n'), ((1059, 1089), 'torch.from_numpy', 'torch.from_numpy', (['validation_y'], {}), '(validation_y)\n', (1075, 1089), False, 'import torch\n')]
|
import hashlib
import json
import numpy as np
from jina import Executor, DocumentArray, requests
class TagsHasher(Executor):
"""Convert an arbitrary set of tags into a fixed-dimensional matrix using the hashing trick.
    Unlike FeatureHasher, you should only use Jaccard/Hamming distance when searching documents
    embedded with TagsHasher. This is because the closeness of the value of each feature is meaningless:
    it is basically the result of a hash function. Hence, only the identity of a value matters.
More info: https://en.wikipedia.org/wiki/Feature_hashing
"""
def __init__(self, n_dim: int = 256, max_val: int = 65536, sparse: bool = False, **kwargs):
"""
:param n_dim: the dimensionality of each document in the output embedding.
Small numbers of features are likely to cause hash collisions,
but large numbers will cause larger overall parameter dimensions.
:param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.
Note that this feature requires ``scipy``
        :param max_val: upper bound for each hashed tag value; hashed values are taken modulo ``max_val``.
:param kwargs:
"""
super().__init__(**kwargs)
self.n_dim = n_dim
self.max_val = max_val
self.hash = hashlib.md5
self.sparse = sparse
def _any_hash(self, v):
try:
return int(v) # parse int parameter
except ValueError:
try:
return float(v) # parse float parameter
except ValueError:
if not v:
# ignore it when the parameter is empty
return 0
if isinstance(v, str):
v = v.strip()
if v.lower() in {'true', 'yes'}: # parse boolean parameter
return 1
if v.lower() in {'false', 'no'}:
return 0
if isinstance(v, (tuple, dict, list)):
v = json.dumps(v, sort_keys=True)
return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16)
@requests
def encode(self, docs: DocumentArray, **kwargs):
if self.sparse:
from scipy.sparse import csr_matrix
for idx, doc in enumerate(docs):
if doc.tags:
idxs, data = [], [] # sparse
table = np.zeros(self.n_dim) # dense
for k, v in doc.tags.items():
h = self._any_hash(k)
sign_h = np.sign(h)
col = h % self.n_dim
val = self._any_hash(v)
sign_v = np.sign(val)
val = val % self.max_val
idxs.append((0, col))
val = sign_h * sign_v * val
data.append(val)
table[col] += val
if self.sparse:
doc.embedding = csr_matrix(
(data, zip(*idxs)), shape=(1, self.n_dim)
)
else:
doc.embedding = table
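# A minimal usage sketch, assuming the standard jina Document API; the tag keys and values
# below are invented examples. It hashes the tags of two Documents into 32-dim embeddings.
if __name__ == '__main__':
    from jina import Document
    docs = DocumentArray([
        Document(tags={'city': 'berlin', 'price': 12.5}),
        Document(tags={'city': 'tokyo', 'price': 8.0, 'currency': 'JPY'}),
    ])
    TagsHasher(n_dim=32).encode(docs)
    print(docs[0].embedding.shape)  # (32,) dense vector, since sparse defaults to False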
|
[
"numpy.zeros",
"json.dumps",
"numpy.sign"
] |
[((2435, 2455), 'numpy.zeros', 'np.zeros', (['self.n_dim'], {}), '(self.n_dim)\n', (2443, 2455), True, 'import numpy as np\n'), ((2582, 2592), 'numpy.sign', 'np.sign', (['h'], {}), '(h)\n', (2589, 2592), True, 'import numpy as np\n'), ((2707, 2719), 'numpy.sign', 'np.sign', (['val'], {}), '(val)\n', (2714, 2719), True, 'import numpy as np\n'), ((2052, 2081), 'json.dumps', 'json.dumps', (['v'], {'sort_keys': '(True)'}), '(v, sort_keys=True)\n', (2062, 2081), False, 'import json\n')]
|
"""A class with static methods which can be used to access the data about
experiments.
This includes reading logs to parse success cases, reading images, costs
and speed.
"""
import numpy as np
from glob import glob
import torch
import pandas
import re
import json
from functools import lru_cache
import imageio
EPISODES = 561
class DataReader:
"""Container class for the static data access methods"""
EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json'
@staticmethod
@lru_cache(maxsize=1)
def get_experiments_mapping():
"""Reads the experiments mapping from a json file
EXPERIMENTS_MAPPING_FILE
"""
with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f:
x = json.load(f)
return x
@staticmethod
def get_images(experiment, seed, checkpoint, episode):
"""Get simulator images for a given model evaluation on a
given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png'
images = []
for image_path in sorted(glob(image_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_gradients(experiment, seed, checkpoint, episode):
"""Get gradients for a given model evaluation on a given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = []
for image_path in sorted(glob(gradient_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_last_gradient(experiment, seed, checkpoint, episode):
"""Get the last gradient for the model and episode
Returns:
(value, x, y) - tuple, where value is the max value of the
gradient, x, y are the location of this max
value in the gradient image.
"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = sorted(glob(gradient_paths))
if len(images) == 0:
return (0, 0, 0)
image_path = sorted(glob(gradient_paths))[-1]
image = imageio.imread(image_path)
mx_index = np.argmax(image)
value = image.flatten()[mx_index]
middle_x = image.shape[0] / 2
middle_y = image.shape[1] / 2
x = mx_index // image.shape[1]
x -= middle_x
y = mx_index % image.shape[1]
y -= middle_y
if value == 0:
return (0, 0, 0)
else:
return (value, x, y)
@staticmethod
def get_evaluation_log_file(experiment, seed, step):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{step}' + '.model.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
def get_training_log_file(experiment, seed):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'policy_networks/' + path[1] + \
f'-seed={seed}-novalue' + '.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
@lru_cache(maxsize=100)
def find_option_values(option,
experiment=None,
seed=None,
checkpoint=None):
"""Returns possible values for selected option.
Depending on option, returns:
if option == 'seed' - returns all seeds for given experiment.
                              experiment has to be passed.
if option == 'checkpoint' - returns all checkpoints for given
experiment and seed.
experiment and seed have to be
passed.
if option == 'episode' - returns all episodes for given
model
experiment, seed, and checkpoint have
to be passed.
"""
if option == 'seed':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' + path[1] + '*.log')
regexp = r"seed=(\d+)-"
elif option == 'checkpoint':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' +
path[1] + f'-seed={seed}' + '*.model.log')
regexp = r'-novaluestep(\d+)\.'
elif option == 'episode':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] +
'planning_results/videos_simulator/' +
path[1] +
f'-seed={seed}-novaluestep{checkpoint}.model/ep*')
regexp = r'model/ep(\d+)'
values = []
for log in logs:
m = re.search(regexp, log)
if m:
result = m.group(1)
values.append(int(result))
else:
print(f'{log} doesn\'t contain {option}')
# log files for each step are generated for seeds
values = list(set(values))
values.sort()
return values
@staticmethod
def get_success_rate(experiment, seed, step):
"""get the success rate for a given model"""
log_file = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(log_file, 'r') as f:
last_line = f.readlines()[-1]
last_colon = last_line.rfind(':')
success_rate = float(last_line[(last_colon + 2):])
return success_rate
@staticmethod
def get_success_rates_for_experiment(experiment):
"""get success rate arrays for each seed for the given experiment
across all checkpoints.
The resulting shape of the np array is
(seeds, checkpoints), where seeds is the number of seeds,
        and checkpoints is the number of checkpoints.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
for seed in seeds:
result[seed] = []
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
if len(steps) < len(checkpoints):
steps = checkpoints
for checkpoint in checkpoints:
success = DataReader.get_success_rate(
experiment, seed, checkpoint)
result[seed].append(success)
min_length = min(min_length, len(result[seed]))
max_length = max(max_length, len(result[seed]))
if len(result) > 0:
result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge')
for seed in result])
steps = np.array(steps)
return steps, result
else:
return None, None
@staticmethod
def get_learning_curves_for_seed(experiment, seed):
"""Gets the training and validation total losses for a given experiment
and seed.
"""
path = DataReader.get_training_log_file(experiment, seed)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*step\s(\d+).*\s\[.*\π\:\s(.*)\].*\[.*\π\:\s(.*)\]")
steps = []
train_losses = []
validation_losses = []
for line in lines:
match = regex.match(line)
if match:
steps.append(int(match.group(1)))
train_losses.append(float(match.group(2)))
validation_losses.append(float(match.group(3)))
result = dict(
steps=steps,
train_losses=train_losses,
validation_losses=validation_losses,
)
return result
@staticmethod
def get_learning_curves_for_experiment(experiment):
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
train = {}
validation = {}
for seed in seeds:
result[seed] = []
curves = DataReader.get_learning_curves_for_seed(experiment, seed)
for i, step in enumerate(curves['steps']):
train.setdefault(step, []).append(curves['train_losses'][i])
validation.setdefault(step, []).append(curves['validation_losses'][i])
train_means = []
train_stds = []
validation_means = []
validation_stds = []
for key in train:
train_means.append(float(np.mean(train[key])))
train_stds.append(float(np.std(train[key])))
validation_means.append(float(np.mean(validation[key])))
validation_stds.append(float(np.std(validation[key])))
result = dict(
steps=list(train.keys()),
train=(train_means, train_stds),
validation=(validation_means, validation_stds),
)
return result
@staticmethod
def get_episodes_with_outcome(experiment, seed, step, outcome):
"""Gets episodes with given outcome for a given model.
If outcome == 1, returns successful episodes,
if outcome == 0, returns failing episodes.
"""
path = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*ep:\s+(\d+).*\|\ssuccess:\s+(\d).*")
result = []
for line in lines:
match = regex.match(line)
if match:
if int(match.group(2)) == outcome:
result.append(int(match.group(1)))
return result
@staticmethod
def get_episode_success_map(experiment, seed, step):
"""Gets a 0-1 array of shape (episodes) where episodes is
the number of episodes.
Ith value in the result is 0 if the ith episode failed,
and 1 otherwise.
"""
successes = DataReader.get_episodes_with_outcome(experiment,
seed,
step,
1)
successes = np.array(successes) - 1
result = np.zeros(EPISODES)
result[successes] = 1
return result
@staticmethod
def get_episodes_success_counts(experiment):
"""For a given experiment, for all episodes checks performance of all
the models with all possible seeds and checkpoints, and returns
an array of shape (episodes) where episodes is the number of episodes,
where Ith value is the number of models in this experiment that
succeeded in this episode.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = np.zeros(EPISODES)
for seed in seeds:
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
for checkpoint in checkpoints:
success = DataReader.get_episodes_with_outcome(experiment,
seed,
checkpoint,
1)
success = np.array(success)
success = success - 1
one_hot = np.zeros((len(success), EPISODES))
one_hot[np.arange(len(success)), success] = 1
one_hot = np.sum(one_hot, axis=0),
one_hot = np.squeeze(one_hot)
result += one_hot
return result
@staticmethod
def get_episode_speeds(experiment, seed, checkpoint, episode):
""" Returns an array of speeds for given model and given episode"""
return DataReader.get_model_speeds(experiment,
seed,
checkpoint)[episode - 1]
@staticmethod
def get_episode_costs(experiment, seed, checkpoint, episode):
""" Returns an array of data frames with all the costs for
given evaluation """
costs = DataReader.get_model_costs(experiment,
seed,
checkpoint)
if costs is not None:
return costs[episode - 1]
else:
return None
@staticmethod
@lru_cache(maxsize=10)
def get_model_costs(experiment, seed, checkpoint):
""" Returns an array of costs for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs'
costs_paths = glob(regex)
if len(costs_paths) == 0:
print(
f'costs_paths for {regex} is {costs_paths} and it\'s length is not 1')
return None
else:
raw_costs = torch.load(costs_paths[0])
# list of DataFrame, one per episode
costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs]
return costs
@staticmethod
@lru_cache(maxsize=10)
def get_model_speeds(experiment, seed, checkpoint):
""" Returns an array of speeds for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
f'states_paths for {regex} is {states_paths} and it\'s length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states[:, 2:].norm(dim=1)) # is it correct
return result
@staticmethod
@lru_cache(maxsize=10)
def get_model_states(experiment, seed, checkpoint):
""" Returns an array of states for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
f'states_paths for {regex} is {states_paths} and it\'s length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states)
return result
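# A short usage sketch; 'experiment-name' is a placeholder and must match a key in
# experiments_mapping.json. It pulls the per-seed success-rate curves across checkpoints.
if __name__ == '__main__':
    steps, success_rates = DataReader.get_success_rates_for_experiment('experiment-name')
    if steps is not None:
        print(steps, success_rates.shape)  # success_rates has shape (seeds, checkpoints)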
|
[
"json.load",
"numpy.sum",
"torch.stack",
"numpy.argmax",
"numpy.std",
"imageio.imread",
"torch.load",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"glob.glob",
"numpy.squeeze",
"functools.lru_cache",
"re.search",
"re.compile"
] |
[((494, 514), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (503, 514), False, 'from functools import lru_cache\n'), ((4233, 4255), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(100)'}), '(maxsize=100)\n', (4242, 4255), False, 'from functools import lru_cache\n'), ((13831, 13852), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (13840, 13852), False, 'from functools import lru_cache\n'), ((14648, 14669), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (14657, 14669), False, 'from functools import lru_cache\n'), ((15584, 15605), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (15593, 15605), False, 'from functools import lru_cache\n'), ((2863, 2889), 'imageio.imread', 'imageio.imread', (['image_path'], {}), '(image_path)\n', (2877, 2889), False, 'import imageio\n'), ((2909, 2925), 'numpy.argmax', 'np.argmax', (['image'], {}), '(image)\n', (2918, 2925), True, 'import numpy as np\n'), ((3602, 3613), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (3606, 3613), False, 'from glob import glob\n'), ((4062, 4073), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (4066, 4073), False, 'from glob import glob\n'), ((8529, 8607), 're.compile', 're.compile', (['""".*step\\\\s(\\\\d+).*\\\\s\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\].*\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\]"""'], {}), "('.*step\\\\s(\\\\d+).*\\\\s\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\].*\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\]')\n", (8539, 8607), False, 'import re\n'), ((10741, 10797), 're.compile', 're.compile', (['""".*ep:\\\\s+(\\\\d+).*\\\\|\\\\ssuccess:\\\\s+(\\\\d).*"""'], {}), "('.*ep:\\\\s+(\\\\d+).*\\\\|\\\\ssuccess:\\\\s+(\\\\d).*')\n", (10751, 10797), False, 'import re\n'), ((11619, 11637), 'numpy.zeros', 'np.zeros', (['EPISODES'], {}), '(EPISODES)\n', (11627, 11637), True, 'import numpy as np\n'), ((12189, 12207), 'numpy.zeros', 'np.zeros', (['EPISODES'], {}), '(EPISODES)\n', (12197, 12207), True, 'import numpy as np\n'), ((14197, 14208), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (14201, 14208), False, 'from glob import glob\n'), ((15018, 15029), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (15022, 15029), False, 'from glob import glob\n'), ((15210, 15233), 'torch.load', 'torch.load', (['states_path'], {}), '(states_path)\n', (15220, 15233), False, 'import torch\n'), ((15954, 15965), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (15958, 15965), False, 'from glob import glob\n'), ((16146, 16169), 'torch.load', 'torch.load', (['states_path'], {}), '(states_path)\n', (16156, 16169), False, 'import torch\n'), ((735, 747), 'json.load', 'json.load', (['f'], {}), '(f)\n', (744, 747), False, 'import json\n'), ((1271, 1288), 'glob.glob', 'glob', (['image_paths'], {}), '(image_paths)\n', (1275, 1288), False, 'from glob import glob\n'), ((1896, 1916), 'glob.glob', 'glob', (['gradient_paths'], {}), '(gradient_paths)\n', (1900, 1916), False, 'from glob import glob\n'), ((2713, 2733), 'glob.glob', 'glob', (['gradient_paths'], {}), '(gradient_paths)\n', (2717, 2733), False, 'from glob import glob\n'), ((5274, 5329), 'glob.glob', 'glob', (["(path[0] + 'planning_results/' + path[1] + '*.log')"], {}), "(path[0] + 'planning_results/' + path[1] + '*.log')\n", (5278, 5329), False, 'from glob import glob\n'), ((6047, 6069), 're.search', 're.search', (['regexp', 'log'], {}), '(regexp, log)\n', (6056, 6069), False, 'import re\n'), ((8100, 8115), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (8108, 8115), True, 
'import numpy as np\n'), ((11578, 11597), 'numpy.array', 'np.array', (['successes'], {}), '(successes)\n', (11586, 11597), True, 'import numpy as np\n'), ((14411, 14437), 'torch.load', 'torch.load', (['costs_paths[0]'], {}), '(costs_paths[0])\n', (14421, 14437), False, 'import torch\n'), ((15432, 15459), 'torch.stack', 'torch.stack', (['episode_states'], {}), '(episode_states)\n', (15443, 15459), False, 'import torch\n'), ((16368, 16395), 'torch.stack', 'torch.stack', (['episode_states'], {}), '(episode_states)\n', (16379, 16395), False, 'import torch\n'), ((2821, 2841), 'glob.glob', 'glob', (['gradient_paths'], {}), '(gradient_paths)\n', (2825, 2841), False, 'from glob import glob\n'), ((5490, 5569), 'glob.glob', 'glob', (["(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log')"], {}), "(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log')\n", (5494, 5569), False, 'from glob import glob\n'), ((12694, 12711), 'numpy.array', 'np.array', (['success'], {}), '(success)\n', (12702, 12711), True, 'import numpy as np\n'), ((12950, 12969), 'numpy.squeeze', 'np.squeeze', (['one_hot'], {}), '(one_hot)\n', (12960, 12969), True, 'import numpy as np\n'), ((5759, 5877), 'glob.glob', 'glob', (["(path[0] + 'planning_results/videos_simulator/' + path[1] +\n f'-seed={seed}-novaluestep{checkpoint}.model/ep*')"], {}), "(path[0] + 'planning_results/videos_simulator/' + path[1] +\n f'-seed={seed}-novaluestep{checkpoint}.model/ep*')\n", (5763, 5877), False, 'from glob import glob\n'), ((9900, 9919), 'numpy.mean', 'np.mean', (['train[key]'], {}), '(train[key])\n', (9907, 9919), True, 'import numpy as np\n'), ((9958, 9976), 'numpy.std', 'np.std', (['train[key]'], {}), '(train[key])\n', (9964, 9976), True, 'import numpy as np\n'), ((10021, 10045), 'numpy.mean', 'np.mean', (['validation[key]'], {}), '(validation[key])\n', (10028, 10045), True, 'import numpy as np\n'), ((10089, 10112), 'numpy.std', 'np.std', (['validation[key]'], {}), '(validation[key])\n', (10095, 10112), True, 'import numpy as np\n'), ((12899, 12922), 'numpy.sum', 'np.sum', (['one_hot'], {'axis': '(0)'}), '(one_hot, axis=0)\n', (12905, 12922), True, 'import numpy as np\n'), ((7959, 7981), 'numpy.array', 'np.array', (['result[seed]'], {}), '(result[seed])\n', (7967, 7981), True, 'import numpy as np\n')]
|
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mutable QNode, complicated primary parameters benchmark.
"""
# pylint: disable=invalid-name
import numpy as np
import pennylane as qml
import benchmark_utils as bu
def circuit(p, *, aux=0):
"""A very simple, lightweight mutable quantum circuit."""
qml.RX(p[aux][2], wires=[0])
return qml.expval(qml.PauliZ(0))
class Benchmark(bu.BaseBenchmark):
"""
This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for
mutable QNodes, using an extreme case where the QNode has lots of primary parameters with
a complicated nested structure, but relatively few auxiliary parameters, and only a few
of the primary parameters are actually used in the circuit.
When the QNode is constructed, a VariableRef is built for each primary parameter,
and the qfunc re-evaluated. In this test this is meant to be time-consuming, but it is only
strictly necessary if the auxiliary parameters change.
The main reasons why there are significant differences in the execution speed of this test
between different PL commits:
* :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params
have changed.
* Most of the primary params are not used in the circuit, hence
:meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives
wrt. them are always zero.
"""
name = "mutable qnode, complicated primary params"
min_wires = 1
n_vals = range(6, 13, 1)
def __init__(self, device=None, verbose=False):
super().__init__(device, verbose)
self.qnode = None
def setup(self):
self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None)
def benchmark(self, n=8):
# n is the number of levels in the primary parameter tree.
# Hence the number of primary parameters depends exponentially on n.
def create_params(n):
"""Recursively builds a tree structure with n levels."""
if n <= 0:
# the leaves are arrays
return np.random.randn(2)
# the other nodes have two branches and a scalar
return [create_params(n - 1), create_params(n - 1), np.random.randn()]
p = create_params(n)
def evaluate(aux):
"""Evaluates the qnode using the given auxiliary params."""
res = self.qnode(p, aux=aux)
# check the result
assert np.allclose(res, np.cos(p[aux][2]))
# first evaluation and construction
evaluate(0)
# evaluate the node several times more with a different auxiliary argument
# (it does not matter if p changes or not, the VariableRefs handle it)
for _ in range(1, 10):
# If we had evaluate(i % 2) here instead the auxiliary arguments would change
# every time, which would negate most possible speedups.
evaluate(1)
return True
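# A hedged usage sketch; how BaseBenchmark consumes the device is defined in benchmark_utils,
# so the construction below is an assumption for illustration only.
# if __name__ == '__main__':
#     dev = qml.device('default.qubit', wires=1)
#     bench = Benchmark(device=dev, verbose=True)
#     bench.setup()
#     bench.benchmark(n=8)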
|
[
"benchmark_utils.create_qnode",
"numpy.random.randn",
"pennylane.RX",
"numpy.cos",
"pennylane.PauliZ"
] |
[((861, 889), 'pennylane.RX', 'qml.RX', (['p[aux][2]'], {'wires': '[0]'}), '(p[aux][2], wires=[0])\n', (867, 889), True, 'import pennylane as qml\n'), ((912, 925), 'pennylane.PauliZ', 'qml.PauliZ', (['(0)'], {}), '(0)\n', (922, 925), True, 'import pennylane as qml\n'), ((2281, 2348), 'benchmark_utils.create_qnode', 'bu.create_qnode', (['circuit', 'self.device'], {'mutable': '(True)', 'interface': 'None'}), '(circuit, self.device, mutable=True, interface=None)\n', (2296, 2348), True, 'import benchmark_utils as bu\n'), ((2710, 2728), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (2725, 2728), True, 'import numpy as np\n'), ((2854, 2871), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2869, 2871), True, 'import numpy as np\n'), ((3111, 3128), 'numpy.cos', 'np.cos', (['p[aux][2]'], {}), '(p[aux][2])\n', (3117, 3128), True, 'import numpy as np\n')]
|
import numpy as np
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
dot_product = np.dot(a, b)
print(dot_product)
|
[
"numpy.dot",
"numpy.array"
] |
[((24, 43), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (32, 43), True, 'import numpy as np\n'), ((48, 67), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (56, 67), True, 'import numpy as np\n'), ((83, 95), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (89, 95), True, 'import numpy as np\n')]
|
from contextlib import ExitStack as DoesNotRaise
from typing import Tuple, Optional
import numpy as np
import pytest
from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch
@pytest.mark.parametrize(
"box_true, box_detection, expected_result, exception",
[
(None, None, None, pytest.raises(ValueError)),
((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)),
((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)),
([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)),
((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()),
((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()),
((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()),
((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()),
((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()),
((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()),
((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()),
((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()),
((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()),
((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()),
((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise())
]
)
def test_box_iou(
box_true: Tuple[float, float, float, float],
box_detection: Tuple[float, float, float, float],
expected_result: Optional[float],
exception: Exception
) -> None:
with exception:
result = box_iou(box_true=box_true, box_detection=box_detection)
assert result == expected_result
@pytest.mark.parametrize(
"boxes_true, boxes_detection, expected_result, exception",
[
(
None,
np.array([
[0., 0.25, 1., 1.25]
]),
None,
pytest.raises(ValueError)
),
(
np.array([
[0., 0.25, 1., 1.25]
]),
None,
None,
pytest.raises(ValueError)
),
(
np.array([
[0., 0., 1., 1.],
[2., 2., 2.5, 2.5]
]),
np.array([
[0., 0., 1., 1.],
[2., 2., 2.5, 2.5]
]),
np.array([
[1., 0.],
[0., 1.]
]),
DoesNotRaise()
),
(
np.array([
[0., 0., 1., 1.],
[0., 0.75, 1., 1.75]
]),
np.array([
[0., 0.25, 1., 1.25]
]),
np.array([
[0.6],
[1/3]
]),
DoesNotRaise()
),
(
np.array([
[0., 0., 1., 1.],
[0., 0.75, 1., 1.75]
]),
np.array([
[0., 0.25, 1., 1.25],
[0., 0.75, 1., 1.75],
[1., 1., 2., 2.]
]),
np.array([
[0.6, 1/7, 0],
[1/3, 1., 0]
]),
DoesNotRaise()
)
]
)
def test_box_iou_batch(
boxes_true: np.ndarray,
boxes_detection: np.ndarray,
expected_result: Optional[float],
exception: Exception
) -> None:
with exception:
result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection)
np.testing.assert_array_equal(result, expected_result)
QUARTER_MASK = np.zeros((10, 10)).astype('uint8')
QUARTER_MASK[0:5, 0:5] = 1
@pytest.mark.parametrize(
"mask_true, mask_detection, expected_result, exception",
[
(None, None, None, pytest.raises(ValueError)),
(np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)),
(np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)),
(np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)),
(np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)),
(np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()),
(np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()),
(np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()),
(np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()),
(np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise())
]
)
def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None:
with exception:
result = mask_iou(mask_true=mask_true, mask_detection=mask_detection)
assert result == expected_result
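# Convenience entry point: running this module directly is equivalent to invoking pytest on it.
if __name__ == '__main__':
    pytest.main([__file__])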
|
[
"numpy.testing.assert_array_equal",
"numpy.zeros",
"numpy.ones",
"contextlib.ExitStack",
"pytest.raises",
"numpy.array",
"onemetric.cv.utils.iou.mask_iou",
"onemetric.cv.utils.iou.box_iou_batch",
"onemetric.cv.utils.iou.box_iou"
] |
[((1550, 1605), 'onemetric.cv.utils.iou.box_iou', 'box_iou', ([], {'box_true': 'box_true', 'box_detection': 'box_detection'}), '(box_true=box_true, box_detection=box_detection)\n', (1557, 1605), False, 'from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch\n'), ((3366, 3435), 'onemetric.cv.utils.iou.box_iou_batch', 'box_iou_batch', ([], {'boxes_true': 'boxes_true', 'boxes_detection': 'boxes_detection'}), '(boxes_true=boxes_true, boxes_detection=boxes_detection)\n', (3379, 3435), False, 'from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch\n'), ((3444, 3498), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (3473, 3498), True, 'import numpy as np\n'), ((3516, 3534), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3524, 3534), True, 'import numpy as np\n'), ((4839, 4899), 'onemetric.cv.utils.iou.mask_iou', 'mask_iou', ([], {'mask_true': 'mask_true', 'mask_detection': 'mask_detection'}), '(mask_true=mask_true, mask_detection=mask_detection)\n', (4847, 4899), False, 'from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch\n'), ((307, 332), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (320, 332), False, 'import pytest\n'), ((382, 407), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (395, 407), False, 'import pytest\n'), ((457, 482), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (470, 482), False, 'import pytest\n'), ((536, 561), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (549, 561), False, 'import pytest\n'), ((613, 627), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (625, 627), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((678, 692), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (690, 692), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((744, 758), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (756, 758), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((810, 824), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (822, 824), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((881, 895), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (893, 895), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((952, 966), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (964, 966), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1023, 1037), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1035, 1037), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1094, 1108), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1106, 1108), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1160, 1174), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1172, 1174), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1227, 1241), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1239, 1241), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1294, 1308), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1306, 1308), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1784, 1818), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25]]'], {}), '([[0.0, 0.25, 1.0, 1.25]])\n', (1792, 1818), True, 'import numpy as np\n'), ((1878, 1903), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1891, 1903), 
False, 'import pytest\n'), ((1937, 1971), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25]]'], {}), '([[0.0, 0.25, 1.0, 1.25]])\n', (1945, 1971), True, 'import numpy as np\n'), ((2049, 2074), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2062, 2074), False, 'import pytest\n'), ((2108, 2162), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]])\n', (2116, 2162), True, 'import numpy as np\n'), ((2216, 2270), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]])\n', (2224, 2270), True, 'import numpy as np\n'), ((2324, 2358), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (2332, 2358), True, 'import numpy as np\n'), ((2414, 2428), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (2426, 2428), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((2462, 2518), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]])\n', (2470, 2518), True, 'import numpy as np\n'), ((2572, 2606), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25]]'], {}), '([[0.0, 0.25, 1.0, 1.25]])\n', (2580, 2606), True, 'import numpy as np\n'), ((2648, 2674), 'numpy.array', 'np.array', (['[[0.6], [1 / 3]]'], {}), '([[0.6], [1 / 3]])\n', (2656, 2674), True, 'import numpy as np\n'), ((2732, 2746), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (2744, 2746), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((2780, 2836), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]])\n', (2788, 2836), True, 'import numpy as np\n'), ((2890, 2975), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25], [0.0, 0.75, 1.0, 1.75], [1.0, 1.0, 2.0, 2.0]]'], {}), '([[0.0, 0.25, 1.0, 1.25], [0.0, 0.75, 1.0, 1.75], [1.0, 1.0, 2.0, 2.0]]\n )\n', (2898, 2975), True, 'import numpy as np\n'), ((3038, 3082), 'numpy.array', 'np.array', (['[[0.6, 1 / 7, 0], [1 / 3, 1.0, 0]]'], {}), '([[0.6, 1 / 7, 0], [1 / 3, 1.0, 0]])\n', (3046, 3082), True, 'import numpy as np\n'), ((3137, 3151), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (3149, 3151), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((3700, 3725), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3713, 3725), False, 'import pytest\n'), ((3815, 3840), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3828, 3840), False, 'import pytest\n'), ((3930, 3955), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3943, 3955), False, 'import pytest\n'), ((4044, 4069), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4057, 4069), False, 'import pytest\n'), ((4160, 4185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4173, 4185), False, 'import pytest\n'), ((4272, 4286), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4284, 4286), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4373, 4387), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4385, 4387), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4477, 4491), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4489, 4491), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4577, 4591), 'contextlib.ExitStack', 
'DoesNotRaise', ([], {}), '()\n', (4589, 4591), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4658, 4672), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4670, 4672), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((3737, 3755), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3745, 3755), True, 'import numpy as np\n'), ((3773, 3791), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (3781, 3791), True, 'import numpy as np\n'), ((3852, 3870), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (3860, 3870), True, 'import numpy as np\n'), ((3888, 3906), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3896, 3906), True, 'import numpy as np\n'), ((3967, 3984), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (3974, 3984), True, 'import numpy as np\n'), ((4002, 4020), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4010, 4020), True, 'import numpy as np\n'), ((4120, 4138), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4128, 4138), True, 'import numpy as np\n'), ((4197, 4214), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4204, 4214), True, 'import numpy as np\n'), ((4232, 4250), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4240, 4250), True, 'import numpy as np\n'), ((4298, 4316), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4306, 4316), True, 'import numpy as np\n'), ((4334, 4351), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4341, 4351), True, 'import numpy as np\n'), ((4399, 4417), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4407, 4417), True, 'import numpy as np\n'), ((4435, 4453), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4443, 4453), True, 'import numpy as np\n'), ((4503, 4520), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4510, 4520), True, 'import numpy as np\n'), ((4538, 4555), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4545, 4555), True, 'import numpy as np\n'), ((4603, 4620), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4610, 4620), True, 'import numpy as np\n'), ((4081, 4098), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4088, 4098), True, 'import numpy as np\n')]
|
"""
@author: <NAME>,<NAME>
"""
import numpy as np
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
st.title("Synapse Unsupervised Models")
uploaded_file = st.file_uploader("Choose a csv file", type="csv")
if uploaded_file is not None:
data = pd.read_csv(uploaded_file)
st.write(data)
if uploaded_file is not None:
drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list())
X = data.drop(drop_column,axis = 1)
st.header('X : Features')
st.write(X)
if uploaded_file is not None:
if st.sidebar.checkbox("Feature Normalization"):
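        # Z-score style normalization of the feature matrix before modelling.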
X = (X - np.mean(X))/np.std(X)
st.header("X : Features (Normalized)")
st.write(X)
class Kmeans:
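    # Minimal K-means (Lloyd's algorithm): random centroid initialization, a fixed
    # number of assignment/update sweeps, and 2-D/3-D cluster plots via Plotly.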
def initialize_var(self,X,K=3):
X = np.array(X)
m,n = X.shape
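        # Centroids are drawn from a standard normal, so they may start far from
        # un-normalized data; enabling "Feature Normalization" helps here.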
c = np.random.randn(K,n)
return X,c,K
def assignment_move(self,X,c,K):
m = X.shape[0]
idx = np.zeros(m)
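        # Fixed number of sweeps: the assignment step picks the nearest centroid by
        # squared Euclidean distance, the update step moves each centroid to the mean
        # of its assigned points (an empty cluster would make the mean return NaN).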
for o in range(10):
for i in range(m):
temp = np.zeros(K)
for j in range(K):
temp[j] = np.sum((X[i,:] - c[j,:]) ** 2)
idx[i] = np.argmin(temp)
for p in range(K):
points = [X[j] for j in range(len(X)) if idx[j] == p]
c[p] = np.mean(points, axis=0)
return idx,c
def test(self,X,K=3):
self.X,c,self.K = self.initialize_var(X,K)
self.idx,self.c = self.assignment_move(self.X,c,self.K)
X_ = pd.DataFrame(self.X)
idx_ = pd.DataFrame(self.idx)
data = pd.concat([X_,idx_],axis =1)
return self.c,data
def plot_clusters(self,d):
a={}
if self.X.shape[1]==2:
for i in range(2):
a['a'+str(i+1)] = self.X[:,i:i+1]
            a['a1'] = np.reshape(a['a1'], a['a1'].shape[0])
            a['a2'] = np.reshape(a['a2'], a['a2'].shape[0])
fig = go.Figure(data=go.Scatter(x=a['a1'],
y=a['a2'],
mode='markers',
marker=dict(color=self.idx)
))
st.plotly_chart(fig)
elif self.X.shape[1]==3:
d.columns = ['x','y','z','l']
fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l')
st.plotly_chart(fig)
        else:
            # Only 2-D and 3-D feature spaces can be plotted.
            st.error("Your data is in Higher Dimension state")
class PCA:
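    # PCA via SVD of the feature covariance matrix: K_components projects the data
    # onto the first n columns of u, variance_explained returns the cumulative
    # explained-variance ratio per component.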
def initialization(self,X):
X = np.array(X)
return X
def train(self,X):
self.X = self.initialization(X)
self.covariance_matrix = np.cov(X.T)
self.u,s,v = np.linalg.svd(self.covariance_matrix)
sum_s = np.sum(s)
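        # Running total of the singular values gives the cumulative fraction of
        # variance explained by the leading components.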
self.variance_exp= []
k = 0
for i in s:
k = i+k
variance = k/sum_s
self.variance_exp.append(variance)
def K_components(self,n=2):
self.X= np.dot(self.X,self.u[:,:n])
return self.X
def variance_explained(self):
return self.variance_exp
if uploaded_file is not None:
Algorithms = st.sidebar.selectbox(
'Algorithm',
('None','K-means Clustering','Principal Component Analysis')
)
if uploaded_file is not None:
if Algorithms == 'K-means Clustering':
k_value = st.sidebar.number_input('Enter K value',value = 3)
train_button = st.sidebar.checkbox("Click Here for training")
if train_button:
d = Kmeans()
c,data = d.test(X,k_value)
st.subheader("Centroids")
st.write(c)
st.subheader("Clustering Data with labels")
st.write(data)
d.plot_clusters(data)
        # Note: clustering or plotting can fail if the data contains NaN values or if
        # the chosen K exceeds the number of samples.
if Algorithms == 'Principal Component Analysis':
k_value = st.sidebar.number_input('Enter K components value',value = 3)
train_button = st.sidebar.checkbox("Click Here for training")
if train_button:
d = PCA()
d.train(X)
st.header('Variance Explained')
st.markdown(d.variance_explained())
st.info('Always Use Feature Normalization when applying PCA')
X_pca = d.K_components(k_value)
st.header('X : Feature (PCA)')
st.write(X_pca)
|
[
"numpy.sum",
"pandas.read_csv",
"streamlit.title",
"numpy.argmin",
"streamlit.sidebar.selectbox",
"numpy.linalg.svd",
"numpy.mean",
"pandas.DataFrame",
"streamlit.subheader",
"numpy.random.randn",
"streamlit.sidebar.checkbox",
"numpy.std",
"streamlit.info",
"numpy.reshape",
"numpy.cov",
"pandas.concat",
"streamlit.error",
"streamlit.plotly_chart",
"streamlit.header",
"plotly.express.scatter_3d",
"streamlit.file_uploader",
"numpy.dot",
"streamlit.sidebar.number_input",
"numpy.zeros",
"streamlit.write",
"numpy.array"
] |
[((159, 198), 'streamlit.title', 'st.title', (['"""Synapse Unsupervised Models"""'], {}), "('Synapse Unsupervised Models')\n", (167, 198), True, 'import streamlit as st\n'), ((216, 265), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a csv file"""'], {'type': '"""csv"""'}), "('Choose a csv file', type='csv')\n", (232, 265), True, 'import streamlit as st\n'), ((308, 334), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (319, 334), True, 'import pandas as pd\n'), ((339, 353), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (347, 353), True, 'import streamlit as st\n'), ((546, 571), 'streamlit.header', 'st.header', (['"""X : Features"""'], {}), "('X : Features')\n", (555, 571), True, 'import streamlit as st\n'), ((576, 587), 'streamlit.write', 'st.write', (['X'], {}), '(X)\n', (584, 587), True, 'import streamlit as st\n'), ((629, 673), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Feature Normalization"""'], {}), "('Feature Normalization')\n", (648, 673), True, 'import streamlit as st\n'), ((3334, 3435), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Algorithm"""', "('None', 'K-means Clustering', 'Principal Component Analysis')"], {}), "('Algorithm', ('None', 'K-means Clustering',\n 'Principal Component Analysis'))\n", (3354, 3435), True, 'import streamlit as st\n'), ((722, 760), 'streamlit.header', 'st.header', (['"""X : Features (Normalized)"""'], {}), "('X : Features (Normalized)')\n", (731, 760), True, 'import streamlit as st\n'), ((769, 780), 'streamlit.write', 'st.write', (['X'], {}), '(X)\n', (777, 780), True, 'import streamlit as st\n'), ((849, 860), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (857, 860), True, 'import numpy as np\n'), ((895, 916), 'numpy.random.randn', 'np.random.randn', (['K', 'n'], {}), '(K, n)\n', (910, 916), True, 'import numpy as np\n'), ((1012, 1023), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (1020, 1023), True, 'import numpy as np\n'), ((1588, 1608), 'pandas.DataFrame', 'pd.DataFrame', (['self.X'], {}), '(self.X)\n', (1600, 1608), True, 'import pandas as pd\n'), ((1624, 1646), 'pandas.DataFrame', 'pd.DataFrame', (['self.idx'], {}), '(self.idx)\n', (1636, 1646), True, 'import pandas as pd\n'), ((1662, 1691), 'pandas.concat', 'pd.concat', (['[X_, idx_]'], {'axis': '(1)'}), '([X_, idx_], axis=1)\n', (1671, 1691), True, 'import pandas as pd\n'), ((2677, 2688), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2685, 2688), True, 'import numpy as np\n'), ((2817, 2828), 'numpy.cov', 'np.cov', (['X.T'], {}), '(X.T)\n', (2823, 2828), True, 'import numpy as np\n'), ((2850, 2887), 'numpy.linalg.svd', 'np.linalg.svd', (['self.covariance_matrix'], {}), '(self.covariance_matrix)\n', (2863, 2887), True, 'import numpy as np\n'), ((2904, 2913), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (2910, 2913), True, 'import numpy as np\n'), ((3141, 3170), 'numpy.dot', 'np.dot', (['self.X', 'self.u[:, :n]'], {}), '(self.X, self.u[:, :n])\n', (3147, 3170), True, 'import numpy as np\n'), ((3569, 3618), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Enter K value"""'], {'value': '(3)'}), "('Enter K value', value=3)\n", (3592, 3618), True, 'import streamlit as st\n'), ((3652, 3698), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Click Here for training"""'], {}), "('Click Here for training')\n", (3671, 3698), True, 'import streamlit as st\n'), ((4161, 4221), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Enter K components value"""'], {'value': 
'(3)'}), "('Enter K components value', value=3)\n", (4184, 4221), True, 'import streamlit as st\n'), ((4246, 4292), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Click Here for training"""'], {}), "('Click Here for training')\n", (4265, 4292), True, 'import streamlit as st\n'), ((704, 713), 'numpy.std', 'np.std', (['X'], {}), '(X)\n', (710, 713), True, 'import numpy as np\n'), ((1901, 1938), 'numpy.reshape', 'np.reshape', (["a['a1']", "a['a1'].shape[0]"], {}), "(a['a1'], a['a1'].shape[0])\n", (1911, 1938), True, 'import numpy as np\n'), ((1963, 2000), 'numpy.reshape', 'np.reshape', (["a['a2']", "a['a2'].shape[0]"], {}), "(a['a2'], a['a2'].shape[0])\n", (1973, 2000), True, 'import numpy as np\n'), ((2255, 2275), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2270, 2275), True, 'import streamlit as st\n'), ((3801, 3826), 'streamlit.subheader', 'st.subheader', (['"""Centroids"""'], {}), "('Centroids')\n", (3813, 3826), True, 'import streamlit as st\n'), ((3839, 3850), 'streamlit.write', 'st.write', (['c'], {}), '(c)\n', (3847, 3850), True, 'import streamlit as st\n'), ((3863, 3906), 'streamlit.subheader', 'st.subheader', (['"""Clustering Data with labels"""'], {}), "('Clustering Data with labels')\n", (3875, 3906), True, 'import streamlit as st\n'), ((3919, 3933), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (3927, 3933), True, 'import streamlit as st\n'), ((4384, 4415), 'streamlit.header', 'st.header', (['"""Variance Explained"""'], {}), "('Variance Explained')\n", (4393, 4415), True, 'import streamlit as st\n'), ((4476, 4537), 'streamlit.info', 'st.info', (['"""Always Use Feature Normalization when applying PCA"""'], {}), "('Always Use Feature Normalization when applying PCA')\n", (4483, 4537), True, 'import streamlit as st\n'), ((4594, 4624), 'streamlit.header', 'st.header', (['"""X : Feature (PCA)"""'], {}), "('X : Feature (PCA)')\n", (4603, 4624), True, 'import streamlit as st\n'), ((4637, 4652), 'streamlit.write', 'st.write', (['X_pca'], {}), '(X_pca)\n', (4645, 4652), True, 'import streamlit as st\n'), ((692, 702), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (699, 702), True, 'import numpy as np\n'), ((1106, 1117), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1114, 1117), True, 'import numpy as np\n'), ((1384, 1407), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (1391, 1407), True, 'import numpy as np\n'), ((2378, 2426), 'plotly.express.scatter_3d', 'px.scatter_3d', (['d'], {'x': '"""x"""', 'y': '"""y"""', 'z': '"""z"""', 'color': '"""l"""'}), "(d, x='x', y='y', z='z', color='l')\n", (2391, 2426), True, 'import plotly.express as px\n'), ((2440, 2460), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2455, 2460), True, 'import streamlit as st\n'), ((1183, 1215), 'numpy.sum', 'np.sum', (['((X[i, :] - c[j, :]) ** 2)'], {}), '((X[i, :] - c[j, :]) ** 2)\n', (1189, 1215), True, 'import numpy as np\n'), ((1244, 1259), 'numpy.argmin', 'np.argmin', (['temp'], {}), '(temp)\n', (1253, 1259), True, 'import numpy as np\n'), ((2565, 2615), 'streamlit.error', 'st.error', (['"""Your data is in Higher Dimension state"""'], {}), "('Your data is in Higher Dimension state')\n", (2573, 2615), True, 'import streamlit as st\n')]
|