code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---|
import os
import cv2
import copy
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
import math
import external.nms as nms
def _rescale_points(dets, ratios, borders, sizes):
xs, ys = dets[:, :, 0], dets[:, :, 1]
xs += borders[0, 2]
ys += borders[0, 0]
xs *= ratios[0, 1]
ys *= ratios[0, 0]
np.clip(xs, 0, sizes[0, 1], out=xs)
np.clip(ys, 0, sizes[0, 0], out=ys)
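# Note: _rescale_points adjusts the query points in place -- shift by the crop
# border, scale by the height/width ratios, then clip to the recorded sizes.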
def save_image(data, fn):
sizes = np.shape(data)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data)
plt.savefig(fn, dpi = height)
plt.close()
def _get_sample_point(s, e, num_feature):
dx = (e[0] - s[0]) / (num_feature-1)
ke = (e[1] - s[1]) / (e[0]-s[0]+1e-4)
points = []
weights = []
for k in range(num_feature):
xm = dx*k+s[0]
ym = (xm-s[0])*ke + s[1]
xc = math.ceil(xm)
yc = math.ceil(ym)
xf = math.floor(xm)
yf = math.floor(ym)
points.append([[xf, yf],[xf, yc],[xc, yf],[xc, yc]])
weights.append([(1-(xm-xf))*(1-(ym-yf)), (1-(xm-xf))*(1-(yc-ym)), (1-(xc-xm))*(1-(ym-yf)), (1-(xc-xm))*(1-(yc-ym))])
return points, weights
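# Note: _get_sample_point spreads `num_feature` points evenly along the segment
# from s to e; each point is returned as its four surrounding integer pixels
# (floor/ceil combinations) plus bilinear interpolation weights, so a feature
# value can later be gathered as a weighted sum over those four pixels.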
def _clip_detections(image, detections):
height, width = image.shape[0:2]
if len(detections) > 0:
keep_inds = ((detections[:, 0, 0] > 0) & (detections[:, 0, 0] < width) & (detections[:, 0, 1] > 0) & (detections[:, 0, 1] < height)
&(detections[:, 1, 0] > 0) & (detections[:, 1, 0] < width) & (detections[:, 1, 1] > 0) & (detections[:, 1, 1] < height))
detections = detections[keep_inds]
return detections
def kp_decode(nnet, inputs, K, ae_threshold=0.5, kernel=3):
with torch.no_grad():
detections, time_backbone, time_psn = nnet.test(inputs, ae_threshold=ae_threshold, K=K, kernel=kernel)
#print(detections)
predictions = detections[inputs[3].squeeze()]
predictions = predictions.data.cpu().numpy()
return predictions, True
def crop_image(image, center, size):
cty, ctx = center
height, width = size
im_height, im_width = image.shape[0:2]
cropped_image = np.zeros((height, width, image.shape[2]), dtype=image.dtype)
x0, x1 = max(0, ctx - width // 2), min(ctx + width // 2, im_width)
y0, y1 = max(0, cty - height // 2), min(cty + height // 2, im_height)
left, right = ctx - x0, x1 - ctx
top, bottom = cty - y0, y1 - cty
cropped_cty, cropped_ctx = height // 2, width // 2
y_slice = slice(cropped_cty - top, cropped_cty + bottom)
x_slice = slice(cropped_ctx - left, cropped_ctx + right)
cropped_image[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
border = np.array([
cropped_cty - top,
cropped_cty + bottom,
cropped_ctx - left,
cropped_ctx + right
], dtype=np.float32)
offset = np.array([
cty - height // 2,
ctx - width // 2
])
return cropped_image, border, offset
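# Note: crop_image cuts a (height, width) window centered on `center`,
# zero-padding wherever the window extends past the image. It returns the
# padded crop, the valid-region border inside the crop, and the offset of the
# window in the original image. Illustrative call (hypothetical names):
#     crop, border, offset = crop_image(img, (cy, cx), [crop_h, crop_w])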
def kp_detection(image, db, quiry, nnet, debug=False, decode_func=kp_decode, cuda_id=0):
K = db.configs["top_k"]
ae_threshold = db.configs["ae_threshold"]
nms_kernel = db.configs["nms_kernel"]
max_tag_len = 400
num_feature = 8
total_acc = 0
num_none = 0
detections = np.array(quiry)
if True:
detections = _clip_detections(image, detections)
ori_detections = copy.deepcopy(detections)
height, width = image.shape[0:2]
scale = 1.0
new_height = int(height * scale)
new_width = int(width * scale)
new_center = np.array([new_height // 2, new_width // 2])
inp_height = new_height | 127
inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
ratios = np.zeros((1, 2), dtype=np.float32)
borders = np.zeros((1, 4), dtype=np.float32)
sizes = np.zeros((1, 2), dtype=np.float32)
tags = np.zeros((1, max_tag_len * 8 * 4), dtype=np.int64)
weights = np.zeros((1, max_tag_len * 8 * 4), dtype=np.float32)
tag_masks = np.zeros((1, max_tag_len), dtype=np.uint8)
out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
height_ratio = out_height / inp_height
width_ratio = out_width / inp_width
resized_image = cv2.resize(image, (new_width, new_height))
resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])
cv2.imwrite('test.png', resized_image)
resized_image = resized_image / 255.
images[0] = resized_image.transpose((2, 0, 1))
borders[0] = border
sizes[0] = [inp_height, inp_width]
ratios[0] = [height_ratio, width_ratio]
if len(detections) > 0:
_rescale_points(detections, ratios, borders, sizes)
# normalize_(resized_image, db.mean, db.std)
tag_ind = 0
b_ind = 0
for k in range(len(detections)):
sp = detections[k, 0]
ep = detections[k, 1]
p_points, p_weights = _get_sample_point(sp, ep, num_feature)
for kth in range(len(p_points)):
p_point = p_points[kth]
p_weight = p_weights[kth]
for sth in range(4):
tags[b_ind, tag_ind + sth] = p_point[sth][1] * out_width + p_point[sth][0]
weights[b_ind, tag_ind + sth] = p_weight[sth]
tag_ind += 4
tag_masks[b_ind, k] = 1
tags = np.clip(tags, 0, (out_width - 1) * (out_height - 1))
if torch.cuda.is_available():
images = torch.from_numpy(images).cuda(cuda_id)
tags = torch.from_numpy(tags).cuda(cuda_id)
weights = torch.from_numpy(weights).cuda(cuda_id)
tag_masks = torch.from_numpy(tag_masks).cuda(cuda_id)
else:
images = torch.from_numpy(images)
tags = torch.from_numpy(tags)
weights = torch.from_numpy(weights)
tag_masks = torch.from_numpy(tag_masks)
predictions, flag = decode_func(nnet, [images, tags, weights, tag_masks], K, ae_threshold=ae_threshold,
kernel=nms_kernel)
#print(predictions)
predictions = predictions.tolist()
pair2pre = {}
ori_detections = ori_detections.tolist()
for det, pre in zip(ori_detections, predictions):
pair2pre[str(det)] = pre
return pair2pre
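# Illustrative usage (hypothetical objects; `nnet` is a trained network wrapper
# and `quiry` an (N, 2, 2) array of [start, end] point pairs):
#     pair2pre = kp_detection(image, db, quiry, nnet)
#     # pair2pre maps str([[x0, y0], [x1, y1]]) of each kept query pair to its prediction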
def testing(image, db, quiry, nnet, debug=False, decode_func=kp_decode, cuda_id=0):
return globals()[system_configs.sampling_function](image, db, quiry, nnet, debug=debug, cuda_id=cuda_id)
|
[
"numpy.clip",
"cv2.imwrite",
"matplotlib.pyplot.savefig",
"math.ceil",
"cv2.resize",
"math.floor",
"matplotlib.pyplot.Axes",
"torch.from_numpy",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"torch.cuda.is_available",
"copy.deepcopy",
"torch.no_grad",
"numpy.shape"
] |
[((390, 425), 'numpy.clip', 'np.clip', (['xs', '(0)', 'sizes[0, 1]'], {'out': 'xs'}), '(xs, 0, sizes[0, 1], out=xs)\n', (397, 425), True, 'import numpy as np\n'), ((430, 465), 'numpy.clip', 'np.clip', (['ys', '(0)', 'sizes[0, 0]'], {'out': 'ys'}), '(ys, 0, sizes[0, 0], out=ys)\n', (437, 465), True, 'import numpy as np\n'), ((505, 519), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (513, 519), True, 'import numpy as np\n'), ((588, 600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((666, 701), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (674, 701), True, 'import matplotlib.pyplot as plt\n'), ((766, 793), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn'], {'dpi': 'height'}), '(fn, dpi=height)\n', (777, 793), True, 'import matplotlib.pyplot as plt\n'), ((800, 811), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (809, 811), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2453), 'numpy.zeros', 'np.zeros', (['(height, width, image.shape[2])'], {'dtype': 'image.dtype'}), '((height, width, image.shape[2]), dtype=image.dtype)\n', (2401, 2453), True, 'import numpy as np\n'), ((2931, 3046), 'numpy.array', 'np.array', (['[cropped_cty - top, cropped_cty + bottom, cropped_ctx - left, cropped_ctx +\n right]'], {'dtype': 'np.float32'}), '([cropped_cty - top, cropped_cty + bottom, cropped_ctx - left, \n cropped_ctx + right], dtype=np.float32)\n', (2939, 3046), True, 'import numpy as np\n'), ((3090, 3137), 'numpy.array', 'np.array', (['[cty - height // 2, ctx - width // 2]'], {}), '([cty - height // 2, ctx - width // 2])\n', (3098, 3137), True, 'import numpy as np\n'), ((3505, 3520), 'numpy.array', 'np.array', (['quiry'], {}), '(quiry)\n', (3513, 3520), True, 'import numpy as np\n'), ((1073, 1086), 'math.ceil', 'math.ceil', (['xm'], {}), '(xm)\n', (1082, 1086), False, 'import math\n'), ((1100, 1113), 'math.ceil', 'math.ceil', (['ym'], {}), '(ym)\n', (1109, 1113), False, 'import math\n'), ((1127, 1141), 'math.floor', 'math.floor', (['xm'], {}), '(xm)\n', (1137, 1141), False, 'import math\n'), ((1155, 1169), 'math.floor', 'math.floor', (['ym'], {}), '(ym)\n', (1165, 1169), False, 'import math\n'), ((1907, 1922), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1920, 1922), False, 'import torch\n'), ((3616, 3641), 'copy.deepcopy', 'copy.deepcopy', (['detections'], {}), '(detections)\n', (3629, 3641), False, 'import copy\n'), ((3805, 3848), 'numpy.array', 'np.array', (['[new_height // 2, new_width // 2]'], {}), '([new_height // 2, new_width // 2])\n', (3813, 3848), True, 'import numpy as np\n'), ((3941, 3998), 'numpy.zeros', 'np.zeros', (['(1, 3, inp_height, inp_width)'], {'dtype': 'np.float32'}), '((1, 3, inp_height, inp_width), dtype=np.float32)\n', (3949, 3998), True, 'import numpy as np\n'), ((4016, 4050), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'np.float32'}), '((1, 2), dtype=np.float32)\n', (4024, 4050), True, 'import numpy as np\n'), ((4069, 4103), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {'dtype': 'np.float32'}), '((1, 4), dtype=np.float32)\n', (4077, 4103), True, 'import numpy as np\n'), ((4120, 4154), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'np.float32'}), '((1, 2), dtype=np.float32)\n', (4128, 4154), True, 'import numpy as np\n'), ((4170, 4220), 'numpy.zeros', 'np.zeros', (['(1, max_tag_len * 8 * 4)'], {'dtype': 'np.int64'}), '((1, max_tag_len * 8 * 4), dtype=np.int64)\n', (4178, 4220), True, 'import numpy as np\n'), 
((4239, 4291), 'numpy.zeros', 'np.zeros', (['(1, max_tag_len * 8 * 4)'], {'dtype': 'np.float32'}), '((1, max_tag_len * 8 * 4), dtype=np.float32)\n', (4247, 4291), True, 'import numpy as np\n'), ((4312, 4354), 'numpy.zeros', 'np.zeros', (['(1, max_tag_len)'], {'dtype': 'np.uint8'}), '((1, max_tag_len), dtype=np.uint8)\n', (4320, 4354), True, 'import numpy as np\n'), ((4546, 4588), 'cv2.resize', 'cv2.resize', (['image', '(new_width, new_height)'], {}), '(image, (new_width, new_height))\n', (4556, 4588), False, 'import cv2\n'), ((4700, 4738), 'cv2.imwrite', 'cv2.imwrite', (['"""test.png"""', 'resized_image'], {}), "('test.png', resized_image)\n", (4711, 4738), False, 'import cv2\n'), ((5733, 5785), 'numpy.clip', 'np.clip', (['tags', '(0)', '((out_width - 1) * (out_height - 1))'], {}), '(tags, 0, (out_width - 1) * (out_height - 1))\n', (5740, 5785), True, 'import numpy as np\n'), ((5797, 5822), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5820, 5822), False, 'import torch\n'), ((6103, 6127), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (6119, 6127), False, 'import torch\n'), ((6147, 6169), 'torch.from_numpy', 'torch.from_numpy', (['tags'], {}), '(tags)\n', (6163, 6169), False, 'import torch\n'), ((6192, 6217), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (6208, 6217), False, 'import torch\n'), ((6242, 6269), 'torch.from_numpy', 'torch.from_numpy', (['tag_masks'], {}), '(tag_masks)\n', (6258, 6269), False, 'import torch\n'), ((5845, 5869), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (5861, 5869), False, 'import torch\n'), ((5903, 5925), 'torch.from_numpy', 'torch.from_numpy', (['tags'], {}), '(tags)\n', (5919, 5925), False, 'import torch\n'), ((5962, 5987), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (5978, 5987), False, 'import torch\n'), ((6026, 6053), 'torch.from_numpy', 'torch.from_numpy', (['tag_masks'], {}), '(tag_masks)\n', (6042, 6053), False, 'import torch\n')]
|
import numpy as np
from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz
from matplotlib import pyplot as plt
import nibabel as nb
import subprocess
import math
import os
def add_afni_prefix(tpattern):
if tpattern:
if ".txt" in tpattern:
tpattern = "@{0}".format(tpattern)
return tpattern
def nullify(value, function=None):
from traits.trait_base import Undefined
if value is None:
return Undefined
if function:
return function(value)
return value
def chunk_ts(func_file, n_chunks=None, chunk_size=None):
func_img = nb.load(func_file)
trs = func_img.shape[3]
TR_ranges = []
if n_chunks:
chunk_size = trs/n_chunks
elif chunk_size:
n_chunks = int(trs/chunk_size)
else:
raise Exception("\n[!] Dev error: Either 'n_chunks' or 'chunk_size' "
"arguments must be passed to 'chunk_ts' function.\n")
for chunk_idx in range(0, n_chunks):
if chunk_idx == n_chunks - 1:
TR_ranges.append((int(chunk_idx*chunk_size), int(trs - 1)))
else:
TR_ranges.append((int(chunk_idx*chunk_size), int((chunk_idx+1)*chunk_size - 1)))
return TR_ranges
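# Illustrative example (hypothetical 4D file with 100 TRs): chunk_ts(f, n_chunks=4)
# yields [(0, 24), (25, 49), (50, 74), (75, 99)] -- the last chunk always ends at
# trs - 1, absorbing any rounding remainder.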
def split_ts_chunks(func_file, tr_ranges):
if '.nii' in func_file:
ext = '.nii'
if '.nii.gz' in func_file:
ext = '.nii.gz'
split_funcs = []
for chunk_idx, tr_range in enumerate(tr_ranges):
out_file = os.path.join(os.getcwd(), os.path.basename(func_file).replace(ext, "_{0}{1}".format(chunk_idx, ext)))
in_file = "{0}[{1}..{2}]".format(func_file, tr_range[0], tr_range[1])
cmd = ["3dcalc", "-a", in_file, "-expr", "a", "-prefix", out_file]
retcode = subprocess.check_output(cmd)
split_funcs.append(out_file)
return split_funcs
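# Note: each chunk is written with AFNI's 3dcalc using the "[start..end]"
# sub-brick selector, e.g. an in_file of "func.nii.gz[0..24]" copies the first
# 25 volumes verbatim (expr "a") into its own file.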
def oned_text_concat(in_files):
out_file = os.path.join(os.getcwd(), os.path.basename(in_files[0].replace("_0", "")))
out_txt = []
for txt in in_files:
with open(txt, 'r') as f:
txt_lines = f.readlines()
if not out_txt:
out_txt = [x for x in txt_lines]
else:
for line in txt_lines:
if "#" in line:
continue
out_txt.append(line)
with open(out_file, 'wt') as f:
for line in out_txt:
f.write(line)
return out_file
def degrees_to_mm(degrees, head_radius):
# function to convert degrees of motion to mm
mm = 2*math.pi*head_radius*(degrees/360)
return mm
def mm_to_degrees(mm, head_radius):
# function to convert mm of motion to degrees
degrees = 360*mm/(2*math.pi*head_radius)
return degrees
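# Worked example: with the 50 mm head radius used below, 1 degree of rotation
# corresponds to an arc length of 2*pi*50/360 ~= 0.87 mm, and mm_to_degrees
# inverts that mapping exactly.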
def notch_filter_motion(motion_params, filter_type, TR, fc_RR_min=None,
fc_RR_max=None, center_freq=None, freq_bw=None,
lowpass_cutoff=None, filter_order=4):
# Adapted from DCAN Labs:
# https://github.com/DCAN-Labs/dcan_bold_processing/blob/master/
# ...matlab_code/filtered_movement_regressors.m
if "ms" in TR:
TR = float(TR.replace("ms", ""))/1000
elif "ms" not in TR and "s" in TR:
TR = float(TR.replace("s", ""))
params_data = np.loadtxt(motion_params)
# Sampling frequency
fs = 1 / TR
# Nyquist frequency
fNy = fs / 2
if filter_type == "notch":
# Respiratory Rate
if fc_RR_min and fc_RR_max:
rr = [float(fc_RR_min) / float(60),
float(fc_RR_max) / float(60)]
rr_fNy = [rr[0] + fNy, rr[1] + fNy]
fa = abs(rr - np.floor(np.divide(rr_fNy, fs)) * fs)
elif center_freq and freq_bw:
tail = float(freq_bw)/float(2)
fa = [center_freq-tail, center_freq+tail]
W_notch = np.divide(fa, fNy)
Wn = np.mean(W_notch)
bw = np.diff(W_notch)
# for filter info
center_freq = Wn * fNy
bandwidth = fa[1] - fa[0]
Q = Wn/bw
[b_filt, a_filt] = iirnotch(Wn, Q)
num_f_apply = np.floor(filter_order / 2)
filter_info = f"Motion estimate filter information\n\nType: Notch\n" \
f"\nCenter freq: {center_freq}\nBandwidth: {bandwidth}\n\n" \
f"Wn: {Wn}\nQ: {Q}\n\n" \
f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"
elif filter_type == "lowpass":
if fc_RR_min:
rr = float(fc_RR_min) / float(60)
rr_fNy = rr + fNy
fa = abs(rr - np.floor(np.divide(rr_fNy, fs)) * fs)
elif lowpass_cutoff:
fa = lowpass_cutoff
Wn = fa/fNy
if filter_order:
b_filt = firwin(filter_order+1, Wn)
a_filt = 1
num_f_apply = 0
filter_info = f"Motion estimate filter information\n\nType: Lowpass" \
f"\n\nCutoff freq: {fa}\nWn: {Wn}\n\n" \
f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"
filter_design = os.path.join(os.getcwd(),
"motion_estimate_filter_design.txt")
filter_plot = os.path.join(os.getcwd(),
"motion_estimate_filter_freq-response.png")
# plot frequency response for user info
w, h = freqz(b_filt, a_filt, fs=fs)
fig, ax1 = plt.subplots()
ax1.set_title('Motion estimate filter frequency response')
ax1.plot(w, 20 * np.log10(abs(h)), 'b')
ax1.set_ylabel('Amplitude [dB]', color='b')
ax1.set_xlabel('Frequency [Hz]')
plt.savefig(filter_plot)
with open(filter_design, 'wt') as f:
f.write(filter_info)
# convert rotation params from degrees to mm
params_data[:, 0:3] = degrees_to_mm(params_data[:, 0:3], head_radius=50)
filtered_params = lfilter(b_filt, a_filt, params_data.T, zi=None)
for i in range(0, int(num_f_apply) - 1):
filtered_params = lfilter(b_filt, a_filt, filtered_params, zi=None)
# convert rotation params back to degrees
filtered_params[0:3,:] = mm_to_degrees(filtered_params[0:3,:], head_radius = 50)
filtered_motion_params = os.path.join(os.getcwd(),
"{0}_filtered.1D".format(os.path.basename(motion_params)))
np.savetxt(filtered_motion_params, filtered_params.T, fmt='%f')
return (filtered_motion_params, filter_design, filter_plot)
|
[
"subprocess.check_output",
"numpy.mean",
"matplotlib.pyplot.savefig",
"nibabel.load",
"scipy.signal.firwin",
"scipy.signal.iirnotch",
"numpy.floor",
"numpy.diff",
"numpy.loadtxt",
"scipy.signal.lfilter",
"numpy.savetxt",
"scipy.signal.freqz",
"matplotlib.pyplot.subplots",
"numpy.divide"
] |
[((590, 608), 'nibabel.load', 'nb.load', (['func_file'], {}), '(func_file)\n', (597, 608), True, 'import nibabel as nb\n'), ((3832, 3857), 'numpy.loadtxt', 'np.loadtxt', (['motion_params'], {}), '(motion_params)\n', (3842, 3857), True, 'import numpy as np\n'), ((5890, 5918), 'scipy.signal.freqz', 'freqz', (['b_filt', 'a_filt'], {'fs': 'fs'}), '(b_filt, a_filt, fs=fs)\n', (5895, 5918), False, 'from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz\n'), ((5935, 5949), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5947, 5949), True, 'from matplotlib import pyplot as plt\n'), ((6148, 6172), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filter_plot'], {}), '(filter_plot)\n', (6159, 6172), True, 'from matplotlib import pyplot as plt\n'), ((6394, 6441), 'scipy.signal.lfilter', 'lfilter', (['b_filt', 'a_filt', 'params_data.T'], {'zi': 'None'}), '(b_filt, a_filt, params_data.T, zi=None)\n', (6401, 6441), False, 'from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz\n'), ((6973, 7036), 'numpy.savetxt', 'np.savetxt', (['filtered_motion_params', 'filtered_params.T'], {'fmt': '"""%f"""'}), "(filtered_motion_params, filtered_params.T, fmt='%f')\n", (6983, 7036), True, 'import numpy as np\n'), ((1732, 1760), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (1755, 1760), False, 'import subprocess\n'), ((4402, 4420), 'numpy.divide', 'np.divide', (['fa', 'fNy'], {}), '(fa, fNy)\n', (4411, 4420), True, 'import numpy as np\n'), ((4435, 4451), 'numpy.mean', 'np.mean', (['W_notch'], {}), '(W_notch)\n', (4442, 4451), True, 'import numpy as np\n'), ((4465, 4481), 'numpy.diff', 'np.diff', (['W_notch'], {}), '(W_notch)\n', (4472, 4481), True, 'import numpy as np\n'), ((4620, 4635), 'scipy.signal.iirnotch', 'iirnotch', (['Wn', 'Q'], {}), '(Wn, Q)\n', (4628, 4635), False, 'from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz\n'), ((4658, 4684), 'numpy.floor', 'np.floor', (['(filter_order / 2)'], {}), '(filter_order / 2)\n', (4666, 4684), True, 'import numpy as np\n'), ((6514, 6563), 'scipy.signal.lfilter', 'lfilter', (['b_filt', 'a_filt', 'filtered_params'], {'zi': 'None'}), '(b_filt, a_filt, filtered_params, zi=None)\n', (6521, 6563), False, 'from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz\n'), ((5303, 5331), 'scipy.signal.firwin', 'firwin', (['(filter_order + 1)', 'Wn'], {}), '(filter_order + 1, Wn)\n', (5309, 5331), False, 'from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz\n'), ((4218, 4239), 'numpy.divide', 'np.divide', (['rr_fNy', 'fs'], {}), '(rr_fNy, fs)\n', (4227, 4239), True, 'import numpy as np\n'), ((5144, 5165), 'numpy.divide', 'np.divide', (['rr_fNy', 'fs'], {}), '(rr_fNy, fs)\n', (5153, 5165), True, 'import numpy as np\n')]
|
# Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
import theano.tensor as T
import theano.sandbox.linalg as linalg
from sklearn.utils import check_random_state
from . import TheanoDistribution
from .base import bound
class Normal(TheanoDistribution):
"""Normal distribution.
This distribution supports 1D data only.
"""
def __init__(self, mu=0.0, sigma=1.0):
"""Constructor.
Parameters
----------
* `mu` [float]:
The distribution mean.
* `sigma` [float]:
The distribution standard deviation.
"""
super(Normal, self).__init__(mu=mu, sigma=sigma)
# pdf
self.pdf_ = (
(1. / np.sqrt(2. * np.pi)) / self.sigma *
T.exp(-(self.X - self.mu) ** 2 / (2. * self.sigma ** 2))).ravel()
self._make(self.pdf_, "pdf")
# -log pdf
self.nll_ = bound(
T.log(self.sigma) + T.log(np.sqrt(2. * np.pi)) +
(self.X - self.mu) ** 2 / (2. * self.sigma ** 2),
np.inf,
self.sigma > 0.).ravel()
self._make(self.nll_, "nll")
# cdf
self.cdf_ = 0.5 * (1. + T.erf((self.X - self.mu) /
(self.sigma * np.sqrt(2.)))).ravel()
self._make(self.cdf_, "cdf")
# ppf
self.ppf_ = (self.mu +
np.sqrt(2.) * self.sigma * T.erfinv(2. * self.p - 1.))
self._make(self.ppf_, "ppf", args=[self.p])
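# Closed forms wired up above: pdf(x) = exp(-(x - mu)^2 / (2 sigma^2)) / (sigma sqrt(2 pi)),
# cdf(x) = 0.5 * (1 + erf((x - mu) / (sigma sqrt(2)))), and the quantile function
# ppf(p) = mu + sigma * sqrt(2) * erfinv(2 p - 1); nll is the negative log-pdf,
# bounded to +inf whenever sigma <= 0.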
class MultivariateNormal(TheanoDistribution):
"""Multivariate normal distribution."""
def __init__(self, mu, sigma):
"""Constructor.
Parameters
----------
* `mu` [1d array]:
The means.
* `sigma` [2d array]:
The covariance matrix.
"""
super(MultivariateNormal, self).__init__(mu=mu, sigma=sigma)
# XXX: The SDP-ness of sigma should be checked upon changes
# ndim
self.ndim_ = self.mu.shape[0]
self._make(self.ndim_, "ndim_func_", args=[])
# pdf
L = linalg.cholesky(self.sigma)
sigma_det = linalg.det(self.sigma) # XXX: compute from L instead
sigma_inv = linalg.matrix_inverse(self.sigma) # XXX: idem
self.pdf_ = (
(1. / T.sqrt((2. * np.pi) ** self.ndim_ * T.abs_(sigma_det))) *
T.exp(-0.5 * T.sum(T.mul(T.dot(self.X - self.mu,
sigma_inv),
self.X - self.mu),
axis=1))).ravel()
self._make(self.pdf_, "pdf")
# -log pdf
self.nll_ = -T.log(self.pdf_) # XXX: for sure this can be better
self._make(self.nll_, "nll")
# self.rvs_
self._make(T.dot(L, self.X.T).T + self.mu, "rvs_func_")
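# Sampling uses the Cholesky reparameterization: rvs() draws X with i.i.d.
# standard-normal entries and rvs_func_ returns dot(L, X.T).T + mu, which has
# covariance L L^T = sigma.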
def rvs(self, n_samples, random_state=None, **kwargs):
rng = check_random_state(random_state)
X = rng.randn(n_samples, self.ndim)
return self.rvs_func_(X, **kwargs)
def cdf(self, X, **kwargs):
"""Not supported."""
raise NotImplementedError
def ppf(self, X, **kwargs):
"""Not supported."""
raise NotImplementedError
@property
def ndim(self):
return self.ndim_func_()[None][0]
|
[
"theano.tensor.exp",
"sklearn.utils.check_random_state",
"numpy.sqrt",
"theano.sandbox.linalg.matrix_inverse",
"theano.sandbox.linalg.det",
"theano.tensor.abs_",
"theano.tensor.erfinv",
"theano.sandbox.linalg.cholesky",
"theano.tensor.log",
"theano.tensor.dot"
] |
[((2187, 2214), 'theano.sandbox.linalg.cholesky', 'linalg.cholesky', (['self.sigma'], {}), '(self.sigma)\n', (2202, 2214), True, 'import theano.sandbox.linalg as linalg\n'), ((2235, 2257), 'theano.sandbox.linalg.det', 'linalg.det', (['self.sigma'], {}), '(self.sigma)\n', (2245, 2257), True, 'import theano.sandbox.linalg as linalg\n'), ((2309, 2342), 'theano.sandbox.linalg.matrix_inverse', 'linalg.matrix_inverse', (['self.sigma'], {}), '(self.sigma)\n', (2330, 2342), True, 'import theano.sandbox.linalg as linalg\n'), ((3003, 3035), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (3021, 3035), False, 'from sklearn.utils import check_random_state\n'), ((2754, 2770), 'theano.tensor.log', 'T.log', (['self.pdf_'], {}), '(self.pdf_)\n', (2759, 2770), True, 'import theano.tensor as T\n'), ((1518, 1546), 'theano.tensor.erfinv', 'T.erfinv', (['(2.0 * self.p - 1.0)'], {}), '(2.0 * self.p - 1.0)\n', (1526, 1546), True, 'import theano.tensor as T\n'), ((871, 928), 'theano.tensor.exp', 'T.exp', (['(-(self.X - self.mu) ** 2 / (2.0 * self.sigma ** 2))'], {}), '(-(self.X - self.mu) ** 2 / (2.0 * self.sigma ** 2))\n', (876, 928), True, 'import theano.tensor as T\n'), ((1491, 1503), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1498, 1503), True, 'import numpy as np\n'), ((2884, 2902), 'theano.tensor.dot', 'T.dot', (['L', 'self.X.T'], {}), '(L, self.X.T)\n', (2889, 2902), True, 'import theano.tensor as T\n'), ((823, 843), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (830, 843), True, 'import numpy as np\n'), ((1033, 1050), 'theano.tensor.log', 'T.log', (['self.sigma'], {}), '(self.sigma)\n', (1038, 1050), True, 'import theano.tensor as T\n'), ((1059, 1079), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (1066, 1079), True, 'import numpy as np\n'), ((2433, 2450), 'theano.tensor.abs_', 'T.abs_', (['sigma_det'], {}), '(sigma_det)\n', (2439, 2450), True, 'import theano.tensor as T\n'), ((1364, 1376), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1371, 1376), True, 'import numpy as np\n'), ((2492, 2526), 'theano.tensor.dot', 'T.dot', (['(self.X - self.mu)', 'sigma_inv'], {}), '(self.X - self.mu, sigma_inv)\n', (2497, 2526), True, 'import theano.tensor as T\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import ctypes
import numpy
import warnings
from nidaqmx._lib import (
lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32)
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, is_array_buffer_too_small,
DaqResourceWarning)
from nidaqmx.system._watchdog_modules.expiration_state import ExpirationState
from nidaqmx.system._watchdog_modules.expiration_states_collection import (
ExpirationStatesCollection)
from nidaqmx.utils import flatten_channel_string
from nidaqmx.constants import (
Edge, TriggerType, WDTTaskAction)
from nidaqmx.types import (
AOExpirationState, COExpirationState, DOExpirationState)
__all__ = ['WatchdogTask']
class WatchdogTask(object):
"""
Represents the watchdog configurations for a DAQmx task.
"""
def __init__(self, device_name, task_name='', timeout=10):
"""
Creates and configures a task that controls the watchdog timer of a
device. The timer activates when you start the task.
Use the DAQmx Configure Watchdog Expiration States functions to
configure channel expiration states. This class does not program
the watchdog timer on a real-time controller.
Args:
device_name (str): Specifies the name, as configured in MAX, of
the device to which this operation applies.
task_name (str): Specifies the name to assign to the task. If you
use this constructor in a loop and specify a name for the task,
you must use the DAQmx Clear Task method within the loop after
you are finished with the task. Otherwise, NI-DAQmx attempts to
create multiple tasks with the same name, which results in an
error.
timeout (float): Specifies the amount of time in seconds until the
watchdog timer expires. A value of -1 means the internal timer
never expires. Set this input to -1 if you use an Expiration
Trigger to expire the watchdog task. If this time elapses, the
device sets the physical channels to the states you specify
with the digital physical channel expiration states input.
"""
self._handle = lib_importer.task_handle(0)
cfunc = lib_importer.windll.DAQmxCreateWatchdogTimerTaskEx
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
ctypes_byte_str, ctypes_byte_str,
ctypes.POINTER(lib_importer.task_handle),
ctypes.c_double]
error_code = cfunc(
device_name, task_name, ctypes.byref(self._handle), timeout)
check_for_error(error_code)
# Saved name is used in self.close() to throw graceful error on
# double closes.
self._saved_name = self.name
self._expiration_states = ExpirationStatesCollection(self._handle)
def __del__(self):
if self._handle is not None:
warnings.warn(
'Task of name "{0}" was not explicitly closed before it was '
'destructed. Resources on the task device may still be '
'reserved.'.format(self.name), DaqResourceWarning)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
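# Illustrative usage (device and line names are hypothetical):
#     with WatchdogTask("cDAQ1Mod1", timeout=0.5) as task:
#         task.cfg_watchdog_do_expir_states(
#             [DOExpirationState(physical_channel="cDAQ1Mod1/port0/line0",
#                                expiration_state=nidaqmx.constants.Level.LOW)])
#         task.start()
#         ...  # call task.reset_timer() more often than `timeout` to keep the device unlocked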
@property
def expiration_states(self):
"""
nidaqmx.system._watchdog_modules.expiration_states_collection.
ExpirationStatesCollection:
Gets the collection of expiration states for this watchdog task.
"""
return self._expiration_states
@property
def expir_trig_dig_edge_edge(self):
"""
:class:`nidaqmx.constants.Edge`: Specifies on which edge of a
digital signal to expire the watchdog task.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDigEdgeWatchdogExpirTrigEdge
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return Edge(val.value)
@expir_trig_dig_edge_edge.setter
def expir_trig_dig_edge_edge(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDigEdgeWatchdogExpirTrigEdge
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@expir_trig_dig_edge_edge.deleter
def expir_trig_dig_edge_edge(self):
cfunc = lib_importer.windll.DAQmxResetDigEdgeWatchdogExpirTrigEdge
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def expir_trig_dig_edge_src(self):
"""
str: Specifies the name of a terminal where a digital signal
exists to use as the source of the Expiration Trigger.
"""
cfunc = lib_importer.windll.DAQmxGetDigEdgeWatchdogExpirTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
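# Two-pass string query: call first with a zero-length buffer so the driver
# reports the required size, allocate that many bytes, then call again; if the
# size changes between calls the loop starts over.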
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@expir_trig_dig_edge_src.setter
def expir_trig_dig_edge_src(self, val):
cfunc = lib_importer.windll.DAQmxSetDigEdgeWatchdogExpirTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@expir_trig_dig_edge_src.deleter
def expir_trig_dig_edge_src(self):
cfunc = lib_importer.windll.DAQmxResetDigEdgeWatchdogExpirTrigSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def expir_trig_trig_on_network_conn_loss(self):
"""
bool: Specifies the watchdog timer behavior when the network
connection is lost between the host and the chassis. If set
to true, the watchdog timer expires when the chassis detects
the loss of network connection.
"""
val = c_bool32()
cfunc = (lib_importer.windll.
DAQmxGetWatchdogExpirTrigOnNetworkConnLoss)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@expir_trig_trig_on_network_conn_loss.setter
def expir_trig_trig_on_network_conn_loss(self, val):
cfunc = (lib_importer.windll.
DAQmxSetWatchdogExpirTrigOnNetworkConnLoss)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@expir_trig_trig_on_network_conn_loss.deleter
def expir_trig_trig_on_network_conn_loss(self):
cfunc = (lib_importer.windll.
DAQmxResetWatchdogExpirTrigOnNetworkConnLoss)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def expir_trig_trig_type(self):
"""
:class:`nidaqmx.constants.TriggerType`: Specifies the type of
trigger to use to expire a watchdog task.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetWatchdogExpirTrigType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return TriggerType(val.value)
@expir_trig_trig_type.setter
def expir_trig_trig_type(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetWatchdogExpirTrigType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@expir_trig_trig_type.deleter
def expir_trig_trig_type(self):
cfunc = lib_importer.windll.DAQmxResetWatchdogExpirTrigType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def expired(self):
"""
bool: Indicates if the watchdog timer expired. You can read this
property only while the task is running.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetWatchdogHasExpired
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@property
def timeout(self):
"""
float: Specifies in seconds the amount of time until the
watchdog timer expires. A value of -1 means the internal
timer never expires. Set this input to -1 if you use an
Expiration Trigger to expire the watchdog task.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetWatchdogTimeout
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return val.value
@timeout.setter
def timeout(self, val):
cfunc = lib_importer.windll.DAQmxSetWatchdogTimeout
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_double]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@timeout.deleter
def timeout(self):
cfunc = lib_importer.windll.DAQmxResetWatchdogTimeout
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
@property
def name(self):
"""
str: Indicates the name of the task.
"""
cfunc = lib_importer.windll.DAQmxGetTaskName
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_char_p,
ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
def _control_watchdog_task(self, action):
"""
Controls the watchdog timer task according to the action you
specify. This function does not program the watchdog timer on a
real-time controller. Use the Real-Time Watchdog VIs to program
the watchdog timer on a real-time controller.
Args:
action (nidaqmx.constants.WDTTaskAction): Specifies how to
control the watchdog timer task.
"""
cfunc = lib_importer.windll.DAQmxControlWatchdogTask
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, action.value)
check_for_error(error_code)
def cfg_watchdog_ao_expir_states(self, expiration_states):
"""
Configures the expiration states for an analog watchdog timer task.
Args:
expiration_states
(List[nidaqmx.system.watchdog.AOExpirationState]):
Contains the states to which to set analog physical channels
when the watchdog timer expires. Each element of the list
contains an analog physical channel name, the corresponding
expiration state, and the output type for that analog
physical channel. The units of "expiration state" must be
specified in volts for an analog output voltage expiration
state, or amps for an analog output current expiration state.
physical_channel (str): Specifies the analog output channel to
modify. You cannot modify dedicated analog input lines.
expiration_state (float): Specifies the value to set the
channel to upon expiration.
output_type (nidaqmx.constants.WatchdogAOExpirState):
Specifies the output type of the physical channel.
Returns:
List[nidaqmx.system._watchdog_modules.expiration_state.ExpirationState]:
Indicates the list of objects representing the configured
expiration states.
"""
channel_names = flatten_channel_string(
[e.physical_channel for e in expiration_states])
expir_state = numpy.float64(
[e.expiration_state for e in expiration_states])
output_type = numpy.int32(
[e.output_type.value for e in expiration_states])
cfunc = lib_importer.windll.DAQmxCfgWatchdogAOExpirStates
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
wrapped_ndpointer(dtype=numpy.int32, flags=('C', 'W')),
ctypes.c_uint]
error_code = cfunc(
self._handle, channel_names, expir_state, output_type,
len(expiration_states))
check_for_error(error_code)
return [ExpirationState(self._handle, e.physical_channel)
for e in expiration_states]
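# Illustrative expiration-state entry (hypothetical channel and values):
#     AOExpirationState(physical_channel="cDAQ1Mod2/ao0",
#                       expiration_state=0.0,
#                       output_type=nidaqmx.constants.WatchdogAOExpirState.VOLTAGE)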
def cfg_watchdog_co_expir_states(self, expiration_states):
"""
Configures the expiration states for a counter watchdog timer task.
Args:
expiration_states
(List[nidaqmx.system.watchdog.COExpirationState]):
Contains the states to which to set counter physical channels
when the watchdog timer expires. Each element of the list
contains a counter physical channel name and the corresponding
state for that counter physical channel.
physical_channel (str): Specifies the counter output channel to
modify. You cannot modify dedicated counter input lines.
expiration_state (nidaqmx.constants.WatchdogCOExpirState):
Specifies the value to set the channel to upon expiration.
Returns:
List[nidaqmx.system._watchdog_modules.expiration_state.ExpirationState]:
Indicates the list of objects representing the configured
expiration states.
"""
channel_names = flatten_channel_string(
[e.physical_channel for e in expiration_states])
expir_state = numpy.int32(
[e.expiration_state.value for e in expiration_states])
cfunc = lib_importer.windll.DAQmxCfgWatchdogCOExpirStates
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
wrapped_ndpointer(dtype=numpy.int32, flags=('C', 'W')),
ctypes.c_uint]
error_code = cfunc(
self._handle, channel_names, expir_state, len(expiration_states))
check_for_error(error_code)
return [ExpirationState(self._handle, e.physical_channel)
for e in expiration_states]
def cfg_watchdog_do_expir_states(self, expiration_states):
"""
Configures the expiration states for a digital watchdog timer task.
Args:
expiration_states
(List[nidaqmx.system.watchdog.DOExpirationState]):
Contains the states to which to set digital physical channels
when the watchdog timer expires. Each element of the list
contains a digital physical channel name and the corresponding
state for that digital physical channel.
physical_channel (str): Specifies the digital output channel to
modify. You cannot modify dedicated digital input lines.
expiration_state (nidaqmx.constants.Level): Specifies the
value to set the channel to upon expiration.
Returns:
List[nidaqmx.system._watchdog_modules.expiration_state.ExpirationState]:
Indicates the list of objects representing the configured
expiration states.
"""
channel_names = flatten_channel_string(
[e.physical_channel for e in expiration_states])
expir_state = numpy.int32(
[e.expiration_state.value for e in expiration_states])
cfunc = lib_importer.windll.DAQmxCfgWatchdogDOExpirStates
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
wrapped_ndpointer(dtype=numpy.int32, flags=('C', 'W')),
ctypes.c_uint]
error_code = cfunc(
self._handle, channel_names, expir_state, len(expiration_states))
check_for_error(error_code)
return [ExpirationState(self._handle, e.physical_channel)
for e in expiration_states]
def clear_expiration(self):
"""
Unlock a device whose watchdog timer expired.
This function does not program the watchdog timer on a real-time
controller. Use the Real-Time Watchdog VIs to program the watchdog
timer on a real-time controller.
"""
self._control_watchdog_task(WDTTaskAction.CLEAR_EXPIRATION)
def close(self):
"""
Clears the task.
Before clearing, this method aborts the task, if necessary,
and releases any resources the task reserved. You cannot use a task
after you clear it unless you recreate the task.
If you create a DAQmx Task object within a loop, use this method
within the loop after you are finished with the task to avoid
allocating unnecessary memory.
"""
if self._handle is None:
warnings.warn(
'Attempted to close NI-DAQmx task of name "{0}" but task was '
'already closed.'.format(self._saved_name), DaqResourceWarning)
return
cfunc = lib_importer.windll.DAQmxClearTask
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(self._handle)
check_for_error(error_code)
self._handle = None
def control(self, action):
"""
Alters the state of a task according to the action you specify.
Args:
action (nidaqmx.constants.TaskMode): Specifies how to alter
the task state.
"""
cfunc = lib_importer.windll.DAQmxTaskControl
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, action.value)
check_for_error(error_code)
def reset_timer(self):
"""
Reset the internal timer. You must continually reset the internal
timer to prevent it from timing out and locking the device.
This function does not program the watchdog timer on a real-time
controller. Use the Real-Time Watchdog VIs to program the watchdog
timer on a real-time controller.
"""
self._control_watchdog_task(WDTTaskAction.RESET_TIMER)
def start(self):
"""
Transitions the task to the running state to begin the measurement
or generation. Using this method is required for some applications and
is optional for others.
"""
cfunc = lib_importer.windll.DAQmxStartTask
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [lib_importer.task_handle]
error_code = cfunc(self._handle)
check_for_error(error_code)
def stop(self):
"""
Stops the task and returns it to the state the task was in before the
DAQmx Start Task method ran.
"""
cfunc = lib_importer.windll.DAQmxStopTask
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [lib_importer.task_handle]
error_code = cfunc(self._handle)
check_for_error(error_code)
|
[
"nidaqmx.errors.check_for_error",
"nidaqmx.utils.flatten_channel_string",
"ctypes.byref",
"ctypes.POINTER",
"nidaqmx.errors.is_string_buffer_too_small",
"nidaqmx.system._watchdog_modules.expiration_state.ExpirationState",
"numpy.float64",
"nidaqmx._lib.wrapped_ndpointer",
"nidaqmx.constants.Edge",
"numpy.int32",
"ctypes.create_string_buffer",
"nidaqmx._lib.lib_importer.task_handle",
"nidaqmx.system._watchdog_modules.expiration_states_collection.ExpirationStatesCollection",
"ctypes.c_double",
"ctypes.c_int",
"nidaqmx.constants.TriggerType",
"nidaqmx._lib.c_bool32"
] |
[((2444, 2471), 'nidaqmx._lib.lib_importer.task_handle', 'lib_importer.task_handle', (['(0)'], {}), '(0)\n', (2468, 2471), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n'), ((2964, 2991), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (2979, 2991), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((3161, 3201), 'nidaqmx.system._watchdog_modules.expiration_states_collection.ExpirationStatesCollection', 'ExpirationStatesCollection', (['self._handle'], {}), '(self._handle)\n', (3187, 3201), False, 'from nidaqmx.system._watchdog_modules.expiration_states_collection import ExpirationStatesCollection\n'), ((4146, 4160), 'ctypes.c_int', 'ctypes.c_int', ([], {}), '()\n', (4158, 4160), False, 'import ctypes\n'), ((4546, 4573), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (4561, 4573), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((4590, 4605), 'nidaqmx.constants.Edge', 'Edge', (['val.value'], {}), '(val.value)\n', (4594, 4605), False, 'from nidaqmx.constants import Edge, TriggerType, WDTTaskAction\n'), ((5067, 5094), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (5082, 5094), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((5511, 5538), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (5526, 5538), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((6631, 6660), 'nidaqmx.errors.check_for_error', 'check_for_error', (['size_or_code'], {}), '(size_or_code)\n', (6646, 6660), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((7140, 7167), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (7155, 7167), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((7581, 7608), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (7596, 7608), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((7972, 7982), 'nidaqmx._lib.c_bool32', 'c_bool32', ([], {}), '()\n', (7980, 7982), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n'), ((8390, 8417), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (8405, 8417), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((8927, 8954), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (8942, 8954), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((9421, 9448), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (9436, 9448), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((9662, 9676), 
'ctypes.c_int', 'ctypes.c_int', ([], {}), '()\n', (9674, 9676), False, 'import ctypes\n'), ((10055, 10082), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (10070, 10082), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((10099, 10121), 'nidaqmx.constants.TriggerType', 'TriggerType', (['val.value'], {}), '(val.value)\n', (10110, 10121), False, 'from nidaqmx.constants import Edge, TriggerType, WDTTaskAction\n'), ((10568, 10595), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (10583, 10595), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((10997, 11024), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (11012, 11024), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((11227, 11237), 'nidaqmx._lib.c_bool32', 'c_bool32', ([], {}), '()\n', (11235, 11237), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n'), ((11609, 11636), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (11624, 11636), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((12001, 12018), 'ctypes.c_double', 'ctypes.c_double', ([], {}), '()\n', (12016, 12018), False, 'import ctypes\n'), ((12418, 12445), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (12433, 12445), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((12865, 12892), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (12880, 12892), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((13262, 13289), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (13277, 13289), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((14253, 14282), 'nidaqmx.errors.check_for_error', 'check_for_error', (['size_or_code'], {}), '(size_or_code)\n', (14268, 14282), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((15149, 15176), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (15164, 15176), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((16623, 16694), 'nidaqmx.utils.flatten_channel_string', 'flatten_channel_string', (['[e.physical_channel for e in expiration_states]'], {}), '([e.physical_channel for e in expiration_states])\n', (16645, 16694), False, 'from nidaqmx.utils import flatten_channel_string\n'), ((16730, 16792), 'numpy.float64', 'numpy.float64', (['[e.expiration_state for e in expiration_states]'], {}), '([e.expiration_state for e in expiration_states])\n', (16743, 16792), False, 'import numpy\n'), ((16828, 16889), 'numpy.int32', 'numpy.int32', (['[e.output_type.value for e in expiration_states]'], {}), '([e.output_type.value for e in expiration_states])\n', 
(16839, 16889), False, 'import numpy\n'), ((17527, 17554), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (17542, 17554), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((18770, 18841), 'nidaqmx.utils.flatten_channel_string', 'flatten_channel_string', (['[e.physical_channel for e in expiration_states]'], {}), '([e.physical_channel for e in expiration_states])\n', (18792, 18841), False, 'from nidaqmx.utils import flatten_channel_string\n'), ((18877, 18943), 'numpy.int32', 'numpy.int32', (['[e.expiration_state.value for e in expiration_states]'], {}), '([e.expiration_state.value for e in expiration_states])\n', (18888, 18943), False, 'import numpy\n'), ((19474, 19501), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (19489, 19501), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((20702, 20773), 'nidaqmx.utils.flatten_channel_string', 'flatten_channel_string', (['[e.physical_channel for e in expiration_states]'], {}), '([e.physical_channel for e in expiration_states])\n', (20724, 20773), False, 'from nidaqmx.utils import flatten_channel_string\n'), ((20809, 20875), 'numpy.int32', 'numpy.int32', (['[e.expiration_state.value for e in expiration_states]'], {}), '([e.expiration_state.value for e in expiration_states])\n', (20820, 20875), False, 'import numpy\n'), ((21406, 21433), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (21421, 21433), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((22909, 22936), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (22924, 22936), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((23556, 23583), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (23571, 23583), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((24538, 24565), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (24553, 24565), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((25000, 25027), 'nidaqmx.errors.check_for_error', 'check_for_error', (['error_code'], {}), '(error_code)\n', (25015, 25027), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((2919, 2945), 'ctypes.byref', 'ctypes.byref', (['self._handle'], {}), '(self._handle)\n', (2931, 2945), False, 'import ctypes\n'), ((4519, 4536), 'ctypes.byref', 'ctypes.byref', (['val'], {}), '(val)\n', (4531, 4536), False, 'import ctypes\n'), ((6141, 6179), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['temp_size'], {}), '(temp_size)\n', (6168, 6179), False, 'import ctypes\n'), ((6277, 6317), 'nidaqmx.errors.is_string_buffer_too_small', 'is_string_buffer_too_small', (['size_or_code'], {}), '(size_or_code)\n', (6303, 6317), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((8363, 8380), 'ctypes.byref', 'ctypes.byref', 
(['val'], {}), '(val)\n', (8375, 8380), False, 'import ctypes\n'), ((10028, 10045), 'ctypes.byref', 'ctypes.byref', (['val'], {}), '(val)\n', (10040, 10045), False, 'import ctypes\n'), ((11582, 11599), 'ctypes.byref', 'ctypes.byref', (['val'], {}), '(val)\n', (11594, 11599), False, 'import ctypes\n'), ((12391, 12408), 'ctypes.byref', 'ctypes.byref', (['val'], {}), '(val)\n', (12403, 12408), False, 'import ctypes\n'), ((13763, 13801), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['temp_size'], {}), '(temp_size)\n', (13790, 13801), False, 'import ctypes\n'), ((13899, 13939), 'nidaqmx.errors.is_string_buffer_too_small', 'is_string_buffer_too_small', (['size_or_code'], {}), '(size_or_code)\n', (13925, 13939), False, 'from nidaqmx.errors import check_for_error, is_string_buffer_too_small, is_array_buffer_too_small, DaqResourceWarning\n'), ((17572, 17621), 'nidaqmx.system._watchdog_modules.expiration_state.ExpirationState', 'ExpirationState', (['self._handle', 'e.physical_channel'], {}), '(self._handle, e.physical_channel)\n', (17587, 17621), False, 'from nidaqmx.system._watchdog_modules.expiration_state import ExpirationState\n'), ((19519, 19568), 'nidaqmx.system._watchdog_modules.expiration_state.ExpirationState', 'ExpirationState', (['self._handle', 'e.physical_channel'], {}), '(self._handle, e.physical_channel)\n', (19534, 19568), False, 'from nidaqmx.system._watchdog_modules.expiration_state import ExpirationState\n'), ((21451, 21500), 'nidaqmx.system._watchdog_modules.expiration_state.ExpirationState', 'ExpirationState', (['self._handle', 'e.physical_channel'], {}), '(self._handle, e.physical_channel)\n', (21466, 21500), False, 'from nidaqmx.system._watchdog_modules.expiration_state import ExpirationState\n'), ((2771, 2811), 'ctypes.POINTER', 'ctypes.POINTER', (['lib_importer.task_handle'], {}), '(lib_importer.task_handle)\n', (2785, 2811), False, 'import ctypes\n'), ((4434, 4462), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int'], {}), '(ctypes.c_int)\n', (4448, 4462), False, 'import ctypes\n'), ((8282, 8306), 'ctypes.POINTER', 'ctypes.POINTER', (['c_bool32'], {}), '(c_bool32)\n', (8296, 8306), False, 'import ctypes\n'), ((9943, 9971), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int'], {}), '(ctypes.c_int)\n', (9957, 9971), False, 'import ctypes\n'), ((11501, 11525), 'ctypes.POINTER', 'ctypes.POINTER', (['c_bool32'], {}), '(c_bool32)\n', (11515, 11525), False, 'import ctypes\n'), ((12303, 12334), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (12317, 12334), False, 'import ctypes\n'), ((17210, 17266), 'nidaqmx._lib.wrapped_ndpointer', 'wrapped_ndpointer', ([], {'dtype': 'numpy.float64', 'flags': "('C', 'W')"}), "(dtype=numpy.float64, flags=('C', 'W'))\n", (17227, 17266), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n'), ((17292, 17346), 'nidaqmx._lib.wrapped_ndpointer', 'wrapped_ndpointer', ([], {'dtype': 'numpy.int32', 'flags': "('C', 'W')"}), "(dtype=numpy.int32, flags=('C', 'W'))\n", (17309, 17346), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n'), ((19264, 19318), 'nidaqmx._lib.wrapped_ndpointer', 'wrapped_ndpointer', ([], {'dtype': 'numpy.int32', 'flags': "('C', 'W')"}), "(dtype=numpy.int32, flags=('C', 'W'))\n", (19281, 19318), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n'), ((21196, 21250), 'nidaqmx._lib.wrapped_ndpointer', 'wrapped_ndpointer', ([], {'dtype': 'numpy.int32', 
'flags': "('C', 'W')"}), "(dtype=numpy.int32, flags=('C', 'W'))\n", (21213, 21250), False, 'from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str, c_bool32\n')]
|
import numpy as np
import scipy.sparse as ssp
import torch
from beta_rec.models.torch_engine import ModelEngine
from beta_rec.utils.common_util import timeit
def top_k(values, k, exclude=[]):
"""Return the indices of the k items with the highest value in the list of values.
Exclude the ids from the list "exclude".
"""
# Put low similarity to viewed items to exclude them from recommendations
values[exclude] = -np.inf
return list(np.argpartition(-values, range(k))[:k])
def get_sparse_vector(ids, length, values=None):
"""Sparse vector generation.
If "values" is None, the elements are set to 1.
"""
n = len(ids)
if values is None:
return ssp.coo_matrix((np.ones(n), (ids, np.zeros(n))), (length, 1)).tocsc()
else:
return ssp.coo_matrix((values, (ids, np.zeros(n))), (length, 1)).tocsc()
class UserKNN(torch.nn.Module):
"""A PyTorch Module for UserKNN model."""
def __init__(self, config):
"""Initialize UserKNN Class."""
super(UserKNN, self).__init__()
self.config = config
self.device = self.config["device_str"]
self.n_users = self.config["n_users"]
self.n_items = self.config["n_items"]
self.neighbourhood_size = self.config["neighbourhood_size"]
def prepare_model(self, data):
"""Load data into matrices.
:param data:
:return:
"""
row = data.train["col_user"].to_numpy()
col = data.train["col_item"].to_numpy()
self.binary_user_item = ssp.coo_matrix(
(np.ones(len(data.train)), (row, col)), shape=(self.n_users, self.n_items)
).tocsr()
def _items_count_per_user(self):
"""Calculate the number of interacted items for an user.
:return:
"""
if not hasattr(self, "__items_count_per_user"):
self.__items_count_per_user = np.asarray(
self.binary_user_item.sum(axis=1)
).ravel()
return self.__items_count_per_user
def similarity_with_users(self, sequence):
"""Calculate the similarity between the a given user and all users according to the overlap ratio.
:param sequence: the user's interacted items
:return:
"""
sparse_sequence = get_sparse_vector(sequence, self.n_items)
overlap = self.binary_user_item.dot(sparse_sequence).toarray().ravel()
overlap[overlap != 0] /= np.sqrt(self._items_count_per_user()[overlap != 0])
return overlap
def forward(self, batch_data):
"""Redundant method for UserKNN.
Args:
            batch_data: tuple consisting of (users, pos_items, neg_items), which must be LongTensors.
"""
return 0.0
def predict(self, users, items):
"""Predict result with the model.
Args:
users (int, or list of int): user id(s).
items (int, or list of int): item id(s).
Return:
scores (int, or list of int): predicted scores of these user-item pairs.
"""
scores = []
for i in range(len(users)):
sequence = self.binary_user_item.getrow(users[i]).nonzero()[0]
sim_with_users = self.similarity_with_users(sequence)
nearest_neighbour = top_k(sim_with_users, self.neighbourhood_size)
neighbour_items = get_sparse_vector(
nearest_neighbour,
self.n_users,
values=sim_with_users[nearest_neighbour],
)
sim_with_items = (
self.binary_user_item.T.dot(neighbour_items).toarray().ravel()
)
sim_with_items[sequence] = -np.inf
scores.append(sim_with_items[items[i]])
return torch.tensor(scores)
class UserKNNEngine(ModelEngine):
"""UserKNNEngine Class."""
def __init__(self, config):
"""Initialize UserKNNEngine Class."""
print("userKNNEngine init")
self.config = config
self.model = UserKNN(config["model"])
# super(UserKNNEngine, self).__init__(config)
def train_single_batch(self, batch_data):
"""Train a single batch.
        However, UserKNN is a neighbourhood model that bases its predictions on the similarity relationships among users,
        so it requires no training procedure.
Args:
batch_data (list): batch users, positive items and negative items.
Return:
0
"""
assert hasattr(self, "model"), "Please specify the exact model !"
return 0
@timeit
def train_an_epoch(self, train_loader, epoch_id):
"""Train a epoch, generate batch_data from data_loader, and call train_single_batch.
Like the train_single_batch method, UserKNN requires no training procedure.
Args:
train_loader (DataLoader):
epoch_id (int): set to 1.
"""
assert hasattr(self, "model"), "Please specify the exact model !"
# self.model.train()
print(f"[Training Epoch {epoch_id}] skipped")
self.writer.add_scalar("model/loss", 0.0, epoch_id)
self.writer.add_scalar("model/regularizer", 0.0, epoch_id)
|
[
"torch.tensor",
"numpy.zeros",
"numpy.ones"
] |
[((3752, 3772), 'torch.tensor', 'torch.tensor', (['scores'], {}), '(scores)\n', (3764, 3772), False, 'import torch\n'), ((716, 726), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (723, 726), True, 'import numpy as np\n'), ((734, 745), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (742, 745), True, 'import numpy as np\n'), ((825, 836), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (833, 836), True, 'import numpy as np\n')]
|
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.model_selection import train_test_split
from torch import nn
from torch.nn import functional as F
from transformers import BertTokenizer, BertConfig, BertModel
class BertTextClassificationDataset(torch.utils.data.Dataset):
def __init__(self, input_ids, tokentype_ids, attention_mask, targets, sample_weight=None, transform=None):
self.input_ids = np.array(input_ids, dtype=np.int32)
self.tokentype_ids = np.array(tokentype_ids, dtype=np.int32)
self.attention_mask = np.array(attention_mask, dtype=np.int32)
self.targets = np.array(targets, dtype=np.int32)
if sample_weight is not None:
self.sample_weight = np.array(sample_weight)
else:
self.sample_weight = np.ones(self.targets.shape)
self.transform = transform
def __len__(self):
return len(self.targets)
def __getitem__(self, index):
input_id, tokentype_id, attention_mask, target = \
self.input_ids[index], self.tokentype_ids[index], self.attention_mask[index], self.targets[index]
sample_weight = self.sample_weight[index]
return torch.as_tensor(input_id).type(torch.LongTensor), torch.as_tensor(tokentype_id).type(torch.LongTensor), \
torch.as_tensor(attention_mask).type(torch.LongTensor), \
torch.as_tensor(target).type(torch.LongTensor), torch.as_tensor(sample_weight.astype('float'))
class BertClassifier(pl.LightningModule):
def __init__(self, bert_checkpoint, classes, dense_dropout=0.5, **kwargs):
super().__init__()
self.save_hyperparameters()
# Load model
self.config = BertConfig.from_pretrained(bert_checkpoint)
self.encoder = BertModel.from_pretrained(bert_checkpoint, config=self.config)
self.decoder = nn.Sequential(
nn.Dropout(dense_dropout),
nn.Linear(in_features=self.config.hidden_size, out_features=self.config.hidden_size),
nn.Dropout(dense_dropout),
nn.Linear(in_features=self.config.hidden_size, out_features=classes),
)
def forward(self, input_ids, token_type_ids, attention_mask):
output = self.encoder(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
output = output[0][:, 0]
output = self.decoder(output)
return output
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=3e-5, eps=1e-8)
return optimizer
def training_step(self, train_batch, batch_idx):
input_id, tokentype_id, attention_mask, target, sample_weight = train_batch
output = self.forward(input_id, tokentype_id, attention_mask)
loss = F.cross_entropy(output, target, reduction='none')
loss = loss * sample_weight
loss = loss.mean()
self.log('train loss', loss)
return loss
def validation_step(self, val_batch, batch_idx):
input_id, tokentype_id, attention_mask, target, sample_weight = val_batch
output = self.forward(input_id, tokentype_id, attention_mask)
loss = F.cross_entropy(output, target)
self.log('val_loss', loss)
def test_step(self, val_batch, batch_idx):
input_id, tokentype_id, attention_mask, target, sample_weight = val_batch
output = self.forward(input_id, tokentype_id, attention_mask)
output = output.to('cpu').numpy()
target = target.to('cpu').numpy()
correct_top1, incorrect_top1 = 0, 0
correct_top5, incorrect_top5 = 0, 0
for o, t in zip(output, target):
sorted_args = np.argsort(-o)
if sorted_args[0] == t:
correct_top1 += 1
else:
incorrect_top1 += 1
if t in sorted_args[:5]:
correct_top5 += 1
else:
incorrect_top5 += 1
return {"correct_top1": correct_top1, "correct_top5": correct_top5, "incorrect_top1": incorrect_top1,
"incorrect_top5": incorrect_top5}
def test_epoch_end(self, outputs):
correct_top1, incorrect_top1 = 0, 0
correct_top5, incorrect_top5 = 0, 0
for out in outputs:
correct_top1 += out["correct_top1"]
incorrect_top1 += out["incorrect_top1"]
correct_top5 += out["correct_top5"]
incorrect_top5 += out["incorrect_top5"]
print({"acc_top1": correct_top1 / (correct_top1 + incorrect_top1),
"acc_top5": correct_top5 / (correct_top5 + incorrect_top5)})
CLASSNUM = 255
X = list(range(4099))
train_ids, test_ids = train_test_split(X, test_size=0.2, random_state=1)
train_ids, val_ids = train_test_split(train_ids, test_size=0.25, random_state=1)
text_all = np.array([line.strip().split(' =->= ')[1] for line in open('data/dscps_encoded.txt', 'r').readlines()])
tags_all = np.array([int(line.strip()) for line in open('data/tags_id.txt').readlines()])
# Bert prepare
bert_checkpoint = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(bert_checkpoint)
MAX_LENGTH = 300
train_X = tokenizer(list(text_all[train_ids]), padding=True, truncation=True, max_length=MAX_LENGTH)
train_Y = tags_all[train_ids]
train_dataset = BertTextClassificationDataset(train_X['input_ids'], train_X['token_type_ids'],
train_X['attention_mask'], train_Y)
val_X = tokenizer(list(text_all[val_ids]), padding=True, truncation=True, max_length=MAX_LENGTH)
val_Y = tags_all[val_ids]
val_dataset = BertTextClassificationDataset(val_X['input_ids'], val_X['token_type_ids'], val_X['attention_mask'], val_Y)
test_X = tokenizer(list(text_all[test_ids]), padding=True, truncation=True, max_length=MAX_LENGTH)
test_Y = tags_all[test_ids]
test_dataset = BertTextClassificationDataset(test_X['input_ids'], test_X['token_type_ids'], test_X['attention_mask'],
test_Y)
train_dl = torch.utils.data.DataLoader(train_dataset, batch_size=16)
val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=8)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=4)
bert_model = BertClassifier(bert_checkpoint, CLASSNUM)
trainer = pl.Trainer(max_epochs=50, gpus=1, callbacks=[EarlyStopping(monitor='val_loss')])
trainer.fit(bert_model, train_dl, val_dl)
trainer.test(bert_model, test_dl)
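# Hedged inference sketch (assumption: the input string below is illustrative; the
# model is moved to the CPU so the snippet does not depend on where Lightning left
# it after training).
bert_model = bert_model.to('cpu')
bert_model.eval()
sample = tokenizer(['an example description'], padding=True, truncation=True,
                   max_length=MAX_LENGTH, return_tensors='pt')
with torch.no_grad():
    logits = bert_model(sample['input_ids'], sample['token_type_ids'],
                        sample['attention_mask'])
print('predicted tag id:', int(torch.argmax(logits, dim=-1)))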
|
[
"torch.nn.Dropout",
"torch.as_tensor",
"numpy.ones",
"sklearn.model_selection.train_test_split",
"pytorch_lightning.callbacks.early_stopping.EarlyStopping",
"transformers.BertTokenizer.from_pretrained",
"transformers.BertModel.from_pretrained",
"numpy.argsort",
"numpy.array",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"transformers.BertConfig.from_pretrained",
"torch.nn.Linear"
] |
[((4739, 4789), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, test_size=0.2, random_state=1)\n', (4755, 4789), False, 'from sklearn.model_selection import train_test_split\n'), ((4812, 4871), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_ids'], {'test_size': '(0.25)', 'random_state': '(1)'}), '(train_ids, test_size=0.25, random_state=1)\n', (4828, 4871), False, 'from sklearn.model_selection import train_test_split\n'), ((5144, 5190), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['bert_checkpoint'], {}), '(bert_checkpoint)\n', (5173, 5190), False, 'from transformers import BertTokenizer, BertConfig, BertModel\n'), ((6073, 6130), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': '(16)'}), '(train_dataset, batch_size=16)\n', (6100, 6130), False, 'import torch\n'), ((6140, 6194), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': '(8)'}), '(val_dataset, batch_size=8)\n', (6167, 6194), False, 'import torch\n'), ((6205, 6260), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(4)'}), '(test_dataset, batch_size=4)\n', (6232, 6260), False, 'import torch\n'), ((507, 542), 'numpy.array', 'np.array', (['input_ids'], {'dtype': 'np.int32'}), '(input_ids, dtype=np.int32)\n', (515, 542), True, 'import numpy as np\n'), ((572, 611), 'numpy.array', 'np.array', (['tokentype_ids'], {'dtype': 'np.int32'}), '(tokentype_ids, dtype=np.int32)\n', (580, 611), True, 'import numpy as np\n'), ((642, 682), 'numpy.array', 'np.array', (['attention_mask'], {'dtype': 'np.int32'}), '(attention_mask, dtype=np.int32)\n', (650, 682), True, 'import numpy as np\n'), ((706, 739), 'numpy.array', 'np.array', (['targets'], {'dtype': 'np.int32'}), '(targets, dtype=np.int32)\n', (714, 739), True, 'import numpy as np\n'), ((1789, 1832), 'transformers.BertConfig.from_pretrained', 'BertConfig.from_pretrained', (['bert_checkpoint'], {}), '(bert_checkpoint)\n', (1815, 1832), False, 'from transformers import BertTokenizer, BertConfig, BertModel\n'), ((1856, 1918), 'transformers.BertModel.from_pretrained', 'BertModel.from_pretrained', (['bert_checkpoint'], {'config': 'self.config'}), '(bert_checkpoint, config=self.config)\n', (1881, 1918), False, 'from transformers import BertTokenizer, BertConfig, BertModel\n'), ((2848, 2897), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {'reduction': '"""none"""'}), "(output, target, reduction='none')\n", (2863, 2897), True, 'from torch.nn import functional as F\n'), ((3239, 3270), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (3254, 3270), True, 'from torch.nn import functional as F\n'), ((811, 834), 'numpy.array', 'np.array', (['sample_weight'], {}), '(sample_weight)\n', (819, 834), True, 'import numpy as np\n'), ((882, 909), 'numpy.ones', 'np.ones', (['self.targets.shape'], {}), '(self.targets.shape)\n', (889, 909), True, 'import numpy as np\n'), ((1969, 1994), 'torch.nn.Dropout', 'nn.Dropout', (['dense_dropout'], {}), '(dense_dropout)\n', (1979, 1994), False, 'from torch import nn\n'), ((2008, 2097), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.config.hidden_size', 'out_features': 'self.config.hidden_size'}), '(in_features=self.config.hidden_size, out_features=self.config.\n hidden_size)\n', (2017, 2097), False, 'from 
torch import nn\n'), ((2106, 2131), 'torch.nn.Dropout', 'nn.Dropout', (['dense_dropout'], {}), '(dense_dropout)\n', (2116, 2131), False, 'from torch import nn\n'), ((2145, 2213), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.config.hidden_size', 'out_features': 'classes'}), '(in_features=self.config.hidden_size, out_features=classes)\n', (2154, 2213), False, 'from torch import nn\n'), ((3745, 3759), 'numpy.argsort', 'np.argsort', (['(-o)'], {}), '(-o)\n', (3755, 3759), True, 'import numpy as np\n'), ((6373, 6406), 'pytorch_lightning.callbacks.early_stopping.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""'}), "(monitor='val_loss')\n", (6386, 6406), False, 'from pytorch_lightning.callbacks.early_stopping import EarlyStopping\n'), ((1271, 1296), 'torch.as_tensor', 'torch.as_tensor', (['input_id'], {}), '(input_id)\n', (1286, 1296), False, 'import torch\n'), ((1321, 1350), 'torch.as_tensor', 'torch.as_tensor', (['tokentype_id'], {}), '(tokentype_id)\n', (1336, 1350), False, 'import torch\n'), ((1392, 1423), 'torch.as_tensor', 'torch.as_tensor', (['attention_mask'], {}), '(attention_mask)\n', (1407, 1423), False, 'import torch\n'), ((1465, 1488), 'torch.as_tensor', 'torch.as_tensor', (['target'], {}), '(target)\n', (1480, 1488), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 26 18:17:56 2021
@author: rringuet
Convert data in CTIPe output files to wrapped versions
"""
#import numpy as np
from numpy import transpose, zeros, array, append
from time import perf_counter
from netCDF4 import Dataset
from astropy.constants import R_earth
file_varnames = {'density':['rho',0,['time','lon_d','lat_d','lev'],'kg/m**3'],
'temperature':['T',1,['time','lon_d','lat_d','lev'],'K'],
'electron_temperature':['T_e',2,['time','lon_h','lat_h','radius'],'K'],
'ion_temperature':['T_i',3,['time','lon_h','lat_h','radius'],'K'],
'height_d':['H_lev',4,['time','lon_d','lat_d','lev'],'m'],
'height_n':['H_ilev',4,['time','lon_n','lat_n','ilev'],'m'],
'meridional_neutral_wind':['Vn_lat',5,['time','lon_n','lat_n','ilev'],'m/s'],
'zonal_neutral_wind':['Vn_lon',6,['time','lon_n','lat_n','ilev'],'m/s'],
'vertical_neutral_wind':['Vn_H',7,['time','lon_n','lat_n','ilev'],'m/s'],
'neutral_temperature':['T_n',8,['time','lon_n','lat_n','ilev'],'K'],
'mean_molecular_mass':['Rmt',9,['time','lon_d','lat_d','lev'],'amu'],
'electron_density':['N_e',10,['time','lon_h','lat_h','radius'],'1/m**3'],
'neutral_density':['N_n',11,['time','lon_n','lat_n','ilev'],'1/m**3'],
'solar_heating':['Q_Solar',12,['time','lon_n','lat_n','ilev'],'J/kg/s'],
'joule_heating':['Q_Joule',13,['time','lon_n','lat_n','ilev'],'J/kg/s'],
'radiation_heat_cool':['Q_radiation',14,['time','lon_n','lat_n','ilev'],'J/kg/s'],
'atomic_oxygen_density':['N_O',15,['time','lon_n','lat_n','ilev'],'1/m**3'],
'molecular_oxygen_density':['N_O2',16,['time','lon_n','lat_n','ilev'],'1/m**3'],
'molecular_nitrogen_density':['N_N2',17,['time','lon_n','lat_n','ilev'],'1/m**3'],
'nitric_oxide_density':['N_NO',18,['time','lon_n','lat_n','ilev'],'1/m**3'],
'nitric_oxide_ion_density':['N_NOplus',19,['time','lon_n','lat_n','ilev'],'1/m**3'],
'molecular_nitrogen_ion_density':['N_N2plus',20,['time','lon_n','lat_n','ilev'],'1/m**3'],
'molecular_oxygen_ion_density':['N_O2plus',21,['time','lon_n','lat_n','ilev'],'1/m**3'],
'atomic_nitrogen_ion_density':['N_Nplus',22,['time','lon_n','lat_n','ilev'],'1/m**3'],
'atomic_oxygen_ion_density':['N_Oplus',23,['time','lon_h','lat_h','radius'],'1/m**3'],
'atomic_hydrogen_ion_density':['N_Hplus',24,['time','lon_h','lat_h','radius'],'1/m**3'],
'pedersen_conductivity':['Sigma_P',25,['time','lon_n','lat_n','ilev'],'S/m'],
'hall_conductivity':['Sigma_H',26,['time','lon_n','lat_n','ilev'],'S/m'],
'zonal_ion_velocity':['Vi_lon',27,['time','lon_n','lat_n','ilev'],'m/s'],
'meridional_ion_velocity':['Vi_lat',28,['time','lon_n','lat_n','ilev'],'m/s'],
#start 3D variables
'height_integrated_joule_heating':['W_Joule',29,['time','lon_n','lat_n'],'W/m**2'],
'energy_influx':['Eflux_precip',30,['time','lon_n','lat_n'],'W/m**2'],
'mean_energy':['Eavg_precip',31,['time','lon_n','lat_n'],'keV'],
'total_electron_content':['TEC',32,['time','lon_n','lat_n'],'1/m**2'], #'10**16/m**2'
'theta_electric_field_at_140km':['E_theta140km',33,['time','Elon','Elat'],'V/m'],
'lambda_electric_field_at_140km':['E_lambda140km',34,['time','Elon','Elat'],'V/m'],
'theta_electric_field_at_300km':['E_theta300km',35,['time','Elon','Elat'],'V/m'],
'lambda_electric_field_at_300km':['E_lambda300km',36,['time','Elon','Elat'],'V/m']}
def ctipe_wrap_variables(var_dict, variable_name):
'''wrap variables in longitude and transpose as needed for ctipe model output'''
if 'electric_field' in variable_name:
# CTIPe efield variables from neutral file do not need to be wrapped but
# need to be transposed from (time,lon,lat) to (time,lat,lon)
#new_variable = np.transpose(variable,[0,2,1])
pass #want in (time, lon, lat)
elif len(var_dict['data'].shape) == 3: # 3D variable, wrap in longitude then transpose
shape_list = list(var_dict['data'].shape) # time, lat, lon
shape_list[2]+=1 #need one more place in longitude
tmp_arr = zeros(shape_list) #array to set-up wrapped data in
tmp_arr[:,:,:-1]=var_dict['data'] #copy data into grid
tmp_arr[:,:,-1] = var_dict['data'][:,:,0] #wrap in longitude
var_dict['data'] = transpose(tmp_arr, (0,2,1)) #(t,lat,lon) -> (t,lon,lat)
var_dict['size'] = (shape_list[0],shape_list[2],shape_list[1])
elif len(var_dict['data'].shape) == 4: # 4D variable
shape_list = list(var_dict['data'].shape) # time, lat, lon, height
shape_list[3]+=1 #need one more place in longitude
tmp_arr = zeros(shape_list) #array to set-up wrapped data in
tmp_arr[:,:,:,:-1]=var_dict['data'] #copy data into grid
tmp_arr[:,:,:,-1] = var_dict['data'][:,:,:,0] #wrap in longitude
var_dict['data'] = transpose(tmp_arr, (0,3,2,1)) #(t,h,lat,lon) -> (t,lon,lat,h)
var_dict['size'] = (shape_list[0],shape_list[3],shape_list[2],shape_list[1])
return var_dict
'''
elif (variable_name == 'lon') and (variable.max() < 360.):
new_variable = np.append(variable,360.)
'''
def ctipe_combine_files(file_prefix, verbose=False):
'''Combine data from 3 files, wrapping in longitude and transposing as necessary.'''
tic=perf_counter()
#determine file names of group
filetype_list = ['-plot-density.nc','-plot-height.nc','-plot-neutral.nc']
filename_density, filename_height, filename_neutral = [
file_prefix+filetype for filetype in filetype_list]
#open data files
ctipe_density = Dataset(filename_density)
ctipe_height = Dataset(filename_height) #in meters
ctipe_neutral = Dataset(filename_neutral)
#retrieve data and key properties from each file
d_dict={key:{'data':array(ctipe_density.variables[key]),
'datatype':ctipe_density.variables[key].datatype,
'size':ctipe_density.variables[key].size} \
for key in ctipe_density.variables.keys()}
h_dict={key:{'data':array(ctipe_height.variables[key]),
'datatype':ctipe_height.variables[key].datatype,
'size':ctipe_height.variables[key].size} \
for key in ctipe_height.variables.keys()}
n_dict={key:{'data':array(ctipe_neutral.variables[key]),
'datatype':ctipe_neutral.variables[key].datatype,
'size':ctipe_neutral.variables[key].size} \
for key in ctipe_neutral.variables.keys()}
#close files
ctipe_density.close()
ctipe_height.close()
ctipe_neutral.close()
#wrap longitude dimensions
d_dict['lon']['data'] = append(d_dict['lon']['data'], 360.)
d_dict['lon']['size']+=1
h_dict['lon']['data'] = append(h_dict['lon']['data'], 360.)
h_dict['lon']['size']+=1
n_dict['lon']['data'] = append(n_dict['lon']['data'], 360.)
n_dict['lon']['size']+=1
#collect dimensions data, assuming time and ilev are all the same
#assuming lon and lat in diff files might be different
dim_dict={}
dim_dict['time'] = d_dict['time']
dim_dict['lat_d'] = d_dict['lat']
dim_dict['lat_h'] = h_dict['lat']
dim_dict['lat_n'] = n_dict['lat']
dim_dict['lon_d'] = d_dict['lon']
dim_dict['lon_h'] = h_dict['lon']
dim_dict['lon_n'] = n_dict['lon']
dim_dict['lev'] = d_dict['plev']
dim_dict['ilev'] = n_dict['plev']
dim_dict['Elat'] = n_dict['elat']
dim_dict['Elon'] = n_dict['elon']
#convert height in km to radius in R_E to align with SPH coord sys (since long is 0 to 360)
height_dict = h_dict['ht']
height_dict['data'] = (height_dict['data']+R_earth.value/1000.)/(R_earth.value/1000.)
dim_dict['radius'] = height_dict
#remove keys from file dictionaries for coordinates
for key in ['time','lat','lon','elat','elon','ht','plev']:
if key in d_dict.keys(): del d_dict[key]
if key in h_dict.keys(): del h_dict[key]
if key in n_dict.keys(): del n_dict[key]
#adjust 'height' variable names to better distinguish
d_dict['height_d'] = d_dict['height']
del d_dict['height']
n_dict['height_n'] = n_dict['height']
del n_dict['height']
#initialize single output file
data_out = Dataset(file_prefix+'.nc', 'w', format='NETCDF4')
data_out.model = 'CTIPe'
data_out.file = ''.join([f+',' for f in [filename_density, filename_height,
filename_neutral]]).strip(',') #csv list of files
# store dimensions
for dim in dim_dict.keys():
if verbose: print(dim)
new_dim = data_out.createDimension(dim, dim_dict[dim]['size'])
new_var = data_out.createVariable(dim, dim_dict[dim]['datatype'],tuple((dim,)))
new_var[:] = dim_dict[dim]['data']
if verbose: print('Dimensions complete.\n')
#add variable data from density file to output file
for key in d_dict.keys():
if key in ['ZMAG','mean_molecular_mass'] or key not in file_varnames.keys():
continue #using Rmt in neutral file
if verbose: print(key,file_varnames[key][2])
d_dict[key] = ctipe_wrap_variables(d_dict[key], key)
new_var = data_out.createVariable(file_varnames[key][0], d_dict[key]['datatype'],
tuple(file_varnames[key][2]))
new_var[:] = d_dict[key]['data']
if verbose: print('Density file complete.\n')
#add variable data from height file to output file
for key in h_dict.keys():
if key == 'ZMAG' or key not in file_varnames.keys():
continue #no other variables depend on ZMAG, so ignore
if verbose: print(key,file_varnames[key][2])
h_dict[key] = ctipe_wrap_variables(h_dict[key], key)
new_var = data_out.createVariable(file_varnames[key][0], h_dict[key]['datatype'],
tuple(file_varnames[key][2]))
new_var[:] = h_dict[key]['data']
if verbose: print('Height file complete.\n')
#add variable data from neutral file to output file
for key in n_dict.keys():
if key in ['ZMAG','electron_density','atomic_oxygen_ion_density',
'atomic_hydrogen_ion_density'] or key not in file_varnames.keys():
continue #N_e is in height file
if verbose: print(key,file_varnames[key][2])
n_dict[key] = ctipe_wrap_variables(n_dict[key], key)
new_var = data_out.createVariable(file_varnames[key][0], n_dict[key]['datatype'],
tuple(file_varnames[key][2]))
new_var[:] = n_dict[key]['data']
if verbose: print('Neutral file complete.\n')
#close file
print(f"Data for {file_prefix} converted in {perf_counter()-tic:.6f}s.")
data_out.close()
return file_prefix+'.nc'
if __name__=='__main__':
#define file names (input and output)
file_dir = 'C:/Users/rringuet/Kamodo_WinDev1/CTIPe/Data/'
file_prefix = file_dir+'2015-03-18'
new_filename = ctipe_combine_files(file_prefix)
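    # Hedged shape check (assumption: purely illustrative and independent of the real
    # CTIPe files above). A fake (time, lat, lon) array of shape (2, 3, 4) gains one
    # wrapped longitude column and is transposed to (time, lon, lat) = (2, 5, 3).
    demo = {'data': zeros((2, 3, 4)), 'size': (2, 3, 4)}
    demo = ctipe_wrap_variables(demo, 'demo_variable')
    print(demo['data'].shape)  # -> (2, 5, 3)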
|
[
"netCDF4.Dataset",
"time.perf_counter",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.transpose"
] |
[((5938, 5952), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5950, 5952), False, 'from time import perf_counter\n'), ((6240, 6265), 'netCDF4.Dataset', 'Dataset', (['filename_density'], {}), '(filename_density)\n', (6247, 6265), False, 'from netCDF4 import Dataset\n'), ((6286, 6310), 'netCDF4.Dataset', 'Dataset', (['filename_height'], {}), '(filename_height)\n', (6293, 6310), False, 'from netCDF4 import Dataset\n'), ((6344, 6369), 'netCDF4.Dataset', 'Dataset', (['filename_neutral'], {}), '(filename_neutral)\n', (6351, 6369), False, 'from netCDF4 import Dataset\n'), ((7369, 7405), 'numpy.append', 'append', (["d_dict['lon']['data']", '(360.0)'], {}), "(d_dict['lon']['data'], 360.0)\n", (7375, 7405), False, 'from numpy import transpose, zeros, array, append\n'), ((7468, 7504), 'numpy.append', 'append', (["h_dict['lon']['data']", '(360.0)'], {}), "(h_dict['lon']['data'], 360.0)\n", (7474, 7504), False, 'from numpy import transpose, zeros, array, append\n'), ((7575, 7611), 'numpy.append', 'append', (["n_dict['lon']['data']", '(360.0)'], {}), "(n_dict['lon']['data'], 360.0)\n", (7581, 7611), False, 'from numpy import transpose, zeros, array, append\n'), ((9031, 9082), 'netCDF4.Dataset', 'Dataset', (["(file_prefix + '.nc')", '"""w"""'], {'format': '"""NETCDF4"""'}), "(file_prefix + '.nc', 'w', format='NETCDF4')\n", (9038, 9082), False, 'from netCDF4 import Dataset\n'), ((4693, 4710), 'numpy.zeros', 'zeros', (['shape_list'], {}), '(shape_list)\n', (4698, 4710), False, 'from numpy import transpose, zeros, array, append\n'), ((4910, 4939), 'numpy.transpose', 'transpose', (['tmp_arr', '(0, 2, 1)'], {}), '(tmp_arr, (0, 2, 1))\n', (4919, 4939), False, 'from numpy import transpose, zeros, array, append\n'), ((6455, 6490), 'numpy.array', 'array', (['ctipe_density.variables[key]'], {}), '(ctipe_density.variables[key])\n', (6460, 6490), False, 'from numpy import transpose, zeros, array, append\n'), ((6715, 6749), 'numpy.array', 'array', (['ctipe_height.variables[key]'], {}), '(ctipe_height.variables[key])\n', (6720, 6749), False, 'from numpy import transpose, zeros, array, append\n'), ((6959, 6994), 'numpy.array', 'array', (['ctipe_neutral.variables[key]'], {}), '(ctipe_neutral.variables[key])\n', (6964, 6994), False, 'from numpy import transpose, zeros, array, append\n'), ((5255, 5272), 'numpy.zeros', 'zeros', (['shape_list'], {}), '(shape_list)\n', (5260, 5272), False, 'from numpy import transpose, zeros, array, append\n'), ((5478, 5510), 'numpy.transpose', 'transpose', (['tmp_arr', '(0, 3, 2, 1)'], {}), '(tmp_arr, (0, 3, 2, 1))\n', (5487, 5510), False, 'from numpy import transpose, zeros, array, append\n'), ((11591, 11605), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (11603, 11605), False, 'from time import perf_counter\n')]
|
import os
import numpy as np
SPEECH_DATA_PATH = './../Data_Clean'
DUMP_DATA_PATH = './../Data_Clean/Filtered_Dev'
train_y = np.load(os.path.join(SPEECH_DATA_PATH, 'train_transcripts.npy'),
encoding='bytes')
dev_y = np.load(os.path.join(SPEECH_DATA_PATH, 'dev_transcripts.npy'),
encoding='bytes')
dev_x = np.load(os.path.join(SPEECH_DATA_PATH, 'dev.npy'), encoding='bytes')
dup_list = []
for i in range(len(dev_y)):
for j in range(len(train_y)):
if np.array_equal(dev_y[i], train_y[j]):
dup_list.append(i)
break
dev_y_rev = np.delete(dev_y, dup_list)
dev_x_rev = np.delete(dev_x, dup_list)
assert (len(dev_y_rev) == len(dev_x_rev))
np.save(os.path.join(DUMP_DATA_PATH, 'dev.npy'), dev_x_rev)
np.save(os.path.join(DUMP_DATA_PATH, 'dev_transcripts.npy'), dev_y_rev)
|
[
"numpy.array_equal",
"numpy.delete",
"os.path.join"
] |
[((597, 623), 'numpy.delete', 'np.delete', (['dev_y', 'dup_list'], {}), '(dev_y, dup_list)\n', (606, 623), True, 'import numpy as np\n'), ((636, 662), 'numpy.delete', 'np.delete', (['dev_x', 'dup_list'], {}), '(dev_x, dup_list)\n', (645, 662), True, 'import numpy as np\n'), ((135, 190), 'os.path.join', 'os.path.join', (['SPEECH_DATA_PATH', '"""train_transcripts.npy"""'], {}), "(SPEECH_DATA_PATH, 'train_transcripts.npy')\n", (147, 190), False, 'import os\n'), ((244, 297), 'os.path.join', 'os.path.join', (['SPEECH_DATA_PATH', '"""dev_transcripts.npy"""'], {}), "(SPEECH_DATA_PATH, 'dev_transcripts.npy')\n", (256, 297), False, 'import os\n'), ((349, 390), 'os.path.join', 'os.path.join', (['SPEECH_DATA_PATH', '"""dev.npy"""'], {}), "(SPEECH_DATA_PATH, 'dev.npy')\n", (361, 390), False, 'import os\n'), ((714, 753), 'os.path.join', 'os.path.join', (['DUMP_DATA_PATH', '"""dev.npy"""'], {}), "(DUMP_DATA_PATH, 'dev.npy')\n", (726, 753), False, 'import os\n'), ((774, 825), 'os.path.join', 'os.path.join', (['DUMP_DATA_PATH', '"""dev_transcripts.npy"""'], {}), "(DUMP_DATA_PATH, 'dev_transcripts.npy')\n", (786, 825), False, 'import os\n'), ((497, 533), 'numpy.array_equal', 'np.array_equal', (['dev_y[i]', 'train_y[j]'], {}), '(dev_y[i], train_y[j])\n', (511, 533), True, 'import numpy as np\n')]
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestConditionalOp(hu.HypothesisTestCase):
@given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
condition = np.random.choice(a=[True, False], size=rows_num)
def ref(condition, data_t, data_f):
output = [
data_t[i] if condition[i] else data_f[i]
for i in range(rows_num)
]
return (output,)
self.assertReferenceChecks(gc, op, [condition, data_t, data_f], ref)
|
[
"numpy.random.choice",
"numpy.random.random",
"hypothesis.strategies.integers",
"caffe2.python.core.CreateOperator"
] |
[((1118, 1197), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Conditional"""', "['condition', 'data_t', 'data_f']", '"""output"""'], {}), "('Conditional', ['condition', 'data_t', 'data_f'], 'output')\n", (1137, 1197), False, 'from caffe2.python import core\n'), ((1386, 1434), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[True, False]', 'size': 'rows_num'}), '(a=[True, False], size=rows_num)\n', (1402, 1434), True, 'import numpy as np\n'), ((1013, 1034), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1024, 1034), True, 'import hypothesis.strategies as st\n'), ((1237, 1273), 'numpy.random.random', 'np.random.random', (['(rows_num, 10, 20)'], {}), '((rows_num, 10, 20))\n', (1253, 1273), True, 'import numpy as np\n'), ((1310, 1346), 'numpy.random.random', 'np.random.random', (['(rows_num, 10, 20)'], {}), '((rows_num, 10, 20))\n', (1326, 1346), True, 'import numpy as np\n')]
|
# HDF5DatasetGenerator.py
import h5py
import numpy as np
from tensorflow.keras.utils import to_categorical
class HDF5DatasetGenerator:
'''
    Used to generate batches of data for use within the Keras framework
    from an HDF5 file.
'''
def __init__(self, dbPath, batchSize, preprocessors = None,
aug = None, binarize = True, classes = 2):
        '''Store the generator configuration and open the HDF5 database.'''
# store the batch size, preprocessors, and data augmentor,
# whether or not the labels should be binarized, along with
# the total number of classes
self.batchSize = batchSize
self.preprocessors = preprocessors
self.aug = aug
self.binarize = binarize
self.classes = classes
# open the HDF5 database for reading and determine the total
# number of entries in the database
self.db = h5py.File(dbPath)
self.numImages = self.db["labels"].shape[0]
def generator(self, passes = np.inf):
# initialize the epoch count
epochs = 0
# keep looping infinitely -- the model will stop once we have
# reached the desired number of epochs
while epochs < passes:
# loop over the HDF5 dataset
for i in np.arange(0, self.numImages, self.batchSize):
                # extract the images and labels from the HDF5 dataset
images = self.db["images"][i: i + self.batchSize]
labels = self.db["labels"][i: i + self.batchSize]
# check to see if the labels should be binarized
if self.binarize:
labels = to_categorical(labels, self.classes)
# check to see if our preprocessors are not None
if self.preprocessors is not None:
# initialize the list of processed images
procImages = []
# loop over the images
for image in images:
# loop over the preprocessors and apply each
# to the image
for p in self.preprocessors:
image = p.preprocess(image)
# update the list of processed images
procImages.append(image)
# update the images array to be the processed
# images
images = np.array(procImages)
# if the data augmentor exists, apply it
if self.aug is not None:
(images, labels) = next(self.aug.flow(images, labels, batch_size = self.batchSize))
# yield a tuple of images and labels
yield (images, labels)
# increment the total number of epochs processed
epochs += 1
print(epochs)
def close(self):
        '''Close the HDF5 database.'''
        # close the database
self.db.close()
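# Hedged usage sketch (assumption: 'train.hdf5' and 'model' are illustrative
# placeholders, i.e. a real HDF5 file with "images"/"labels" datasets and a
# compiled tf.keras model defined elsewhere).
# train_gen = HDF5DatasetGenerator('train.hdf5', batchSize=32, classes=2)
# model.fit(train_gen.generator(),
#           steps_per_epoch=train_gen.numImages // 32,
#           epochs=10)
# train_gen.close()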
|
[
"numpy.array",
"tensorflow.keras.utils.to_categorical",
"numpy.arange",
"h5py.File"
] |
[((848, 865), 'h5py.File', 'h5py.File', (['dbPath'], {}), '(dbPath)\n', (857, 865), False, 'import h5py\n'), ((1233, 1277), 'numpy.arange', 'np.arange', (['(0)', 'self.numImages', 'self.batchSize'], {}), '(0, self.numImages, self.batchSize)\n', (1242, 1277), True, 'import numpy as np\n'), ((1610, 1646), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['labels', 'self.classes'], {}), '(labels, self.classes)\n', (1624, 1646), False, 'from tensorflow.keras.utils import to_categorical\n'), ((2445, 2465), 'numpy.array', 'np.array', (['procImages'], {}), '(procImages)\n', (2453, 2465), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 09:59:15 2020
@author: <NAME>
"""
import os
import pandas as pd
import numpy as np
out = open("./res.txt", 'w+')
#%% Read the productions; '@' denotes the empty string (epsilon)
generator = {}
start = None #start symbol
ter_set = set() #set of terminals
non_set = set() #set of non-terminals
all_set = set()
#grammar2
f = open('./tiny.txt')
for line in f:
line = line.split('->')
    #record the start symbol
    if not non_set:
        start = line[0].split()[0]
    #non-terminal on the left-hand side
    non_sym = line[0].split()[0]
    non_set.add(non_sym)
    #all symbols on the right-hand side
all_syms = line[1].split() #[:-1] delete '/n'
all_set = all_set.union(set(all_syms))
if non_sym not in generator:
generator[non_sym] = []
generator[non_sym].append(all_syms)
#compute the terminal set
ter_set = all_set - non_set
print("Start symbol: {0}\nNon-terminals: {1}\nTerminals: {2}\n===========".format(start, non_set, ter_set), file=out)
#%% Eliminate left recursion
#%% Extract common left factors
#%% Build the FIRST sets
first = {}
#First pass: add each production's leading terminal (including '@') to FIRST
for l_sym in generator:
for each in generator[l_sym]:
if l_sym not in first:
first[l_sym] = set()
if each[0] in ter_set:
first[l_sym].add(each[0])
#Iterative passes: propagate FIRST sets of leading non-terminals until nothing changes
update = 1
while update == 1:
update = 0
for l_sym in generator:
for each in generator[l_sym]:
            #handle epsilon, in particular when all preceding symbols can derive it
emputy_flag = 0
for i in range(len(each)):
if each[i] in non_set:
                    #if the union changes the set, flag another pass
temp = first[l_sym].union( first[each[i]] - set('@') )
if first[l_sym] != temp:
update = 1
first[l_sym] = temp
if each[i] in ter_set and each[i] not in first[l_sym]:
update = 1
first[l_sym].add(each[i])
if each[i] in ter_set or (each[i] in non_set and '@' not in first[each[i]]):
emputy_flag = 1
break
if emputy_flag == 0:
first[l_sym].add('@')
print("First集:", file=out)
for item in first:
print("{0}:{1}".format(item,first[item]), file=out)
print("===========", file=out)
#%% Build the FOLLOW sets
# add '$' to the FOLLOW set of the start symbol
follow = {}
for non_sym in non_set:
follow[non_sym] = set()
follow[start] = set('$')
# First pass: add terminals that directly follow a non-terminal to its FOLLOW set
for l_sym in generator:
for each in generator[l_sym]:
for i in range(len(each)-1):
if each[i] in non_set and each[i+1] in ter_set:
follow[each[i]].add(each[i+1])
#Second pass: synchronise the sets (FIRST of a following non-terminal; FOLLOW of the left-hand side for trailing symbols), repeating until nothing changes
update = 1
while update == 1:
update = 0
for l_sym in generator:
for each in generator[l_sym]:
            # append '$' to the production so the scan moves forward instead of searching backwards for epsilon
temp_produce = each.copy()
temp_produce.append('$')
for i in range(len(each)):
next_p = i
while True:
next_p += 1
                    #FIRST set of the non-terminal that follows a non-terminal
if temp_produce[i] in non_set and temp_produce[next_p] in non_set:
temp = follow[temp_produce[i]].union( first[temp_produce[next_p]] - set('@') )
if follow[temp_produce[i]] != temp:
update = 1
follow[temp_produce[i]] = temp
                    #FOLLOW of the left-hand side propagates to a trailing non-terminal
if temp_produce[i] in non_set and temp_produce[next_p] == '$':
temp = follow[temp_produce[i]].union( follow[l_sym] )
if follow[temp_produce[i]] != temp:
update = 1
follow[temp_produce[i]] = temp
if temp_produce[next_p] in ter_set or temp_produce[next_p] == '$' or ( '@' not in first[temp_produce[next_p]]):
break
print("Follow集:", file=out)
for item in follow:
print("{0}:{1}".format(item,follow[item]), file=out)
print("===========", file=out)
#%% Build the LL(1) parse table and check whether the grammar is LL(1)
vaild = True
ter_list = list(ter_set)
non_list = list(non_set)
ter_list.append('$')
ter_list.remove('@')
ll1_table = pd.DataFrame(columns = ter_list,index = non_list)
generator_list = []
count = 0
for l_sym in generator:
for each in generator[l_sym]:
#print(each,count)
generator_list.append([l_sym,each])
if each[0] == '@':
            #epsilon production: fill the table entries given by FOLLOW
for sym in follow[l_sym]:
if np.isnan(ll1_table.loc[l_sym,sym]):
ll1_table.loc[l_sym,sym] = count
else:
vaild = False
count += 1
continue
if each[0] in ter_set:
if np.isnan(ll1_table.loc[l_sym,each[0]]):
ll1_table.loc[l_sym,each[0]] = count
else:
vaild = False
        #TODO: handle the case where FIRST contains epsilon
if each[0] in non_set:
for sym in first[each[0]]:
if np.isnan(ll1_table.loc[l_sym,sym]):
ll1_table.loc[l_sym,sym] = count
else:
vaild = False
count += 1
if not vaild:
    print('The grammar does not satisfy the LL(1) condition!')
ll1_table.to_excel('LL1.xlsx')
#%% Stack-based predictive parsing of the token list (and construction of the abstract syntax tree)
token_file = open('./token_list.txt')
tokens = []
for line in token_file:
tokens.append(line[:-1])
tokens.append('$')
stack = []
stack.append("$")
stack.append(start)
while stack:
print('=====================\n','stack:',stack,'\n tokens:',tokens, file=out)
#match
if stack[-1] in ter_set or (stack[-1] == '$' and tokens[-1] == '$'):
if stack[-1] == tokens[0]:
stack.pop()
tokens.pop(0)
continue
else:
            print('Input does not conform to the grammar')
break
#action
if stack[-1] in non_set:
if np.isnan(ll1_table.loc[stack[-1],tokens[0]]):
            print('Input does not conform to the grammar')
break
else:
use_p = generator_list[ll1_table.loc[stack[-1],tokens[0]]]
stack.pop()
temp = use_p[1].copy()
if temp[0] != '@':
temp.reverse()
stack.extend(temp)
#%%
out.close()
'''
update = 1
while update == 1:
update = 0
for l_sym in generator:
        #TODO: handle epsilon, in particular when all preceding symbols can derive it
for each in generator[l_sym]:
if each[0] in non_set:
                #if the union changes the set, flag another pass
temp = first[l_sym].union( first[each[0]] - set('@') )
if first[l_sym] != temp:
update = 1
first[l_sym] = temp
'''
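# Hedged input-format sketch (assumption: an illustrative grammar, not the real
# contents of tiny.txt). Each line of tiny.txt is "<non-terminal> -> <symbols
# separated by spaces>", with '@' standing for the empty string, e.g.:
#   E  -> T E1
#   E1 -> + T E1
#   E1 -> @
#   T  -> ( E )
#   T  -> id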
|
[
"pandas.DataFrame",
"numpy.isnan"
] |
[((4096, 4142), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ter_list', 'index': 'non_list'}), '(columns=ter_list, index=non_list)\n', (4108, 4142), True, 'import pandas as pd\n'), ((5743, 5788), 'numpy.isnan', 'np.isnan', (['ll1_table.loc[stack[-1], tokens[0]]'], {}), '(ll1_table.loc[stack[-1], tokens[0]])\n', (5751, 5788), True, 'import numpy as np\n'), ((4646, 4685), 'numpy.isnan', 'np.isnan', (['ll1_table.loc[l_sym, each[0]]'], {}), '(ll1_table.loc[l_sym, each[0]])\n', (4654, 4685), True, 'import numpy as np\n'), ((4411, 4446), 'numpy.isnan', 'np.isnan', (['ll1_table.loc[l_sym, sym]'], {}), '(ll1_table.loc[l_sym, sym])\n', (4419, 4446), True, 'import numpy as np\n'), ((4904, 4939), 'numpy.isnan', 'np.isnan', (['ll1_table.loc[l_sym, sym]'], {}), '(ll1_table.loc[l_sym, sym])\n', (4912, 4939), True, 'import numpy as np\n')]
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An AlphaZero style model with a policy and value head."""
import collections
import functools
import os
from typing import Sequence
import numpy as np
import tensorflow.compat.v1 as tf
import horovod.tensorflow as hvd
def cascade(x, fns):
for fn in fns:
x = fn(x)
return x
tfkl = tf.keras.layers
conv_2d = functools.partial(tfkl.Conv2D, padding="same")
def batch_norm(training, updates, name):
"""A batch norm layer.
Args:
training: A placeholder of whether this is done in training or not.
updates: A list to be extended with this layer's updates.
name: Name of the layer.
Returns:
A function to apply to the previous layer.
"""
bn = tfkl.BatchNormalization(name=name)
def batch_norm_layer(x):
# This emits a warning that training is a placeholder instead of a concrete
# bool, but seems to work anyway.
applied = bn(x, training)
updates.extend(bn.updates)
return applied
return batch_norm_layer
def residual_layer(inputs, num_filters, kernel_size, training, updates, name):
return cascade(inputs, [
conv_2d(num_filters, kernel_size, name=f"{name}_res_conv1"),
batch_norm(training, updates, f"{name}_res_batch_norm1"),
tfkl.Activation("relu"),
conv_2d(num_filters, kernel_size, name=f"{name}_res_conv2"),
batch_norm(training, updates, f"{name}_res_batch_norm2"),
lambda x: tfkl.add([x, inputs]),
tfkl.Activation("relu"),
])
class TrainInput(collections.namedtuple(
"TrainInput", "observation legals_mask policy value")):
"""Inputs for training the Model."""
@staticmethod
def stack(train_inputs):
observation, legals_mask, policy, value = zip(*train_inputs)
return TrainInput(
np.array(observation, dtype=np.float32),
np.array(legals_mask, dtype=np.bool),
np.array(policy),
np.expand_dims(value, 1))
class Losses(collections.namedtuple("Losses", "policy value l2")):
"""Losses from a training step."""
@property
def total(self):
return self.policy + self.value + self.l2
def __str__(self):
return ("Losses(total: {:.3f}, policy: {:.3f}, value: {:.3f}, "
"l2: {:.3f})").format(self.total, self.policy, self.value, self.l2)
def __add__(self, other):
return Losses(self.policy + other.policy,
self.value + other.value,
self.l2 + other.l2)
def __truediv__(self, n):
return Losses(self.policy / n, self.value / n, self.l2 / n)
class Model(object):
"""An AlphaZero style model with a policy and value head.
This supports three types of models: mlp, conv2d and resnet.
All models have a shared torso stack with two output heads: policy and value.
  They have the same meaning as in the AlphaGo Zero and AlphaZero papers. The resnet
model copies the one in that paper when set with width 256 and depth 20. The
conv2d model is the same as the resnet except uses a conv+batchnorm+relu
instead of the res blocks. The mlp model uses dense layers instead of conv,
and drops batch norm.
Links to relevant articles/papers:
https://deepmind.com/blog/article/alphago-zero-starting-scratch has an open
access link to the AlphaGo Zero nature paper.
https://deepmind.com/blog/article/alphazero-shedding-new-light-grand-games-chess-shogi-and-go
has an open access link to the AlphaZero science paper.
All are parameterized by their input (observation) shape and output size
(number of actions), though the conv2d and resnet might only work with games
that have spatial data (ie 3 non-batch dimensions, eg: connect four would
work, but not poker).
The depth is the number of blocks in the torso, where the definition of a
block varies by model. For a resnet it's a resblock which is two conv2ds,
batch norms and relus, and an addition. For conv2d it's a conv2d, a batch norm
and a relu. For mlp it's a dense plus relu.
The width is the number of filters for any conv2d and the number of hidden
units for any dense layer.
Note that this uses an explicit graph so that it can be used for inference
and training from C++. It seems to also be 20%+ faster than using eager mode,
at least for the unit test.
"""
valid_model_types = ["mlp", "conv2d", "resnet"]
def __init__(self, session, saver, path):
"""Init a model. Use build_model, from_checkpoint or from_graph instead."""
self._session = session
self._saver = saver
self._path = path
def get_var(name):
return self._session.graph.get_tensor_by_name(name + ":0")
self._input = get_var("input")
self._legals_mask = get_var("legals_mask")
self._training = get_var("training")
self._value_out = get_var("value_out")
self._policy_softmax = get_var("policy_softmax")
self._policy_loss = get_var("policy_loss")
self._value_loss = get_var("value_loss")
self._l2_reg_loss = get_var("l2_reg_loss")
self._policy_targets = get_var("policy_targets")
self._value_targets = get_var("value_targets")
self._train = self._session.graph.get_operation_by_name("train")
@classmethod
def _get_gpu_config(cls):
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.allow_growth = True
config.gpu_options.force_gpu_compatible = True
return config
@classmethod
def build_model(cls, model_type, input_shape, output_size, nn_width, nn_depth,
weight_decay, learning_rate, path):
"""Build a model with the specified params."""
if model_type not in cls.valid_model_types:
raise ValueError(f"Invalid model type: {model_type}, "
f"expected one of: {cls.valid_model_types}")
# The order of creating the graph, init, saver, and session is important!
# https://stackoverflow.com/a/40788998
g = tf.Graph() # Allow multiple independent models and graphs.
with g.as_default():
cls._define_graph(model_type, input_shape, output_size, nn_width,
nn_depth, weight_decay, learning_rate)
init = tf.variables_initializer(tf.global_variables(),
name="init_all_vars_op")
bcast = hvd.broadcast_global_variables(0)
with tf.device("/cpu:0"): # Saver only works on CPU.
saver = tf.train.Saver(
max_to_keep=10000, sharded=False, name="saver")
config = cls._get_gpu_config()
session = tf.Session(graph=g, config=config)
session.__enter__()
session.run(init)
session.run(bcast)
return cls(session, saver, path)
@classmethod
def from_checkpoint(cls, checkpoint, path=None):
"""Load a model from a checkpoint."""
model = cls.from_graph(checkpoint, path)
model.load_checkpoint(checkpoint)
return model
@classmethod
def from_graph(cls, metagraph, path=None):
"""Load only the model from a graph or checkpoint."""
if not os.path.exists(metagraph):
metagraph += ".meta"
if not path:
path = os.path.dirname(metagraph)
g = tf.Graph() # Allow multiple independent models and graphs.
with g.as_default():
saver = tf.train.import_meta_graph(metagraph)
config = cls._get_gpu_config()
session = tf.Session(graph=g, config=config)
session.__enter__()
session.run("init_all_vars_op")
return cls(session, saver, path)
def __del__(self):
if hasattr(self, "_session") and self._session:
self._session.close()
@staticmethod
def _define_graph(model_type, input_shape, output_size,
nn_width, nn_depth, weight_decay, learning_rate):
"""Define the model graph."""
# Inference inputs
input_size = int(np.prod(input_shape))
observations = tf.placeholder(tf.float32, [None, input_size], name="input")
legals_mask = tf.placeholder(tf.bool, [None, output_size],
name="legals_mask")
training = tf.placeholder(tf.bool, name="training")
bn_updates = []
# Main torso of the network
if model_type == "mlp":
torso = observations # Ignore the input shape, treat it as a flat array.
for i in range(nn_depth):
torso = cascade(torso, [
tfkl.Dense(nn_width, name=f"torso_{i}_dense"),
tfkl.Activation("relu"),
])
elif model_type == "conv2d":
torso = tfkl.Reshape(input_shape)(observations)
for i in range(nn_depth):
torso = cascade(torso, [
conv_2d(nn_width, 3, name=f"torso_{i}_conv"),
batch_norm(training, bn_updates, f"torso_{i}_batch_norm"),
tfkl.Activation("relu"),
])
elif model_type == "resnet":
torso = cascade(observations, [
tfkl.Reshape(input_shape),
conv_2d(nn_width, 3, name="torso_in_conv"),
batch_norm(training, bn_updates, "torso_in_batch_norm"),
tfkl.Activation("relu"),
])
for i in range(nn_depth):
torso = residual_layer(torso, nn_width, 3, training, bn_updates,
f"torso_{i}")
else:
raise ValueError("Unknown model type.")
# The policy head
if model_type == "mlp":
policy_head = cascade(torso, [
tfkl.Dense(nn_width, name="policy_dense"),
tfkl.Activation("relu"),
])
else:
policy_head = cascade(torso, [
conv_2d(filters=2, kernel_size=1, name="policy_conv"),
batch_norm(training, bn_updates, "policy_batch_norm"),
tfkl.Activation("relu"),
tfkl.Flatten(),
])
policy_logits = tfkl.Dense(output_size, name="policy")(policy_head)
policy_logits = tf.where(legals_mask, policy_logits,
-1e32 * tf.ones_like(policy_logits))
unused_policy_softmax = tf.identity(tfkl.Softmax()(policy_logits),
name="policy_softmax")
policy_targets = tf.placeholder(
shape=[None, output_size], dtype=tf.float32, name="policy_targets")
policy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=policy_logits, labels=policy_targets),
name="policy_loss")
# The value head
if model_type == "mlp":
value_head = torso # Nothing specific before the shared value head.
else:
value_head = cascade(torso, [
conv_2d(filters=1, kernel_size=1, name="value_conv"),
batch_norm(training, bn_updates, "value_batch_norm"),
tfkl.Activation("relu"),
tfkl.Flatten(),
])
value_out = cascade(value_head, [
tfkl.Dense(nn_width, name="value_dense"),
tfkl.Activation("relu"),
tfkl.Dense(1, name="value"),
tfkl.Activation("tanh"),
])
# Need the identity to name the single value output from the dense layer.
value_out = tf.identity(value_out, name="value_out")
value_targets = tf.placeholder(
shape=[None, 1], dtype=tf.float32, name="value_targets")
value_loss = tf.identity(tf.losses.mean_squared_error(
value_out, value_targets), name="value_loss")
l2_reg_loss = tf.add_n([
weight_decay * tf.nn.l2_loss(var)
for var in tf.trainable_variables()
if "/bias:" not in var.name
], name="l2_reg_loss")
total_loss = policy_loss + value_loss + l2_reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate * hvd.size())
optimizer = hvd.DistributedOptimizer(optimizer)
with tf.control_dependencies(bn_updates):
unused_train = optimizer.minimize(total_loss, name="train")
@property
def num_trainable_variables(self):
return sum(np.prod(v.shape) for v in tf.trainable_variables())
def print_trainable_variables(self):
for v in tf.trainable_variables():
print("{}: {}".format(v.name, v.shape))
def write_graph(self, filename):
full_path = os.path.join(self._path, filename)
tf.train.export_meta_graph(
graph_def=self._session.graph_def, saver_def=self._saver.saver_def,
filename=full_path, as_text=False)
return full_path
def inference(self, observation, legals_mask):
return self._session.run(
[self._value_out, self._policy_softmax],
feed_dict={self._input: np.array(observation, dtype=np.float32),
self._legals_mask: np.array(legals_mask, dtype=np.bool),
self._training: False})
def update(self, train_inputs: Sequence[TrainInput]):
"""Runs a training step."""
batch = TrainInput.stack(train_inputs)
# Run a training step and get the losses.
_, policy_loss, value_loss, l2_reg_loss = self._session.run(
[self._train, self._policy_loss, self._value_loss, self._l2_reg_loss],
feed_dict={self._input: batch.observation,
self._legals_mask: batch.legals_mask,
self._policy_targets: batch.policy,
self._value_targets: batch.value,
self._training: True})
return Losses(policy_loss, value_loss, l2_reg_loss)
def save_checkpoint(self, step):
if hvd.rank() == 0:
return self._saver.save(
self._session,
os.path.join(self._path, "checkpoint"),
global_step=step)
def load_checkpoint(self, path):
return self._saver.restore(self._session, path)
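# Hedged usage sketch (assumption: the shapes and hyper-parameters are illustrative,
# e.g. a connect-four style 3x6x7 observation with 7 actions, and horovod is
# initialised by the caller before any graph is built).
# hvd.init()
# model = Model.build_model(
#     "resnet", input_shape=(3, 6, 7), output_size=7, nn_width=128, nn_depth=10,
#     weight_decay=1e-4, learning_rate=1e-3, path="/tmp/az_model")
# value, policy = model.inference(
#     np.zeros((1, 3 * 6 * 7), dtype=np.float32),
#     np.ones((1, 7), dtype=np.bool))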
|
[
"numpy.prod",
"tensorflow.compat.v1.ones_like",
"horovod.tensorflow.rank",
"horovod.tensorflow.broadcast_global_variables",
"numpy.array",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.nn.l2_loss",
"horovod.tensorflow.local_rank",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"os.path.exists",
"horovod.tensorflow.size",
"tensorflow.compat.v1.control_dependencies",
"collections.namedtuple",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.train.import_meta_graph",
"os.path.dirname",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.Graph",
"os.path.join",
"tensorflow.compat.v1.train.export_meta_graph",
"functools.partial",
"numpy.expand_dims",
"horovod.tensorflow.DistributedOptimizer"
] |
[((917, 963), 'functools.partial', 'functools.partial', (['tfkl.Conv2D'], {'padding': '"""same"""'}), "(tfkl.Conv2D, padding='same')\n", (934, 963), False, 'import functools\n'), ((2057, 2133), 'collections.namedtuple', 'collections.namedtuple', (['"""TrainInput"""', '"""observation legals_mask policy value"""'], {}), "('TrainInput', 'observation legals_mask policy value')\n", (2079, 2133), False, 'import collections\n'), ((2482, 2533), 'collections.namedtuple', 'collections.namedtuple', (['"""Losses"""', '"""policy value l2"""'], {}), "('Losses', 'policy value l2')\n", (2504, 2533), False, 'import collections\n'), ((5730, 5746), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5744, 5746), True, 'import tensorflow.compat.v1 as tf\n'), ((6435, 6445), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (6443, 6445), True, 'import tensorflow.compat.v1 as tf\n'), ((7028, 7062), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'g', 'config': 'config'}), '(graph=g, config=config)\n', (7038, 7062), True, 'import tensorflow.compat.v1 as tf\n'), ((7627, 7637), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (7635, 7637), True, 'import tensorflow.compat.v1 as tf\n'), ((7813, 7847), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'g', 'config': 'config'}), '(graph=g, config=config)\n', (7823, 7847), True, 'import tensorflow.compat.v1 as tf\n'), ((8311, 8371), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[None, input_size]'], {'name': '"""input"""'}), "(tf.float32, [None, input_size], name='input')\n", (8325, 8371), True, 'import tensorflow.compat.v1 as tf\n'), ((8390, 8454), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.bool', '[None, output_size]'], {'name': '"""legals_mask"""'}), "(tf.bool, [None, output_size], name='legals_mask')\n", (8404, 8454), True, 'import tensorflow.compat.v1 as tf\n'), ((8503, 8543), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""training"""'}), "(tf.bool, name='training')\n", (8517, 8543), True, 'import tensorflow.compat.v1 as tf\n'), ((10468, 10555), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'shape': '[None, output_size]', 'dtype': 'tf.float32', 'name': '"""policy_targets"""'}), "(shape=[None, output_size], dtype=tf.float32, name=\n 'policy_targets')\n", (10482, 10555), True, 'import tensorflow.compat.v1 as tf\n'), ((11393, 11433), 'tensorflow.compat.v1.identity', 'tf.identity', (['value_out'], {'name': '"""value_out"""'}), "(value_out, name='value_out')\n", (11404, 11433), True, 'import tensorflow.compat.v1 as tf\n'), ((11454, 11525), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'shape': '[None, 1]', 'dtype': 'tf.float32', 'name': '"""value_targets"""'}), "(shape=[None, 1], dtype=tf.float32, name='value_targets')\n", (11468, 11525), True, 'import tensorflow.compat.v1 as tf\n'), ((11967, 12002), 'horovod.tensorflow.DistributedOptimizer', 'hvd.DistributedOptimizer', (['optimizer'], {}), '(optimizer)\n', (11991, 12002), True, 'import horovod.tensorflow as hvd\n'), ((12285, 12309), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (12307, 12309), True, 'import tensorflow.compat.v1 as tf\n'), ((12409, 12443), 'os.path.join', 'os.path.join', (['self._path', 'filename'], {}), '(self._path, filename)\n', (12421, 12443), False, 'import os\n'), ((12448, 12582), 'tensorflow.compat.v1.train.export_meta_graph', 'tf.train.export_meta_graph', ([], {'graph_def': 
'self._session.graph_def', 'saver_def': 'self._saver.saver_def', 'filename': 'full_path', 'as_text': '(False)'}), '(graph_def=self._session.graph_def, saver_def=\n self._saver.saver_def, filename=full_path, as_text=False)\n', (12474, 12582), True, 'import tensorflow.compat.v1 as tf\n'), ((2320, 2359), 'numpy.array', 'np.array', (['observation'], {'dtype': 'np.float32'}), '(observation, dtype=np.float32)\n', (2328, 2359), True, 'import numpy as np\n'), ((2369, 2405), 'numpy.array', 'np.array', (['legals_mask'], {'dtype': 'np.bool'}), '(legals_mask, dtype=np.bool)\n', (2377, 2405), True, 'import numpy as np\n'), ((2415, 2431), 'numpy.array', 'np.array', (['policy'], {}), '(policy)\n', (2423, 2431), True, 'import numpy as np\n'), ((2441, 2465), 'numpy.expand_dims', 'np.expand_dims', (['value', '(1)'], {}), '(value, 1)\n', (2455, 2465), True, 'import numpy as np\n'), ((5796, 5812), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (5810, 5812), True, 'import horovod.tensorflow as hvd\n'), ((6793, 6826), 'horovod.tensorflow.broadcast_global_variables', 'hvd.broadcast_global_variables', (['(0)'], {}), '(0)\n', (6823, 6826), True, 'import horovod.tensorflow as hvd\n'), ((7508, 7533), 'os.path.exists', 'os.path.exists', (['metagraph'], {}), '(metagraph)\n', (7522, 7533), False, 'import os\n'), ((7592, 7618), 'os.path.dirname', 'os.path.dirname', (['metagraph'], {}), '(metagraph)\n', (7607, 7618), False, 'import os\n'), ((7726, 7763), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.train.import_meta_graph', (['metagraph'], {}), '(metagraph)\n', (7752, 7763), True, 'import tensorflow.compat.v1 as tf\n'), ((8270, 8290), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (8277, 8290), True, 'import numpy as np\n'), ((10602, 10694), 'tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'policy_logits', 'labels': 'policy_targets'}), '(logits=policy_logits, labels=\n policy_targets)\n', (10644, 10694), True, 'import tensorflow.compat.v1 as tf\n'), ((11564, 11618), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['value_out', 'value_targets'], {}), '(value_out, value_targets)\n', (11592, 11618), True, 'import tensorflow.compat.v1 as tf\n'), ((12012, 12047), 'tensorflow.compat.v1.control_dependencies', 'tf.control_dependencies', (['bn_updates'], {}), '(bn_updates)\n', (12035, 12047), True, 'import tensorflow.compat.v1 as tf\n'), ((13618, 13628), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (13626, 13628), True, 'import horovod.tensorflow as hvd\n'), ((6693, 6714), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (6712, 6714), True, 'import tensorflow.compat.v1 as tf\n'), ((6838, 6857), 'tensorflow.compat.v1.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (6847, 6857), True, 'import tensorflow.compat.v1 as tf\n'), ((6903, 6965), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10000)', 'sharded': '(False)', 'name': '"""saver"""'}), "(max_to_keep=10000, sharded=False, name='saver')\n", (6917, 6965), True, 'import tensorflow.compat.v1 as tf\n'), ((10284, 10311), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['policy_logits'], {}), '(policy_logits)\n', (10296, 10311), True, 'import tensorflow.compat.v1 as tf\n'), ((11939, 11949), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (11947, 11949), True, 'import horovod.tensorflow as hvd\n'), ((12180, 12196), 'numpy.prod', 
'np.prod', (['v.shape'], {}), '(v.shape)\n', (12187, 12196), True, 'import numpy as np\n'), ((13701, 13739), 'os.path.join', 'os.path.join', (['self._path', '"""checkpoint"""'], {}), "(self._path, 'checkpoint')\n", (13713, 13739), False, 'import os\n'), ((11701, 11719), 'tensorflow.compat.v1.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (11714, 11719), True, 'import tensorflow.compat.v1 as tf\n'), ((11739, 11763), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (11761, 11763), True, 'import tensorflow.compat.v1 as tf\n'), ((12206, 12230), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (12228, 12230), True, 'import tensorflow.compat.v1 as tf\n'), ((12777, 12816), 'numpy.array', 'np.array', (['observation'], {'dtype': 'np.float32'}), '(observation, dtype=np.float32)\n', (12785, 12816), True, 'import numpy as np\n'), ((12856, 12892), 'numpy.array', 'np.array', (['legals_mask'], {'dtype': 'np.bool'}), '(legals_mask, dtype=np.bool)\n', (12864, 12892), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Test module for level set transport
"""
from __future__ import print_function
from builtins import range
from builtins import object
from proteus.iproteus import *
import os
import numpy as np
import tables
class TestRotation2D(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup_method(self,method):
self.aux_names = []
self.meshdir = os.path.dirname(os.path.abspath(__file__))
self._scriptdir = os.path.dirname(os.path.abspath(__file__))
def teardown_method(self,method):
filenames = []
for aux_name in self.aux_names:
filenames.extend([aux_name+'.'+ext for ext in ['h5','xmf']])
filenames.append('proteus.log')
for f in filenames:
if os.path.exists(f):
try:
os.remove(f)
except OSError as e:
print ("Error: %s - %s" %(e.filename,e.strerror))
else:
pass
def test_rotation2D(self,use_strong_constraints=False):
from proteus import default_s, default_so, default_p, default_n
reload(default_s)
reload(default_so)
reload(default_p)
reload(default_n)
from . import (ls_rotation_2d_p,
redist_rotation_2d_p,
vof_rotation_2d_p,
ls_consrv_rotation_2d_p,
ls_rotation_2d_n,
redist_rotation_2d_n,
vof_rotation_2d_n,
ls_consrv_rotation_2d_n,
ls_rotation_2d_so)
opts.logLevel=7
opts.verbose=True
opts.profile=True
opts.gatherArchive=True
sList=[]
if ls_rotation_2d_so.sList == []:
for i in range(len(ls_rotation_2d_so.pnList)):
s = default_s
sList.append(s)
else:
sList = ls_rotation_2d_so.sList
ns = NumericalSolution.NS_base(ls_rotation_2d_so,
[ls_rotation_2d_p,
redist_rotation_2d_p,
vof_rotation_2d_p,
ls_consrv_rotation_2d_p],
[ls_rotation_2d_n,
redist_rotation_2d_n,
vof_rotation_2d_n,
ls_consrv_rotation_2d_n],
sList,
opts)
ns.calculateSolution(ls_rotation_2d_so.name)
self.aux_names.append(ls_rotation_2d_so.name)
# COMPARE VS SAVED FILES #
actual = tables.open_file(ls_rotation_2d_so.name+'.h5','r')
expected_path = 'comparison_files/' + 'comparison_' + ls_rotation_2d_so.name + '_u_t10.csv'
#write comparison file
#np.array(actual.root.u_t10).tofile(os.path.join(self._scriptdir, expected_path),sep=",")
np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.u_t10).flatten(),decimal=6)
expected_path = 'comparison_files/' + 'comparison_' + ls_rotation_2d_so.name + '_phid_t10.csv'
#write comparison file
#np.array(actual.root.phid_t10).tofile(os.path.join(self._scriptdir, expected_path),sep=",")
np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.phid_t10).flatten(),decimal=10)
expected_path = 'comparison_files/' + 'comparison_' + ls_rotation_2d_so.name + '_vof_t10.csv'
#write comparison file
#np.array(actual.root.vof_t10).tofile(os.path.join(self._scriptdir, expected_path),sep=",")
np.testing.assert_almost_equal(np.fromfile(os.path.join(self._scriptdir, expected_path),sep=","),np.array(actual.root.vof_t10).flatten(),decimal=10)
actual.close()
del ns
if __name__ == '__main__':
pass
|
[
"os.path.exists",
"os.path.join",
"tables.open_file",
"numpy.array",
"os.path.abspath",
"os.remove"
] |
[((2853, 2906), 'tables.open_file', 'tables.open_file', (["(ls_rotation_2d_so.name + '.h5')", '"""r"""'], {}), "(ls_rotation_2d_so.name + '.h5', 'r')\n", (2869, 2906), False, 'import tables\n'), ((484, 509), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (499, 509), False, 'import os\n'), ((553, 578), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (568, 578), False, 'import os\n'), ((846, 863), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (860, 863), False, 'import os\n'), ((3184, 3228), 'os.path.join', 'os.path.join', (['self._scriptdir', 'expected_path'], {}), '(self._scriptdir, expected_path)\n', (3196, 3228), False, 'import os\n'), ((3574, 3618), 'os.path.join', 'os.path.join', (['self._scriptdir', 'expected_path'], {}), '(self._scriptdir, expected_path)\n', (3586, 3618), False, 'import os\n'), ((3966, 4010), 'os.path.join', 'os.path.join', (['self._scriptdir', 'expected_path'], {}), '(self._scriptdir, expected_path)\n', (3978, 4010), False, 'import os\n'), ((906, 918), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (915, 918), False, 'import os\n'), ((3238, 3265), 'numpy.array', 'np.array', (['actual.root.u_t10'], {}), '(actual.root.u_t10)\n', (3246, 3265), True, 'import numpy as np\n'), ((3628, 3658), 'numpy.array', 'np.array', (['actual.root.phid_t10'], {}), '(actual.root.phid_t10)\n', (3636, 3658), True, 'import numpy as np\n'), ((4020, 4049), 'numpy.array', 'np.array', (['actual.root.vof_t10'], {}), '(actual.root.vof_t10)\n', (4028, 4049), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import logging
import numpy as np
from matrixprofile import core
logger = logging.getLogger(__name__)
_EPS = 1e-14
def _batch_compute(args):
"""
Internal function to compute a batch of the time series in parallel.
Parameters
----------
args : tuple
Various attributes used for computing the batch.
(
batch_start : int
The starting index for this batch.
batch_end : int
The ending index for this batch.
ts : array_like
The time series to compute the matrix profile for.
query : array_like
The query.
window_size : int
The size of the window to compute the profile over.
data_length : int
The number of elements in the time series.
profile_length : int
The number of elements that will be in the final matrix
profile.
exclusion_zone : int
Used to exclude trivial matches.
data_mu : array_like
The moving average over the time series for the given window
size.
data_sig : array_like
The moving standard deviation over the time series for the
given window size.
first_product : array_like
The first sliding dot product for the time series over index
0 to window_size.
skip_locs : array_like
Indices that should be skipped for distance profile calculation
due to a nan or inf.
)
Returns
-------
dict : profile
The matrix profile, left and right matrix profiles and their respective
profile indices.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> }
"""
num_dim, batch_start, batch_end, ts, query, window_size, data_length, \
profile_length, exclusion_zone, data_mu, data_sig, \
first_product, skip_locs, profile_dimension, return_dimension = args
# initialize matrices
matrix_profile = np.full((num_dim, profile_length), np.inf)
profile_index = np.full((num_dim, profile_length), 0)
left_matrix_profile = None
right_matrix_profile = None
left_profile_index = None
right_profile_index = None
left_matrix_profile = np.copy(matrix_profile)
right_matrix_profile = np.copy(matrix_profile)
left_profile_index = np.copy(profile_index)
right_profile_index = np.copy(profile_index)
# with batch 0 we do not need to recompute the dot product
# however with other batch windows, we need the previous iterations sliding
# dot product
last_product = np.copy(first_product)
    if batch_start == 0:
first_window = query[:, batch_start:batch_start + window_size]
else:
first_window = query[:, batch_start - 1:batch_start + window_size - 1]
for i in range(num_dim):
last_product[i, :] = core.fft_convolve(ts[i, :], first_window[i, :])
query_sum = np.sum(first_window, axis=1)
query_2sum = np.sum(first_window**2, axis=1)
query_mu, query_sig = np.empty(num_dim), np.empty(num_dim)
for i in range(num_dim):
query_mu[i], query_sig[i] = core.moving_avg_std(first_window[i, :], window_size)
drop_value = np.empty(num_dim)
for i in range(num_dim):
drop_value[i] = first_window[i, 0]
distance_profile = np.empty((num_dim, profile_length))
# make sure to compute inclusively from batch start to batch end
# otherwise there are gaps in the profile
if batch_end < profile_length:
batch_end += 1
# iteratively compute distance profile and update with element-wise mins
for i in range(batch_start, batch_end):
# check for nan or inf and skip
if skip_locs[i]:
continue
for j in range(num_dim):
if i == 0:
query_window = query[j, i:i + window_size]
distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
data_sig[j, :], query_mu[j], query_sig[j])
# apply exclusion zone
distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, 0,
distance_profile[j, :])
else:
query_window = query[j, i:i + window_size]
query_sum[j] = query_sum[j] - drop_value[j] + query_window[-1]
query_2sum[j] = query_2sum[j] - drop_value[j]**2 + query_window[-1]**2
query_mu[j] = query_sum[j] / window_size
query_sig2 = query_2sum[j] / window_size - query_mu[j]**2
if query_sig2 < _EPS:
query_sig2 = _EPS
query_sig[j] = np.sqrt(query_sig2)
last_product[j, 1:] = last_product[j, 0:data_length - window_size] \
- ts[j, 0:data_length - window_size] * drop_value[j] \
+ ts[j, window_size:] * query_window[-1]
last_product[j, 0] = first_product[j, i]
distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
data_sig[j, :], query_mu[j], query_sig[j])
# apply the exclusion zone
distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, i,
distance_profile[j, :])
distance_profile[j, distance_profile[j, :] < _EPS] = 0
drop_value[j] = query_window[0]
if np.any(query_sig < _EPS):
continue
distance_profile[:, skip_locs] = np.inf
distance_profile[data_sig < np.sqrt(_EPS)] = np.inf
distance_profile_dim = np.argsort(distance_profile, axis=0)
distance_profile_sort = np.sort(distance_profile, axis=0)
distance_profile_cumsum = np.zeros(profile_length)
for j in range(num_dim):
distance_profile_cumsum += distance_profile_sort[j, :]
distance_profile_mean = distance_profile_cumsum / (j + 1)
# update the matrix profile
indices = (distance_profile_mean < matrix_profile[j, :])
matrix_profile[j, indices] = distance_profile_mean[indices]
profile_index[j, indices] = i
if return_dimension:
profile_dimension[j][:, indices] = distance_profile_dim[:j + 1, indices]
# update the left and right matrix profiles
# find differences, shift left and update
indices = distance_profile_mean[i:] < left_matrix_profile[j, i:]
falses = np.zeros(i).astype('bool')
indices = np.append(falses, indices)
left_matrix_profile[j, indices] = distance_profile_mean[indices]
left_profile_index[j, np.argwhere(indices)] = i
# find differences, shift right and update
indices = distance_profile_mean[0:i] < right_matrix_profile[j, 0:i]
falses = np.zeros(profile_length - i).astype('bool')
indices = np.append(indices, falses)
right_matrix_profile[j, indices] = distance_profile_mean[indices]
right_profile_index[j, np.argwhere(indices)] = i
return {
'mp': matrix_profile,
'pi': profile_index,
'pd': profile_dimension,
'rmp': right_matrix_profile,
'rpi': right_profile_index,
'lmp': left_matrix_profile,
'lpi': left_profile_index,
}
def mstomp(ts, window_size, return_dimension=False, n_jobs=1):
"""
Computes multidimensional matrix profile with mSTAMP (stomp based). Ray or Python's multiprocessing library may be used. When you have initialized Ray on your machine, it takes priority over using Python's multiprocessing.
Parameters
----------
ts : array_like, shape (n_dim, seq_len)
The multidimensional time series to compute the multidimensional matrix profile for.
window_size: int
The size of the window to compute the matrix profile over.
return_dimension : bool
        if True, also return the matrix profile dimension. It takes O(d^2 n)
to store and O(d^2 n^2) to compute. (default is False)
n_jobs : int, Default = 1
Number of cpu cores to use.
Returns
-------
dict : profile
A MatrixProfile data structure.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> 'metric': The distance metric computed for the mp,
>>> 'w': The window size used to compute the matrix profile,
>>> 'ez': The exclusion zone used,
>>> 'sample_pct': Percentage of samples used in computing the MP,
>>> 'data': {
>>> 'ts': Time series data,
>>> 'query': Query data if supplied
>>> }
>>> 'class': "MatrixProfile"
>>> 'algorithm': "stomp_based_mstamp"
>>> }
Raises
------
ValueError
If window_size < 4.
If window_size > time series length / 2.
If ts is not a list or np.array.
"""
query = ts
# data conversion to np.array
ts = core.to_np_array(ts)
query = core.to_np_array(query)
if window_size < 4:
error = "window size must be at least 4."
raise ValueError(error)
if ts.ndim == 1:
ts = np.expand_dims(ts, axis=0)
query = np.expand_dims(query, axis=0)
if window_size > query.shape[1] / 2:
error = "Time series is too short relative to desired window size"
raise ValueError(error)
# multiprocessing or single threaded approach
if n_jobs == 1:
pass
else:
n_jobs = core.valid_n_jobs(n_jobs)
# precompute some common values - profile length, query length etc.
profile_length = core.get_profile_length(ts, query, window_size)
data_length = ts.shape[1]
query_length = query.shape[1]
num_queries = query_length - window_size + 1
exclusion_zone = int(np.ceil(window_size / 2.0))
num_dim = ts.shape[0]
# find skip locations, clean up nan and inf in the ts and query
skip_locs = core.find_multid_skip_locations(ts, profile_length, window_size)
ts = core.clean_nan_inf(ts)
query = core.clean_nan_inf(query)
# initialize matrices
matrix_profile = np.full((num_dim, profile_length), np.inf)
profile_index = np.full((num_dim, profile_length), 0)
# profile_index = np.full((num_dim, profile_length), -1)
# compute left and right matrix profile when similarity join does not happen
left_matrix_profile = np.copy(matrix_profile)
right_matrix_profile = np.copy(matrix_profile)
left_profile_index = np.copy(profile_index)
right_profile_index = np.copy(profile_index)
profile_dimension = []
if return_dimension:
n_jobs = 1
for i in range(num_dim):
profile_dimension.append(np.empty((i + 1, profile_length), dtype=int))
# precompute some statistics on ts
data_mu, data_sig, first_product = np.empty((num_dim, profile_length)), np.empty(
(num_dim, profile_length)), np.empty((num_dim, profile_length))
for i in range(num_dim):
data_mu[i, :], data_sig[i, :] = core.moving_avg_std(ts[i, :], window_size)
first_window = query[i, 0:window_size]
first_product[i, :] = core.fft_convolve(ts[i, :], first_window)
batch_windows = []
results = []
# batch compute with multiprocessing
args = []
for start, end in core.generate_batch_jobs(num_queries, n_jobs):
args.append((num_dim, start, end, ts, query, window_size, data_length, profile_length, exclusion_zone, data_mu,
data_sig, first_product, skip_locs, profile_dimension, return_dimension))
batch_windows.append((start, end))
# we are running single threaded stomp - no need to initialize any
# parallel environments.
if n_jobs == 1 or len(args) == 1:
results.append(_batch_compute(args[0]))
else:
# parallelize
with core.mp_pool()(n_jobs) as pool:
results = pool.map(_batch_compute, args)
# now we combine the batch results
if len(results) == 1:
result = results[0]
matrix_profile = result['mp']
profile_index = result['pi']
profile_dimension = result['pd']
left_matrix_profile = result['lmp']
left_profile_index = result['lpi']
right_matrix_profile = result['rmp']
right_profile_index = result['rpi']
else:
for index, result in enumerate(results):
start = batch_windows[index][0]
end = batch_windows[index][1]
# update the matrix profile
indices = result['mp'] < matrix_profile
matrix_profile[indices] = result['mp'][indices]
profile_index[indices] = result['pi'][indices]
# update the left and right matrix profiles
indices = result['lmp'] < left_matrix_profile
left_matrix_profile[indices] = result['lmp'][indices]
left_profile_index[indices] = result['lpi'][indices]
indices = result['rmp'] < right_matrix_profile
right_matrix_profile[indices] = result['rmp'][indices]
right_profile_index[indices] = result['rpi'][indices]
return {
'mp': matrix_profile,
'pi': profile_index,
'pd': profile_dimension,
'rmp': right_matrix_profile,
'rpi': right_profile_index,
'lmp': left_matrix_profile,
'lpi': left_profile_index,
'metric': 'euclidean',
'w': window_size,
'ez': exclusion_zone,
'sample_pct': 1,
'data': {
'ts': ts,
'query': query
},
'class': "MatrixProfile",
'algorithm': "stomp_based_mstamp"
}
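
# --- A minimal usage sketch (added for illustration; not part of the original
# module). It assumes the matrixprofile package and its `core` module are
# importable as above, and that mstomp() accepts a (n_dim, seq_len) array as
# documented in its docstring. The demo signal and variable names below are
# hypothetical.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _t = np.linspace(0, 8 * np.pi, 400)
    _demo_ts = np.vstack([
        np.sin(_t) + 0.05 * _rng.randn(400),
        np.cos(_t) + 0.05 * _rng.randn(400),
    ])
    _profile = mstomp(_demo_ts, window_size=32, n_jobs=1)
    # 'mp' is expected to have shape (n_dim, profile_length), here (2, 400 - 32 + 1)
    print(_profile['mp'].shape, _profile['metric'])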
|
[
"logging.getLogger",
"matrixprofile.core.clean_nan_inf",
"matrixprofile.core.apply_exclusion_zone",
"numpy.sqrt",
"numpy.argsort",
"matrixprofile.core.distance_profile",
"numpy.sort",
"numpy.empty",
"matrixprofile.core.fft_convolve",
"matrixprofile.core.get_profile_length",
"matrixprofile.core.to_np_array",
"matrixprofile.core.mp_pool",
"numpy.ceil",
"matrixprofile.core.moving_avg_std",
"numpy.any",
"matrixprofile.core.valid_n_jobs",
"numpy.copy",
"matrixprofile.core.find_multid_skip_locations",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"numpy.expand_dims",
"numpy.full",
"matrixprofile.core.generate_batch_jobs"
] |
[((338, 365), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (355, 365), False, 'import logging\n'), ((2657, 2699), 'numpy.full', 'np.full', (['(num_dim, profile_length)', 'np.inf'], {}), '((num_dim, profile_length), np.inf)\n', (2664, 2699), True, 'import numpy as np\n'), ((2720, 2757), 'numpy.full', 'np.full', (['(num_dim, profile_length)', '(0)'], {}), '((num_dim, profile_length), 0)\n', (2727, 2757), True, 'import numpy as np\n'), ((2910, 2933), 'numpy.copy', 'np.copy', (['matrix_profile'], {}), '(matrix_profile)\n', (2917, 2933), True, 'import numpy as np\n'), ((2961, 2984), 'numpy.copy', 'np.copy', (['matrix_profile'], {}), '(matrix_profile)\n', (2968, 2984), True, 'import numpy as np\n'), ((3010, 3032), 'numpy.copy', 'np.copy', (['profile_index'], {}), '(profile_index)\n', (3017, 3032), True, 'import numpy as np\n'), ((3059, 3081), 'numpy.copy', 'np.copy', (['profile_index'], {}), '(profile_index)\n', (3066, 3081), True, 'import numpy as np\n'), ((3263, 3285), 'numpy.copy', 'np.copy', (['first_product'], {}), '(first_product)\n', (3270, 3285), True, 'import numpy as np\n'), ((3602, 3630), 'numpy.sum', 'np.sum', (['first_window'], {'axis': '(1)'}), '(first_window, axis=1)\n', (3608, 3630), True, 'import numpy as np\n'), ((3648, 3681), 'numpy.sum', 'np.sum', (['(first_window ** 2)'], {'axis': '(1)'}), '(first_window ** 2, axis=1)\n', (3654, 3681), True, 'import numpy as np\n'), ((3879, 3896), 'numpy.empty', 'np.empty', (['num_dim'], {}), '(num_dim)\n', (3887, 3896), True, 'import numpy as np\n'), ((3992, 4027), 'numpy.empty', 'np.empty', (['(num_dim, profile_length)'], {}), '((num_dim, profile_length))\n', (4000, 4027), True, 'import numpy as np\n'), ((10227, 10247), 'matrixprofile.core.to_np_array', 'core.to_np_array', (['ts'], {}), '(ts)\n', (10243, 10247), False, 'from matrixprofile import core\n'), ((10260, 10283), 'matrixprofile.core.to_np_array', 'core.to_np_array', (['query'], {}), '(query)\n', (10276, 10283), False, 'from matrixprofile import core\n'), ((10879, 10926), 'matrixprofile.core.get_profile_length', 'core.get_profile_length', (['ts', 'query', 'window_size'], {}), '(ts, query, window_size)\n', (10902, 10926), False, 'from matrixprofile import core\n'), ((11204, 11268), 'matrixprofile.core.find_multid_skip_locations', 'core.find_multid_skip_locations', (['ts', 'profile_length', 'window_size'], {}), '(ts, profile_length, window_size)\n', (11235, 11268), False, 'from matrixprofile import core\n'), ((11278, 11300), 'matrixprofile.core.clean_nan_inf', 'core.clean_nan_inf', (['ts'], {}), '(ts)\n', (11296, 11300), False, 'from matrixprofile import core\n'), ((11313, 11338), 'matrixprofile.core.clean_nan_inf', 'core.clean_nan_inf', (['query'], {}), '(query)\n', (11331, 11338), False, 'from matrixprofile import core\n'), ((11387, 11429), 'numpy.full', 'np.full', (['(num_dim, profile_length)', 'np.inf'], {}), '((num_dim, profile_length), np.inf)\n', (11394, 11429), True, 'import numpy as np\n'), ((11450, 11487), 'numpy.full', 'np.full', (['(num_dim, profile_length)', '(0)'], {}), '((num_dim, profile_length), 0)\n', (11457, 11487), True, 'import numpy as np\n'), ((11657, 11680), 'numpy.copy', 'np.copy', (['matrix_profile'], {}), '(matrix_profile)\n', (11664, 11680), True, 'import numpy as np\n'), ((11708, 11731), 'numpy.copy', 'np.copy', (['matrix_profile'], {}), '(matrix_profile)\n', (11715, 11731), True, 'import numpy as np\n'), ((11757, 11779), 'numpy.copy', 'np.copy', (['profile_index'], {}), '(profile_index)\n', (11764, 11779), True, 'import 
numpy as np\n'), ((11806, 11828), 'numpy.copy', 'np.copy', (['profile_index'], {}), '(profile_index)\n', (11813, 11828), True, 'import numpy as np\n'), ((12565, 12610), 'matrixprofile.core.generate_batch_jobs', 'core.generate_batch_jobs', (['num_queries', 'n_jobs'], {}), '(num_queries, n_jobs)\n', (12589, 12610), False, 'from matrixprofile import core\n'), ((3706, 3723), 'numpy.empty', 'np.empty', (['num_dim'], {}), '(num_dim)\n', (3714, 3723), True, 'import numpy as np\n'), ((3725, 3742), 'numpy.empty', 'np.empty', (['num_dim'], {}), '(num_dim)\n', (3733, 3742), True, 'import numpy as np\n'), ((3808, 3860), 'matrixprofile.core.moving_avg_std', 'core.moving_avg_std', (['first_window[i, :]', 'window_size'], {}), '(first_window[i, :], window_size)\n', (3827, 3860), False, 'from matrixprofile import core\n'), ((6349, 6373), 'numpy.any', 'np.any', (['(query_sig < _EPS)'], {}), '(query_sig < _EPS)\n', (6355, 6373), True, 'import numpy as np\n'), ((6536, 6572), 'numpy.argsort', 'np.argsort', (['distance_profile'], {'axis': '(0)'}), '(distance_profile, axis=0)\n', (6546, 6572), True, 'import numpy as np\n'), ((6605, 6638), 'numpy.sort', 'np.sort', (['distance_profile'], {'axis': '(0)'}), '(distance_profile, axis=0)\n', (6612, 6638), True, 'import numpy as np\n'), ((6673, 6697), 'numpy.zeros', 'np.zeros', (['profile_length'], {}), '(profile_length)\n', (6681, 6697), True, 'import numpy as np\n'), ((10426, 10452), 'numpy.expand_dims', 'np.expand_dims', (['ts'], {'axis': '(0)'}), '(ts, axis=0)\n', (10440, 10452), True, 'import numpy as np\n'), ((10469, 10498), 'numpy.expand_dims', 'np.expand_dims', (['query'], {'axis': '(0)'}), '(query, axis=0)\n', (10483, 10498), True, 'import numpy as np\n'), ((10759, 10784), 'matrixprofile.core.valid_n_jobs', 'core.valid_n_jobs', (['n_jobs'], {}), '(n_jobs)\n', (10776, 10784), False, 'from matrixprofile import core\n'), ((11065, 11091), 'numpy.ceil', 'np.ceil', (['(window_size / 2.0)'], {}), '(window_size / 2.0)\n', (11072, 11091), True, 'import numpy as np\n'), ((12096, 12131), 'numpy.empty', 'np.empty', (['(num_dim, profile_length)'], {}), '((num_dim, profile_length))\n', (12104, 12131), True, 'import numpy as np\n'), ((12133, 12168), 'numpy.empty', 'np.empty', (['(num_dim, profile_length)'], {}), '((num_dim, profile_length))\n', (12141, 12168), True, 'import numpy as np\n'), ((12179, 12214), 'numpy.empty', 'np.empty', (['(num_dim, profile_length)'], {}), '((num_dim, profile_length))\n', (12187, 12214), True, 'import numpy as np\n'), ((12284, 12326), 'matrixprofile.core.moving_avg_std', 'core.moving_avg_std', (['ts[i, :]', 'window_size'], {}), '(ts[i, :], window_size)\n', (12303, 12326), False, 'from matrixprofile import core\n'), ((12404, 12445), 'matrixprofile.core.fft_convolve', 'core.fft_convolve', (['ts[i, :]', 'first_window'], {}), '(ts[i, :], first_window)\n', (12421, 12445), False, 'from matrixprofile import core\n'), ((3537, 3584), 'matrixprofile.core.fft_convolve', 'core.fft_convolve', (['ts[i, :]', 'first_window[i, :]'], {}), '(ts[i, :], first_window[i, :])\n', (3554, 3584), False, 'from matrixprofile import core\n'), ((7472, 7498), 'numpy.append', 'np.append', (['falses', 'indices'], {}), '(falses, indices)\n', (7481, 7498), True, 'import numpy as np\n'), ((7859, 7885), 'numpy.append', 'np.append', (['indices', 'falses'], {}), '(indices, falses)\n', (7868, 7885), True, 'import numpy as np\n'), ((4566, 4682), 'matrixprofile.core.distance_profile', 'core.distance_profile', (['last_product[j, :]', 'window_size', 'data_mu[j, :]', 'data_sig[j, :]', 
'query_mu[j]', 'query_sig[j]'], {}), '(last_product[j, :], window_size, data_mu[j, :],\n data_sig[j, :], query_mu[j], query_sig[j])\n', (4587, 4682), False, 'from matrixprofile import core\n'), ((4823, 4924), 'matrixprofile.core.apply_exclusion_zone', 'core.apply_exclusion_zone', (['exclusion_zone', '(0)', 'window_size', 'data_length', '(0)', 'distance_profile[j, :]'], {}), '(exclusion_zone, 0, window_size, data_length, 0,\n distance_profile[j, :])\n', (4848, 4924), False, 'from matrixprofile import core\n'), ((5469, 5488), 'numpy.sqrt', 'np.sqrt', (['query_sig2'], {}), '(query_sig2)\n', (5476, 5488), True, 'import numpy as np\n'), ((5801, 5917), 'matrixprofile.core.distance_profile', 'core.distance_profile', (['last_product[j, :]', 'window_size', 'data_mu[j, :]', 'data_sig[j, :]', 'query_mu[j]', 'query_sig[j]'], {}), '(last_product[j, :], window_size, data_mu[j, :],\n data_sig[j, :], query_mu[j], query_sig[j])\n', (5822, 5917), False, 'from matrixprofile import core\n'), ((6062, 6163), 'matrixprofile.core.apply_exclusion_zone', 'core.apply_exclusion_zone', (['exclusion_zone', '(0)', 'window_size', 'data_length', 'i', 'distance_profile[j, :]'], {}), '(exclusion_zone, 0, window_size, data_length, i,\n distance_profile[j, :])\n', (6087, 6163), False, 'from matrixprofile import core\n'), ((6480, 6493), 'numpy.sqrt', 'np.sqrt', (['_EPS'], {}), '(_EPS)\n', (6487, 6493), True, 'import numpy as np\n'), ((11971, 12015), 'numpy.empty', 'np.empty', (['(i + 1, profile_length)'], {'dtype': 'int'}), '((i + 1, profile_length), dtype=int)\n', (11979, 12015), True, 'import numpy as np\n'), ((13102, 13116), 'matrixprofile.core.mp_pool', 'core.mp_pool', ([], {}), '()\n', (13114, 13116), False, 'from matrixprofile import core\n'), ((7423, 7434), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (7431, 7434), True, 'import numpy as np\n'), ((7610, 7630), 'numpy.argwhere', 'np.argwhere', (['indices'], {}), '(indices)\n', (7621, 7630), True, 'import numpy as np\n'), ((7793, 7821), 'numpy.zeros', 'np.zeros', (['(profile_length - i)'], {}), '(profile_length - i)\n', (7801, 7821), True, 'import numpy as np\n'), ((7999, 8019), 'numpy.argwhere', 'np.argwhere', (['indices'], {}), '(indices)\n', (8010, 8019), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 13:05:28 2018:
Building on version 3, the intersection is obtained with pandas' join method.
Based on the samples screened from the scale file, fetch the paths of the raw data that meet the requirements.
The data layout is neuroimageDataPath//subject00001//files,
but any layout works, as long as you specify where subjName is located.
In short, the matched files are finally copied elsewhere (each subject can be restricted to files meeting a condition, e.g. files ending with '.nii').
input:
# reference_file: text file containing the subject names to be copied (the uid in the master table)
# keywork_of_reference_uid: regular expression used to extract the unique id from the scale
# ith_number_of_reference_uid: when the unique id in the scale has several matches, which one to take (e.g. for a name like subj0001_bold7000, both 0001 and 7000 may be matched; in that case choose which match to use)
# keyword_of_parent_folder_containing_target_file: which modality / which folder of each subject to copy out (e.g. when both 'resting' and 'dti' exist, pick one modality)
# matching_point_number_of_target_uid_in_backwards: the block, counted backwards from the target file (the first one counts as 1), that contains the unique id matching the reference id
# e.g. in 'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt' the unique id sits in the 3rd block from the end
# keyword_of_target_file_uid: regular expression used to extract the unique id from the MRI data
# ith_number_of_targetfile_uid: when the unique id in the target file has several matches, which one to take.
# keywork_of_target_file_not_for_uid: regular expression or keyword used to screen the files themselves
# targe_file_folder: root directory of the raw data
# save_path: top-level directory that the raw data will be copied into
# n_processess=5: number of processes/threads
# is_save_log: whether to save the copy log
# is_copy: whether to perform the copy
# is_move: whether to move instead (0)
# save_into_one_or_more_folder: save into one folder per subject, or all into a single folder
# save_suffix: suffix of the saved files ('.nii')
# is_run: whether to actually move or copy the files (0)
# Overall, copied files end up under: save_path/saveFolderName/subjName/files
@author: <NAME>
new feature: true multi-core, multi-threaded processing; class methods uniformly return self
file name matching: regular expression matching
"""
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd
import time
import os
import shutil
import sys
sys.path.append(
r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Utils')
class CopyFmri():
def __init__(
self,
reference_file=r'E:\wangfeidata\uid.txt',
targe_file_folder=r'E:\wangfeidata\FunImgARWD',
keywork_of_reference_uid='([1-9]\d*)',
ith_number_of_reference_uid=0,
keyword_of_target_file_uid='([1-9]\d*)',
ith_number_of_targetfile_uid=0,
matching_point_number_of_target_uid_in_backwards=2,
keywork_of_target_file_not_for_uid='nii',
keyword_of_parent_folder_containing_target_file='',
save_path=r'E:\wangfeidata',
n_processess=2,
is_save_log=1,
is_copy=0,
is_move=0,
save_into_one_or_more_folder='one_file_one_folder',
save_suffix='.nii',
is_run=0):
self.reference_file = reference_file
self.targe_file_folder = targe_file_folder
self.keywork_of_reference_uid = keywork_of_reference_uid
self.ith_number_of_reference_uid = ith_number_of_reference_uid
self.keyword_of_target_file_uid = keyword_of_target_file_uid
self.matching_point_number_of_target_uid_in_backwards = matching_point_number_of_target_uid_in_backwards
self.ith_number_of_targetfile_uid = ith_number_of_targetfile_uid
self.keywork_of_target_file_not_for_uid = keywork_of_target_file_not_for_uid
self.keyword_of_parent_folder_containing_target_file = keyword_of_parent_folder_containing_target_file
self.save_path = save_path
self.n_processess = n_processess
self.is_save_log = is_save_log
self.is_copy = is_copy
self.is_move = is_move
self.save_into_one_or_more_folder = save_into_one_or_more_folder
self.save_suffix = save_suffix
self.is_run = is_run
# %% process the input
def _after_init(self):
""" handle the init parameter
"""
        # check params
if self.is_copy == 1 & self.is_move == 1:
print('### Cannot copy and move at the same time! ###\n')
print('### please press Ctrl+C to close the progress ###\n')
# create save folder
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# read reference_file(excel or text)
try:
self.subjName_forSelect = pd.read_excel(
self.reference_file, dtype='str', header=None, index=None)
except BaseException:
self.subjName_forSelect = pd.read_csv(
self.reference_file, dtype='str', header=None)
        print('### Extracting the matched part of subjName_forSelect, digits by default ###\n### When there are multiple matches, the 1st one is used by default ###\n')
if self.keywork_of_reference_uid:
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0].str.findall(self.keywork_of_reference_uid)
self.subjName_forSelect = [self.subjName_forSelect_[self.ith_number_of_reference_uid]
for self.subjName_forSelect_ in
self.subjName_forSelect
if len(self.subjName_forSelect_)]
def walkAllPath(self):
self.allWalkPath = os.walk(self.targe_file_folder)
# allWalkPath=[allWalkPath_ for allWalkPath_ in allWalkPath]
return self
def fetch_allFilePath(self):
self.allFilePath = []
for onePath in self.allWalkPath:
for oneFile in onePath[2]:
target_folder = os.path.join(onePath[0], oneFile)
self.allFilePath.append(target_folder)
return self
def fetch_allSubjName(self):
'''
        matching_point_number_of_target_uid_in_backwards: the block, counted from the end
        (the first one counts as 1), that contains subjName
        # e.g. in 'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
        # subjName sits in the 3rd block from the end
'''
self.allSubjName = self.allFilePath
for i in range(self.matching_point_number_of_target_uid_in_backwards - 1):
self.allSubjName = [os.path.dirname(
allFilePath_) for allFilePath_ in self.allSubjName]
self.allSubjName = [os.path.basename(
allFilePath_) for allFilePath_ in self.allSubjName]
self.allSubjName = pd.DataFrame(self.allSubjName)
self.allSubjName_raw = self.allSubjName
return self
def fetch_folerNameContainingFile(self):
'''
        If the folder directly above a file is not the subject name, we need to choose
        which folder's files to take. In that case, first determine the folder name above
        each file (it may be a modality name), then screen by your keyword.
'''
self.folerNameContainingFile = [os.path.dirname(
allFilePath_) for allFilePath_ in self.allFilePath]
self.folerNameContainingFile = [os.path.basename(
folderName) for folderName in self.folerNameContainingFile]
return self
def fetch_allFileName(self):
'''
        Fetch all file names for later screening.
        Use case: suppose files we do not need sit together with the files we do,
        e.g. text files mixed in with the dicom files that we do not want.
'''
self.allFileName = [os.path.basename(
allFilePath_) for allFilePath_ in self.allFilePath]
return self
# %% screen according several rules
def screen_pathLogicalLocation_accordingTo_yourSubjName(self):
""" 匹配subject name:注意此处用精确匹配,只有完成匹配时,才匹配成功"""
"""maker sure subjName_forSelect is pd.Series and its content is string"""
if isinstance(self.subjName_forSelect, type(pd.DataFrame([1]))):
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]
if not isinstance(self.subjName_forSelect[0], str):
self.subjName_forSelect = pd.Series(
self.subjName_forSelect, dtype='str')
        # Be sure the data types on both sides of the match are consistent!!!
try:
            # extract the uid of every subject
# self.logic_index_subjname=\
# np.sum(
# pd.DataFrame(
# [self.allSubjName.iloc[:,0].str.contains\
# (name_for_self) for name_for_self in self.subjName_forSelect]
# ).T,
# axis=1)
#
# self.logic_index_subjname=self.logic_index_subjname>=1
self.allSubjName = self.allSubjName.iloc[:, 0].str.findall(
self.keyword_of_target_file_uid)
            # After the regex extraction some entries may not match and yield an empty list;
            # treat such empty lists as non-matches and remove them
allSubjName_temp = []
for name in self.allSubjName.values:
if name:
allSubjName_temp.append(name[self.ith_number_of_targetfile_uid])
else:
allSubjName_temp.append(None)
self.allSubjName = allSubjName_temp
self.allSubjName = pd.DataFrame(self.allSubjName)
self.subjName_forSelect = pd.DataFrame(self.subjName_forSelect)
self.logic_index_subjname = pd.DataFrame(
np.zeros(len(self.allSubjName)) == 1)
for i in range(len(self.subjName_forSelect)):
self.logic_index_subjname = self.logic_index_subjname.mask(
self.allSubjName == self.subjName_forSelect.iloc[i, 0], True)
except BaseException:
print('subjName mismatch subjName_forSelected!\nplease check their type')
sys.exit(0)
return self
def screen_pathLogicalLocation_accordingTo_folerNameContainingFile(self):
""" 匹配folerNameContainingFile:注意此处用的连续模糊匹配,只要含有这个关键词,则匹配
"""
if self.keyword_of_parent_folder_containing_target_file:
self.logic_index_foler_name_containing_file = [
self.keyword_of_parent_folder_containing_target_file in oneName_ for oneName_ in self.folerNameContainingFile]
self.logic_index_foler_name_containing_file = pd.DataFrame(
self.logic_index_foler_name_containing_file)
else:
self.logic_index_foler_name_containing_file = np.ones(
[len(self.folerNameContainingFile), 1]) == 1
self.logic_index_foler_name_containing_file = pd.DataFrame(
self.logic_index_foler_name_containing_file)
return self
def screen_pathLogicalLocation_accordingTo_fileName(self):
""" 匹配file name (不是用于提取uid):正则表达式匹配
"""
if self.keywork_of_target_file_not_for_uid:
self.allFileName = pd.Series(self.allFileName)
self.logic_index_file_name = self.allFileName.str.contains(
self.keywork_of_target_file_not_for_uid)
else:
self.logic_index_file_name = np.ones([len(self.allFileName), 1]) == 1
self.logic_index_file_name = pd.DataFrame(self.logic_index_file_name)
return self
# %% final logical location of selfected file path
def fetch_totalLogicalLocation(self):
self.logic_index_all = pd.concat(
[
self.logic_index_file_name,
self.logic_index_foler_name_containing_file,
self.logic_index_subjname],
axis=1)
self.logic_index_all = np.sum(
self.logic_index_all,
axis=1) == np.shape(
self.logic_index_all)[1]
return self
def fetch_selfectedFilePath_accordingPathLogicalLocation(self):
# target_folder
self.allFilePath = pd.DataFrame(self.allFilePath)
self.allSelectedFilePath = self.allFilePath[self.logic_index_all]
self.allSelectedFilePath = self.allSelectedFilePath.dropna()
# uid name
self.allSubjName = pd.DataFrame(self.allSubjName)
self.allSelectedSubjName = self.allSubjName[self.logic_index_all]
self.allSelectedSubjName = self.allSelectedSubjName.dropna()
# raw name
self.allSubjName_raw = pd.DataFrame(self.allSubjName_raw)
self.allSelectedSubjName_raw = self.allSubjName_raw[self.logic_index_all]
self.allSelectedSubjName_raw = self.allSelectedSubjName_raw.dropna()
return self
# %% run copy
def copy_base(self, i, subjName):
n_allSelectedSubj = len(np.unique(self.allSelectedSubjName_raw))
        # save each file into its own subjxxx folder
if self.save_into_one_or_more_folder == 'one_file_one_folder':
folder_name = subjName.split('.')[0]
output_folder = os.path.join(self.save_path, folder_name)
            # create the subjxxx folder
if not os.path.exists(output_folder):
os.makedirs(output_folder)
        # save all files into a single folder (files are named after subjxxx)
elif self.save_into_one_or_more_folder == 'all_file_one_folder':
output_folder = os.path.join(
self.save_path, subjName + self.save_suffix)
# copying OR moving OR do nothing
fileIndex = self.allSelectedSubjName_raw[(
self.allSelectedSubjName_raw.values == subjName)].index.tolist()
if self.is_copy == 1 and self.is_move == 0:
[shutil.copy(self.allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.is_copy == 0 and self.is_move == 1:
[shutil.move(self.allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.is_copy == 0 and self.is_move == 0:
print('### No copy and No move ###\n')
else:
print('### Cannot copy and move at the same time! ###\n')
print('Copy the {}/{}th subject: {} OK!\n'.format(i + 1, n_allSelectedSubj, subjName))
def copy_multiprocess(self):
s = time.time()
        # save each file into its own subjxxx folder
if self.save_into_one_or_more_folder == 'one_file_one_folder':
pass
elif self.save_into_one_or_more_folder == 'all_file_one_folder':
pass
else:
print(
"###没有指定复制到一个文件夹还是每个被试文件夹###\n###{}跟'all_file_one_folder' OR 'one_file_one_folder'都不符合###".format(
self.save_into_one_or_more_folder))
        # multi-threading
        # unique subject names
uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
print('Copying...\n')
"""
        # single-threaded version
for i,subjName in enumerate(uniSubjName):
self.copy_base(i,subjName)
"""
        # multi-threaded version
cores = multiprocessing.cpu_count()
if self.n_processess > cores:
self.n_processess = cores - 1
with ThreadPoolExecutor(self.n_processess) as executor:
for i, subjName in enumerate(uniSubjName):
executor.submit(self.copy_base, i, subjName)
print('=' * 30)
#
e = time.time()
print('Done!\nRunning time is {:.1f} second'.format(e - s))
# %%
def main_run(self):
# all target_folder and name
self._after_init()
self = self.walkAllPath()
self = self.fetch_allFilePath()
self = self.fetch_allSubjName()
self = self.fetch_allFileName()
# selfect
self = self.fetch_folerNameContainingFile()
        # logicLoc_subjName: the logical indices obtained by matching the subject names, and so on.
        # fileName != subjName, e.g. fileName may be xxx.nii while subjName may be subjxxx
self = self.screen_pathLogicalLocation_accordingTo_yourSubjName()
self = self.screen_pathLogicalLocation_accordingTo_folerNameContainingFile()
self = self.screen_pathLogicalLocation_accordingTo_fileName()
self = self.fetch_totalLogicalLocation()
self = self.fetch_selfectedFilePath_accordingPathLogicalLocation()
self.unmatched_ref = \
pd.DataFrame(list(
set.difference(set(list(self.subjName_forSelect.astype(np.int32).iloc[:, 0])),
set(list(self.allSelectedSubjName.astype(np.int32).iloc[:, 0])))
)
)
print('=' * 50 + '\n')
print(
'Files that not found are : {}\n\nThey may be saved in:\n[{}]\n'.format(
self.unmatched_ref.values,
self.save_path))
print('=' * 50 + '\n')
# save for checking
if self.is_save_log:
# time information
now = time.localtime()
now = time.strftime("%Y-%m-%d %H:%M:%S", now)
# all matched name
uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
uniSubjName = [uniSubjName_ for uniSubjName_ in uniSubjName]
uniSubjName = pd.DataFrame(uniSubjName)
uniSubjName.to_csv(
os.path.join(
self.save_path,
'log_allSelectedSubjName.txt'),
index=False,
header=False)
            # all unmatched subject names
self.unmatched_ref.to_csv(
os.path.join(
self.save_path,
'log_unmatched_reference.txt'),
index=False,
header=False)
            # all folder names under the selected paths
pd.DataFrame(pd.unique(self.allSubjName.iloc[:, 0])).dropna().to_csv(
os.path.join(self.save_path, 'log_alltargetfilename.txt'), index=False, header=False)
            # all matched file paths
self.allSelectedFilePath.to_csv(
os.path.join(
self.save_path,
'log_allSelectedFilePath.txt'),
index=False,
header=False)
            # save the log
f = open(
os.path.join(
self.save_path,
"log_copy_inputs.txt"),
'a')
f.write("\n\n")
f.write('====================' + now + '====================')
f.write("\n\n")
f.write("reference_file is: " + self.reference_file)
f.write("\n\n")
f.write(
"keyword_of_parent_folder_containing_target_file are: " +
self.keyword_of_parent_folder_containing_target_file)
f.write("\n\n")
f.write("matching_point_number_of_target_uid_in_backwards is: " +
str(self.matching_point_number_of_target_uid_in_backwards))
f.write("\n\n")
f.write("keyword_of_target_file_uid is: " +
str(self.keyword_of_target_file_uid))
f.write("\n\n")
f.write("keyword_of_target_file_uid is: " +
str(self.keyword_of_target_file_uid))
f.write("\n\n")
f.write("targe_file_folder is: " + self.targe_file_folder)
f.write("\n\n")
f.write("save_path is: " + self.save_path)
f.write("\n\n")
f.write("n_processess is: " + str(self.n_processess))
f.write("\n\n")
f.close()
# copy
if self.is_run:
self.copy_multiprocess()
return self
# %%
if __name__ == '__main__':
uid = r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\ID_Scale_Headmotion\held_out_samples.txt'
target_folder = r'D:\WorkStation_2018\WorkStation_dynamicFC_V1\Data\ROISignals_FumImgARWSFC_screened'
save_path = r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\held_out_samples'
matching_point_number_of_target_uid_in_backwards = 1
keywork_of_target_file_not_for_uid = ''
save_suffix= ''
copy = CopyFmri(
reference_file=uid,
targe_file_folder=target_folder,
keywork_of_reference_uid='([1-9]\d*)',
ith_number_of_reference_uid=0,
keyword_of_target_file_uid='([1-9]\d*)',
ith_number_of_targetfile_uid=0,
matching_point_number_of_target_uid_in_backwards=matching_point_number_of_target_uid_in_backwards,
keywork_of_target_file_not_for_uid=keywork_of_target_file_not_for_uid,
keyword_of_parent_folder_containing_target_file='',
save_path=save_path,
n_processess=8,
is_save_log=1,
is_copy=1,
is_move=0,
save_into_one_or_more_folder='all_file_one_folder',
save_suffix=save_suffix,
is_run=1)
results = copy.main_run()
# --------------------------------
results=results.__dict__
print(results.keys())
print('Done!')
|
[
"pandas.read_csv",
"multiprocessing.cpu_count",
"pandas.read_excel",
"sys.exit",
"sys.path.append",
"os.walk",
"pandas.unique",
"os.path.exists",
"shutil.move",
"pandas.DataFrame",
"time.localtime",
"os.path.dirname",
"shutil.copy",
"numpy.shape",
"time.time",
"pandas.Series",
"numpy.unique",
"os.makedirs",
"concurrent.futures.ThreadPoolExecutor",
"time.strftime",
"os.path.join",
"numpy.sum",
"os.path.basename",
"pandas.concat"
] |
[((1610, 1720), 'sys.path.append', 'sys.path.append', (['"""D:\\\\My_Codes\\\\LC_Machine_Learning\\\\lc_rsfmri_tools\\\\lc_rsfmri_tools_python\\\\Utils"""'], {}), "(\n 'D:\\\\My_Codes\\\\LC_Machine_Learning\\\\lc_rsfmri_tools\\\\lc_rsfmri_tools_python\\\\Utils'\n )\n", (1625, 1720), False, 'import sys\n'), ((4899, 4930), 'os.walk', 'os.walk', (['self.targe_file_folder'], {}), '(self.targe_file_folder)\n', (4906, 4930), False, 'import os\n'), ((5954, 5984), 'pandas.DataFrame', 'pd.DataFrame', (['self.allSubjName'], {}), '(self.allSubjName)\n', (5966, 5984), True, 'import pandas as pd\n'), ((10618, 10742), 'pandas.concat', 'pd.concat', (['[self.logic_index_file_name, self.logic_index_foler_name_containing_file,\n self.logic_index_subjname]'], {'axis': '(1)'}), '([self.logic_index_file_name, self.\n logic_index_foler_name_containing_file, self.logic_index_subjname], axis=1)\n', (10627, 10742), True, 'import pandas as pd\n'), ((11095, 11125), 'pandas.DataFrame', 'pd.DataFrame', (['self.allFilePath'], {}), '(self.allFilePath)\n', (11107, 11125), True, 'import pandas as pd\n'), ((11315, 11345), 'pandas.DataFrame', 'pd.DataFrame', (['self.allSubjName'], {}), '(self.allSubjName)\n', (11327, 11345), True, 'import pandas as pd\n'), ((11539, 11573), 'pandas.DataFrame', 'pd.DataFrame', (['self.allSubjName_raw'], {}), '(self.allSubjName_raw)\n', (11551, 11573), True, 'import pandas as pd\n'), ((13342, 13353), 'time.time', 'time.time', ([], {}), '()\n', (13351, 13353), False, 'import time\n'), ((14066, 14093), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (14091, 14093), False, 'import multiprocessing\n'), ((14402, 14413), 'time.time', 'time.time', ([], {}), '()\n', (14411, 14413), False, 'import time\n'), ((3901, 3931), 'os.path.exists', 'os.path.exists', (['self.save_path'], {}), '(self.save_path)\n', (3915, 3931), False, 'import os\n'), ((3945, 3972), 'os.makedirs', 'os.makedirs', (['self.save_path'], {}), '(self.save_path)\n', (3956, 3972), False, 'import os\n'), ((4070, 4142), 'pandas.read_excel', 'pd.read_excel', (['self.reference_file'], {'dtype': '"""str"""', 'header': 'None', 'index': 'None'}), "(self.reference_file, dtype='str', header=None, index=None)\n", (4083, 4142), True, 'import pandas as pd\n'), ((5845, 5875), 'os.path.basename', 'os.path.basename', (['allFilePath_'], {}), '(allFilePath_)\n', (5861, 5875), False, 'import os\n'), ((6271, 6300), 'os.path.dirname', 'os.path.dirname', (['allFilePath_'], {}), '(allFilePath_)\n', (6286, 6300), False, 'import os\n'), ((6392, 6420), 'os.path.basename', 'os.path.basename', (['folderName'], {}), '(folderName)\n', (6408, 6420), False, 'import os\n'), ((6704, 6734), 'os.path.basename', 'os.path.basename', (['allFilePath_'], {}), '(allFilePath_)\n', (6720, 6734), False, 'import os\n'), ((7297, 7344), 'pandas.Series', 'pd.Series', (['self.subjName_forSelect'], {'dtype': '"""str"""'}), "(self.subjName_forSelect, dtype='str')\n", (7306, 7344), True, 'import pandas as pd\n'), ((8500, 8530), 'pandas.DataFrame', 'pd.DataFrame', (['self.allSubjName'], {}), '(self.allSubjName)\n', (8512, 8530), True, 'import pandas as pd\n'), ((8569, 8606), 'pandas.DataFrame', 'pd.DataFrame', (['self.subjName_forSelect'], {}), '(self.subjName_forSelect)\n', (8581, 8606), True, 'import pandas as pd\n'), ((9559, 9616), 'pandas.DataFrame', 'pd.DataFrame', (['self.logic_index_foler_name_containing_file'], {}), '(self.logic_index_foler_name_containing_file)\n', (9571, 9616), True, 'import pandas as pd\n'), ((9834, 9891), 'pandas.DataFrame', 
'pd.DataFrame', (['self.logic_index_foler_name_containing_file'], {}), '(self.logic_index_foler_name_containing_file)\n', (9846, 9891), True, 'import pandas as pd\n'), ((10132, 10159), 'pandas.Series', 'pd.Series', (['self.allFileName'], {}), '(self.allFileName)\n', (10141, 10159), True, 'import pandas as pd\n'), ((10426, 10466), 'pandas.DataFrame', 'pd.DataFrame', (['self.logic_index_file_name'], {}), '(self.logic_index_file_name)\n', (10438, 10466), True, 'import pandas as pd\n'), ((10843, 10879), 'numpy.sum', 'np.sum', (['self.logic_index_all'], {'axis': '(1)'}), '(self.logic_index_all, axis=1)\n', (10849, 10879), True, 'import numpy as np\n'), ((11842, 11881), 'numpy.unique', 'np.unique', (['self.allSelectedSubjName_raw'], {}), '(self.allSelectedSubjName_raw)\n', (11851, 11881), True, 'import numpy as np\n'), ((12065, 12106), 'os.path.join', 'os.path.join', (['self.save_path', 'folder_name'], {}), '(self.save_path, folder_name)\n', (12077, 12106), False, 'import os\n'), ((14188, 14225), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['self.n_processess'], {}), '(self.n_processess)\n', (14206, 14225), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((15911, 15927), 'time.localtime', 'time.localtime', ([], {}), '()\n', (15925, 15927), False, 'import time\n'), ((15946, 15985), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'now'], {}), "('%Y-%m-%d %H:%M:%S', now)\n", (15959, 15985), False, 'import time\n'), ((16192, 16217), 'pandas.DataFrame', 'pd.DataFrame', (['uniSubjName'], {}), '(uniSubjName)\n', (16204, 16217), True, 'import pandas as pd\n'), ((4228, 4286), 'pandas.read_csv', 'pd.read_csv', (['self.reference_file'], {'dtype': '"""str"""', 'header': 'None'}), "(self.reference_file, dtype='str', header=None)\n", (4239, 4286), True, 'import pandas as pd\n'), ((5195, 5228), 'os.path.join', 'os.path.join', (['onePath[0]', 'oneFile'], {}), '(onePath[0], oneFile)\n', (5207, 5228), False, 'import os\n'), ((5732, 5761), 'os.path.dirname', 'os.path.dirname', (['allFilePath_'], {}), '(allFilePath_)\n', (5747, 5761), False, 'import os\n'), ((7105, 7122), 'pandas.DataFrame', 'pd.DataFrame', (['[1]'], {}), '([1])\n', (7117, 7122), True, 'import pandas as pd\n'), ((9060, 9071), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9068, 9071), False, 'import sys\n'), ((10908, 10938), 'numpy.shape', 'np.shape', (['self.logic_index_all'], {}), '(self.logic_index_all)\n', (10916, 10938), True, 'import numpy as np\n'), ((12153, 12182), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (12167, 12182), False, 'import os\n'), ((12200, 12226), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (12211, 12226), False, 'import os\n'), ((12375, 12432), 'os.path.join', 'os.path.join', (['self.save_path', '(subjName + self.save_suffix)'], {}), '(self.save_path, subjName + self.save_suffix)\n', (12387, 12432), False, 'import os\n'), ((12686, 12760), 'shutil.copy', 'shutil.copy', (['self.allSelectedFilePath.loc[fileIndex_, :][0]', 'output_folder'], {}), '(self.allSelectedFilePath.loc[fileIndex_, :][0], output_folder)\n', (12697, 12760), False, 'import shutil\n'), ((16266, 16325), 'os.path.join', 'os.path.join', (['self.save_path', '"""log_allSelectedSubjName.txt"""'], {}), "(self.save_path, 'log_allSelectedSubjName.txt')\n", (16278, 16325), False, 'import os\n'), ((16508, 16567), 'os.path.join', 'os.path.join', (['self.save_path', '"""log_unmatched_reference.txt"""'], {}), "(self.save_path, 
'log_unmatched_reference.txt')\n", (16520, 16567), False, 'import os\n'), ((16796, 16853), 'os.path.join', 'os.path.join', (['self.save_path', '"""log_alltargetfilename.txt"""'], {}), "(self.save_path, 'log_alltargetfilename.txt')\n", (16808, 16853), False, 'import os\n'), ((16968, 17027), 'os.path.join', 'os.path.join', (['self.save_path', '"""log_allSelectedFilePath.txt"""'], {}), "(self.save_path, 'log_allSelectedFilePath.txt')\n", (16980, 17027), False, 'import os\n'), ((17188, 17239), 'os.path.join', 'os.path.join', (['self.save_path', '"""log_copy_inputs.txt"""'], {}), "(self.save_path, 'log_copy_inputs.txt')\n", (17200, 17239), False, 'import os\n'), ((12882, 12956), 'shutil.move', 'shutil.move', (['self.allSelectedFilePath.loc[fileIndex_, :][0]', 'output_folder'], {}), '(self.allSelectedFilePath.loc[fileIndex_, :][0], output_folder)\n', (12893, 12956), False, 'import shutil\n'), ((16723, 16761), 'pandas.unique', 'pd.unique', (['self.allSubjName.iloc[:, 0]'], {}), '(self.allSubjName.iloc[:, 0])\n', (16732, 16761), True, 'import pandas as pd\n')]
|
from __future__ import print_function
import torch
import torch.optim as optim
from data.data_loader import CreateDataLoader
import tqdm
import cv2
import yaml
from schedulers import WarmRestart, LinearDecay
import numpy as np
from models.networks import get_nets
from models.losses import get_loss
from models.models import get_model
from tensorboardX import SummaryWriter
import logging
logging.basicConfig(filename='res.log',level=logging.DEBUG)
writer = SummaryWriter('res_runs')
REPORT_EACH = 100
torch.backends.cudnn.benchmark = True
cv2.setNumThreads(0)
class Trainer:
def __init__(self, config):
self.config = config
self.train_dataset = self._get_dataset(config, 'train')
self.val_dataset = self._get_dataset(config, 'test')
self.best_metric = 0
self.warmup_epochs = config['warmup_num']
def train(self):
self._init_params()
for epoch in range(0, config['num_epochs']):
if (epoch == self.warmup_epochs) and not(self.warmup_epochs == 0):
self.netG.module.unfreeze()
self.optimizer_G = self._get_optim(self.netG, self.config['optimizer']['lr_G'])
self.scheduler_G = self._get_scheduler(self.optimizer_G)
train_loss = self._run_epoch(epoch)
val_loss, val_psnr = self._validate(epoch)
self.scheduler_G.step()
val_metric = val_psnr
if val_metric > self.best_metric:
self.best_metric = val_metric
torch.save({
'model': self.netG.state_dict()
}, 'best_{}.h5'.format(self.config['experiment_desc']))
torch.save({
'model': self.netG.state_dict()
}, 'last_{}.h5'.format(self.config['experiment_desc']))
print(('val_loss={}, val_metric={}, best_metric={}\n'.format(val_loss, val_metric, self.best_metric)))
            logging.debug("Experiment Name: %s, Epoch: %d, Train Loss: %.3f, Val Loss: %.3f, Val Metric: %.3f, Best Metric: %.3f" % (
                self.config['experiment_desc'], epoch, train_loss, val_loss, val_metric, self.best_metric))
def _run_epoch(self, epoch):
self.netG = self.netG.train()
losses_G = []
losses_vgg = []
losses_adv = []
psnrs = []
ssim = []
batches_per_epoch = len(self.train_dataset) / config['batch_size']
for param_group in self.optimizer_G.param_groups:
lr = param_group['lr']
tq = tqdm.tqdm(self.train_dataset.dataloader)
tq.set_description('Epoch {}, lr {}'.format(epoch, lr))
i = 0
for data in tq:
inputs, targets = self.model.get_input(data)
outputs = self.netG(inputs)
for _ in range(config['D_update_ratio']):
self.optimizer_D.zero_grad()
loss_D = config['loss']['adv'] * self.criterionD(self.netD, outputs, targets)
loss_D.backward(retain_graph=True)
self.optimizer_D.step()
self.optimizer_G.zero_grad()
loss_content = self.criterionG(outputs, targets)
loss_adv = self.criterionD.get_g_loss(self.netD, outputs)
loss_G = loss_content + config['loss']['adv'] * loss_adv
loss_G.backward()
self.optimizer_G.step()
losses_G.append(loss_G.item())
losses_vgg.append(loss_content.item())
losses_adv.append(loss_adv.item())
curr_psnr, curr_ssim = self.model.get_acc(outputs, targets)
psnrs.append(curr_psnr)
ssim.append(curr_ssim)
mean_loss_G = np.mean(losses_G[-REPORT_EACH:])
mean_loss_vgg = np.mean(losses_vgg[-REPORT_EACH:])
mean_loss_adv = np.mean(losses_adv[-REPORT_EACH:])
mean_psnr = np.mean(psnrs[-REPORT_EACH:])
mean_ssim = np.mean(ssim[-REPORT_EACH:])
if i % 100 == 0:
writer.add_scalar('Train_G_Loss', mean_loss_G, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_vgg', mean_loss_vgg, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_adv', mean_loss_adv, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_PSNR', mean_psnr, i + (batches_per_epoch * epoch))
writer.add_scalar('Train_SSIM', mean_ssim, i + (batches_per_epoch * epoch))
writer.add_image('output', outputs)
writer.add_image('target', targets)
self.model.visualize_data(writer, data, i + (batches_per_epoch * epoch))
tq.set_postfix(loss=self.model.get_loss(mean_loss_G, mean_psnr, mean_ssim))
i += 1
tq.close()
return np.mean(losses_G)
def _validate(self, epoch):
self.netG = self.netG.eval()
losses = []
psnrs = []
ssim = []
tq = tqdm.tqdm(self.val_dataset.dataloader)
tq.set_description('Validation')
for data in tq:
inputs, targets = self.model.get_input(data)
outputs = self.netG(inputs)
loss_content = self.criterionG(outputs, targets)
loss_G = loss_content + config['loss']['adv'] * self.criterionD.get_g_loss(self.netD, outputs)
losses.append(loss_G.item())
curr_psnr, curr_ssim = self.model.get_acc(outputs, targets, full=True)
psnrs.append(curr_psnr)
ssim.append(curr_ssim)
val_loss = np.mean(losses)
val_psnr = np.mean(psnrs)
val_ssim = np.mean(ssim)
tq.close()
writer.add_scalar('Validation_Loss', val_loss, epoch)
writer.add_scalar('Validation_PSNR', val_psnr, epoch)
writer.add_scalar('Validation_SSIM', val_ssim, epoch)
writer.add_image('output', outputs)
writer.add_image('target', targets)
return val_loss, val_psnr
def _get_dataset(self, config, filename):
data_loader = CreateDataLoader(config, filename)
return data_loader.load_data()
def _get_optim(self, model, lr):
if self.config['optimizer']['name'] == 'adam':
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
elif self.config['optimizer']['name'] == 'sgd':
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
elif self.config['optimizer']['name'] == 'adadelta':
optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
else:
raise ValueError("Optimizer [%s] not recognized." % self.config['optimizer']['name'])
return optimizer
def _get_scheduler(self, optimizer):
if self.config['scheduler']['name'] == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
patience=self.config['scheduler']['patience'],
factor=self.config['scheduler']['factor'],
min_lr=self.config['scheduler']['min_lr'])
elif self.config['optimizer']['name'] == 'sgdr':
scheduler = WarmRestart(optimizer)
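            # Note added for clarity: this branch keys off the *optimizer* name
            # rather than the scheduler name, unlike the other branches; a config
            # with scheduler.name == 'sgdr' will therefore fall through to the
            # ValueError below unless optimizer.name is also 'sgdr'.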
elif self.config['scheduler']['name'] == 'linear':
scheduler = LinearDecay(optimizer,
min_lr=self.config['scheduler']['min_lr'],
num_epochs=self.config['num_epochs'],
start_epoch=self.config['scheduler']['start_epoch'])
else:
raise ValueError("Scheduler [%s] not recognized." % self.config['scheduler']['name'])
return scheduler
def _init_params(self):
self.netG, self.netD = get_nets(self.config['model'])
self.netG.cuda()
self.netD.cuda()
self.model = get_model(self.config['model'])
self.criterionG, self.criterionD = get_loss(self.config['model'])
self.optimizer_G = self._get_optim(self.netG, self.config['optimizer']['lr_G'])
self.optimizer_D = self._get_optim(self.netD, self.config['optimizer']['lr_D'])
self.scheduler_G = self._get_scheduler(self.optimizer_G)
self.scheduler_D = self._get_scheduler(self.optimizer_D)
if __name__ == '__main__':
with open('config/deblur_solver.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)  # Loader must be given explicitly with recent PyYAML
trainer = Trainer(config)
trainer.train()
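# --- Hedged illustration, not part of the original script ---
# The Trainer reads its settings from config/deblur_solver.yaml. Inferred from
# the keys accessed above, the file appears to need a structure roughly like
# the sketch below; every value shown is a placeholder assumption, not a value
# taken from the repository:
#
# experiment_desc: my_run
# num_epochs: 200
# warmup_num: 3
# batch_size: 1
# D_update_ratio: 5
# model: ...            # consumed by get_nets / get_model / get_loss
# loss:
#   adv: 0.001
# optimizer:
#   name: adam           # or sgd / adadelta
#   lr_G: 0.0001
#   lr_D: 0.0001
# scheduler:
#   name: linear         # or plateau / sgdr
#   min_lr: 0.0000001
#   start_epoch: 50
#   patience: 10
#   factor: 0.5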
|
[
"logging.basicConfig",
"numpy.mean",
"cv2.setNumThreads",
"models.networks.get_nets",
"tensorboardX.SummaryWriter",
"logging.debug",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"tqdm.tqdm",
"yaml.load",
"models.losses.get_loss",
"schedulers.LinearDecay",
"schedulers.WarmRestart",
"data.data_loader.CreateDataLoader",
"models.models.get_model"
] |
[((391, 451), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""res.log"""', 'level': 'logging.DEBUG'}), "(filename='res.log', level=logging.DEBUG)\n", (410, 451), False, 'import logging\n'), ((460, 485), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['"""res_runs"""'], {}), "('res_runs')\n", (473, 485), False, 'from tensorboardX import SummaryWriter\n'), ((541, 561), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (558, 561), False, 'import cv2\n'), ((2204, 2244), 'tqdm.tqdm', 'tqdm.tqdm', (['self.train_dataset.dataloader'], {}), '(self.train_dataset.dataloader)\n', (2213, 2244), False, 'import tqdm\n'), ((4080, 4097), 'numpy.mean', 'np.mean', (['losses_G'], {}), '(losses_G)\n', (4087, 4097), True, 'import numpy as np\n'), ((4205, 4243), 'tqdm.tqdm', 'tqdm.tqdm', (['self.val_dataset.dataloader'], {}), '(self.val_dataset.dataloader)\n', (4214, 4243), False, 'import tqdm\n'), ((4699, 4714), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (4706, 4714), True, 'import numpy as np\n'), ((4728, 4742), 'numpy.mean', 'np.mean', (['psnrs'], {}), '(psnrs)\n', (4735, 4742), True, 'import numpy as np\n'), ((4756, 4769), 'numpy.mean', 'np.mean', (['ssim'], {}), '(ssim)\n', (4763, 4769), True, 'import numpy as np\n'), ((5115, 5149), 'data.data_loader.CreateDataLoader', 'CreateDataLoader', (['config', 'filename'], {}), '(config, filename)\n', (5131, 5149), False, 'from data.data_loader import CreateDataLoader\n'), ((6626, 6656), 'models.networks.get_nets', 'get_nets', (["self.config['model']"], {}), "(self.config['model'])\n", (6634, 6656), False, 'from models.networks import get_nets\n'), ((6710, 6741), 'models.models.get_model', 'get_model', (["self.config['model']"], {}), "(self.config['model'])\n", (6719, 6741), False, 'from models.models import get_model\n'), ((6779, 6809), 'models.losses.get_loss', 'get_loss', (["self.config['model']"], {}), "(self.config['model'])\n", (6787, 6809), False, 'from models.losses import get_loss\n'), ((7183, 7195), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (7192, 7195), False, 'import yaml\n'), ((1691, 1917), 'logging.debug', 'logging.debug', (["('Experiment Name: %s, Epoch: %d, Train Loss: %.3f, Val Accuracy: %.3f, Val Loss: %.3f, Best Loss: %.3f'\n % (self.config['experiment_desc'], epoch, train_loss, val_loss,\n val_metric, self.best_metric))"], {}), "(\n 'Experiment Name: %s, Epoch: %d, Train Loss: %.3f, Val Accuracy: %.3f, Val Loss: %.3f, Best Loss: %.3f'\n % (self.config['experiment_desc'], epoch, train_loss, val_loss,\n val_metric, self.best_metric))\n", (1704, 1917), False, 'import logging\n'), ((3137, 3169), 'numpy.mean', 'np.mean', (['losses_G[-REPORT_EACH:]'], {}), '(losses_G[-REPORT_EACH:])\n', (3144, 3169), True, 'import numpy as np\n'), ((3189, 3223), 'numpy.mean', 'np.mean', (['losses_vgg[-REPORT_EACH:]'], {}), '(losses_vgg[-REPORT_EACH:])\n', (3196, 3223), True, 'import numpy as np\n'), ((3243, 3277), 'numpy.mean', 'np.mean', (['losses_adv[-REPORT_EACH:]'], {}), '(losses_adv[-REPORT_EACH:])\n', (3250, 3277), True, 'import numpy as np\n'), ((3293, 3322), 'numpy.mean', 'np.mean', (['psnrs[-REPORT_EACH:]'], {}), '(psnrs[-REPORT_EACH:])\n', (3300, 3322), True, 'import numpy as np\n'), ((3338, 3366), 'numpy.mean', 'np.mean', (['ssim[-REPORT_EACH:]'], {}), '(ssim[-REPORT_EACH:])\n', (3345, 3366), True, 'import numpy as np\n'), ((5861, 6063), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'patience': 
"self.config['scheduler']['patience']", 'factor': "self.config['scheduler']['factor']", 'min_lr': "self.config['scheduler']['min_lr']"}), "(optimizer, mode='min', patience=self.\n config['scheduler']['patience'], factor=self.config['scheduler'][\n 'factor'], min_lr=self.config['scheduler']['min_lr'])\n", (5897, 6063), True, 'import torch.optim as optim\n'), ((6184, 6206), 'schedulers.WarmRestart', 'WarmRestart', (['optimizer'], {}), '(optimizer)\n', (6195, 6206), False, 'from schedulers import WarmRestart, LinearDecay\n'), ((6275, 6440), 'schedulers.LinearDecay', 'LinearDecay', (['optimizer'], {'min_lr': "self.config['scheduler']['min_lr']", 'num_epochs': "self.config['num_epochs']", 'start_epoch': "self.config['scheduler']['start_epoch']"}), "(optimizer, min_lr=self.config['scheduler']['min_lr'],\n num_epochs=self.config['num_epochs'], start_epoch=self.config[\n 'scheduler']['start_epoch'])\n", (6286, 6440), False, 'from schedulers import WarmRestart, LinearDecay\n')]
|
'''
Author: <NAME>
Created Date: 2021-06-20
Last Modified: 2021-06-29
content:
'''
import os
import os.path as osp
from collections import OrderedDict
from functools import reduce
import mmcv
import numpy as np
from mmcv.utils import print_log
from prettytable import PrettyTable
from torch.utils.data import Dataset
from mmseg.core import eval_metrics, my_eval_metrics
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose, ComposeWithVisualization
from .custom import CustomDataset
@DATASETS.register_module()
class CustomDatasetCD(CustomDataset):
"""Custom dataset for change detection. An example of file structure
is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img1_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
        │   │   ├── img2_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
    The img/gt_semantic_seg filename pair of CustomDataset should be identical
    except for the suffix. A valid img/gt_semantic_seg filename pair should be
    like ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (the extension is also
    included in the suffix). If split is given, then ``xxx`` is specified in the
    txt file. Otherwise, all files in ``img_dir/`` and ``ann_dir`` will be loaded.
Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img1_dir (str): Path to first image directory
img2_dir (str): Path to second image directory
img_suffix (str): Suffix of images. Default: '.jpg'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, and
self.PALETTE is None, random palette will be generated.
Default: None
"""
CLASSES = None
PALETTE = None
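    # Hedged usage sketch (added for illustration, not taken from the original
    # repository): with the directory layout documented above, a dataset config
    # entry might look roughly like the following; the concrete paths and the
    # pipeline variable are placeholder assumptions:
    #
    #   dict(type='CustomDatasetCD',
    #        data_root='data/my_dataset',
    #        img1_dir='img1_dir/train',
    #        img2_dir='img2_dir/train',
    #        ann_dir='ann_dir/train',
    #        img_suffix='.jpg',
    #        seg_map_suffix='.png',
    #        pipeline=train_pipeline)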
def __init__(self,
pipeline,
img1_dir,
img2_dir,
img_suffix='.jpg',
ann_dir=None,
seg_map_suffix='.png',
split=None,
data_root=None,
test_mode=False,
ignore_index=255,
reduce_zero_label=False,
classes=None,
palette=None,
if_visualize=False,
):
self.pipeline = ComposeWithVisualization(pipeline, if_visualize=if_visualize)
self.img1_dir = img1_dir
self.img2_dir = img2_dir
self.img_suffix = img_suffix
self.ann_dir = ann_dir
self.seg_map_suffix = seg_map_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.ignore_index = ignore_index
self.reduce_zero_label = reduce_zero_label
self.label_map = None # map from old class index to new class index
self.CLASSES, self.PALETTE = self.get_classes_and_palette(
classes, palette)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.img1_dir):
self.img1_dir = osp.join(self.data_root, self.img1_dir)
self.img2_dir = osp.join(self.data_root, self.img2_dir)
if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.split is None or osp.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(self.img1_dir, self.img_suffix,
self.ann_dir,
self.seg_map_suffix, self.split)
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img1_prefix'] = self.img1_dir
results['img2_prefix'] = self.img2_dir
results['seg_prefix'] = self.ann_dir
if self.custom_classes:
results['label_map'] = self.label_map
def evaluate(self,
results,
metric='mIoU',
logger=None,
efficient_test=False,
**kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. 'mIoU',
'mDice' and 'mFscore' are supported.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
eval_results = {}
gt_seg_maps = self.get_gt_seg_maps(efficient_test)
if self.CLASSES is None:
num_classes = len(
reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
else:
num_classes = len(self.CLASSES)
ret_metrics = my_eval_metrics(
results,
gt_seg_maps,
num_classes,
self.ignore_index,
metric,
label_map=self.label_map,
reduce_zero_label=self.reduce_zero_label)
if self.CLASSES is None:
class_names = tuple(range(num_classes))
else:
class_names = self.CLASSES
# summary table
# ret_metrics_summary = OrderedDict({
# ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
# for ret_metric, ret_metric_value in ret_metrics.items()
# })
CD_metrics = ['FscoreCD', 'PrecisionCD', 'RecallCD']
ret_metrics_CD = OrderedDict({
key: np.round(np.nanmean(ret_metrics.pop(key)) * 100, 2)
for key in CD_metrics
})
# each class table
ret_metrics.pop('aAcc', None)
ret_metrics_class = OrderedDict({
ret_metric: np.round(ret_metric_value * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
ret_metrics_class.update({'Class': class_names})
ret_metrics_class.move_to_end('Class', last=False)
# for logger
class_table_data = PrettyTable()
for key, val in ret_metrics_class.items():
class_table_data.add_column(key, val)
# summary_table_data = PrettyTable()
# for key, val in ret_metrics_summary.items():
# if key == 'aAcc':
# summary_table_data.add_column(key, [val])
# else:
# summary_table_data.add_column('m' + key, [val])
CD_table_data = PrettyTable()
for key, val in ret_metrics_CD.items():
CD_table_data.add_column(key, [val])
print_log('per class results:', logger)
print_log('\n' + class_table_data.get_string(), logger=logger)
print_log('Summary:', logger)
# print_log('\n' + summary_table_data.get_string(), logger=logger)
print_log('\n' + CD_table_data.get_string(), logger=logger)
# each metric dict
# for key, value in ret_metrics_summary.items():
# if key == 'aAcc':
# eval_results[key] = value / 100.0
# else:
# eval_results['m' + key] = value / 100.0
# each metric dict
for key, value in ret_metrics_CD.items():
eval_results[key] = value / 100.0
ret_metrics_class.pop('Class', None)
for key, value in ret_metrics_class.items():
eval_results.update({
key + '.' + str(name): value[idx] / 100.0
for idx, name in enumerate(class_names)
})
if mmcv.is_list_of(results, str):
for file_name in results:
os.remove(file_name)
return eval_results
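# Hedged usage note (added for illustration): given `results` produced by a
# test run over a CustomDatasetCD instance, a call such as
#     eval_results = dataset.evaluate(results, metric='mIoU')
# returns the per-class metrics plus the change-detection summary keys
# 'FscoreCD', 'PrecisionCD' and 'RecallCD', each scaled back to the [0, 1]
# range by the loops above.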
|
[
"prettytable.PrettyTable",
"os.path.isabs",
"numpy.unique",
"mmcv.utils.print_log",
"mmseg.core.my_eval_metrics",
"os.path.join",
"mmcv.is_list_of",
"numpy.round",
"os.remove"
] |
[((6447, 6596), 'mmseg.core.my_eval_metrics', 'my_eval_metrics', (['results', 'gt_seg_maps', 'num_classes', 'self.ignore_index', 'metric'], {'label_map': 'self.label_map', 'reduce_zero_label': 'self.reduce_zero_label'}), '(results, gt_seg_maps, num_classes, self.ignore_index,\n metric, label_map=self.label_map, reduce_zero_label=self.reduce_zero_label)\n', (6462, 6596), False, 'from mmseg.core import eval_metrics, my_eval_metrics\n'), ((7671, 7684), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (7682, 7684), False, 'from prettytable import PrettyTable\n'), ((8090, 8103), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (8101, 8103), False, 'from prettytable import PrettyTable\n'), ((8210, 8249), 'mmcv.utils.print_log', 'print_log', (['"""per class results:"""', 'logger'], {}), "('per class results:', logger)\n", (8219, 8249), False, 'from mmcv.utils import print_log\n'), ((8329, 8358), 'mmcv.utils.print_log', 'print_log', (['"""Summary:"""', 'logger'], {}), "('Summary:', logger)\n", (8338, 8358), False, 'from mmcv.utils import print_log\n'), ((9147, 9176), 'mmcv.is_list_of', 'mmcv.is_list_of', (['results', 'str'], {}), '(results, str)\n', (9162, 9176), False, 'import mmcv\n'), ((4478, 4502), 'os.path.isabs', 'osp.isabs', (['self.img1_dir'], {}), '(self.img1_dir)\n', (4487, 4502), True, 'import os.path as osp\n'), ((4536, 4575), 'os.path.join', 'osp.join', (['self.data_root', 'self.img1_dir'], {}), '(self.data_root, self.img1_dir)\n', (4544, 4575), True, 'import os.path as osp\n'), ((4608, 4647), 'os.path.join', 'osp.join', (['self.data_root', 'self.img2_dir'], {}), '(self.data_root, self.img2_dir)\n', (4616, 4647), True, 'import os.path as osp\n'), ((4749, 4787), 'os.path.join', 'osp.join', (['self.data_root', 'self.ann_dir'], {}), '(self.data_root, self.ann_dir)\n', (4757, 4787), True, 'import os.path as osp\n'), ((4883, 4919), 'os.path.join', 'osp.join', (['self.data_root', 'self.split'], {}), '(self.data_root, self.split)\n', (4891, 4919), True, 'import os.path as osp\n'), ((7391, 7426), 'numpy.round', 'np.round', (['(ret_metric_value * 100)', '(2)'], {}), '(ret_metric_value * 100, 2)\n', (7399, 7426), True, 'import numpy as np\n'), ((9232, 9252), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (9241, 9252), False, 'import os\n'), ((4692, 4715), 'os.path.isabs', 'osp.isabs', (['self.ann_dir'], {}), '(self.ann_dir)\n', (4701, 4715), True, 'import os.path as osp\n'), ((4830, 4851), 'os.path.isabs', 'osp.isabs', (['self.split'], {}), '(self.split)\n', (4839, 4851), True, 'import os.path as osp\n'), ((6330, 6342), 'numpy.unique', 'np.unique', (['_'], {}), '(_)\n', (6339, 6342), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2018 <NAME>, SMBYC
# Email: xcorredorl at ideam.gov.co
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
import dask.array as da
import numpy as np
from stack_composed.image import Image
def statistic(stat, images, band, num_process, chunksize):
    # create an empty initial wrapper raster so that dask can manage the
    # processing in parallel chunks and store the result
wrapper_array = da.empty(Image.wrapper_shape, chunks=chunksize)
chunksize = wrapper_array.chunks[0][0]
    # call built-in numpy statistical functions with a specified axis;
    # axis=2 means it will compute along the 'depth' axis, per pixel,
    # with the return being n by m, the shape of each band.
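    # Small worked illustration of the axis=2 convention (values assumed for
    # the example, not taken from real imagery): for a single-pixel stack of
    # three scenes, stack_chunk = np.array([[[2.0, np.nan, 4.0]]]) has shape
    # (1, 1, 3), and np.nanmedian(stack_chunk, axis=2) returns array([[3.0]]),
    # i.e. one value per pixel with the depth dimension collapsed.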
#
# Compute the median
if stat == 'median':
def stat_func(stack_chunk, metadata):
return np.nanmedian(stack_chunk, axis=2)
# Compute the arithmetic mean
if stat == 'mean':
def stat_func(stack_chunk, metadata):
return np.nanmean(stack_chunk, axis=2)
# Compute the geometric mean
if stat == 'gmean':
def stat_func(stack_chunk, metadata):
product = np.nanprod(stack_chunk, axis=2)
count = np.count_nonzero(np.nan_to_num(stack_chunk), axis=2)
gmean = np.array([p ** (1.0 / c) for p, c in zip(product, count)])
gmean[gmean == 1] = np.nan
return gmean
# Compute the maximum value
if stat == 'max':
def stat_func(stack_chunk, metadata):
return np.nanmax(stack_chunk, axis=2)
# Compute the minimum value
if stat == 'min':
def stat_func(stack_chunk, metadata):
return np.nanmin(stack_chunk, axis=2)
# Compute the standard deviation
if stat == 'std':
def stat_func(stack_chunk, metadata):
return np.nanstd(stack_chunk, axis=2)
# Compute the valid pixels
# this count the valid data (no nans) across the z-axis
if stat == 'valid_pixels':
def stat_func(stack_chunk, metadata):
return stack_chunk.shape[2] - np.isnan(stack_chunk).sum(axis=2)
# Compute the percentile NN
if stat.startswith('percentile_'):
p = int(stat.split('_')[1])
def stat_func(stack_chunk, metadata):
return np.nanpercentile(stack_chunk, p, axis=2)
# Compute the last valid pixel
if stat == 'last_pixel':
def last_pixel(pixel_time_series, index_sort):
if np.isnan(pixel_time_series).all():
return np.nan
for index in index_sort:
if not np.isnan(pixel_time_series[index]):
return pixel_time_series[index]
def stat_func(stack_chunk, metadata):
index_sort = np.argsort(metadata['date'])[::-1] # from the most recent to the oldest
return np.apply_along_axis(last_pixel, 2, stack_chunk, index_sort)
# Compute the julian day of the last valid pixel
if stat == 'jday_last_pixel':
def jday_last_pixel(pixel_time_series, index_sort, jdays):
if np.isnan(pixel_time_series).all():
                return 0  # np.nan would be better, but returning nan here hits a multiprocessing bug
for index in index_sort:
if not np.isnan(pixel_time_series[index]):
return jdays[index]
def stat_func(stack_chunk, metadata):
index_sort = np.argsort(metadata['date'])[::-1] # from the most recent to the oldest
return np.apply_along_axis(jday_last_pixel, 2, stack_chunk, index_sort, metadata['jday'])
# Compute the julian day of the median value
if stat == 'jday_median':
def jday_median(pixel_time_series, index_sort, jdays):
if np.isnan(pixel_time_series).all():
                return 0  # np.nan would be better, but returning nan here hits a multiprocessing bug
jdays = [jdays[index] for index in index_sort if not np.isnan(pixel_time_series[index])]
return np.ceil(np.median(jdays))
def stat_func(stack_chunk, metadata):
index_sort = np.argsort(metadata['date']) # from the oldest to most recent
return np.apply_along_axis(jday_median, 2, stack_chunk, index_sort, metadata['jday'])
    # Compute the trimmed mean with lower limit and upper limit
if stat.startswith('trim_mean_'):
# TODO: check this stats when the time series have few data
lower = int(stat.split('_')[2])
upper = int(stat.split('_')[3])
def trim_mean(pixel_time_series):
if np.isnan(pixel_time_series).all():
            return 0  # np.nan would be better, but returning nan here hits a multiprocessing bug
pts = pixel_time_series[~np.isnan(pixel_time_series)]
if len(pts) <= 2:
return np.percentile(pts, (lower+upper)/2)
return np.mean(pts[(pts >= np.percentile(pts, lower)) & (pts <= np.percentile(pts, upper))])
def stat_func(stack_chunk, metadata):
return np.apply_along_axis(trim_mean, 2, stack_chunk)
# Compute the linear trend using least-squares method
if stat == 'linear_trend':
def linear_trend(pixel_time_series, index_sort, date_list):
if np.isnan(pixel_time_series).all() or len(pixel_time_series[~np.isnan(pixel_time_series)]) == 1:
return np.nan
# Unix timestamp in days
x = [int(int(date_list[index].strftime("%s")) / 86400) for index in index_sort]
x = [i-x[0] for i in x] # diff from minimum
pts = np.array([pixel_time_series[index] for index in index_sort])
y = np.ma.array(pts, mask=np.isnan(pts))
ssxm, ssxym, ssyxm, ssym = np.ma.cov(x, y, bias=1).flat
slope = ssxym / ssxm
return slope*1000000
def stat_func(stack_chunk, metadata):
index_sort = np.argsort(metadata['date']) # from the oldest to most recent
return np.apply_along_axis(linear_trend, 2, stack_chunk, index_sort, metadata['date'])
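    # Worked check of the covariance-based slope (illustrative numbers only):
    # for x = [0, 1, 2] (days) and y = [1.0, 3.0, 5.0], np.ma.cov(x, y, bias=1)
    # gives ssxm = 2/3 and ssxym = 4/3, so slope = ssxym / ssxm = 2.0 per day,
    # which the code above then scales by 1e6 before returning.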
# Compute the statistical for the respective chunk
def calc(block, block_id=None, chunksize=None):
yc = block_id[0] * chunksize
yc_size = block.shape[0]
xc = block_id[1] * chunksize
xc_size = block.shape[1]
# make stack reading all images only in specific chunk
chunks_list = [image.get_chunk_in_wrapper(band, xc, xc_size, yc, yc_size) for image in images]
# delete empty chunks
mask_none = [False if x is None else True for x in chunks_list]
chunks_list = np.array([i for i in chunks_list if i is not None])
if not chunks_list.size:
# all chunks are empty, return the chunk with nan
return np.full((yc_size, xc_size), np.nan)
# for some statistics that required filename as metadata
metadata = {}
if stat in ["last_pixel", "jday_last_pixel", "jday_median", "linear_trend"]:
metadata["date"] = np.array([image.date for image in images])[mask_none]
if stat in ["jday_last_pixel", "jday_median"]:
metadata["jday"] = np.array([image.jday for image in images])[mask_none]
stack_chunk = np.stack(chunks_list, axis=2)
return stat_func(stack_chunk, metadata)
# process
map_blocks = da.map_blocks(calc, wrapper_array, chunks=wrapper_array.chunks, chunksize=chunksize, dtype=float)
result_array = map_blocks.compute(num_workers=num_process, scheduler="processes")
return result_array
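# Hedged usage sketch (not part of the original module): assuming `images` is
# a list of stack_composed Image instances and Image.wrapper_shape has already
# been set while loading them, a call might look like
#     result = statistic('median', images, band=1, num_process=4, chunksize=512)
# where the band, process count and chunk size shown are placeholder values.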
|
[
"numpy.nanpercentile",
"dask.array.map_blocks",
"numpy.argsort",
"numpy.array",
"numpy.nanmean",
"numpy.nanmin",
"numpy.stack",
"numpy.nanmax",
"numpy.nanstd",
"numpy.full",
"dask.array.empty",
"numpy.nanprod",
"numpy.isnan",
"numpy.ma.cov",
"numpy.median",
"numpy.nanmedian",
"numpy.apply_along_axis",
"numpy.percentile",
"numpy.nan_to_num"
] |
[((652, 699), 'dask.array.empty', 'da.empty', (['Image.wrapper_shape'], {'chunks': 'chunksize'}), '(Image.wrapper_shape, chunks=chunksize)\n', (660, 699), True, 'import dask.array as da\n'), ((7581, 7683), 'dask.array.map_blocks', 'da.map_blocks', (['calc', 'wrapper_array'], {'chunks': 'wrapper_array.chunks', 'chunksize': 'chunksize', 'dtype': 'float'}), '(calc, wrapper_array, chunks=wrapper_array.chunks, chunksize=\n chunksize, dtype=float)\n', (7594, 7683), True, 'import dask.array as da\n'), ((6847, 6898), 'numpy.array', 'np.array', (['[i for i in chunks_list if i is not None]'], {}), '([i for i in chunks_list if i is not None])\n', (6855, 6898), True, 'import numpy as np\n'), ((7471, 7500), 'numpy.stack', 'np.stack', (['chunks_list'], {'axis': '(2)'}), '(chunks_list, axis=2)\n', (7479, 7500), True, 'import numpy as np\n'), ((1071, 1104), 'numpy.nanmedian', 'np.nanmedian', (['stack_chunk'], {'axis': '(2)'}), '(stack_chunk, axis=2)\n', (1083, 1104), True, 'import numpy as np\n'), ((1228, 1259), 'numpy.nanmean', 'np.nanmean', (['stack_chunk'], {'axis': '(2)'}), '(stack_chunk, axis=2)\n', (1238, 1259), True, 'import numpy as np\n'), ((1386, 1417), 'numpy.nanprod', 'np.nanprod', (['stack_chunk'], {'axis': '(2)'}), '(stack_chunk, axis=2)\n', (1396, 1417), True, 'import numpy as np\n'), ((1754, 1784), 'numpy.nanmax', 'np.nanmax', (['stack_chunk'], {'axis': '(2)'}), '(stack_chunk, axis=2)\n', (1763, 1784), True, 'import numpy as np\n'), ((1905, 1935), 'numpy.nanmin', 'np.nanmin', (['stack_chunk'], {'axis': '(2)'}), '(stack_chunk, axis=2)\n', (1914, 1935), True, 'import numpy as np\n'), ((2061, 2091), 'numpy.nanstd', 'np.nanstd', (['stack_chunk'], {'axis': '(2)'}), '(stack_chunk, axis=2)\n', (2070, 2091), True, 'import numpy as np\n'), ((2510, 2550), 'numpy.nanpercentile', 'np.nanpercentile', (['stack_chunk', 'p'], {'axis': '(2)'}), '(stack_chunk, p, axis=2)\n', (2526, 2550), True, 'import numpy as np\n'), ((3063, 3122), 'numpy.apply_along_axis', 'np.apply_along_axis', (['last_pixel', '(2)', 'stack_chunk', 'index_sort'], {}), '(last_pixel, 2, stack_chunk, index_sort)\n', (3082, 3122), True, 'import numpy as np\n'), ((3735, 3822), 'numpy.apply_along_axis', 'np.apply_along_axis', (['jday_last_pixel', '(2)', 'stack_chunk', 'index_sort', "metadata['jday']"], {}), "(jday_last_pixel, 2, stack_chunk, index_sort, metadata[\n 'jday'])\n", (3754, 3822), True, 'import numpy as np\n'), ((4336, 4364), 'numpy.argsort', 'np.argsort', (["metadata['date']"], {}), "(metadata['date'])\n", (4346, 4364), True, 'import numpy as np\n'), ((4418, 4496), 'numpy.apply_along_axis', 'np.apply_along_axis', (['jday_median', '(2)', 'stack_chunk', 'index_sort', "metadata['jday']"], {}), "(jday_median, 2, stack_chunk, index_sort, metadata['jday'])\n", (4437, 4496), True, 'import numpy as np\n'), ((5275, 5321), 'numpy.apply_along_axis', 'np.apply_along_axis', (['trim_mean', '(2)', 'stack_chunk'], {}), '(trim_mean, 2, stack_chunk)\n', (5294, 5321), True, 'import numpy as np\n'), ((5825, 5885), 'numpy.array', 'np.array', (['[pixel_time_series[index] for index in index_sort]'], {}), '([pixel_time_series[index] for index in index_sort])\n', (5833, 5885), True, 'import numpy as np\n'), ((6146, 6174), 'numpy.argsort', 'np.argsort', (["metadata['date']"], {}), "(metadata['date'])\n", (6156, 6174), True, 'import numpy as np\n'), ((6228, 6307), 'numpy.apply_along_axis', 'np.apply_along_axis', (['linear_trend', '(2)', 'stack_chunk', 'index_sort', "metadata['date']"], {}), "(linear_trend, 2, stack_chunk, index_sort, metadata['date'])\n", 
(6247, 6307), True, 'import numpy as np\n'), ((7014, 7049), 'numpy.full', 'np.full', (['(yc_size, xc_size)', 'np.nan'], {}), '((yc_size, xc_size), np.nan)\n', (7021, 7049), True, 'import numpy as np\n'), ((1455, 1481), 'numpy.nan_to_num', 'np.nan_to_num', (['stack_chunk'], {}), '(stack_chunk)\n', (1468, 1481), True, 'import numpy as np\n'), ((2971, 2999), 'numpy.argsort', 'np.argsort', (["metadata['date']"], {}), "(metadata['date'])\n", (2981, 2999), True, 'import numpy as np\n'), ((3643, 3671), 'numpy.argsort', 'np.argsort', (["metadata['date']"], {}), "(metadata['date'])\n", (3653, 3671), True, 'import numpy as np\n'), ((4246, 4262), 'numpy.median', 'np.median', (['jdays'], {}), '(jdays)\n', (4255, 4262), True, 'import numpy as np\n'), ((5068, 5107), 'numpy.percentile', 'np.percentile', (['pts', '((lower + upper) / 2)'], {}), '(pts, (lower + upper) / 2)\n', (5081, 5107), True, 'import numpy as np\n'), ((5979, 6002), 'numpy.ma.cov', 'np.ma.cov', (['x', 'y'], {'bias': '(1)'}), '(x, y, bias=1)\n', (5988, 6002), True, 'import numpy as np\n'), ((7254, 7296), 'numpy.array', 'np.array', (['[image.date for image in images]'], {}), '([image.date for image in images])\n', (7262, 7296), True, 'import numpy as np\n'), ((7394, 7436), 'numpy.array', 'np.array', (['[image.jday for image in images]'], {}), '([image.jday for image in images])\n', (7402, 7436), True, 'import numpy as np\n'), ((2686, 2713), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (2694, 2713), True, 'import numpy as np\n'), ((2811, 2845), 'numpy.isnan', 'np.isnan', (['pixel_time_series[index]'], {}), '(pixel_time_series[index])\n', (2819, 2845), True, 'import numpy as np\n'), ((3293, 3320), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (3301, 3320), True, 'import numpy as np\n'), ((3495, 3529), 'numpy.isnan', 'np.isnan', (['pixel_time_series[index]'], {}), '(pixel_time_series[index])\n', (3503, 3529), True, 'import numpy as np\n'), ((3976, 4003), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (3984, 4003), True, 'import numpy as np\n'), ((4807, 4834), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (4815, 4834), True, 'import numpy as np\n'), ((4986, 5013), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (4994, 5013), True, 'import numpy as np\n'), ((5924, 5937), 'numpy.isnan', 'np.isnan', (['pts'], {}), '(pts)\n', (5932, 5937), True, 'import numpy as np\n'), ((2303, 2324), 'numpy.isnan', 'np.isnan', (['stack_chunk'], {}), '(stack_chunk)\n', (2311, 2324), True, 'import numpy as np\n'), ((4183, 4217), 'numpy.isnan', 'np.isnan', (['pixel_time_series[index]'], {}), '(pixel_time_series[index])\n', (4191, 4217), True, 'import numpy as np\n'), ((5495, 5522), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (5503, 5522), True, 'import numpy as np\n'), ((5143, 5168), 'numpy.percentile', 'np.percentile', (['pts', 'lower'], {}), '(pts, lower)\n', (5156, 5168), True, 'import numpy as np\n'), ((5180, 5205), 'numpy.percentile', 'np.percentile', (['pts', 'upper'], {}), '(pts, upper)\n', (5193, 5205), True, 'import numpy as np\n'), ((5555, 5582), 'numpy.isnan', 'np.isnan', (['pixel_time_series'], {}), '(pixel_time_series)\n', (5563, 5582), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: methods.deployment.GHGSat1
# Purpose: GHGSat1 company specific deployment classes and methods
#
# Copyright (C) 2018-2021 Intelligent Methane Monitoring and Management System (IM3S) Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
import random
import numpy as np
from utils.generic_functions import geo_idx
def detect_emissions(self, site, covered_leaks, covered_equipment_rates, covered_site_rate,
site_rate, venting, equipment_rates):
equip_measured_rates = []
site_measured_rate = 0
found_leak = False
n_leaks = len(covered_leaks)
missed_leaks_str = '{}_missed_leaks'.format(self.config['label'])
# extract the wind speed on site based on site's geo indices
site_lat = np.float16(site['lat'])
site_lon = np.float16(site['lon'])
lat_idx = geo_idx(site_lat, self.state['weather'].latitude)
lon_idx = geo_idx(site_lon, self.state['weather'].longitude)
ti = self.state['t'].current_timestep
windspeed = self.state['weather'].winds[ti, lat_idx, lon_idx]
    # MDL is calculated based on wind speed and parameters
    # listed in Jacob et al., 2016.
    # For a point source, MDL is proportional to wind speed:
    # when wind speed is 5 km/h (1.39 m/s) the MDL is 5.79 g/s
Q_min = self.config['sensor']['MDL'][0] * (self.config['sensor']['MDL'][1]/windspeed)
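    # Worked illustration of the formula above (assuming the config carries
    # MDL = [5.79, 1.39], as the comment implies): at windspeed = 1.39 m/s,
    # Q_min = 5.79 * (1.39 / 1.39) = 5.79 g/s, matching the quoted value.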
# check detection
if covered_site_rate > Q_min:
# calculate the measured emission size
# Based on Table1 of Jacob et al.,2016, the precision of
# GHGSat can be off by sigma (usually between 1% to 5%)
# sample a sigma number
sigma = random.choice([0.01, 0.02, 0.03, 0.04, 0.05])
site_measured_rate = covered_site_rate * (1 - sigma)
found_leak = True
else:
site_dict = None
site[missed_leaks_str] += n_leaks
self.timeseries[missed_leaks_str][self.state['t'].current_timestep] += n_leaks
site_dict = {
'site': site,
'leaks_present': covered_leaks,
'site_true_rate': site_rate,
'site_measured_rate': site_measured_rate,
'equip_measured_rates': equip_measured_rates,
'vent_rate': venting,
'found_leak': found_leak,
}
return site_dict
|
[
"random.choice",
"utils.generic_functions.geo_idx",
"numpy.float16"
] |
[((1445, 1468), 'numpy.float16', 'np.float16', (["site['lat']"], {}), "(site['lat'])\n", (1455, 1468), True, 'import numpy as np\n'), ((1484, 1507), 'numpy.float16', 'np.float16', (["site['lon']"], {}), "(site['lon'])\n", (1494, 1507), True, 'import numpy as np\n'), ((1522, 1571), 'utils.generic_functions.geo_idx', 'geo_idx', (['site_lat', "self.state['weather'].latitude"], {}), "(site_lat, self.state['weather'].latitude)\n", (1529, 1571), False, 'from utils.generic_functions import geo_idx\n'), ((1586, 1636), 'utils.generic_functions.geo_idx', 'geo_idx', (['site_lon', "self.state['weather'].longitude"], {}), "(site_lon, self.state['weather'].longitude)\n", (1593, 1636), False, 'from utils.generic_functions import geo_idx\n'), ((2323, 2368), 'random.choice', 'random.choice', (['[0.01, 0.02, 0.03, 0.04, 0.05]'], {}), '([0.01, 0.02, 0.03, 0.04, 0.05])\n', (2336, 2368), False, 'import random\n')]
|
import pytest
from hyperloop.Python import magnetic_drag
import numpy as np
from openmdao.api import Group, Problem
def create_problem(magdrag):
root = Group()
prob = Problem(root)
prob.root.add('comp', magdrag)
return prob
class TestVac(object):
def test_case1_vs_breakpoint(self):
magdrag = magnetic_drag.MagDrag()
prob = create_problem(magdrag)
prob.setup()
prob['comp.v'] = 23
prob['comp.R'] = 0.019269
prob['comp.L'] = 3.59023e-6
prob['comp.Fyu'] = 29430.0
prob['comp.lam'] = 0.125658
prob.run()
print('magdrag is %f' % prob['comp.magdraglev'])
assert np.isclose(prob['comp.magdraglev'], 137342.0, rtol=.001)
|
[
"hyperloop.Python.magnetic_drag.MagDrag",
"numpy.isclose",
"openmdao.api.Problem",
"openmdao.api.Group"
] |
[((158, 165), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (163, 165), False, 'from openmdao.api import Group, Problem\n'), ((177, 190), 'openmdao.api.Problem', 'Problem', (['root'], {}), '(root)\n', (184, 190), False, 'from openmdao.api import Group, Problem\n'), ((326, 349), 'hyperloop.Python.magnetic_drag.MagDrag', 'magnetic_drag.MagDrag', ([], {}), '()\n', (347, 349), False, 'from hyperloop.Python import magnetic_drag\n'), ((675, 732), 'numpy.isclose', 'np.isclose', (["prob['comp.magdraglev']", '(137342.0)'], {'rtol': '(0.001)'}), "(prob['comp.magdraglev'], 137342.0, rtol=0.001)\n", (685, 732), True, 'import numpy as np\n')]
|
import itertools
import logging
import os.path as osp
import tempfile
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .builder import DATASETS
from .coco import CocoDataset
# DATASETS.register_module(name='LVISDataset', module=LVISDataset)
# LVISDataset = LVISV05Dataset
# DATASETS.register_module(name='LVISDataset', module=LVISDataset)
@DATASETS.register_module()
class LVISV1Dataset(CocoDataset):
CLASSES = (
'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
'folding_chair', 'food_processor', 'football_(American)',
'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
'lizard', 'log', 'lollipop', 'speaker_(stero_equipment)', 'loveseat',
'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'reflector', 'remote_control',
'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
'rolling_pin', 'root_beer', 'router_(computer_equipment)',
'rubber_band', 'runner_(carpet)', 'plastic_bag',
'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
'washbasin', 'automatic_washer', 'watch', 'water_bottle',
'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
# def load_annotations(self, ann_file):
# try:
# import lvis
# assert lvis.__version__ >= '10.5.3'
# from lvis import LVIS
# except AssertionError:
# raise AssertionError('Incompatible version of lvis is installed. '
# 'Run pip uninstall lvis first. Then run pip '
# 'install mmlvis to install open-mmlab forked '
# 'lvis. ')
# except ImportError:
# raise ImportError('Package lvis is not installed. Please run pip '
# 'install mmlvis to install open-mmlab forked '
# 'lvis.')
# self.coco = LVIS(ann_file)
# # assert not self.custom_classes, 'LVIS custom classes is not supported'
# self.cat_ids = self.coco.get_cat_ids()
# self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
# self.img_ids = self.coco.get_img_ids()
# data_infos = []
# for i in self.img_ids:
# info = self.coco.load_imgs([i])[0]
# # coco_url is used in LVISv1 instead of file_name
# # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
# # train/val split in specified in url
# info['filename'] = info['coco_url'].replace(
# 'http://images.cocodataset.org/', '')
# data_infos.append(info)
# return data_infos
def load_annotations(self, ann_file):
try:
import lvis
assert lvis.__version__ >= '10.5.3'
from lvis import LVIS
except AssertionError:
raise AssertionError('Incompatible version of lvis is installed. '
'Run pip uninstall lvis first. Then run pip '
'install mmlvis to install open-mmlab forked '
'lvis. ')
except ImportError:
raise ImportError('Package lvis is not installed. Please run pip '
'install mmlvis to install open-mmlab forked '
'lvis.')
self.lvis = LVIS(ann_file)
self.full_cat_ids = self.lvis.get_cat_ids()
self.full_cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.full_cat_ids)
}
self.CLASSES = tuple([item['name'] for item in self.lvis.dataset['categories']])
self.cat_ids = self.lvis.get_cat_ids()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.lvis.get_img_ids()
self.img_infos = []
for i in self.img_ids:
info = self.lvis.load_imgs([i])[0]
info['filename'] = info['coco_url'].replace(
'http://images.cocodataset.org/', '')
self.img_infos.append(info)
return self.img_infos
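# Editor's note (sketch, not in the original file): each entry in img_infos is
# the raw LVIS image record (including 'id', 'width', 'height' and 'coco_url')
# plus a 'filename' key derived from 'coco_url'; the methods below rely only on
# 'id', 'width', 'height' and 'filename'.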
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
ann_info = self.lvis.load_anns(ann_ids)
return self._parse_ann_info(self.img_infos[idx], ann_info)
def get_ann_info_withoutparse(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
ann_info = self.lvis.load_anns(ann_ids)
return ann_info
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.lvis.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
gt_masks = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if 'iscrowd' in ann.keys():
if ann['iscrowd']:
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks.append(self.lvis.ann_to_mask(ann))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks,
seg_map=seg_map)
return ann
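# Editor's sketch of the parsed annotation dict (illustrative only, inferred
# from the code above):
#   {
#       'bboxes': float32 ndarray of shape (n, 4) in [x1, y1, x2, y2] order,
#       'labels': int64 ndarray of shape (n,) with 1-based category labels,
#       'bboxes_ignore': float32 ndarray of shape (k, 4) for crowd boxes,
#       'masks': list of n binary masks produced by LVIS.ann_to_mask,
#       'seg_map': the image filename with its extension rewritten to png,
#   }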
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05)):
"""Evaluation in LVIS protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): Prefix of the output json files, including
the file path. If None, a temporary directory is used instead.
Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU thresholds used for evaluating
recalls. If set to a list, the average recall over all IoUs will
also be computed. Default: np.arange(0.5, 0.96, 0.05).
Returns:
dict[str, float]: LVIS style metrics.
"""
try:
import lvis
assert lvis.__version__ >= '10.5.3'
from lvis import LVISResults, LVISEval
except AssertionError:
raise AssertionError('Incompatible version of lvis is installed. '
'Run pip uninstall lvis first. Then run pip '
'install mmlvis to install open-mmlab forked '
'lvis. ')
except ImportError:
raise ImportError('Package lvis is not installed. Please run pip '
'install mmlvis to install open-mmlab forked '
'lvis.')
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError('metric {} is not supported'.format(metric))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
eval_results = {}
# get the original LVIS api object (built in load_annotations)
lvis_gt = self.lvis
for metric in metrics:
msg = 'Evaluating {}...'.format(metric)
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results['AR@{}'.format(num)] = ar[i]
log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError('{} is not in results'.format(metric))
try:
lvis_dt = LVISResults(lvis_gt, result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset is empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
lvis_eval.params.imgIds = self.img_ids
if metric == 'proposal':
lvis_eval.params.useCats = 0
lvis_eval.params.maxDets = list(proposal_nums)
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
for k, v in lvis_eval.get_results().items():
if k.startswith('AR'):
val = float('{:.3f}'.format(float(v)))
eval_results[k] = val
else:
# lvis_eval.params.useCats = 1  # manually changed
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
lvis_results = lvis_eval.get_results()
if classwise:  # Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = lvis_eval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.lvis.load_cats([catId])[0]
# print(max(precisions[5,:,idx,0]))
# precision = precisions[:, :, idx, 0, -1]
precision = precisions[:, :, idx, 0] # manually
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
np.save("classwise_AP.npy", np.array(table_data))
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
for k, v in lvis_results.items():
if k.startswith('AP'):
key = '{}_{}'.format(metric, k)
val = float('{:.3f}'.format(float(v)))
eval_results[key] = val
ap_summary = ' '.join([
'{}:{:.3f}'.format(k, float(v))
for k, v in lvis_results.items() if k.startswith('AP')
])
eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
lvis_eval.print_results()
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
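# Example usage sketch (editor's addition; paths, pipeline and variable names
# are hypothetical, not from the original file):
#   dataset = LvisDataset(ann_file='annotations/lvis_val.json', pipeline=[...])
#   results = detector_outputs  # one entry per image, in dataset order
#   metrics = dataset.evaluate(results, metric='bbox', classwise=True)
#   print(metrics['bbox_mAP_copypaste'])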
#"============================================================="
# from .custom import CustomDataset
# # from .custom_instaboost import CustomDataset
# from .registry import DATASETS
# from lvis.lvis import LVIS
# @DATASETS.register_module
# class LvisDataset(CustomDataset):
# def load_annotations(self, ann_file):
# self.lvis = LVIS(ann_file)
# self.full_cat_ids = self.lvis.get_cat_ids()
# self.full_cat2label = {
# cat_id: i + 1
# for i, cat_id in enumerate(self.full_cat_ids)
# }
# self.CLASSES = tuple([item['name'] for item in self.lvis.dataset['categories']])
# self.cat_ids = self.lvis.get_cat_ids()
# self.cat2label = {
# cat_id: i + 1
# for i, cat_id in enumerate(self.cat_ids)
# }
# self.img_ids = self.lvis.get_img_ids()
# img_infos = []
# for i in self.img_ids:
# info = self.lvis.load_imgs([i])[0]
# info['filename'] = info['file_name'].split('_')[-1]
# img_infos.append(info)
# return img_infos
# def get_ann_info(self, idx):
# img_id = self.img_infos[idx]['id']
# ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
# ann_info = self.lvis.load_anns(ann_ids)
# return self._parse_ann_info(self.img_infos[idx], ann_info)
# def get_ann_info_withoutparse(self, idx):
# img_id = self.img_infos[idx]['id']
# ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
# ann_info = self.lvis.load_anns(ann_ids)
# return ann_info
# def _filter_imgs(self, min_size=32):
# """Filter images too small or without ground truths."""
# valid_inds = []
# ids_with_ann = set(_['image_id'] for _ in self.lvis.anns.values())
# for i, img_info in enumerate(self.img_infos):
# if self.img_ids[i] not in ids_with_ann:
# continue
# if min(img_info['width'], img_info['height']) >= min_size:
# valid_inds.append(i)
# return valid_inds
# def _parse_ann_info(self, img_info, ann_info):
# """Parse bbox and mask annotation.
# Args:
# ann_info (list[dict]): Annotation info of an image.
# with_mask (bool): Whether to parse mask annotations.
# Returns:
# dict: A dict containing the following keys: bboxes, bboxes_ignore,
# labels, masks, mask_polys, poly_lens.
# """
# gt_bboxes = []
# gt_labels = []
# gt_bboxes_ignore = []
# # Two formats are provided.
# # 1. mask: a binary map of the same size of the image.
# # 2. polys: each mask consists of one or several polys, each poly is a
# # list of float.
# gt_masks = []
# for i, ann in enumerate(ann_info):
# if ann.get('ignore', False):
# continue
# x1, y1, w, h = ann['bbox']
# if ann['area'] <= 0 or w < 1 or h < 1:
# continue
# bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
# if 'iscrowd' in ann.keys():
# if ann['iscrowd']:
# gt_bboxes_ignore.append(bbox)
# else:
# gt_bboxes.append(bbox)
# gt_labels.append(self.cat2label[ann['category_id']])
# gt_masks.append(self.lvis.ann_to_mask(ann))
# if gt_bboxes:
# gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
# gt_labels = np.array(gt_labels, dtype=np.int64)
# else:
# gt_bboxes = np.zeros((0, 4), dtype=np.float32)
# gt_labels = np.array([], dtype=np.int64)
# if gt_bboxes_ignore:
# gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
# else:
# gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
# seg_map = img_info['filename'].replace('jpg', 'png')
# ann = dict(
# bboxes=gt_bboxes,
# labels=gt_labels,
# bboxes_ignore=gt_bboxes_ignore,
# masks=gt_masks,
# seg_map=seg_map)
# return ann
|
[
"itertools.chain",
"tempfile.TemporaryDirectory",
"lvis.LVIS",
"lvis.LVISEval",
"numpy.mean",
"mmcv.utils.print_log",
"os.path.join",
"numpy.array",
"numpy.zeros",
"lvis.LVISResults",
"terminaltables.AsciiTable",
"numpy.arange"
] |
[((20183, 20197), 'lvis.LVIS', 'LVIS', (['ann_file'], {}), '(ann_file)\n', (20187, 20197), False, 'from lvis import LVIS\n'), ((24173, 24199), 'numpy.arange', 'np.arange', (['(0.5)', '(0.96)', '(0.05)'], {}), '(0.5, 0.96, 0.05)\n', (24182, 24199), True, 'import numpy as np\n'), ((23239, 23276), 'numpy.array', 'np.array', (['gt_bboxes'], {'dtype': 'np.float32'}), '(gt_bboxes, dtype=np.float32)\n', (23247, 23276), True, 'import numpy as np\n'), ((23301, 23336), 'numpy.array', 'np.array', (['gt_labels'], {'dtype': 'np.int64'}), '(gt_labels, dtype=np.int64)\n', (23309, 23336), True, 'import numpy as np\n'), ((23375, 23409), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (23383, 23409), True, 'import numpy as np\n'), ((23434, 23462), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (23442, 23462), True, 'import numpy as np\n'), ((23524, 23568), 'numpy.array', 'np.array', (['gt_bboxes_ignore'], {'dtype': 'np.float32'}), '(gt_bboxes_ignore, dtype=np.float32)\n', (23532, 23568), True, 'import numpy as np\n'), ((23614, 23648), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (23622, 23648), True, 'import numpy as np\n'), ((26449, 26478), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (26476, 26478), False, 'import tempfile\n'), ((26509, 26542), 'os.path.join', 'osp.join', (['tmp_dir.name', '"""results"""'], {}), "(tmp_dir.name, 'results')\n", (26517, 26542), True, 'import os.path as osp\n'), ((26892, 26921), 'mmcv.utils.print_log', 'print_log', (['msg'], {'logger': 'logger'}), '(msg, logger=logger)\n', (26901, 26921), False, 'from mmcv.utils import print_log\n'), ((27939, 27975), 'lvis.LVISEval', 'LVISEval', (['lvis_gt', 'lvis_dt', 'iou_type'], {}), '(lvis_gt, lvis_dt, iou_type)\n', (27947, 27975), False, 'from lvis import LVISResults, LVISEval\n'), ((27359, 27392), 'mmcv.utils.print_log', 'print_log', (['log_msg'], {'logger': 'logger'}), '(log_msg, logger=logger)\n', (27368, 27392), False, 'from mmcv.utils import print_log\n'), ((27575, 27617), 'lvis.LVISResults', 'LVISResults', (['lvis_gt', 'result_files[metric]'], {}), '(lvis_gt, result_files[metric])\n', (27586, 27617), False, 'from lvis import LVISResults, LVISEval\n'), ((27665, 27769), 'mmcv.utils.print_log', 'print_log', (['"""The testing results of the whole dataset is empty."""'], {'logger': 'logger', 'level': 'logging.ERROR'}), "('The testing results of the whole dataset is empty.', logger=\n logger, level=logging.ERROR)\n", (27674, 27769), False, 'from mmcv.utils import print_log\n'), ((30720, 30742), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (30730, 30742), False, 'from terminaltables import AsciiTable\n'), ((30763, 30807), 'mmcv.utils.print_log', 'print_log', (["('\\n' + table.table)"], {'logger': 'logger'}), "('\\n' + table.table, logger=logger)\n", (30772, 30807), False, 'from mmcv.utils import print_log\n'), ((30212, 30250), 'itertools.chain', 'itertools.chain', (['*results_per_category'], {}), '(*results_per_category)\n', (30227, 30250), False, 'import itertools\n'), ((30670, 30690), 'numpy.array', 'np.array', (['table_data'], {}), '(table_data)\n', (30678, 30690), True, 'import numpy as np\n'), ((29855, 29873), 'numpy.mean', 'np.mean', (['precision'], {}), '(precision)\n', (29862, 29873), True, 'import numpy as np\n')]
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import numpy as np
import pytest
import cirq
from cirq.linalg import matrix_commutes
def test_is_diagonal():
assert cirq.is_diagonal(np.empty((0, 0)))
assert cirq.is_diagonal(np.empty((1, 0)))
assert cirq.is_diagonal(np.empty((0, 1)))
assert cirq.is_diagonal(np.array([[1]]))
assert cirq.is_diagonal(np.array([[-1]]))
assert cirq.is_diagonal(np.array([[5]]))
assert cirq.is_diagonal(np.array([[3j]]))
assert cirq.is_diagonal(np.array([[1, 0]]))
assert cirq.is_diagonal(np.array([[1], [0]]))
assert not cirq.is_diagonal(np.array([[1, 1]]))
assert not cirq.is_diagonal(np.array([[1], [1]]))
assert cirq.is_diagonal(np.array([[5j, 0], [0, 2]]))
assert cirq.is_diagonal(np.array([[1, 0], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0.1], [0.1, 1]]))
assert cirq.is_diagonal(np.array([[1, 1e-11], [1e-10, 1]]))
def test_is_diagonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_diagonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_diagonal(np.array([[1, 0.5], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0.5], [-0.6, 1]]), atol=atol)
def test_is_hermitian():
assert cirq.is_hermitian(np.empty((0, 0)))
assert not cirq.is_hermitian(np.empty((1, 0)))
assert not cirq.is_hermitian(np.empty((0, 1)))
assert cirq.is_hermitian(np.array([[1]]))
assert cirq.is_hermitian(np.array([[-1]]))
assert cirq.is_hermitian(np.array([[5]]))
assert not cirq.is_hermitian(np.array([[3j]]))
assert not cirq.is_hermitian(np.array([[0, 0]]))
assert not cirq.is_hermitian(np.array([[0], [0]]))
assert not cirq.is_hermitian(np.array([[5j, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[5, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[1, 0], [0, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0], [1, 1]]))
assert not cirq.is_hermitian(np.array([[1, 1], [0, 1]]))
assert cirq.is_hermitian(np.array([[1, 1], [1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_hermitian(np.array([[1, 1j], [1j, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0.1], [-0.1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j + 1e-11], [-1j, 1 + 1j * 1e-9]]))
def test_is_hermitian_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_hermitian(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert cirq.is_hermitian(np.array([[1, 0.25], [-0.25, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.25], [-0.35, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_hermitian(np.array([[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
def test_is_unitary():
assert cirq.is_unitary(np.empty((0, 0)))
assert not cirq.is_unitary(np.empty((1, 0)))
assert not cirq.is_unitary(np.empty((0, 1)))
assert cirq.is_unitary(np.array([[1]]))
assert cirq.is_unitary(np.array([[-1]]))
assert cirq.is_unitary(np.array([[1j]]))
assert not cirq.is_unitary(np.array([[5]]))
assert not cirq.is_unitary(np.array([[3j]]))
assert not cirq.is_unitary(np.array([[1, 0]]))
assert not cirq.is_unitary(np.array([[1], [0]]))
assert not cirq.is_unitary(np.array([[1, 0], [0, -2]]))
assert cirq.is_unitary(np.array([[1, 0], [0, -1]]))
assert cirq.is_unitary(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_unitary(np.array([[1, 0], [1, 1]]))
assert not cirq.is_unitary(np.array([[1, 1], [0, 1]]))
assert not cirq.is_unitary(np.array([[1, 1], [1, 1]]))
assert not cirq.is_unitary(np.array([[1, -1], [1, 1]]))
assert cirq.is_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert cirq.is_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_unitary(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_unitary(np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_unitary_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_unitary(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_unitary(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_unitary(np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), atol=atol)
def test_is_orthogonal():
assert cirq.is_orthogonal(np.empty((0, 0)))
assert not cirq.is_orthogonal(np.empty((1, 0)))
assert not cirq.is_orthogonal(np.empty((0, 1)))
assert cirq.is_orthogonal(np.array([[1]]))
assert cirq.is_orthogonal(np.array([[-1]]))
assert not cirq.is_orthogonal(np.array([[1j]]))
assert not cirq.is_orthogonal(np.array([[5]]))
assert not cirq.is_orthogonal(np.array([[3j]]))
assert not cirq.is_orthogonal(np.array([[1, 0]]))
assert not cirq.is_orthogonal(np.array([[1], [0]]))
assert not cirq.is_orthogonal(np.array([[1, 0], [0, -2]]))
assert cirq.is_orthogonal(np.array([[1, 0], [0, -1]]))
assert not cirq.is_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_orthogonal(np.array([[1, -1], [1, 1]]))
assert cirq.is_orthogonal(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not cirq.is_orthogonal(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_orthogonal(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_orthogonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_orthogonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_orthogonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_orthogonal(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_orthogonal(np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), atol=atol)
def test_is_special_orthogonal():
assert cirq.is_special_orthogonal(np.empty((0, 0)))
assert not cirq.is_special_orthogonal(np.empty((1, 0)))
assert not cirq.is_special_orthogonal(np.empty((0, 1)))
assert cirq.is_special_orthogonal(np.array([[1]]))
assert not cirq.is_special_orthogonal(np.array([[-1]]))
assert not cirq.is_special_orthogonal(np.array([[1j]]))
assert not cirq.is_special_orthogonal(np.array([[5]]))
assert not cirq.is_special_orthogonal(np.array([[3j]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0]]))
assert not cirq.is_special_orthogonal(np.array([[1], [0]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [0, -2]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [0, -1]]))
assert cirq.is_special_orthogonal(np.array([[-1, 0], [0, -1]]))
assert not cirq.is_special_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, -1], [1, 1]]))
assert cirq.is_special_orthogonal(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [1, -1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_special_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_special_orthogonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_special_orthogonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_special_orthogonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries, except for determinant factors.
assert cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), atol=atol
)
assert not cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol
)
assert not cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), atol=atol
)
def test_is_special_unitary():
assert cirq.is_special_unitary(np.empty((0, 0)))
assert not cirq.is_special_unitary(np.empty((1, 0)))
assert not cirq.is_special_unitary(np.empty((0, 1)))
assert cirq.is_special_unitary(np.array([[1]]))
assert not cirq.is_special_unitary(np.array([[-1]]))
assert not cirq.is_special_unitary(np.array([[5]]))
assert not cirq.is_special_unitary(np.array([[3j]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [0, -2]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [0, -1]]))
assert cirq.is_special_unitary(np.array([[-1, 0], [0, -1]]))
assert not cirq.is_special_unitary(np.array([[1j, 0], [0, 1]]))
assert cirq.is_special_unitary(np.array([[1j, 0], [0, -1j]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [1, 1]]))
assert not cirq.is_special_unitary(np.array([[1, 1], [0, 1]]))
assert not cirq.is_special_unitary(np.array([[1, 1], [1, 1]]))
assert not cirq.is_special_unitary(np.array([[1, -1], [1, 1]]))
assert cirq.is_special_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert cirq.is_special_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_unitary(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_special_unitary(np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_special_unitary_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_special_unitary(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_special_unitary(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert cirq.is_special_unitary(np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.1), atol=atol)
assert not cirq.is_special_unitary(np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.3), atol=atol)
# Error isn't accumulated across entries, except for determinant factors.
assert cirq.is_special_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), atol=atol)
assert not cirq.is_special_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_special_unitary(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), atol=atol
)
def test_is_normal():
assert cirq.is_normal(np.array([[1]]))
assert cirq.is_normal(np.array([[3j]]))
assert cirq.is_normal(cirq.testing.random_density_matrix(4))
assert cirq.is_normal(cirq.testing.random_unitary(5))
assert not cirq.is_normal(np.array([[0, 1], [0, 0]]))
assert not cirq.is_normal(np.zeros((1, 0)))
def test_is_normal_tolerance():
atol = 0.25
# Pays attention to specified tolerance.
assert cirq.is_normal(np.array([[0, 0.5], [0, 0]]), atol=atol)
assert not cirq.is_normal(np.array([[0, 0.6], [0, 0]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_normal(np.array([[0, 0.5, 0], [0, 0, 0.5], [0, 0, 0]]), atol=atol)
assert not cirq.is_normal(np.array([[0, 0.5, 0], [0, 0, 0.6], [0, 0, 0]]), atol=atol)
def test_commutes():
assert matrix_commutes(np.empty((0, 0)), np.empty((0, 0)))
assert not matrix_commutes(np.empty((1, 0)), np.empty((0, 1)))
assert not matrix_commutes(np.empty((0, 1)), np.empty((1, 0)))
assert not matrix_commutes(np.empty((1, 0)), np.empty((1, 0)))
assert not matrix_commutes(np.empty((0, 1)), np.empty((0, 1)))
assert matrix_commutes(np.array([[1]]), np.array([[2]]))
assert matrix_commutes(np.array([[1]]), np.array([[0]]))
x = np.array([[0, 1], [1, 0]])
y = np.array([[0, -1j], [1j, 0]])
z = np.array([[1, 0], [0, -1]])
xx = np.kron(x, x)
zz = np.kron(z, z)
assert matrix_commutes(x, x)
assert matrix_commutes(y, y)
assert matrix_commutes(z, z)
assert not matrix_commutes(x, y)
assert not matrix_commutes(x, z)
assert not matrix_commutes(y, z)
assert matrix_commutes(xx, zz)
assert matrix_commutes(xx, np.diag([1, -1, -1, 1 + 1e-9]))
def test_commutes_tolerance():
atol = 0.5
x = np.array([[0, 1], [1, 0]])
z = np.array([[1, 0], [0, -1]])
# Pays attention to specified tolerance.
assert matrix_commutes(x, x + z * 0.1, atol=atol)
assert not matrix_commutes(x, x + z * 0.5, atol=atol)
def test_allclose_up_to_global_phase():
assert cirq.allclose_up_to_global_phase(np.array([1]), np.array([1j]))
assert not cirq.allclose_up_to_global_phase(np.array([[[1]]]), np.array([1]))
assert cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[1]]))
assert cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[-1]]))
assert cirq.allclose_up_to_global_phase(np.array([[0]]), np.array([[0]]))
assert cirq.allclose_up_to_global_phase(np.array([[1, 2]]), np.array([[1j, 2j]]))
assert cirq.allclose_up_to_global_phase(np.array([[1, 2.0000000001]]), np.array([[1j, 2j]]))
assert not cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[1, 0]]))
assert not cirq.allclose_up_to_global_phase(np.array([[1]]), np.array([[2]]))
def test_binary_sub_tensor_slice():
a = slice(None)
e = Ellipsis
assert cirq.slice_for_qubits_equal_to([], 0) == (e,)
assert cirq.slice_for_qubits_equal_to([0], 0b0) == (0, e)
assert cirq.slice_for_qubits_equal_to([0], 0b1) == (1, e)
assert cirq.slice_for_qubits_equal_to([1], 0b0) == (a, 0, e)
assert cirq.slice_for_qubits_equal_to([1], 0b1) == (a, 1, e)
assert cirq.slice_for_qubits_equal_to([2], 0b0) == (a, a, 0, e)
assert cirq.slice_for_qubits_equal_to([2], 0b1) == (a, a, 1, e)
assert cirq.slice_for_qubits_equal_to([0, 1], 0b00) == (0, 0, e)
assert cirq.slice_for_qubits_equal_to([1, 2], 0b00) == (a, 0, 0, e)
assert cirq.slice_for_qubits_equal_to([1, 3], 0b00) == (a, 0, a, 0, e)
assert cirq.slice_for_qubits_equal_to([1, 3], 0b10) == (a, 0, a, 1, e)
assert cirq.slice_for_qubits_equal_to([3, 1], 0b10) == (a, 1, a, 0, e)
assert cirq.slice_for_qubits_equal_to([2, 1, 0], 0b001) == (0, 0, 1, e)
assert cirq.slice_for_qubits_equal_to([2, 1, 0], 0b010) == (0, 1, 0, e)
assert cirq.slice_for_qubits_equal_to([2, 1, 0], 0b100) == (1, 0, 0, e)
assert cirq.slice_for_qubits_equal_to([0, 1, 2], 0b101) == (1, 0, 1, e)
assert cirq.slice_for_qubits_equal_to([0, 2, 1], 0b101) == (1, 1, 0, e)
m = np.array([0] * 16).reshape((2, 2, 2, 2))
for k in range(16):
m[cirq.slice_for_qubits_equal_to([3, 2, 1, 0], k)] = k
assert list(m.reshape(16)) == list(range(16))
assert cirq.slice_for_qubits_equal_to([0], 0b1, num_qubits=1) == (1,)
assert cirq.slice_for_qubits_equal_to([1], 0b0, num_qubits=2) == (a, 0)
assert cirq.slice_for_qubits_equal_to([1], 0b0, num_qubits=3) == (a, 0, a)
assert cirq.slice_for_qubits_equal_to([2], 0b0, num_qubits=3) == (a, a, 0)
def test_binary_sub_tensor_slice_big_endian():
a = slice(None)
e = Ellipsis
sfqet = cirq.slice_for_qubits_equal_to
assert sfqet([], big_endian_qureg_value=0) == (e,)
assert sfqet([0], big_endian_qureg_value=0b0) == (0, e)
assert sfqet([0], big_endian_qureg_value=0b1) == (1, e)
assert sfqet([1], big_endian_qureg_value=0b0) == (a, 0, e)
assert sfqet([1], big_endian_qureg_value=0b1) == (a, 1, e)
assert sfqet([2], big_endian_qureg_value=0b0) == (a, a, 0, e)
assert sfqet([2], big_endian_qureg_value=0b1) == (a, a, 1, e)
assert sfqet([0, 1], big_endian_qureg_value=0b00) == (0, 0, e)
assert sfqet([1, 2], big_endian_qureg_value=0b00) == (a, 0, 0, e)
assert sfqet([1, 3], big_endian_qureg_value=0b00) == (a, 0, a, 0, e)
assert sfqet([1, 3], big_endian_qureg_value=0b01) == (a, 0, a, 1, e)
assert sfqet([3, 1], big_endian_qureg_value=0b01) == (a, 1, a, 0, e)
assert sfqet([2, 1, 0], big_endian_qureg_value=0b100) == (0, 0, 1, e)
assert sfqet([2, 1, 0], big_endian_qureg_value=0b010) == (0, 1, 0, e)
assert sfqet([2, 1, 0], big_endian_qureg_value=0b001) == (1, 0, 0, e)
assert sfqet([0, 1, 2], big_endian_qureg_value=0b101) == (1, 0, 1, e)
assert sfqet([0, 2, 1], big_endian_qureg_value=0b101) == (1, 1, 0, e)
m = np.array([0] * 16).reshape((2, 2, 2, 2))
for k in range(16):
m[sfqet([0, 1, 2, 3], big_endian_qureg_value=k)] = k
assert list(m.reshape(16)) == list(range(16))
assert sfqet([0], big_endian_qureg_value=0b1, num_qubits=1) == (1,)
assert sfqet([1], big_endian_qureg_value=0b0, num_qubits=2) == (a, 0)
assert sfqet([1], big_endian_qureg_value=0b0, num_qubits=3) == (a, 0, a)
assert sfqet([2], big_endian_qureg_value=0b0, num_qubits=3) == (a, a, 0)
def test_qudit_sub_tensor_slice():
a = slice(None)
sfqet = cirq.slice_for_qubits_equal_to
assert sfqet([], 0, qid_shape=()) == ()
assert sfqet([0], 0, qid_shape=(3,)) == (0,)
assert sfqet([0], 1, qid_shape=(3,)) == (1,)
assert sfqet([0], 2, qid_shape=(3,)) == (2,)
assert sfqet([2], 0, qid_shape=(1, 2, 3)) == (a, a, 0)
assert sfqet([2], 2, qid_shape=(1, 2, 3)) == (a, a, 2)
assert sfqet([2], big_endian_qureg_value=2, qid_shape=(1, 2, 3)) == (a, a, 2)
assert sfqet([1, 3], 3 * 2 + 1, qid_shape=(2, 3, 4, 5)) == (a, 1, a, 2)
assert sfqet([3, 1], 5 * 2 + 1, qid_shape=(2, 3, 4, 5)) == (a, 2, a, 1)
assert sfqet([2, 1, 0], 9 * 2 + 3 * 1, qid_shape=(3,) * 3) == (2, 1, 0)
assert sfqet([1, 3], big_endian_qureg_value=5 * 1 + 2, qid_shape=(2, 3, 4, 5)) == (a, 1, a, 2)
assert sfqet([3, 1], big_endian_qureg_value=3 * 1 + 2, qid_shape=(2, 3, 4, 5)) == (a, 2, a, 1)
m = np.array([0] * 24).reshape((1, 2, 3, 4))
for k in range(24):
m[sfqet([3, 2, 1, 0], k, qid_shape=(1, 2, 3, 4))] = k
assert list(m.reshape(24)) == list(range(24))
assert sfqet([0], 1, num_qubits=1, qid_shape=(3,)) == (1,)
assert sfqet([1], 0, num_qubits=3, qid_shape=(3, 3, 3)) == (a, 0, a)
with pytest.raises(ValueError, match='len.* !='):
sfqet([], num_qubits=2, qid_shape=(1, 2, 3))
with pytest.raises(ValueError, match='exactly one'):
sfqet([0, 1, 2], 0b101, big_endian_qureg_value=0b101)
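# Editor's illustrative addition (not part of the original suite): the atol
# arguments used above bound the allowed per-entry deviation, so a matrix that
# is within atol of a unitary still satisfies the predicate.
def test_is_unitary_small_perturbation_sketch():
    perturbed = np.eye(2) + 1e-9 * np.ones((2, 2))
    assert cirq.is_unitary(perturbed)
    assert cirq.is_unitary(perturbed, atol=1e-6)
    assert not cirq.is_unitary(np.eye(2) + 0.5 * np.ones((2, 2)), atol=1e-6)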
|
[
"numpy.sqrt",
"cirq.testing.random_unitary",
"numpy.diag",
"numpy.kron",
"numpy.array",
"cmath.exp",
"numpy.zeros",
"numpy.empty",
"pytest.raises",
"cirq.linalg.matrix_commutes",
"cirq.testing.random_density_matrix",
"cirq.slice_for_qubits_equal_to"
] |
[((13289, 13315), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (13297, 13315), True, 'import numpy as np\n'), ((13324, 13357), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (13332, 13357), True, 'import numpy as np\n'), ((13362, 13389), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (13370, 13389), True, 'import numpy as np\n'), ((13399, 13412), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (13406, 13412), True, 'import numpy as np\n'), ((13422, 13435), 'numpy.kron', 'np.kron', (['z', 'z'], {}), '(z, z)\n', (13429, 13435), True, 'import numpy as np\n'), ((13448, 13469), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['x', 'x'], {}), '(x, x)\n', (13463, 13469), False, 'from cirq.linalg import matrix_commutes\n'), ((13481, 13502), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['y', 'y'], {}), '(y, y)\n', (13496, 13502), False, 'from cirq.linalg import matrix_commutes\n'), ((13514, 13535), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['z', 'z'], {}), '(z, z)\n', (13529, 13535), False, 'from cirq.linalg import matrix_commutes\n'), ((13659, 13682), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['xx', 'zz'], {}), '(xx, zz)\n', (13674, 13682), False, 'from cirq.linalg import matrix_commutes\n'), ((13803, 13829), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (13811, 13829), True, 'import numpy as np\n'), ((13838, 13865), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (13846, 13865), True, 'import numpy as np\n'), ((13923, 13965), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['x', '(x + z * 0.1)'], {'atol': 'atol'}), '(x, x + z * 0.1, atol=atol)\n', (13938, 13965), False, 'from cirq.linalg import matrix_commutes\n'), ((738, 754), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (746, 754), True, 'import numpy as np\n'), ((784, 800), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (792, 800), True, 'import numpy as np\n'), ((830, 846), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (838, 846), True, 'import numpy as np\n'), ((877, 892), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (885, 892), True, 'import numpy as np\n'), ((922, 938), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (930, 938), True, 'import numpy as np\n'), ((968, 983), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (976, 983), True, 'import numpy as np\n'), ((1013, 1031), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (1021, 1031), True, 'import numpy as np\n'), ((1060, 1078), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (1068, 1078), True, 'import numpy as np\n'), ((1108, 1128), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (1116, 1128), True, 'import numpy as np\n'), ((1265, 1294), 'numpy.array', 'np.array', (['[[5.0j, 0], [0, 2]]'], {}), '([[5.0j, 0], [0, 2]])\n', (1273, 1294), True, 'import numpy as np\n'), ((1322, 1348), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (1330, 1348), True, 'import numpy as np\n'), ((1623, 1657), 'numpy.array', 'np.array', (['[[1, 1e-11], [1e-10, 1]]'], {}), '([[1, 1e-11], [1e-10, 1]])\n', (1631, 1657), True, 'import numpy as np\n'), ((1784, 1813), 'numpy.array', 'np.array', (['[[1, 0], [-0.5, 1]]'], {}), '([[1, 0], [-0.5, 1]])\n', (1792, 1813), True, 'import numpy as np\n'), ((1975, 2006), 'numpy.array', 'np.array', (['[[1, 0.5], 
[-0.5, 1]]'], {}), '([[1, 0.5], [-0.5, 1]])\n', (1983, 2006), True, 'import numpy as np\n'), ((2151, 2167), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (2159, 2167), True, 'import numpy as np\n'), ((2301, 2316), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (2309, 2316), True, 'import numpy as np\n'), ((2347, 2363), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (2355, 2363), True, 'import numpy as np\n'), ((2394, 2409), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (2402, 2409), True, 'import numpy as np\n'), ((2663, 2689), 'numpy.array', 'np.array', (['[[5, 0], [0, 2]]'], {}), '([[5, 0], [0, 2]])\n', (2671, 2689), True, 'import numpy as np\n'), ((2720, 2746), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (2728, 2746), True, 'import numpy as np\n'), ((2899, 2925), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (2907, 2925), True, 'import numpy as np\n'), ((2956, 2989), 'numpy.array', 'np.array', (['[[1, 1.0j], [-1.0j, 1]]'], {}), '([[1, 1.0j], [-1.0j, 1]])\n', (2964, 2989), True, 'import numpy as np\n'), ((3221, 3277), 'numpy.array', 'np.array', (['[[1, 1.0j + 1e-11], [-1.0j, 1 + 1.0j * 1e-09]]'], {}), '([[1, 1.0j + 1e-11], [-1.0j, 1 + 1.0j * 1e-09]])\n', (3229, 3277), True, 'import numpy as np\n'), ((3399, 3428), 'numpy.array', 'np.array', (['[[1, 0], [-0.5, 1]]'], {}), '([[1, 0], [-0.5, 1]])\n', (3407, 3428), True, 'import numpy as np\n'), ((3470, 3503), 'numpy.array', 'np.array', (['[[1, 0.25], [-0.25, 1]]'], {}), '([[1, 0.25], [-0.25, 1]])\n', (3478, 3503), True, 'import numpy as np\n'), ((3746, 3793), 'numpy.array', 'np.array', (['[[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]])\n', (3754, 3793), True, 'import numpy as np\n'), ((4042, 4058), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (4050, 4058), True, 'import numpy as np\n'), ((4186, 4201), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (4194, 4201), True, 'import numpy as np\n'), ((4230, 4246), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (4238, 4246), True, 'import numpy as np\n'), ((4275, 4293), 'numpy.array', 'np.array', (['[[1.0j]]'], {}), '([[1.0j]])\n', (4283, 4293), True, 'import numpy as np\n'), ((4583, 4610), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (4591, 4610), True, 'import numpy as np\n'), ((4639, 4668), 'numpy.array', 'np.array', (['[[1.0j, 0], [0, 1]]'], {}), '([[1.0j, 0], [0, 1]])\n', (4647, 4668), True, 'import numpy as np\n'), ((5341, 5370), 'numpy.array', 'np.array', (['[[1, 0], [-0.5, 1]]'], {}), '([[1, 0], [-0.5, 1]])\n', (5349, 5370), True, 'import numpy as np\n'), ((5530, 5579), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]])\n', (5538, 5579), True, 'import numpy as np\n'), ((5743, 5759), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (5751, 5759), True, 'import numpy as np\n'), ((5896, 5911), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (5904, 5911), True, 'import numpy as np\n'), ((5943, 5959), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (5951, 5959), True, 'import numpy as np\n'), ((6321, 6348), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (6329, 6348), True, 'import numpy as np\n'), ((6926, 6964), 'numpy.array', 'np.array', (['[[1, 1e-11], [0, 1 + 1e-11]]'], {}), '([[1, 1e-11], [0, 1 + 1e-11]])\n', (6934, 6964), True, 'import numpy 
as np\n'), ((7095, 7124), 'numpy.array', 'np.array', (['[[1, 0], [-0.5, 1]]'], {}), '([[1, 0], [-0.5, 1]])\n', (7103, 7124), True, 'import numpy as np\n'), ((7290, 7339), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]])\n', (7298, 7339), True, 'import numpy as np\n'), ((7522, 7538), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (7530, 7538), True, 'import numpy as np\n'), ((7699, 7714), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7707, 7714), True, 'import numpy as np\n'), ((8263, 8291), 'numpy.array', 'np.array', (['[[-1, 0], [0, -1]]'], {}), '([[-1, 0], [0, -1]])\n', (8271, 8291), True, 'import numpy as np\n'), ((9027, 9065), 'numpy.array', 'np.array', (['[[1, 1e-11], [0, 1 + 1e-11]]'], {}), '([[1, 1e-11], [0, 1 + 1e-11]])\n', (9035, 9065), True, 'import numpy as np\n'), ((9212, 9241), 'numpy.array', 'np.array', (['[[1, 0], [-0.5, 1]]'], {}), '([[1, 0], [-0.5, 1]])\n', (9220, 9241), True, 'import numpy as np\n'), ((9464, 9517), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]])\n', (9472, 9517), True, 'import numpy as np\n'), ((9843, 9859), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (9851, 9859), True, 'import numpy as np\n'), ((10011, 10026), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (10019, 10026), True, 'import numpy as np\n'), ((10370, 10398), 'numpy.array', 'np.array', (['[[-1, 0], [0, -1]]'], {}), '([[-1, 0], [0, -1]])\n', (10378, 10398), True, 'import numpy as np\n'), ((10503, 10536), 'numpy.array', 'np.array', (['[[1.0j, 0], [0, -1.0j]]'], {}), '([[1.0j, 0], [0, -1.0j]])\n', (10511, 10536), True, 'import numpy as np\n'), ((11287, 11316), 'numpy.array', 'np.array', (['[[1, 0], [-0.5, 1]]'], {}), '([[1, 0], [-0.5, 1]])\n', (11295, 11316), True, 'import numpy as np\n'), ((11720, 11773), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]])\n', (11728, 11773), True, 'import numpy as np\n'), ((12056, 12071), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (12064, 12071), True, 'import numpy as np\n'), ((12099, 12117), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (12107, 12117), True, 'import numpy as np\n'), ((12143, 12180), 'cirq.testing.random_density_matrix', 'cirq.testing.random_density_matrix', (['(4)'], {}), '(4)\n', (12177, 12180), False, 'import cirq\n'), ((12208, 12238), 'cirq.testing.random_unitary', 'cirq.testing.random_unitary', (['(5)'], {}), '(5)\n', (12235, 12238), False, 'import cirq\n'), ((12468, 12496), 'numpy.array', 'np.array', (['[[0, 0.5], [0, 0]]'], {}), '([[0, 0.5], [0, 0]])\n', (12476, 12496), True, 'import numpy as np\n'), ((12653, 12700), 'numpy.array', 'np.array', (['[[0, 0.5, 0], [0, 0, 0.5], [0, 0, 0]]'], {}), '([[0, 0.5, 0], [0, 0, 0.5], [0, 0, 0]])\n', (12661, 12700), True, 'import numpy as np\n'), ((12853, 12869), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (12861, 12869), True, 'import numpy as np\n'), ((12871, 12887), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (12879, 12887), True, 'import numpy as np\n'), ((13185, 13200), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (13193, 13200), True, 'import numpy as np\n'), ((13202, 13217), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (13210, 13217), True, 'import numpy as np\n'), ((13246, 13261), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', 
(13254, 13261), True, 'import numpy as np\n'), ((13263, 13278), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (13271, 13278), True, 'import numpy as np\n'), ((13551, 13572), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['x', 'y'], {}), '(x, y)\n', (13566, 13572), False, 'from cirq.linalg import matrix_commutes\n'), ((13588, 13609), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['x', 'z'], {}), '(x, z)\n', (13603, 13609), False, 'from cirq.linalg import matrix_commutes\n'), ((13625, 13646), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['y', 'z'], {}), '(y, z)\n', (13640, 13646), False, 'from cirq.linalg import matrix_commutes\n'), ((13714, 13745), 'numpy.diag', 'np.diag', (['[1, -1, -1, 1 + 1e-09]'], {}), '([1, -1, -1, 1 + 1e-09])\n', (13721, 13745), True, 'import numpy as np\n'), ((13981, 14023), 'cirq.linalg.matrix_commutes', 'matrix_commutes', (['x', '(x + z * 0.5)'], {'atol': 'atol'}), '(x, x + z * 0.5, atol=atol)\n', (13996, 14023), False, 'from cirq.linalg import matrix_commutes\n'), ((14110, 14123), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (14118, 14123), True, 'import numpy as np\n'), ((14125, 14141), 'numpy.array', 'np.array', (['[1.0j]'], {}), '([1.0j])\n', (14133, 14141), True, 'import numpy as np\n'), ((14269, 14284), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14277, 14284), True, 'import numpy as np\n'), ((14286, 14301), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14294, 14301), True, 'import numpy as np\n'), ((14347, 14362), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14355, 14362), True, 'import numpy as np\n'), ((14364, 14380), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (14372, 14380), True, 'import numpy as np\n'), ((14427, 14442), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (14435, 14442), True, 'import numpy as np\n'), ((14444, 14459), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (14452, 14459), True, 'import numpy as np\n'), ((14506, 14524), 'numpy.array', 'np.array', (['[[1, 2]]'], {}), '([[1, 2]])\n', (14514, 14524), True, 'import numpy as np\n'), ((14526, 14550), 'numpy.array', 'np.array', (['[[1.0j, 2.0j]]'], {}), '([[1.0j, 2.0j]])\n', (14534, 14550), True, 'import numpy as np\n'), ((14593, 14622), 'numpy.array', 'np.array', (['[[1, 2.0000000001]]'], {}), '([[1, 2.0000000001]])\n', (14601, 14622), True, 'import numpy as np\n'), ((14624, 14648), 'numpy.array', 'np.array', (['[[1.0j, 2.0j]]'], {}), '([[1.0j, 2.0j]])\n', (14632, 14648), True, 'import numpy as np\n'), ((14983, 15020), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[]', '(0)'], {}), '([], 0)\n', (15013, 15020), False, 'import cirq\n'), ((15040, 15078), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[0]', '(0)'], {}), '([0], 0)\n', (15070, 15078), False, 'import cirq\n'), ((15102, 15140), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[0]', '(1)'], {}), '([0], 1)\n', (15132, 15140), False, 'import cirq\n'), ((15164, 15202), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1]', '(0)'], {}), '([1], 0)\n', (15194, 15202), False, 'import cirq\n'), ((15229, 15267), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1]', '(1)'], {}), '([1], 1)\n', (15259, 15267), False, 'import cirq\n'), ((15294, 15332), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[2]', '(0)'], {}), '([2], 0)\n', (15324, 15332), False, 'import cirq\n'), ((15362, 15400), 
'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[2]', '(1)'], {}), '([2], 1)\n', (15392, 15400), False, 'import cirq\n'), ((15431, 15472), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[0, 1]', '(0)'], {}), '([0, 1], 0)\n', (15461, 15472), False, 'import cirq\n'), ((15500, 15541), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1, 2]', '(0)'], {}), '([1, 2], 0)\n', (15530, 15541), False, 'import cirq\n'), ((15572, 15613), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1, 3]', '(0)'], {}), '([1, 3], 0)\n', (15602, 15613), False, 'import cirq\n'), ((15647, 15688), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1, 3]', '(2)'], {}), '([1, 3], 2)\n', (15677, 15688), False, 'import cirq\n'), ((15722, 15763), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[3, 1]', '(2)'], {}), '([3, 1], 2)\n', (15752, 15763), False, 'import cirq\n'), ((15798, 15842), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[2, 1, 0]', '(1)'], {}), '([2, 1, 0], 1)\n', (15828, 15842), False, 'import cirq\n'), ((15874, 15918), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[2, 1, 0]', '(2)'], {}), '([2, 1, 0], 2)\n', (15904, 15918), False, 'import cirq\n'), ((15950, 15994), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[2, 1, 0]', '(4)'], {}), '([2, 1, 0], 4)\n', (15980, 15994), False, 'import cirq\n'), ((16026, 16070), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[0, 1, 2]', '(5)'], {}), '([0, 1, 2], 5)\n', (16056, 16070), False, 'import cirq\n'), ((16102, 16146), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[0, 2, 1]', '(5)'], {}), '([0, 2, 1], 5)\n', (16132, 16146), False, 'import cirq\n'), ((16366, 16418), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[0]', '(1)'], {'num_qubits': '(1)'}), '([0], 1, num_qubits=1)\n', (16396, 16418), False, 'import cirq\n'), ((16440, 16492), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1]', '(0)'], {'num_qubits': '(2)'}), '([1], 0, num_qubits=2)\n', (16470, 16492), False, 'import cirq\n'), ((16516, 16568), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[1]', '(0)'], {'num_qubits': '(3)'}), '([1], 0, num_qubits=3)\n', (16546, 16568), False, 'import cirq\n'), ((16595, 16647), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[2]', '(0)'], {'num_qubits': '(3)'}), '([2], 0, num_qubits=3)\n', (16625, 16647), False, 'import cirq\n'), ((19692, 19735), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""len.* !="""'}), "(ValueError, match='len.* !=')\n", (19705, 19735), False, 'import pytest\n'), ((19800, 19846), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""exactly one"""'}), "(ValueError, match='exactly one')\n", (19813, 19846), False, 'import pytest\n'), ((1162, 1180), 'numpy.array', 'np.array', (['[[1, 1]]'], {}), '([[1, 1]])\n', (1170, 1180), True, 'import numpy as np\n'), ((1214, 1234), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (1222, 1234), True, 'import numpy as np\n'), ((1382, 1408), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (1390, 1408), True, 'import numpy as np\n'), ((1442, 1468), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (1450, 1468), True, 'import numpy as np\n'), ((1502, 1528), 'numpy.array', 
'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (1510, 1528), True, 'import numpy as np\n'), ((1562, 1592), 'numpy.array', 'np.array', (['[[1, 0.1], [0.1, 1]]'], {}), '([[1, 0.1], [0.1, 1]])\n', (1570, 1592), True, 'import numpy as np\n'), ((1858, 1887), 'numpy.array', 'np.array', (['[[1, 0], [-0.6, 1]]'], {}), '([[1, 0], [-0.6, 1]])\n', (1866, 1887), True, 'import numpy as np\n'), ((2051, 2082), 'numpy.array', 'np.array', (['[[1, 0.5], [-0.6, 1]]'], {}), '([[1, 0.5], [-0.6, 1]])\n', (2059, 2082), True, 'import numpy as np\n'), ((2202, 2218), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (2210, 2218), True, 'import numpy as np\n'), ((2253, 2269), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (2261, 2269), True, 'import numpy as np\n'), ((2444, 2462), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (2452, 2462), True, 'import numpy as np\n'), ((2496, 2514), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (2504, 2514), True, 'import numpy as np\n'), ((2549, 2569), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (2557, 2569), True, 'import numpy as np\n'), ((2605, 2634), 'numpy.array', 'np.array', (['[[5.0j, 0], [0, 2]]'], {}), '([[5.0j, 0], [0, 2]])\n', (2613, 2634), True, 'import numpy as np\n'), ((2781, 2807), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (2789, 2807), True, 'import numpy as np\n'), ((2842, 2868), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (2850, 2868), True, 'import numpy as np\n'), ((3016, 3049), 'numpy.array', 'np.array', (['[[1, 1.0j], [-1.0j, 1]]'], {}), '([[1, 1.0j], [-1.0j, 1]])\n', (3024, 3049), True, 'import numpy as np\n'), ((3048, 3060), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3055, 3060), True, 'import numpy as np\n'), ((3095, 3127), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (3103, 3127), True, 'import numpy as np\n'), ((3158, 3189), 'numpy.array', 'np.array', (['[[1, 0.1], [-0.1, 1]]'], {}), '([[1, 0.1], [-0.1, 1]])\n', (3166, 3189), True, 'import numpy as np\n'), ((3549, 3578), 'numpy.array', 'np.array', (['[[1, 0], [-0.6, 1]]'], {}), '([[1, 0], [-0.6, 1]])\n', (3557, 3578), True, 'import numpy as np\n'), ((3624, 3657), 'numpy.array', 'np.array', (['[[1, 0.25], [-0.35, 1]]'], {}), '([[1, 0.25], [-0.35, 1]])\n', (3632, 3657), True, 'import numpy as np\n'), ((3839, 3886), 'numpy.array', 'np.array', (['[[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]])\n', (3847, 3886), True, 'import numpy as np\n'), ((3932, 3977), 'numpy.array', 'np.array', (['[[1, 0, 0.6], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0.6], [0, 1, 0], [0, 0, 1]])\n', (3940, 3977), True, 'import numpy as np\n'), ((4091, 4107), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (4099, 4107), True, 'import numpy as np\n'), ((4140, 4156), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (4148, 4156), True, 'import numpy as np\n'), ((4324, 4339), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (4332, 4339), True, 'import numpy as np\n'), ((4372, 4390), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (4380, 4390), True, 'import numpy as np\n'), ((4422, 4440), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (4430, 4440), True, 'import numpy as np\n'), ((4473, 4493), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (4481, 4493), True, 'import numpy as np\n'), ((4527, 4554), 
'numpy.array', 'np.array', (['[[1, 0], [0, -2]]'], {}), '([[1, 0], [0, -2]])\n', (4535, 4554), True, 'import numpy as np\n'), ((4699, 4725), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (4707, 4725), True, 'import numpy as np\n'), ((4758, 4784), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (4766, 4784), True, 'import numpy as np\n'), ((4817, 4843), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (4825, 4843), True, 'import numpy as np\n'), ((4876, 4903), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (4884, 4903), True, 'import numpy as np\n'), ((4932, 4959), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (4940, 4959), True, 'import numpy as np\n'), ((4962, 4974), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4969, 4974), True, 'import numpy as np\n'), ((5003, 5035), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (5011, 5035), True, 'import numpy as np\n'), ((5034, 5046), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (5041, 5046), True, 'import numpy as np\n'), ((5153, 5208), 'numpy.array', 'np.array', (['[[1, 1.0j + 1e-11], [1.0j, 1 + 1.0j * 1e-09]]'], {}), '([[1, 1.0j + 1e-11], [1.0j, 1 + 1.0j * 1e-09]])\n', (5161, 5208), True, 'import numpy as np\n'), ((5204, 5216), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (5211, 5216), True, 'import numpy as np\n'), ((5414, 5443), 'numpy.array', 'np.array', (['[[1, 0], [-0.6, 1]]'], {}), '([[1, 0], [-0.6, 1]])\n', (5422, 5443), True, 'import numpy as np\n'), ((5623, 5672), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]])\n', (5631, 5672), True, 'import numpy as np\n'), ((5795, 5811), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (5803, 5811), True, 'import numpy as np\n'), ((5847, 5863), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (5855, 5863), True, 'import numpy as np\n'), ((5995, 6013), 'numpy.array', 'np.array', (['[[1.0j]]'], {}), '([[1.0j]])\n', (6003, 6013), True, 'import numpy as np\n'), ((6047, 6062), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (6055, 6062), True, 'import numpy as np\n'), ((6098, 6116), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (6106, 6116), True, 'import numpy as np\n'), ((6151, 6169), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (6159, 6169), True, 'import numpy as np\n'), ((6205, 6225), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (6213, 6225), True, 'import numpy as np\n'), ((6262, 6289), 'numpy.array', 'np.array', (['[[1, 0], [0, -2]]'], {}), '([[1, 0], [0, -2]])\n', (6270, 6289), True, 'import numpy as np\n'), ((6384, 6413), 'numpy.array', 'np.array', (['[[1.0j, 0], [0, 1]]'], {}), '([[1.0j, 0], [0, 1]])\n', (6392, 6413), True, 'import numpy as np\n'), ((6447, 6473), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (6455, 6473), True, 'import numpy as np\n'), ((6509, 6535), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (6517, 6535), True, 'import numpy as np\n'), ((6571, 6597), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (6579, 6597), True, 'import numpy as np\n'), ((6633, 6660), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (6641, 6660), True, 'import numpy as np\n'), ((6692, 6719), 
'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (6700, 6719), True, 'import numpy as np\n'), ((6722, 6734), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (6729, 6734), True, 'import numpy as np\n'), ((7171, 7200), 'numpy.array', 'np.array', (['[[1, 0], [-0.6, 1]]'], {}), '([[1, 0], [-0.6, 1]])\n', (7179, 7200), True, 'import numpy as np\n'), ((7386, 7435), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]])\n', (7394, 7435), True, 'import numpy as np\n'), ((7582, 7598), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (7590, 7598), True, 'import numpy as np\n'), ((7642, 7658), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (7650, 7658), True, 'import numpy as np\n'), ((7758, 7774), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (7766, 7774), True, 'import numpy as np\n'), ((7818, 7836), 'numpy.array', 'np.array', (['[[1.0j]]'], {}), '([[1.0j]])\n', (7826, 7836), True, 'import numpy as np\n'), ((7878, 7893), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (7886, 7893), True, 'import numpy as np\n'), ((7937, 7955), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (7945, 7955), True, 'import numpy as np\n'), ((7998, 8016), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (8006, 8016), True, 'import numpy as np\n'), ((8060, 8080), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (8068, 8080), True, 'import numpy as np\n'), ((8125, 8152), 'numpy.array', 'np.array', (['[[1, 0], [0, -2]]'], {}), '([[1, 0], [0, -2]])\n', (8133, 8152), True, 'import numpy as np\n'), ((8196, 8223), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (8204, 8223), True, 'import numpy as np\n'), ((8335, 8364), 'numpy.array', 'np.array', (['[[1.0j, 0], [0, 1]]'], {}), '([[1.0j, 0], [0, 1]])\n', (8343, 8364), True, 'import numpy as np\n'), ((8406, 8432), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (8414, 8432), True, 'import numpy as np\n'), ((8476, 8502), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (8484, 8502), True, 'import numpy as np\n'), ((8546, 8572), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (8554, 8572), True, 'import numpy as np\n'), ((8616, 8643), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (8624, 8643), True, 'import numpy as np\n'), ((8683, 8710), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (8691, 8710), True, 'import numpy as np\n'), ((8713, 8725), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (8720, 8725), True, 'import numpy as np\n'), ((9296, 9325), 'numpy.array', 'np.array', (['[[1, 0], [-0.6, 1]]'], {}), '([[1, 0], [-0.6, 1]])\n', (9304, 9325), True, 'import numpy as np\n'), ((9586, 9635), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]])\n', (9594, 9635), True, 'import numpy as np\n'), ((9704, 9757), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]])\n', (9712, 9757), True, 'import numpy as np\n'), ((9900, 9916), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (9908, 9916), True, 'import numpy as np\n'), ((9957, 9973), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (9965, 9973), True, 'import numpy as np\n'), ((10067, 
10083), 'numpy.array', 'np.array', (['[[-1]]'], {}), '([[-1]])\n', (10075, 10083), True, 'import numpy as np\n'), ((10124, 10139), 'numpy.array', 'np.array', (['[[5]]'], {}), '([[5]])\n', (10132, 10139), True, 'import numpy as np\n'), ((10180, 10198), 'numpy.array', 'np.array', (['[[3.0j]]'], {}), '([[3.0j]])\n', (10188, 10198), True, 'import numpy as np\n'), ((10238, 10265), 'numpy.array', 'np.array', (['[[1, 0], [0, -2]]'], {}), '([[1, 0], [0, -2]])\n', (10246, 10265), True, 'import numpy as np\n'), ((10306, 10333), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (10314, 10333), True, 'import numpy as np\n'), ((10439, 10468), 'numpy.array', 'np.array', (['[[1.0j, 0], [0, 1]]'], {}), '([[1.0j, 0], [0, 1]])\n', (10447, 10468), True, 'import numpy as np\n'), ((10573, 10599), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (10581, 10599), True, 'import numpy as np\n'), ((10640, 10666), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (10648, 10666), True, 'import numpy as np\n'), ((10707, 10733), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (10715, 10733), True, 'import numpy as np\n'), ((10774, 10801), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (10782, 10801), True, 'import numpy as np\n'), ((10838, 10865), 'numpy.array', 'np.array', (['[[1, -1], [1, 1]]'], {}), '([[1, -1], [1, 1]])\n', (10846, 10865), True, 'import numpy as np\n'), ((10868, 10880), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (10875, 10880), True, 'import numpy as np\n'), ((10917, 10949), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (10925, 10949), True, 'import numpy as np\n'), ((10948, 10960), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (10955, 10960), True, 'import numpy as np\n'), ((11083, 11138), 'numpy.array', 'np.array', (['[[1, 1.0j + 1e-11], [1.0j, 1 + 1.0j * 1e-09]]'], {}), '([[1, 1.0j + 1e-11], [1.0j, 1 + 1.0j * 1e-09]])\n', (11091, 11138), True, 'import numpy as np\n'), ((11134, 11146), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (11141, 11146), True, 'import numpy as np\n'), ((11368, 11397), 'numpy.array', 'np.array', (['[[1, 0], [-0.6, 1]]'], {}), '([[1, 0], [-0.6, 1]])\n', (11376, 11397), True, 'import numpy as np\n'), ((11445, 11471), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (11453, 11471), True, 'import numpy as np\n'), ((11474, 11495), 'cmath.exp', 'cmath.exp', (['(1.0j * 0.1)'], {}), '(1.0j * 0.1)\n', (11483, 11495), False, 'import cmath\n'), ((11825, 11874), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]])\n', (11833, 11874), True, 'import numpy as np\n'), ((11935, 11988), 'numpy.array', 'np.array', (['[[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]'], {}), '([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]])\n', (11943, 11988), True, 'import numpy as np\n'), ((12270, 12296), 'numpy.array', 'np.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (12278, 12296), True, 'import numpy as np\n'), ((12328, 12344), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (12336, 12344), True, 'import numpy as np\n'), ((12539, 12567), 'numpy.array', 'np.array', (['[[0, 0.6], [0, 0]]'], {}), '([[0, 0.6], [0, 0]])\n', (12547, 12567), True, 'import numpy as np\n'), ((12743, 12790), 'numpy.array', 'np.array', (['[[0, 0.5, 0], [0, 0, 0.6], [0, 0, 0]]'], {}), '([[0, 0.5, 0], 
[0, 0, 0.6], [0, 0, 0]])\n', (12751, 12790), True, 'import numpy as np\n'), ((12920, 12936), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (12928, 12936), True, 'import numpy as np\n'), ((12938, 12954), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (12946, 12954), True, 'import numpy as np\n'), ((12987, 13003), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (12995, 13003), True, 'import numpy as np\n'), ((13005, 13021), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (13013, 13021), True, 'import numpy as np\n'), ((13054, 13070), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (13062, 13070), True, 'import numpy as np\n'), ((13072, 13088), 'numpy.empty', 'np.empty', (['(1, 0)'], {}), '((1, 0))\n', (13080, 13088), True, 'import numpy as np\n'), ((13121, 13137), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (13129, 13137), True, 'import numpy as np\n'), ((13139, 13155), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (13147, 13155), True, 'import numpy as np\n'), ((14190, 14207), 'numpy.array', 'np.array', (['[[[1]]]'], {}), '([[[1]]])\n', (14198, 14207), True, 'import numpy as np\n'), ((14209, 14222), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (14217, 14222), True, 'import numpy as np\n'), ((14695, 14710), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14703, 14710), True, 'import numpy as np\n'), ((14712, 14730), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (14720, 14730), True, 'import numpy as np\n'), ((14780, 14795), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14788, 14795), True, 'import numpy as np\n'), ((14797, 14812), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (14805, 14812), True, 'import numpy as np\n'), ((14862, 14877), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14870, 14877), True, 'import numpy as np\n'), ((14879, 14894), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (14887, 14894), True, 'import numpy as np\n'), ((16176, 16194), 'numpy.array', 'np.array', (['([0] * 16)'], {}), '([0] * 16)\n', (16184, 16194), True, 'import numpy as np\n'), ((16251, 16298), 'cirq.slice_for_qubits_equal_to', 'cirq.slice_for_qubits_equal_to', (['[3, 2, 1, 0]', 'k'], {}), '([3, 2, 1, 0], k)\n', (16281, 16298), False, 'import cirq\n'), ((17963, 17981), 'numpy.array', 'np.array', (['([0] * 16)'], {}), '([0] * 16)\n', (17971, 17981), True, 'import numpy as np\n'), ((19368, 19386), 'numpy.array', 'np.array', (['([0] * 24)'], {}), '([0] * 24)\n', (19376, 19386), True, 'import numpy as np\n'), ((5079, 5112), 'numpy.array', 'np.array', (['[[1, -1.0j], [1.0j, 1]]'], {}), '([[1, -1.0j], [1.0j, 1]])\n', (5087, 5112), True, 'import numpy as np\n'), ((5111, 5123), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (5118, 5123), True, 'import numpy as np\n'), ((6770, 6802), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (6778, 6802), True, 'import numpy as np\n'), ((6801, 6813), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (6808, 6813), True, 'import numpy as np\n'), ((6849, 6882), 'numpy.array', 'np.array', (['[[1, -1.0j], [1.0j, 1]]'], {}), '([[1, -1.0j], [1.0j, 1]])\n', (6857, 6882), True, 'import numpy as np\n'), ((6881, 6893), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (6888, 6893), True, 'import numpy as np\n'), ((8769, 8796), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (8777, 8796), True, 'import numpy as np\n'), ((8799, 8811), 
'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (8806, 8811), True, 'import numpy as np\n'), ((8855, 8887), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (8863, 8887), True, 'import numpy as np\n'), ((8886, 8898), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (8893, 8898), True, 'import numpy as np\n'), ((8942, 8975), 'numpy.array', 'np.array', (['[[1, -1.0j], [1.0j, 1]]'], {}), '([[1, -1.0j], [1.0j, 1]])\n', (8950, 8975), True, 'import numpy as np\n'), ((8974, 8986), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (8981, 8986), True, 'import numpy as np\n'), ((11001, 11034), 'numpy.array', 'np.array', (['[[1, -1.0j], [1.0j, 1]]'], {}), '([[1, -1.0j], [1.0j, 1]])\n', (11009, 11034), True, 'import numpy as np\n'), ((11033, 11045), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (11040, 11045), True, 'import numpy as np\n'), ((11545, 11571), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (11553, 11571), True, 'import numpy as np\n'), ((11574, 11595), 'cmath.exp', 'cmath.exp', (['(1.0j * 0.3)'], {}), '(1.0j * 0.3)\n', (11583, 11595), False, 'import cmath\n')]
|
#-*- coding:utf-8 -*-
import torch
from torchvision import transforms
import cv2
from PIL import Image, ImageOps
import numpy as np
import pickle
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import default_loader,make_dataset,IMG_EXTENSIONS
from pycocotools.coco import COCO
import os
class MultiViewDataInjector():
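    # Applies every transform in transform_list to the same (sample, mask) pair and stacks the per-view results along dim 0.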
def __init__(self, transform_list):
self.transform_list = transform_list
def __call__(self,sample,mask):
output,mask = zip(*[transform(sample,mask) for transform in self.transform_list])
output_cat = torch.stack(output, dim=0)
mask_cat = torch.stack(mask)
return output_cat,mask_cat
class SSLMaskDataset(VisionDataset):
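    # Image-folder dataset that pairs each image with a precomputed mask; mask_file holds a pickled mapping from sample index to the mask's pickle path.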
def __init__(self, root: str, mask_file: str, extensions = IMG_EXTENSIONS, transform = None):
self.root = root
self.transform = transform
self.samples = make_dataset(self.root, extensions = extensions) #Pytorch 1.9+
self.loader = default_loader
self.img_to_mask = self._get_masks(mask_file)
def _get_masks(self, mask_file):
with open(mask_file, "rb") as file:
return pickle.load(file)
def __getitem__(self, index: int):
path, _ = self.samples[index]
# Load Image
sample = self.loader(path)
# Load Mask
with open(self.img_to_mask[index], "rb") as file:
mask = pickle.load(file)
# Apply transforms
if self.transform is not None:
sample,mask = self.transform(sample,mask.unsqueeze(0))
return sample,mask
def __len__(self) -> int:
return len(self.samples)
class COCOMaskDataset(VisionDataset):
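    # COCO-backed dataset returning (image, per-pixel category-id mask); images without any annotations are filtered out.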
def __init__(self, root: str,annFile: str, transform = None):
self.root = root
self.coco = COCO(annFile)
self.transform = transform
#self.samples = make_dataset(self.root, extensions = extensions) #Pytorch 1.9+
self.loader = default_loader
ids = []
# perform filter
for k in self.coco.imgs.keys():
anns = self.coco.loadAnns(self.coco.getAnnIds(k))
if len(anns)>0:
ids.append(k)
self.ids = list(sorted(ids))
#self.img_to_mask = self._get_masks(mask_file)
def _get_masks(self, mask_file):
with open(mask_file, "rb") as file:
return pickle.load(file)
def __getitem__(self, index: int):
id = self.ids[index]
filename = self.coco.loadImgs(id)[0]["file_name"]
path = os.path.join(self.root, filename)
# Load Image
sample = self.loader(path)
anns = self.coco.loadAnns(self.coco.getAnnIds(id))
mask = np.max(np.stack([self.coco.annToMask(ann) * ann["category_id"]
for ann in anns]), axis=0)
# print(np.unique(mask))
# return sample,mask
# Apply transforms
mask = torch.LongTensor(mask)
if self.transform is not None:
sample,mask = self.transform(sample,mask.unsqueeze(0))
return sample,mask
def __len__(self) -> int:
return len(self.ids)
class GaussianBlur():
def __init__(self, kernel_size, sigma_min=0.1, sigma_max=2.0):
self.sigma_min = sigma_min
self.sigma_max = sigma_max
self.kernel_size = kernel_size
def __call__(self, img):
sigma = np.random.uniform(self.sigma_min, self.sigma_max)
img = cv2.GaussianBlur(np.array(img), (self.kernel_size, self.kernel_size), sigma)
return Image.fromarray(img.astype(np.uint8))
class CustomCompose:
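    # Composes paired (image, mask) transforms (p_list) followed by image-only transforms (t_list).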
def __init__(self, t_list,p_list):
self.t_list = t_list
self.p_list = p_list
def __call__(self, img, mask):
for p in self.p_list:
img,mask = p(img,mask)
for t in self.t_list:
img = t(img)
return img,mask
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
for t in self.t_list:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string
class MaskRandomResizedCrop():
def __init__(self, size):
super().__init__()
self.size = size
self.totensor = transforms.ToTensor()
self.topil = transforms.ToPILImage()
def __call__(self, image, mask):
"""
Args:
image (PIL Image or Tensor): Image to be cropped and resized.
mask (Tensor): Mask to be cropped and resized.
Returns:
PIL Image or Tensor: Randomly cropped/resized image.
Mask Tensor: Randomly cropped/resized mask.
"""
#import ipdb;ipdb.set_trace()
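        # The same crop parameters (i, j, h, w) are applied to the image (bicubic resize) and the mask (nearest resize) so they stay spatially aligned.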
i, j, h, w = transforms.RandomResizedCrop.get_params(image,scale=(0.08, 1.0), ratio=(3.0/4.0,4.0/3.0))
image = transforms.functional.resize(transforms.functional.crop(image, i, j, h, w),(self.size,self.size),interpolation=transforms.functional.InterpolationMode.BICUBIC)
image = self.topil(torch.clip(self.totensor(image),min=0, max=255))
mask = transforms.functional.resize(transforms.functional.crop(mask, i, j, h, w),(self.size,self.size),interpolation=transforms.functional.InterpolationMode.NEAREST)
return [image,mask]
class MaskRandomHorizontalFlip():
"""
Apply horizontal flip to a PIL Image and Mask.
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def __call__(self, image, mask):
"""
Args:
image (PIL Image or Tensor): Image to be flipped.
mask (Tensor): Mask to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
Mask Tensor: Randomly flipped mask.
"""
if torch.rand(1) < self.p:
image = transforms.functional.hflip(image)
mask = transforms.functional.hflip(mask)
return [image,mask]
return [image,mask]
class Solarize():
def __init__(self, threshold=128):
self.threshold = threshold
def __call__(self, sample):
return ImageOps.solarize(sample, self.threshold)
def get_transform(stage, gb_prob=1.0, solarize_prob=0., crop_size=224):
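    # Builds the per-stage augmentation pipeline: t_list holds image-only transforms, p_list holds paired (image, mask) transforms.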
t_list = []
color_jitter = transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if stage in ('train', 'val'):
t_list = [
transforms.RandomApply([color_jitter], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur(kernel_size=23)], p=gb_prob),
transforms.RandomApply([Solarize()], p=solarize_prob),
transforms.ToTensor(),
normalize]
p_list = [
MaskRandomResizedCrop(crop_size),
MaskRandomHorizontalFlip(),
]
elif stage == 'ft':
t_list = [
transforms.ToTensor(),
normalize]
p_list = [
MaskRandomResizedCrop(crop_size),
MaskRandomHorizontalFlip(),
]
elif stage == 'test':
t_list = [
transforms.ToTensor(),
normalize]
p_list = [
transforms.Resize(256),
transforms.CenterCrop(crop_size),
]
transform = CustomCompose(t_list,p_list)
return transform
|
[
"torchvision.transforms.ToPILImage",
"torch.LongTensor",
"torchvision.transforms.ColorJitter",
"numpy.array",
"pycocotools.coco.COCO",
"torchvision.transforms.ToTensor",
"torchvision.datasets.folder.make_dataset",
"pickle.load",
"PIL.ImageOps.solarize",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.RandomResizedCrop.get_params",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomApply",
"torchvision.transforms.RandomGrayscale",
"torch.stack",
"os.path.join",
"torchvision.transforms.functional.hflip",
"torchvision.transforms.functional.crop",
"numpy.random.uniform",
"torch.rand"
] |
[((6391, 6433), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.2)', '(0.1)'], {}), '(0.4, 0.4, 0.2, 0.1)\n', (6413, 6433), False, 'from torchvision import transforms\n'), ((6450, 6525), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (6470, 6525), False, 'from torchvision import transforms\n'), ((585, 611), 'torch.stack', 'torch.stack', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (596, 611), False, 'import torch\n'), ((631, 648), 'torch.stack', 'torch.stack', (['mask'], {}), '(mask)\n', (642, 648), False, 'import torch\n'), ((912, 958), 'torchvision.datasets.folder.make_dataset', 'make_dataset', (['self.root'], {'extensions': 'extensions'}), '(self.root, extensions=extensions)\n', (924, 958), False, 'from torchvision.datasets.folder import default_loader, make_dataset, IMG_EXTENSIONS\n'), ((1835, 1848), 'pycocotools.coco.COCO', 'COCO', (['annFile'], {}), '(annFile)\n', (1839, 1848), False, 'from pycocotools.coco import COCO\n'), ((2572, 2605), 'os.path.join', 'os.path.join', (['self.root', 'filename'], {}), '(self.root, filename)\n', (2584, 2605), False, 'import os\n'), ((2981, 3003), 'torch.LongTensor', 'torch.LongTensor', (['mask'], {}), '(mask)\n', (2997, 3003), False, 'import torch\n'), ((3443, 3492), 'numpy.random.uniform', 'np.random.uniform', (['self.sigma_min', 'self.sigma_max'], {}), '(self.sigma_min, self.sigma_max)\n', (3460, 3492), True, 'import numpy as np\n'), ((4336, 4357), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4355, 4357), False, 'from torchvision import transforms\n'), ((4379, 4402), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4400, 4402), False, 'from torchvision import transforms\n'), ((4826, 4926), 'torchvision.transforms.RandomResizedCrop.get_params', 'transforms.RandomResizedCrop.get_params', (['image'], {'scale': '(0.08, 1.0)', 'ratio': '(3.0 / 4.0, 4.0 / 3.0)'}), '(image, scale=(0.08, 1.0), ratio=(\n 3.0 / 4.0, 4.0 / 3.0))\n', (4865, 4926), False, 'from torchvision import transforms\n'), ((6241, 6282), 'PIL.ImageOps.solarize', 'ImageOps.solarize', (['sample', 'self.threshold'], {}), '(sample, self.threshold)\n', (6258, 6282), False, 'from PIL import Image, ImageOps\n'), ((1167, 1184), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1178, 1184), False, 'import pickle\n'), ((1442, 1459), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1453, 1459), False, 'import pickle\n'), ((2404, 2421), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2415, 2421), False, 'import pickle\n'), ((3524, 3537), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3532, 3537), True, 'import numpy as np\n'), ((4961, 5006), 'torchvision.transforms.functional.crop', 'transforms.functional.crop', (['image', 'i', 'j', 'h', 'w'], {}), '(image, i, j, h, w)\n', (4987, 5006), False, 'from torchvision import transforms\n'), ((5221, 5265), 'torchvision.transforms.functional.crop', 'transforms.functional.crop', (['mask', 'i', 'j', 'h', 'w'], {}), '(mask, i, j, h, w)\n', (5247, 5265), False, 'from torchvision import transforms\n'), ((5899, 5912), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (5909, 5912), False, 'import torch\n'), ((5943, 5977), 'torchvision.transforms.functional.hflip', 'transforms.functional.hflip', (['image'], {}), '(image)\n', (5970, 5977), False, 'from torchvision import 
transforms\n'), ((5997, 6030), 'torchvision.transforms.functional.hflip', 'transforms.functional.hflip', (['mask'], {}), '(mask)\n', (6024, 6030), False, 'from torchvision import transforms\n'), ((6628, 6673), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[color_jitter]'], {'p': '(0.8)'}), '([color_jitter], p=0.8)\n', (6650, 6673), False, 'from torchvision import transforms\n'), ((6687, 6720), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (6713, 6720), False, 'from torchvision import transforms\n'), ((6880, 6901), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6899, 6901), False, 'from torchvision import transforms\n'), ((7114, 7135), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7133, 7135), False, 'from torchvision import transforms\n'), ((7354, 7375), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7373, 7375), False, 'from torchvision import transforms\n'), ((7440, 7462), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (7457, 7462), False, 'from torchvision import transforms\n'), ((7476, 7508), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['crop_size'], {}), '(crop_size)\n', (7497, 7508), False, 'from torchvision import transforms\n')]
|
from __future__ import absolute_import, print_function, division
import numpy as np
from numba import vectorize
from numba import ocl, float64
from numba import unittest_support as unittest
from numba import config
from numba.ocl.testing import OCLTestCase
sig = [float64(float64, float64)]
target='ocl'
class TestOCLVectorizeScalarArg(OCLTestCase):
def test_vectorize_scalar_arg(self):
@vectorize(sig, target=target)
def vector_add(a, b):
return a + b
A = np.arange(10, dtype=np.float64)
dA = ocl.to_device(A)
vector_add(1.0, dA)
def test_vectorize_all_scalars(self):
@vectorize(sig, target=target)
def vector_add(a, b):
return a + b
vector_add(1.0, 1.0)
if __name__ == '__main__':
unittest.main()
|
[
"numba.unittest_support.main",
"numba.vectorize",
"numba.float64",
"numba.ocl.to_device",
"numpy.arange"
] |
[((265, 290), 'numba.float64', 'float64', (['float64', 'float64'], {}), '(float64, float64)\n', (272, 290), False, 'from numba import ocl, float64\n'), ((802, 817), 'numba.unittest_support.main', 'unittest.main', ([], {}), '()\n', (815, 817), True, 'from numba import unittest_support as unittest\n'), ((406, 435), 'numba.vectorize', 'vectorize', (['sig'], {'target': 'target'}), '(sig, target=target)\n', (415, 435), False, 'from numba import vectorize\n'), ((504, 535), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (513, 535), True, 'import numpy as np\n'), ((549, 565), 'numba.ocl.to_device', 'ocl.to_device', (['A'], {}), '(A)\n', (562, 565), False, 'from numba import ocl, float64\n'), ((646, 675), 'numba.vectorize', 'vectorize', (['sig'], {'target': 'target'}), '(sig, target=target)\n', (655, 675), False, 'from numba import vectorize\n')]
|
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# TODO: Load ground truth as well (?)
import numpy as np
import pandas as pd
import os.path
import datetime
def read_eco(path, date_start, date_end):
"""
Parse ECO csv files.
Parameters
----------
path : Path to the directory of ECO csv files
date_start : Same as file name (e.g., '2012-06-01T09:00')
date_end : As above
Returns
-------
data : Pandas dataframe with measurements including 'active', 'reactive',
        'voltage', 'phase_angle', 'current'
"""
# d = datetime.date.fromisoformat(date_start) # Only valid in python 3.7,
# dropped for now.
d_start = datetime.datetime.strptime(date_start, '%Y-%m-%dT%H:%M')
d = d_start
start_sec = d.hour * 3600 + d.minute * 60 + d.second
end_sec = 24 * 3600
d_end = datetime.datetime.strptime(date_end, '%Y-%m-%dT%H:%M')
phase_df_list = [pd.DataFrame(), pd.DataFrame(), pd.DataFrame()]
while d <= d_end:
print('ECO: Loading building ' + os.path.basename(path) + ', time ' +
d.strftime('%Y-%m-%dT%H:%M'))
f = os.path.join(path, d.strftime('%Y-%m-%d') + '.csv')
if not os.path.exists(f):
d += datetime.timedelta(days=1)
d = d.replace(hour=0, minute=0)
continue
if d.date() == d_end.date():
# Just making sure, this is redundant
d = d.replace(hour=0, minute=0)
end_sec = d_end.hour * 3600 + d_end.minute * 60 + d_end.second
df = pd.read_csv(f, header=None, index_col=False,
names=[i for i in range(1, 17)], dtype=np.float32)
df = df.iloc[start_sec:end_sec]
periods = df.shape[0] # No missing seconds in dataset
# From nilmtk ECO dataset converter
phases = []
for phase in range(1, 4):
df_phase = df.loc[:, [1 + phase, 5 + phase, 8 + phase, 13 + phase]]
            power = df_phase.loc[:, [1 + phase, 13 + phase]].values
reactive = power[:, 0] * np.tan(power[:, 1] * np.pi / 180)
df_phase['Q'] = reactive
# No timezone
df_phase.index = pd.date_range(
start=d, periods=periods, freq='S')
column_names = {
1 + phase: 'active',
5 + phase: 'current',
8 + phase: 'voltage',
13 + phase: 'phase_angle',
'Q': 'reactive',
}
df_phase.columns = [column_names[col] for col in df_phase.columns]
power_active = df_phase['active']
tmp_before = np.size(power_active)
df_phase = df_phase[power_active != -1]
power_active = df_phase['active']
tmp_after = np.size(power_active)
if tmp_before != tmp_after:
print('Removed missing measurements - Size before: ' +
str(tmp_before) + ', size after:' + str(tmp_after))
phases.append(df_phase)
phase_df_list[phase - 1] = \
pd.concat([phase_df_list[phase - 1], df_phase])
d += datetime.timedelta(days=1)
d = d.replace(hour=0, minute=0)
start_sec = 0
agg_df = pd.DataFrame([], columns=['active', 'reactive', 'voltage'])
agg_df['active'] = phase_df_list[0]['active'] + \
phase_df_list[1]['active'] + \
phase_df_list[2]['active']
agg_df['reactive'] = phase_df_list[0]['reactive'] + \
phase_df_list[1]['reactive'] + phase_df_list[2]['reactive']
agg_df['voltage'] = (phase_df_list[0]['voltage'] +
phase_df_list[1]['voltage'] +
phase_df_list[2]['voltage']) / 3.0
for i in range(len(phase_df_list)):
phase_df_list[i] = \
phase_df_list[i].loc[(phase_df_list[i].index >= d_start) &
(phase_df_list[i].index <= d_end)]
agg_df = agg_df.loc[(agg_df.index >= d_start) & (agg_df.index <= d_end)]
return (phase_df_list, agg_df)
|
[
"numpy.tan",
"datetime.datetime.strptime",
"numpy.size",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.concat",
"pandas.date_range"
] |
[((1174, 1230), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_start', '"""%Y-%m-%dT%H:%M"""'], {}), "(date_start, '%Y-%m-%dT%H:%M')\n", (1200, 1230), False, 'import datetime\n'), ((1340, 1394), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_end', '"""%Y-%m-%dT%H:%M"""'], {}), "(date_end, '%Y-%m-%dT%H:%M')\n", (1366, 1394), False, 'import datetime\n'), ((3723, 3782), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['active', 'reactive', 'voltage']"}), "([], columns=['active', 'reactive', 'voltage'])\n", (3735, 3782), True, 'import pandas as pd\n'), ((1416, 1430), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1428, 1430), True, 'import pandas as pd\n'), ((1432, 1446), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1444, 1446), True, 'import pandas as pd\n'), ((1448, 1462), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1460, 1462), True, 'import pandas as pd\n'), ((3621, 3647), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3639, 3647), False, 'import datetime\n'), ((1723, 1749), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1741, 1749), False, 'import datetime\n'), ((2667, 2716), 'pandas.date_range', 'pd.date_range', ([], {'start': 'd', 'periods': 'periods', 'freq': '"""S"""'}), "(start=d, periods=periods, freq='S')\n", (2680, 2716), True, 'import pandas as pd\n'), ((3116, 3137), 'numpy.size', 'np.size', (['power_active'], {}), '(power_active)\n', (3123, 3137), True, 'import numpy as np\n'), ((3260, 3281), 'numpy.size', 'np.size', (['power_active'], {}), '(power_active)\n', (3267, 3281), True, 'import numpy as np\n'), ((3560, 3607), 'pandas.concat', 'pd.concat', (['[phase_df_list[phase - 1], df_phase]'], {}), '([phase_df_list[phase - 1], df_phase])\n', (3569, 3607), True, 'import pandas as pd\n'), ((2541, 2574), 'numpy.tan', 'np.tan', (['(power[:, 1] * np.pi / 180)'], {}), '(power[:, 1] * np.pi / 180)\n', (2547, 2574), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
import numpy as np
from badgyal.policy_index import policy_index
columns = 'abcdefgh'
rows = '12345678'
promotions = 'rbq' # N is encoded as normal move
col_index = {columns[i] : i for i in range(len(columns))}
row_index = {rows[i] : i for i in range(len(rows))}
def index_to_position(x):
return columns[x[0]] + rows[x[1]]
def position_to_index(p):
return col_index[p[0]], row_index[p[1]]
def valid_index(i):
if i[0] > 7 or i[0] < 0:
return False
if i[1] > 7 or i[1] < 0:
return False
return True
def queen_move(start, direction, steps):
i = position_to_index(start)
dir_vectors = {'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1),
'S':(0, -1), 'SW':(-1, -1), 'W': (-1, 0), 'NW': (-1, 1)}
v = dir_vectors[direction]
i = i[0] + v[0] * steps, i[1] + v[1] * steps
if not valid_index(i):
return None
return index_to_position(i)
def knight_move(start, direction, steps):
i = position_to_index(start)
dir_vectors = {'N': (1, 2), 'NE': (2, 1), 'E': (2, -1), 'SE': (1, -2),
'S':(-1, -2), 'SW':(-2, -1), 'W': (-2, 1), 'NW': (-1, 2)}
v = dir_vectors[direction]
i = i[0] + v[0] * steps, i[1] + v[1] * steps
if not valid_index(i):
return None
return index_to_position(i)
def make_map(kind='matrix'):
# 56 planes of queen moves
moves = []
for direction in ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']:
for steps in range(1, 8):
for r0 in rows:
for c0 in columns:
start = c0 + r0
end = queen_move(start, direction, steps)
if end == None:
moves.append('illegal')
else:
moves.append(start+end)
# 8 planes of knight moves
for direction in ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']:
for r0 in rows:
for c0 in columns:
start = c0 + r0
end = knight_move(start, direction, 1)
if end == None:
moves.append('illegal')
else:
moves.append(start+end)
# 9 promotions
for direction in ['NW', 'N', 'NE']:
for promotion in promotions:
for r0 in rows:
for c0 in columns:
# Promotion only in the second last rank
if r0 != '7':
moves.append('illegal')
continue
start = c0 + r0
end = queen_move(start, direction, 1)
if end == None:
moves.append('illegal')
else:
moves.append(start+end+promotion)
for m in policy_index:
if m not in moves:
raise ValueError('Missing move: {}'.format(m))
az_to_lc0 = np.zeros((80*8*8, len(policy_index)), dtype=np.float32)
indices = []
legal_moves = 0
for e, m in enumerate(moves):
if m == 'illegal':
indices.append(-1)
continue
legal_moves += 1
# Check for missing moves
if m not in policy_index:
raise ValueError('Missing move: {}'.format(m))
i = policy_index.index(m)
indices.append(i)
az_to_lc0[e][i] = 1
assert legal_moves == len(policy_index)
assert np.sum(az_to_lc0) == legal_moves
for e in range(80*8*8):
for i in range(len(policy_index)):
pass
if kind == 'matrix':
return az_to_lc0
elif kind == 'index':
return indices
if __name__ == "__main__":
# Generate policy map include file for lc0
if len(sys.argv) != 2:
raise ValueError("Output filename is needed as a command line argument")
az_to_lc0 = np.ravel(make_map('index'))
header = \
"""/*
This file is part of Leela Chess Zero.
Copyright (C) 2019 The LCZero Authors
Leela Chess is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Leela Chess is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Leela Chess. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
namespace lczero {
"""
line_length = 12
with open(sys.argv[1], 'w') as f:
f.write(header+'\n')
f.write('const short kConvPolicyMap[] = {\\\n')
for e, i in enumerate(az_to_lc0):
if e % line_length == 0 and e > 0:
f.write('\n')
f.write(str(i).rjust(5))
if e != len(az_to_lc0)-1:
f.write(',')
f.write('};\n\n')
f.write('} // namespace lczero')
|
[
"numpy.sum",
"badgyal.policy_index.policy_index.index"
] |
[((3307, 3328), 'badgyal.policy_index.policy_index.index', 'policy_index.index', (['m'], {}), '(m)\n', (3325, 3328), False, 'from badgyal.policy_index import policy_index\n'), ((3439, 3456), 'numpy.sum', 'np.sum', (['az_to_lc0'], {}), '(az_to_lc0)\n', (3445, 3456), True, 'import numpy as np\n')]
|
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import keras.backend as k
import numpy as np
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.models import Sequential
from art.classifiers import KerasClassifier
from art.poison_detection import ActivationDefence
from art.utils import load_mnist, master_seed
logger = logging.getLogger('testLogger')
NB_TRAIN, NB_TEST, BATCH_SIZE = 300, 10, 128
class TestActivationDefence(unittest.TestCase):
# python -m unittest discover art/ -p 'activation_defence_unittest.py'
@classmethod
def setUpClass(cls):
(x_train, y_train), (x_test, y_test), _, _ = load_mnist()
x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
cls.mnist = (x_train, y_train), (x_test, y_test)
k.set_learning_phase(1)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
cls.classifier = KerasClassifier((0, 1), model=model)
cls.classifier.fit(x_train, y_train, nb_epochs=2, batch_size=128)
cls.defence = ActivationDefence(cls.classifier, x_train, y_train)
def setUp(self):
# Set master seed
master_seed(1234)
@unittest.expectedFailure
def test_wrong_parameters_1(self):
self.defence.set_params(nb_clusters=0)
@unittest.expectedFailure
def test_wrong_parameters_2(self):
self.defence.set_params(clustering_method='what')
@unittest.expectedFailure
def test_wrong_parameters_3(self):
self.defence.set_params(reduce='what')
@unittest.expectedFailure
def test_wrong_parameters_4(self):
self.defence.set_params(cluster_analysis='what')
def test_activations(self):
(x_train, _), (_, _) = self.mnist
activations = self.defence._get_activations()
self.assertEqual(len(x_train), len(activations))
def test_output_clusters(self):
# Get MNIST
(x_train, _), (_, _) = self.mnist
n_classes = self.classifier.nb_classes
for nb_clusters in range(2, 5):
clusters_by_class, _ = self.defence.cluster_activations(nb_clusters=nb_clusters)
# Verify expected number of classes
self.assertEqual(np.shape(clusters_by_class)[0], n_classes)
# Check we get the expected number of clusters:
found_clusters = len(np.unique(clusters_by_class[0]))
self.assertEqual(found_clusters, nb_clusters)
# Check right amount of data
n_dp = 0
for i in range(0, n_classes):
n_dp += len(clusters_by_class[i])
self.assertEqual(len(x_train), n_dp)
def test_detect_poison(self):
# Get MNIST
(x_train, _), (_, _) = self.mnist
report, is_clean_lst = self.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce='PCA')
sum_clean1 = sum(is_clean_lst)
# Check number of items in is_clean
self.assertEqual(len(x_train), len(is_clean_lst))
# Test right number of clusters
found_clusters = len(np.unique(self.defence.clusters_by_class[0]))
self.assertEqual(found_clusters, 2)
report, is_clean_lst = self.defence.detect_poison(nb_clusters=3, nb_dims=10, reduce='PCA',
cluster_analysis='distance')
self.assertEqual(len(x_train), len(is_clean_lst))
# Test change of state to new number of clusters:
found_clusters = len(np.unique(self.defence.clusters_by_class[0]))
self.assertEqual(found_clusters, 3)
# Test clean data has changed
sum_clean2 = sum(is_clean_lst)
self.assertNotEqual(sum_clean1, sum_clean2)
report, is_clean_lst = self.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce='PCA',
cluster_analysis='distance')
sum_dist = sum(is_clean_lst)
report, is_clean_lst = self.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce='PCA',
cluster_analysis='smaller')
sum_size = sum(is_clean_lst)
self.assertNotEqual(sum_dist, sum_size)
def test_analyze_cluster(self):
# Get MNIST
(x_train, _), (_, _) = self.mnist
self.defence.analyze_clusters(cluster_analysis='relative-size')
self.defence.analyze_clusters(cluster_analysis='silhouette-scores')
report, dist_clean_by_class = self.defence.analyze_clusters(cluster_analysis='distance')
n_classes = self.classifier.nb_classes
self.assertEqual(n_classes, len(dist_clean_by_class))
# Check right amount of data
n_dp = 0
for i in range(0, n_classes):
n_dp += len(dist_clean_by_class[i])
self.assertEqual(len(x_train), n_dp)
report, sz_clean_by_class = self.defence.analyze_clusters(cluster_analysis='smaller')
n_classes = self.classifier.nb_classes
self.assertEqual(n_classes, len(sz_clean_by_class))
# Check right amount of data
n_dp = 0
sum_sz = 0
sum_dis = 0
for i in range(0, n_classes):
n_dp += len(sz_clean_by_class[i])
sum_sz += sum(sz_clean_by_class[i])
sum_dis += sum(dist_clean_by_class[i])
self.assertEqual(len(x_train), n_dp)
# Very unlikely that they are the same
self.assertNotEqual(sum_dis, sum_sz, msg='This is very unlikely to happen... there may be an error')
if __name__ == '__main__':
unittest.main()
|
[
"logging.getLogger",
"art.utils.load_mnist",
"art.classifiers.KerasClassifier",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"numpy.unique",
"keras.models.Sequential",
"keras.layers.Dense",
"art.utils.master_seed",
"keras.layers.Dropout",
"unittest.main",
"numpy.shape",
"keras.backend.set_learning_phase",
"art.poison_detection.ActivationDefence"
] |
[((1522, 1553), 'logging.getLogger', 'logging.getLogger', (['"""testLogger"""'], {}), "('testLogger')\n", (1539, 1553), False, 'import logging\n'), ((7169, 7184), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7182, 7184), False, 'import unittest\n'), ((1823, 1835), 'art.utils.load_mnist', 'load_mnist', ([], {}), '()\n', (1833, 1835), False, 'from art.utils import load_mnist, master_seed\n'), ((1968, 1991), 'keras.backend.set_learning_phase', 'k.set_learning_phase', (['(1)'], {}), '(1)\n', (1988, 1991), True, 'import keras.backend as k\n'), ((2008, 2020), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2018, 2020), False, 'from keras.models import Sequential\n'), ((2543, 2579), 'art.classifiers.KerasClassifier', 'KerasClassifier', (['(0, 1)'], {'model': 'model'}), '((0, 1), model=model)\n', (2558, 2579), False, 'from art.classifiers import KerasClassifier\n'), ((2677, 2728), 'art.poison_detection.ActivationDefence', 'ActivationDefence', (['cls.classifier', 'x_train', 'y_train'], {}), '(cls.classifier, x_train, y_train)\n', (2694, 2728), False, 'from art.poison_detection import ActivationDefence\n'), ((2785, 2802), 'art.utils.master_seed', 'master_seed', (['(1234)'], {}), '(1234)\n', (2796, 2802), False, 'from art.utils import load_mnist, master_seed\n'), ((2039, 2124), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'x_train.shape[1:]'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]\n )\n", (2045, 2124), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2139, 2176), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2145, 2176), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2196, 2226), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2208, 2226), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2246, 2259), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2253, 2259), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2279, 2288), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2286, 2288), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2308, 2337), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2313, 2337), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2357, 2369), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2364, 2369), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((2389, 2420), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (2394, 2420), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n'), ((4675, 4719), 'numpy.unique', 'np.unique', (['self.defence.clusters_by_class[0]'], {}), '(self.defence.clusters_by_class[0])\n', (4684, 4719), True, 'import numpy as np\n'), ((5098, 5142), 'numpy.unique', 'np.unique', (['self.defence.clusters_by_class[0]'], {}), '(self.defence.clusters_by_class[0])\n', (5107, 5142), True, 'import numpy as np\n'), ((3972, 4003), 'numpy.unique', 'np.unique', (['clusters_by_class[0]'], {}), '(clusters_by_class[0])\n', (3981, 4003), True, 'import numpy as np\n'), ((3836, 3863), 
'numpy.shape', 'np.shape', (['clusters_by_class'], {}), '(clusters_by_class)\n', (3844, 3863), True, 'import numpy as np\n')]
|
# Getting started with NumPy
# Reference: http://qiita.com/wellflat/items/284ecc4116208d155e01
# 2016/1/16
import numpy as np
a = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
print(a)
print(a.flags)
# C_CONTIGUOUS : True    ## whether the data is laid out contiguously in memory (C order)
# F_CONTIGUOUS : False   ## same as above (Fortran order)
# OWNDATA : True         ## whether the array owns its data; False for views (described later)
# WRITEABLE : True       ## whether the data can be modified
# ALIGNED : True         ## whether the data type is properly aligned
# UPDATEIFCOPY : False   ## cannot be changed to True, so it can safely be ignored
# Number of dimensions
print( "%d dimensions" % a.ndim )
# 2
# Number of elements
print( "number of elements: %d" % a.size )
# 12
# Number of elements in each dimension (rows, columns)
print( "elements per dimension (rows, columns): (%d, %d)" % a.shape )
# (4, 3)
# Bytes per element
print( "bytes per element: %d" % a.itemsize )
# 4
# May be 8 on a 64-bit build
# Bytes to step to the next row
# 24 bytes to the next row, 8 bytes to the next column
print( "%d bytes to the next row, %d bytes to the next column" % a.strides )
# (12, 4)
# 1,2,3
# 4,5,6
# meaning the elements are laid out serially in that order.
# Total number of bytes in the whole array
print( "total bytes of the array: %d bytes" % a.nbytes )
# 48 # a.itemsize * a.size
# Element data type
print( "dtype: %s" % a.dtype )
# int32
|
[
"numpy.array"
] |
[((108, 165), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n', (116, 165), True, 'import numpy as np\n')]
|
import numpy as np
km2 = np.array([44410., 5712., 37123., 0., 25757.])
anos2 = np.array([2003, 1991, 1990, 2019, 2006])
idade = 2019 - anos2
km_media = km2 / idade
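# Note: the 2019 entry has idade == 0, so km2 / idade yields nan (0.0 / 0) for it and emits a RuntimeWarning.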
|
[
"numpy.array"
] |
[((26, 76), 'numpy.array', 'np.array', (['[44410.0, 5712.0, 37123.0, 0.0, 25757.0]'], {}), '([44410.0, 5712.0, 37123.0, 0.0, 25757.0])\n', (34, 76), True, 'import numpy as np\n'), ((80, 120), 'numpy.array', 'np.array', (['[2003, 1991, 1990, 2019, 2006]'], {}), '([2003, 1991, 1990, 2019, 2006])\n', (88, 120), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.signal import hilbert
from PyEMD.compact import filt6, pade6
# Visualisation is an optional module. To minimise installation, `matplotlib` is not added
# by default. Please install extras with `pip install -r requirement-extra.txt`.
try:
import pylab as plt
except ImportError:
pass
class Visualisation(object):
"""Simple visualisation helper.
This class is for quick and simple result visualisation.
"""
PLOT_WIDTH = 6
PLOT_HEIGHT_PER_IMF = 1.5
def __init__(self, emd_instance=None):
self.emd_instance = emd_instance
self.imfs = None
self.residue = None
if emd_instance is not None:
self.imfs, self.residue = self.emd_instance.get_imfs_and_residue()
def _check_imfs(self, imfs, residue, include_residue):
"""Checks for passed imfs and residue."""
imfs = imfs if imfs is not None else self.imfs
residue = residue if residue is not None else self.residue
if imfs is None:
raise AttributeError("No imfs passed to plot")
if include_residue and residue is None:
raise AttributeError("Requested to plot residue but no residue provided")
return imfs, residue
def plot_imfs(self, imfs=None, residue=None, t=None, include_residue=True):
"""Plots and shows all IMFs.
All parameters are optional since the `emd` object could have been passed when instantiating this object.
The residual is an optional and can be excluded by setting `include_residue=False`.
"""
imfs, residue = self._check_imfs(imfs, residue, include_residue)
num_rows, t_length = imfs.shape
num_rows += include_residue is True
t = t if t is not None else range(t_length)
fig, axes = plt.subplots(num_rows, 1, figsize=(self.PLOT_WIDTH, num_rows * self.PLOT_HEIGHT_PER_IMF))
if num_rows == 1:
            axes = [axes]  # plt.subplots returns a single Axes when there is only one row
axes[0].set_title("Time series")
for num, imf in enumerate(imfs):
ax = axes[num]
ax.plot(t, imf)
ax.set_ylabel("IMF " + str(num + 1))
if include_residue:
ax = axes[-1]
ax.plot(t, residue)
ax.set_ylabel("Res")
# Making the layout a bit more pleasant to the eye
plt.tight_layout()
def plot_instant_freq(self, t, imfs=None, order=False, alpha=None):
"""Plots and shows instantaneous frequencies for all provided imfs.
The necessary parameter is `t` which is the time array used to compute the EMD.
One should pass `imfs` if no `emd` instances is passed when creating the Visualisation object.
Parameters
----------
order : bool (default: False)
Represents whether the finite difference scheme is
low-order (1st order forward scheme) or high-order (6th order
compact scheme). The default value is False (low-order)
alpha : float (default: None)
Filter intensity. Default value is None, which
is equivalent to `alpha` = 0.5, meaning that no filter is applied.
The `alpha` values must be in between -0.5 (fully active) and 0.5
(no filter).
"""
if alpha is not None:
assert -0.5 < alpha < 0.5, "`alpha` must be in between -0.5 and 0.5"
imfs, _ = self._check_imfs(imfs, None, False)
num_rows = imfs.shape[0]
imfs_inst_freqs = self._calc_inst_freq(imfs, t, order=order, alpha=alpha)
fig, axes = plt.subplots(num_rows, 1, figsize=(self.PLOT_WIDTH, num_rows * self.PLOT_HEIGHT_PER_IMF))
if num_rows == 1:
axes = fig.axes
axes[0].set_title("Instantaneous frequency")
for num, imf_inst_freq in enumerate(imfs_inst_freqs):
ax = axes[num]
ax.plot(t, imf_inst_freq)
ax.set_ylabel("IMF {} [Hz]".format(num + 1))
# Making the layout a bit more pleasant to the eye
plt.tight_layout()
def _calc_inst_phase(self, sig, alpha):
"""Extract analytical signal through the Hilbert Transform."""
analytic_signal = hilbert(sig) # Apply Hilbert transform to each row
if alpha is not None:
assert -0.5 < alpha < 0.5, "`alpha` must be in between -0.5 and 0.5"
real_part = np.array([filt6(row.real, alpha) for row in analytic_signal])
imag_part = np.array([filt6(row.imag, alpha) for row in analytic_signal])
analytic_signal = real_part + 1j * imag_part
phase = np.unwrap(np.angle(analytic_signal)) # Compute angle between img and real
if alpha is not None:
phase = np.array([filt6(row, alpha) for row in phase]) # Filter phase
return phase
def _calc_inst_freq(self, sig, t, order, alpha):
"""Extracts instantaneous frequency through the Hilbert Transform."""
inst_phase = self._calc_inst_phase(sig, alpha=alpha)
if order is False:
inst_freqs = np.diff(inst_phase) / (2 * np.pi * (t[1] - t[0]))
inst_freqs = np.concatenate((inst_freqs, inst_freqs[:, -1].reshape(inst_freqs[:, -1].shape[0], 1)), axis=1)
else:
inst_freqs = [pade6(row, t[1] - t[0]) / (2.0 * np.pi) for row in inst_phase]
if alpha is None:
return np.array(inst_freqs)
else:
return np.array([filt6(row, alpha) for row in inst_freqs]) # Filter freqs
def show(self):
plt.show()
if __name__ == "__main__":
from PyEMD import EMD
# Simple signal example
t = np.arange(0, 3, 0.01)
S = np.sin(13 * t + 0.2 * t ** 1.4) - np.cos(3 * t)
emd = EMD()
emd.emd(S)
imfs, res = emd.get_imfs_and_residue()
# Initiate visualisation with emd instance
vis = Visualisation(emd)
# Create a plot with all IMFs and residue
vis.plot_imfs(imfs=imfs, residue=res, t=t, include_residue=True)
# Create a plot with instantaneous frequency of all IMFs
vis.plot_instant_freq(t, imfs=imfs)
# Show both plots
vis.show()
|
[
"PyEMD.compact.filt6",
"PyEMD.compact.pade6",
"pylab.tight_layout",
"numpy.diff",
"numpy.angle",
"numpy.array",
"numpy.cos",
"numpy.sin",
"pylab.subplots",
"scipy.signal.hilbert",
"PyEMD.EMD",
"numpy.arange",
"pylab.show"
] |
[((5626, 5647), 'numpy.arange', 'np.arange', (['(0)', '(3)', '(0.01)'], {}), '(0, 3, 0.01)\n', (5635, 5647), True, 'import numpy as np\n'), ((5715, 5720), 'PyEMD.EMD', 'EMD', ([], {}), '()\n', (5718, 5720), False, 'from PyEMD import EMD\n'), ((1818, 1912), 'pylab.subplots', 'plt.subplots', (['num_rows', '(1)'], {'figsize': '(self.PLOT_WIDTH, num_rows * self.PLOT_HEIGHT_PER_IMF)'}), '(num_rows, 1, figsize=(self.PLOT_WIDTH, num_rows * self.\n PLOT_HEIGHT_PER_IMF))\n', (1830, 1912), True, 'import pylab as plt\n'), ((2341, 2359), 'pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2357, 2359), True, 'import pylab as plt\n'), ((3579, 3673), 'pylab.subplots', 'plt.subplots', (['num_rows', '(1)'], {'figsize': '(self.PLOT_WIDTH, num_rows * self.PLOT_HEIGHT_PER_IMF)'}), '(num_rows, 1, figsize=(self.PLOT_WIDTH, num_rows * self.\n PLOT_HEIGHT_PER_IMF))\n', (3591, 3673), True, 'import pylab as plt\n'), ((4031, 4049), 'pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4047, 4049), True, 'import pylab as plt\n'), ((4192, 4204), 'scipy.signal.hilbert', 'hilbert', (['sig'], {}), '(sig)\n', (4199, 4204), False, 'from scipy.signal import hilbert\n'), ((5523, 5533), 'pylab.show', 'plt.show', ([], {}), '()\n', (5531, 5533), True, 'import pylab as plt\n'), ((5656, 5687), 'numpy.sin', 'np.sin', (['(13 * t + 0.2 * t ** 1.4)'], {}), '(13 * t + 0.2 * t ** 1.4)\n', (5662, 5687), True, 'import numpy as np\n'), ((5690, 5703), 'numpy.cos', 'np.cos', (['(3 * t)'], {}), '(3 * t)\n', (5696, 5703), True, 'import numpy as np\n'), ((4610, 4635), 'numpy.angle', 'np.angle', (['analytic_signal'], {}), '(analytic_signal)\n', (4618, 4635), True, 'import numpy as np\n'), ((5372, 5392), 'numpy.array', 'np.array', (['inst_freqs'], {}), '(inst_freqs)\n', (5380, 5392), True, 'import numpy as np\n'), ((5054, 5073), 'numpy.diff', 'np.diff', (['inst_phase'], {}), '(inst_phase)\n', (5061, 5073), True, 'import numpy as np\n'), ((4389, 4411), 'PyEMD.compact.filt6', 'filt6', (['row.real', 'alpha'], {}), '(row.real, alpha)\n', (4394, 4411), False, 'from PyEMD.compact import filt6, pade6\n'), ((4475, 4497), 'PyEMD.compact.filt6', 'filt6', (['row.imag', 'alpha'], {}), '(row.imag, alpha)\n', (4480, 4497), False, 'from PyEMD.compact import filt6, pade6\n'), ((4735, 4752), 'PyEMD.compact.filt6', 'filt6', (['row', 'alpha'], {}), '(row, alpha)\n', (4740, 4752), False, 'from PyEMD.compact import filt6, pade6\n'), ((5264, 5287), 'PyEMD.compact.pade6', 'pade6', (['row', '(t[1] - t[0])'], {}), '(row, t[1] - t[0])\n', (5269, 5287), False, 'from PyEMD.compact import filt6, pade6\n'), ((5436, 5453), 'PyEMD.compact.filt6', 'filt6', (['row', 'alpha'], {}), '(row, alpha)\n', (5441, 5453), False, 'from PyEMD.compact import filt6, pade6\n')]
|
import random
import numpy as np
import threading
import multiprocessing
def TestSquare(square, color):
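    # Returns True only if every cell of the given sub-square equals color.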
for y in range(len(square)):
for x in range(len(square[y])):
if square[y][x] != color:
return False
return True
def TestRug(num, dimensions, squareSize, colors, outQueue, lock):
#Create random rug
rug = np.zeros((dimensions[0], dimensions[1]))
for y in range(dimensions[1]):
for x in range(dimensions[0]):
rug[y][x] = random.randint(1, colors)
#Test rug
for y in range(dimensions[1] - (squareSize[1] - 1)):
currentColor = -1
for x in range (dimensions[0]):
if rug[y][x] == currentColor:
colorXCount += 1
if colorXCount >= squareSize[0]:
if TestSquare(rug[y + 1 : y + squareSize[1], x - (squareSize[0] - 1) : x + 1], currentColor): # Don't need to test the row we just tested
lock.acquire()
try:
print(f"Rug {num} discarded.\n{rug[y : y + squareSize[1], x - (squareSize[0] - 1) : x + 1]}")
finally:
lock.release()
outQueue.put(True)
return
else:
currentColor = rug[y][x]
colorXCount = 1
#This goes slightly faster if we don't print every rug, but it's boring :)
lock.acquire()
try:
print(f"Rug {num} checked")
finally:
lock.release()
outQueue.put(False)
return
if __name__ == '__main__':
#Editable Variables
colors = 3
dimensions = [100, 100]
squareSize = [4, 4]
rugCount = 50000
maxProcesses = 100
#Do not edit
rugsFailed = 0
pool = multiprocessing.Pool(processes=maxProcesses)
m = multiprocessing.Manager()
queue = m.Queue()
lock = m.Lock()
#Create worker threads
for i in range (rugCount):
pool.apply_async(TestRug, args=(i+1, dimensions, squareSize, colors, queue, lock))
#Do work
pool.close()
pool.join()
#Get results
while not queue.empty():
if queue.get():
rugsFailed += 1
#Print results
print(f"{rugsFailed} rugs discarded.")
print(f"{(rugsFailed / rugCount) * 100}%")
wait = input("PRESS ENTER TO CONTINUE.")
|
[
"multiprocessing.Manager",
"numpy.zeros",
"random.randint",
"multiprocessing.Pool"
] |
[((383, 423), 'numpy.zeros', 'np.zeros', (['(dimensions[0], dimensions[1])'], {}), '((dimensions[0], dimensions[1]))\n', (391, 423), True, 'import numpy as np\n'), ((1886, 1930), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'maxProcesses'}), '(processes=maxProcesses)\n', (1906, 1930), False, 'import multiprocessing\n'), ((1940, 1965), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (1963, 1965), False, 'import multiprocessing\n'), ((525, 550), 'random.randint', 'random.randint', (['(1)', 'colors'], {}), '(1, colors)\n', (539, 550), False, 'import random\n')]
|
import numpy as np
from sparc.videoprocessing.processing import Processing
from sparc.videoprocessing.lkopticalflow import LKOpticalFlow
from opencmiss.zinc.scenecoordinatesystem import SCENECOORDINATESYSTEM_LOCAL, \
SCENECOORDINATESYSTEM_WINDOW_PIXEL_TOP_LEFT
from opencmiss.zinc.field import FieldFindMeshLocation
class TrackingTool(object):
def __init__(self, model):
self._master_model = model
self._tracking_points_model = model.get_tracking_points_model()
self._image_plane_model = model.get_image_plane_model()
self._image_buffer = model.get_image_buffer()
self._processor = Processing()
self._object_tracker = LKOpticalFlow(win=(20, 20), max_level=2)
self._key_index = -1
def track_key_points(self):
key_points = self._tracking_points_model.get_key_points()
if len(key_points) and self._key_index != -1:
# if self._key_index == -1:
# # Have to at least analyse something to set up the mask in the processor.
# self._analyse_roi(0, (0, 0, 1, 1))
coordinate_field = self._tracking_points_model.get_coordinate_field()
field_module = coordinate_field.getFieldmodule()
field_module.beginChange()
image_points = self._image_plane_model.convert_to_image_coordinates(key_points)
numpy_points = np.asarray(image_points, dtype=np.float32)
number_of_images = self._image_plane_model.get_frame_count()
# previous_gray_image = self._processor.get_gray_image()
_, previous_gray_image = self._processor.get_filtered_image()
image_index = self._key_index
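            # Propagate the key points forward frame by frame with Lucas-Kanade
            # optical flow, storing the tracked positions against each frame's time.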
while image_index < number_of_images:
time = self._image_plane_model.get_time_for_frame_index(image_index)
file_name = self._image_plane_model.get_image_file_name_at(image_index)
self._process_image(file_name)
_, current_gray_image = self._processor.get_filtered_image()
# current_gray_image = self._processor.get_gray_image()
new_numpy_points, st, err = self._object_tracker.lk(previous_gray_image, current_gray_image, numpy_points)
new_image_points = [(float(point[0]), float(point[1])) for point in new_numpy_points]
# new_key_points = self._image_plane_model.convert_to_model_coordinates(new_image_points)
new_key_points = new_image_points
self._tracking_points_model.set_key_points_at_time(new_key_points, time)
numpy_points = new_numpy_points
previous_gray_image = current_gray_image
image_index += 1
field_module.endChange()
def load_saved_data(self, file_name):
import json
with open(file_name) as f:
contents = f.read()
saved_data = json.loads(contents)
index_list = []
locations_list = []
for key in saved_data:
if key != 'time_array':
index_list.append(int(key) - 1)
locations_list.append(saved_data[key][0])
sorted_order = [i[0] for i in sorted(enumerate(index_list), key=lambda x: x[1])]
key_points = [locations_list[index] for index in sorted_order]
self._tracking_points_model.create_electrode_key_points(key_points)
def analyse_roi(self, image_index, zinc_sceneviewer, element, rectangle_description):
image_roi = self._convert_to_image_roi(zinc_sceneviewer, element, rectangle_description)
roi_for_cv2 = [image_roi[0], image_roi[1], image_roi[0]+image_roi[2], image_roi[1]+image_roi[3]]
image_key_points = self._analyse_roi(image_index, roi_for_cv2)
image_points = image_key_points.tolist()
key_points = self._image_plane_model.convert_to_model_coordinates(image_points)
# key_points = image_points
self._tracking_points_model.create_electrode_key_points(key_points)
def clear(self):
self._tracking_points_model.create_model()
self._master_model.get_tracking_points_scene().create_graphics()
def _process_image(self, file_name):
self._processor.read_image(file_name)
self._processor.rgb_and_blur_and_hsv(threshold=3)
self._processor.determine_electrode_mask()
# self._processor.filter_and_threshold()
def _analyse_roi(self, image_index, image_roi):
self._key_index = image_index
# file_name = self._image_plane_model.get_image_file_name_at(image_index)
# temp_index = -31
file_name = self._image_buffer[image_index - 1]
self._process_image(file_name)
self._processor.mask_and_image(image_roi)
self._processor.final_mask()
image_points = self._processor.detect_electrodes()
return image_points
def _convert_to_image_roi(self, scene_viewer, element, rectangle_description):
"""
Return a description of the rectangle in image pixels. The resulting description is [top left corner x and y,
width, height]. E.g. (1, 0, 48, 140).
:param scene_viewer:
:param element:
:param rectangle_description: top left and bottom right corners of the rectangle in NDC top left coordinates.
:return: A tuple in image pixels (x, y, width, height) describing a rectangle.
"""
x1 = rectangle_description[0]
y1 = rectangle_description[1]
x2 = rectangle_description[2]
y2 = rectangle_description[3]
coordinate_field = self._image_plane_model.get_coordinate_field()
top_left_mesh_location = _determine_the_mesh_location(
scene_viewer, x1, y1, element, coordinate_field)
bottom_right_mesh_location = _determine_the_mesh_location(
scene_viewer, x2, y2, element, coordinate_field)
return self._image_plane_model.calculate_image_pixels_rectangle(top_left_mesh_location,
bottom_right_mesh_location)
def _determine_the_mesh_location(scene_viewer, x, y, element, coordinate_field):
mesh = element.getMesh()
field_module = mesh.getFieldmodule()
field_module.beginChange()
element_group = field_module.createFieldElementGroup(mesh)
mesh_group = element_group.getMeshGroup()
mesh_group.addElement(element)
field_mouse_location = field_module.createFieldConstant([x, -y])
field_scene_viewer_projection = field_module.createFieldSceneviewerProjection(
scene_viewer, SCENECOORDINATESYSTEM_LOCAL, SCENECOORDINATESYSTEM_WINDOW_PIXEL_TOP_LEFT)
field_projection = field_module.createFieldProjection(coordinate_field, field_scene_viewer_projection)
field_x_y_projection = field_module.createFieldComponent(field_projection, [1, 2])
field_find_mesh_location = field_module.createFieldFindMeshLocation(field_mouse_location,
field_x_y_projection, mesh_group)
field_find_mesh_location.setSearchMode(FieldFindMeshLocation.SEARCH_MODE_NEAREST)
field_cache = field_module.createFieldcache()
found_element, xi_location = field_find_mesh_location.evaluateMeshLocation(field_cache, 2)
del field_cache
del field_find_mesh_location
del field_x_y_projection
del field_projection
del field_scene_viewer_projection
del field_mouse_location
del mesh_group
del element_group
del coordinate_field
field_module.endChange()
return found_element, xi_location
|
[
"json.loads",
"sparc.videoprocessing.processing.Processing",
"numpy.asarray",
"sparc.videoprocessing.lkopticalflow.LKOpticalFlow"
] |
[((636, 648), 'sparc.videoprocessing.processing.Processing', 'Processing', ([], {}), '()\n', (646, 648), False, 'from sparc.videoprocessing.processing import Processing\n'), ((680, 720), 'sparc.videoprocessing.lkopticalflow.LKOpticalFlow', 'LKOpticalFlow', ([], {'win': '(20, 20)', 'max_level': '(2)'}), '(win=(20, 20), max_level=2)\n', (693, 720), False, 'from sparc.videoprocessing.lkopticalflow import LKOpticalFlow\n'), ((1389, 1431), 'numpy.asarray', 'np.asarray', (['image_points'], {'dtype': 'np.float32'}), '(image_points, dtype=np.float32)\n', (1399, 1431), True, 'import numpy as np\n'), ((2912, 2932), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (2922, 2932), False, 'import json\n')]
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
import torch.nn.functional as F
from op_tester import op_tester
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(Path(__file__).resolve().parent.parent)
def test_mul(op_tester):
d1 = np.random.rand(2).astype(np.float32)
d2 = np.random.rand(2).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.mul([i1, i2])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + i2,
popart.reservedGradientPrefix() + o
]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=True)
t2 = torch.tensor(d2, requires_grad=True)
out = t1 * t2
d__o = torch.tensor(ref_data.getOutputTensorGrad(0))
assert not torch.isnan(d__o).any()
out.backward(d__o)
return [out, t1.grad, t2.grad, None]
op_tester.setPatterns(['PreUniRepl', 'MulArgGradOp'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, step_type='train')
def test_broadcast_mul(op_tester):
d1 = np.random.rand(2, 2).astype(np.float32)
d2 = np.random.rand(2).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.mul([i1, i2])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + i2,
popart.reservedGradientPrefix() + o
]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=True)
t2 = torch.tensor(d2, requires_grad=True)
out = t1 * t2
d__o = torch.tensor(ref_data.getOutputTensorGrad(0))
assert not torch.isnan(d__o).any()
out.backward(d__o)
return [out, t1.grad, t2.grad, None]
op_tester.setPatterns(['PreUniRepl', 'MulArgGradOp'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, step_type='train')
input_infos = (
    ([], np.float16, [], np.float32),
    ([2, 1], np.float16, [], np.float32),
    ([2, 1], np.float16, [1], np.float32),
    ([1], np.float32, [2, 2], np.float16),
)
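# Each entry is (shape of first input, dtype of first input, shape of second input, dtype of second input).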
@pytest.mark.parametrize("in_infos", input_infos)
def test_mixed_precision_floating_point_mul(in_infos, op_tester):
(shape0, type0, shape1, type1) = in_infos
d1 = np.array(np.random.rand(*shape0)).astype(type0)
d2 = np.array(np.random.rand(*shape1)).astype(type1)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.mul([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1).float()
t2 = torch.tensor(d2).float()
out = t1 * t2
# poplar takes fp16 output type in case of mixed precision inputs
return [out.half()]
op_tester.atol = 1e-03
op_tester.run(init_builder, reference)
def test_fp16_and_nonscalar_fp32_input_mul(op_tester):
d1 = np.array(np.random.rand(2, 2).astype(np.float16))
d2 = np.array(np.random.rand(2, 2).astype(np.float32))
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.mul([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
return [None]
with pytest.raises(popart.popart_exception) as e_info:
op_tester.run(init_builder, reference)
assert (e_info.value.args[0].endswith(
"incompatible types FLOAT16 and FLOAT (shapes [2 2] and [2 2])"))
|
[
"numpy.random.rand",
"pathlib.Path",
"op_tester.op_tester.setPatterns",
"op_tester.op_tester.run",
"torch.tensor",
"pytest.mark.parametrize",
"popart.reservedGradientPrefix",
"pytest.raises",
"torch.isnan"
] |
[((2531, 2579), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_infos"""', 'input_infos'], {}), "('in_infos', input_infos)\n", (2554, 2579), False, 'import pytest\n'), ((1155, 1241), 'op_tester.op_tester.setPatterns', 'op_tester.setPatterns', (["['PreUniRepl', 'MulArgGradOp']"], {'enableRuntimeAsserts': '(False)'}), "(['PreUniRepl', 'MulArgGradOp'], enableRuntimeAsserts=\n False)\n", (1176, 1241), False, 'from op_tester import op_tester\n'), ((1267, 1324), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference'], {'step_type': '"""train"""'}), "(init_builder, reference, step_type='train')\n", (1280, 1324), False, 'from op_tester import op_tester\n'), ((2170, 2256), 'op_tester.op_tester.setPatterns', 'op_tester.setPatterns', (["['PreUniRepl', 'MulArgGradOp']"], {'enableRuntimeAsserts': '(False)'}), "(['PreUniRepl', 'MulArgGradOp'], enableRuntimeAsserts=\n False)\n", (2191, 2256), False, 'from op_tester import op_tester\n'), ((2282, 2339), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference'], {'step_type': '"""train"""'}), "(init_builder, reference, step_type='train')\n", (2295, 2339), False, 'from op_tester import op_tester\n'), ((3275, 3313), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference'], {}), '(init_builder, reference)\n', (3288, 3313), False, 'from op_tester import op_tester\n'), ((863, 899), 'torch.tensor', 'torch.tensor', (['d1'], {'requires_grad': '(True)'}), '(d1, requires_grad=True)\n', (875, 899), False, 'import torch\n'), ((913, 949), 'torch.tensor', 'torch.tensor', (['d2'], {'requires_grad': '(True)'}), '(d2, requires_grad=True)\n', (925, 949), False, 'import torch\n'), ((1878, 1914), 'torch.tensor', 'torch.tensor', (['d1'], {'requires_grad': '(True)'}), '(d1, requires_grad=True)\n', (1890, 1914), False, 'import torch\n'), ((1928, 1964), 'torch.tensor', 'torch.tensor', (['d2'], {'requires_grad': '(True)'}), '(d2, requires_grad=True)\n', (1940, 1964), False, 'import torch\n'), ((3758, 3796), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (3771, 3796), False, 'import pytest\n'), ((3816, 3854), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference'], {}), '(init_builder, reference)\n', (3829, 3854), False, 'from op_tester import op_tester\n'), ((359, 376), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (373, 376), True, 'import numpy as np\n'), ((405, 422), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (419, 422), True, 'import numpy as np\n'), ((1371, 1391), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (1385, 1391), True, 'import numpy as np\n'), ((1420, 1437), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1434, 1437), True, 'import numpy as np\n'), ((674, 705), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (703, 705), False, 'import popart\n'), ((724, 755), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (753, 755), False, 'import popart\n'), ((774, 805), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (803, 805), False, 'import popart\n'), ((1689, 1720), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (1718, 1720), False, 'import popart\n'), ((1739, 1770), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (1768, 1770), False, 'import popart\n'), ((1789, 1820), 
'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (1818, 1820), False, 'import popart\n'), ((2710, 2733), 'numpy.random.rand', 'np.random.rand', (['*shape0'], {}), '(*shape0)\n', (2724, 2733), True, 'import numpy as np\n'), ((2767, 2790), 'numpy.random.rand', 'np.random.rand', (['*shape1'], {}), '(*shape1)\n', (2781, 2790), True, 'import numpy as np\n'), ((3056, 3072), 'torch.tensor', 'torch.tensor', (['d1'], {}), '(d1)\n', (3068, 3072), False, 'import torch\n'), ((3094, 3110), 'torch.tensor', 'torch.tensor', (['d2'], {}), '(d2)\n', (3106, 3110), False, 'import torch\n'), ((3389, 3409), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (3403, 3409), True, 'import numpy as np\n'), ((3448, 3468), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (3462, 3468), True, 'import numpy as np\n'), ((283, 297), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (287, 297), False, 'from pathlib import Path\n'), ((1053, 1070), 'torch.isnan', 'torch.isnan', (['d__o'], {}), '(d__o)\n', (1064, 1070), False, 'import torch\n'), ((2068, 2085), 'torch.isnan', 'torch.isnan', (['d__o'], {}), '(d__o)\n', (2079, 2085), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright 2019-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The StellarGraph implementation that encapsulates a NetworkX graph.
"""
__all__ = ["NetworkXStellarGraph"]
from stellargraph.core.schema import EdgeType
from stellargraph.core.graph import StellarGraph
import random
import itertools as it
from collections import defaultdict, namedtuple
import warnings
import pandas as pd
import numpy as np
import networkx as nx
from typing import Iterable, Iterator, Any, Mapping, List, Set, Optional
from .schema import GraphSchema
from .utils import is_real_iterable
NeighbourWithWeight = namedtuple("NeighbourWithWeight", ["node", "weight"])
def _convert_from_node_attribute(
G, attr_name, node_types, node_type_name=None, node_type_default=None, dtype="f"
):
"""
Transform the node attributes to feature vectors, for use with machine learning models.
Each node is assumed to have a numeric array stored in the attribute_name and
which is suitable for use in machine learning models.
Args:
G: NetworkX graph
attr_name: Name of node attribute to use for conversion
node_types: Node types in graph
node_type_name: (optional) The name of the node attribute specifying the type.
node_type_default: (optional) The node type of nodes without explicit type.
dtype: (optional) The numpy datatype to create the features array.
Returns:
index_map: a dictionary of node_type -> {node_id: node_index}
attribute_arrays: a dictionary of node_type -> numpy array storing the features
"""
attribute_arrays = {}
node_index_map = {}
# Enumerate all nodes in graph
nodes_by_type = {
# XXX: This lookup does not really make sense if node_type_name is not specified - why is it optional?
nt: [
n
for n, ndata in G.nodes(data=True)
if ndata.get(node_type_name, node_type_default) == nt
]
for nt in node_types
}
# Get the target values for each node type
for nt in node_types:
nt_node_list = nodes_by_type[nt]
# Add None to node list as ID of unknown nodes
nt_node_list.append(None)
# Create map between node id and index (including None)
node_index_map[nt] = {nid: ii for ii, nid in enumerate(nt_node_list)}
# The node data
attr_data = [
v if v is None else G.nodes[v].get(attr_name) for v in nt_node_list
]
# Get the size of the features
data_sizes = {
np.size(G.nodes[v].get(attr_name, []))
for v in nt_node_list
if v is not None
}
# Warn if nodes don't have the attribute
if 0 in data_sizes:
warnings.warn(
"Some nodes have no value for attribute '{}', "
"using default value.".format(attr_name),
RuntimeWarning,
stacklevel=2,
)
data_sizes.discard(0)
# Check all are the same for this node type
if len(data_sizes) > 1:
raise ValueError(
"Data sizes in nodes of type {} are inconsistent "
"for the attribute '{}' ".format(nt, attr_name)
)
# If some node_type have no nodes with the attribute, skip them
if len(data_sizes) == 0:
continue
# Create zero attribute array
data_size = data_sizes.pop()
# Dummy feature/target value for invalid nodes,
# this will be inserted into the array in two cases:
# 1. node ID of None (representing sampling for a missing neighbour)
# 2. node with no attribute
# TODO: Make these two cases more explicit, allow custom values.
default_value = np.zeros(data_size)
# Convert to numpy array
attribute_arrays[nt] = np.asarray(
[x if x is not None else default_value for x in attr_data]
)
return node_index_map, attribute_arrays
def _convert_from_node_data(data, node_type_map, node_types, dtype="f"):
"""
Store the node data as feature vectors, for use with machine learning models.
For a single node type, the data can be either:
* a Pandas DataFrame with the index being node IDs and the columns the numeric
feature values. Note that the features must be numeric.
* a list or iterable of `(node_id, node_feature)` pairs where node_feature is
a value, a list of values, or a numpy array representing the numeric feature
values.
For multiple node types, the data can be either:
* a dictionary of node_type -> DataFrame with the index of each DataFrame
being node IDs and the columns the numeric feature values.
Note that the features must be numeric and can be different sizes for each
node type.
* a list or iterable of `(node_id, node_feature)` pairs where node_feature is
a value, a list of values, or a numpy array representing the numeric feature
values.
Args:
data: dict, list or DataFrame
The data for the nodes, partitioned by node type
node_type_map: dict
Mapping of node_id to node_type
node_types: list
List of the node types in the data
dtype: Numpy datatype optional (default='float32')
The numpy datatype to create the features array.
Returns:
index_map: a dictionary of node_type -> {node_id: node_index}
attribute_arrays: a dictionary of node_type -> numpy array storing the features
"""
# if data is a dict of pandas dataframes or iterators, pull the features for each node type in the dictionary
if isinstance(data, dict):
# The keys should match the node types
if not all(k in node_types for k in data.keys()):
raise ValueError(
"All node types in supplied feature dict should be in the graph"
)
data_arrays = {}
data_index = {}
for nt, arr in data.items():
if isinstance(arr, pd.DataFrame):
node_index_map = {nid: nii for nii, nid in enumerate(arr.index)}
try:
data_arr = arr.values.astype(dtype)
except ValueError:
raise ValueError(
"Node data passed as Pandas arrays should contain only numeric values"
)
elif isinstance(arr, (Iterable, list)):
data_arr = []
node_index_map = {}
for ii, (node_id, datum) in enumerate(arr):
data_arr.append(datum)
node_index_map[node_id] = ii
data_arr = np.vstack(data_arr)
else:
raise TypeError(
"Node data should be a pandas array, an iterable, a list, or name of a node_attribute"
)
# Add default value to end of feature array
default_value = np.zeros(data_arr.shape[1])
data_arrays[nt] = np.vstack([data_arr, default_value])
node_index_map[None] = data_arr.shape[0]
data_index[nt] = node_index_map
# If data is a pd.Dataframe, try pulling out the type
elif isinstance(data, pd.DataFrame):
if len(node_types) > 1:
raise TypeError(
"When there is more than one node type, pass node features as a dictionary."
)
node_type = next(iter(node_types))
data_index, data_arrays = _convert_from_node_data(
{node_type: data}, node_type_map, node_types, dtype
)
# If data an iterator try recreating the nodes by type
elif isinstance(data, (Iterator, list)):
node_data_by_type = {nt: [] for nt in node_types}
for d in data:
node_type = node_type_map.get(d[0])
if node_type is None:
raise TypeError("Node type not found in importing feature vectors!")
node_data_by_type[node_type].append(d)
data_index, data_arrays = _convert_from_node_data(
node_data_by_type, node_type_map, node_types, dtype
)
else:
raise TypeError(
"Node data should be a dictionary, a pandas array, an iterable, or a tuple."
)
return data_index, data_arrays
class NetworkXStellarGraph(StellarGraph):
"""
Implementation based on encapsulating a NetworkX graph.
"""
def __init__(
self,
graph,
is_directed,
edge_weight_label,
node_type_name,
edge_type_name,
node_type_default,
edge_type_default,
feature_name,
target_name,
node_features,
dtype,
):
if is_directed:
if not isinstance(graph, nx.MultiDiGraph):
graph = nx.MultiDiGraph(graph)
else:
if not isinstance(graph, nx.MultiGraph):
graph = nx.MultiGraph(graph)
self._graph = graph
# Name of optional attribute for edge weights
self._edge_weight_label = edge_weight_label
# Names of attributes that store the type of nodes and edges
self._node_type_attr = node_type_name
self._edge_type_attr = edge_type_name
# Default types of nodes and edges
self._node_type_default = node_type_default
self._edge_type_default = edge_type_default
# Names for the feature/target type (used if they are supplied and
        # feature/target spec not supplied)
self._feature_attr = feature_name
self._target_attr = target_name
# Ensure that the incoming graph data has node & edge types
# TODO: This requires traversing all nodes and edges. Is there another way?
node_types = set()
type_for_node = {}
for n, ndata in graph.nodes(data=True):
type_for_node[n] = self._get_node_type(ndata)
node_types.add(type_for_node[n])
edge_types = set()
for n1, n2, k, edata in graph.edges(keys=True, data=True):
edge_types.add(self._get_edge_type(edata))
# If node_features is a string, load features from this attribute of the nodes in the graph
if isinstance(node_features, str):
data_index_maps, data_arrays = _convert_from_node_attribute(
graph,
node_features,
node_types,
self._node_type_attr,
self._node_type_default,
dtype,
)
# Otherwise try importing node_features as a Numpy array or Pandas Dataframe
elif node_features is not None:
data_index_maps, data_arrays = _convert_from_node_data(
node_features, type_for_node, node_types, dtype
)
else:
data_index_maps = {}
data_arrays = {}
# TODO: What other convenience attributes do we need?
self._nodes_by_type = None
# This stores the feature vectors per node type as numpy arrays
self._node_attribute_arrays = data_arrays
# This stores the map between node ID and index in the attribute arrays
self._node_index_maps = data_index_maps
def __repr__(self):
directed_str = "Directed" if self.is_directed() else "Undirected"
s = "{}: {} multigraph\n".format(type(self).__name__, directed_str)
s += " Nodes: {}, Edges: {}\n".format(
self.number_of_nodes(), self.number_of_edges()
)
return s
def _get_node_type(self, node_data):
node_type = node_data.get(self._node_type_attr)
if node_type is None:
node_type = self._node_type_default
node_data[self._node_type_attr] = node_type
return node_type
def _get_edge_type(self, edge_data):
edge_type = edge_data.get(self._edge_type_attr)
if edge_type is None:
edge_type = self._edge_type_default
edge_data[self._edge_type_attr] = edge_type
return edge_type
def check_graph_for_ml(self, features=True):
"""
Checks if all properties required for machine learning training/inference are set up.
An error will be raised if the graph is not correctly setup.
"""
# TODO: This are simple tests and miss many problems that could arise, improve!
# Check features on the nodes:
if features and len(self._node_attribute_arrays) == 0:
raise RuntimeError(
"This StellarGraph has no numeric feature attributes for nodes"
"Node features are required for machine learning"
)
# TODO: check the schema
# TODO: check the feature node_ids against the graph node ids?
def get_index_for_nodes(self, nodes, node_type=None):
"""
Get the indices for the specified node or nodes.
If the node type is not specified the node types will be found
for all nodes. It is therefore important to supply the ``node_type``
for this method to be fast.
Args:
n: (list or hashable) Node ID or list of node IDs
node_type: (hashable) the type of the nodes.
Returns:
Numpy array containing the indices for the requested nodes.
"""
if not is_real_iterable(nodes):
nodes = [nodes]
# Get the node type if not specified.
if node_type is None:
node_types = {
self._get_node_type(self._graph.nodes[n])
for n in nodes
if n is not None
}
if len(node_types) > 1:
raise ValueError("All nodes must be of the same type.")
if len(node_types) == 0:
raise ValueError(
"At least one node must be given if node_type not specified"
)
node_type = node_types.pop()
# Get index for nodes of this type
nt_id_to_index = self._node_index_maps[node_type]
node_indices = [nt_id_to_index.get(n) for n in nodes]
return node_indices
def node_features(self, nodes, node_type=None):
"""
Get the numeric feature vectors for the specified node or nodes.
If the node type is not specified the node types will be found
for all nodes. It is therefore important to supply the ``node_type``
for this method to be fast.
Args:
n: (list or hashable) Node ID or list of node IDs
node_type: (hashable) the type of the nodes.
Returns:
Numpy array containing the node features for the requested nodes.
"""
# TODO: add @property decorator
if not is_real_iterable(nodes):
nodes = [nodes]
# Get the node type if not specified.
if node_type is None:
node_types = {
self._get_node_type(self._graph.nodes[n])
for n in nodes
if n is not None
}
if len(node_types) > 1:
raise ValueError("All nodes must be of the same type.")
if len(node_types) == 0:
raise ValueError(
"At least one node must be given if node_type not specified"
)
node_type = node_types.pop()
# Check node_types
if (
node_type not in self._node_attribute_arrays
or node_type not in self._node_index_maps
):
raise ValueError(f"Features not found for node type '{node_type}'")
# Edge case: if we are given no nodes, what do we do?
if len(nodes) == 0:
feature_size = self._node_attribute_arrays[node_type].shape[1]
return np.empty((0, feature_size))
# Get index for nodes of this type
nt_id_to_index = self._node_index_maps[node_type]
node_indices = [nt_id_to_index.get(n) for n in nodes]
if None in node_indices:
problem_nodes = [
node for node, index in zip(nodes, node_indices) if index is None
]
raise ValueError(
"Could not find features for nodes with IDs {}.".format(problem_nodes)
)
features = self._node_attribute_arrays[node_type][node_indices]
return features
def node_feature_sizes(self, node_types=None):
"""
Get the feature sizes for the specified node types.
Args:
node_types: (list) A list of node types. If None all current node types
will be used.
Returns:
A dictionary of node type and integer feature size.
"""
# TODO: unit test!
if not node_types:
node_types = self.node_types
self.check_graph_for_ml(features=True)
fsize = {nt: self._node_attribute_arrays[nt].shape[1] for nt in node_types}
return fsize
def nodes_of_type(self, node_type=None):
"""
Get the nodes of the graph with the specified node types.
Args:
node_type:
Returns:
A list of node IDs with type node_type
"""
# TODO: unit test!
if node_type is None:
return list(self)
else:
return [
n
for n, ndata in self._graph.nodes(data=True)
if self._get_node_type(ndata) == node_type
]
def node_type(self, node):
"""
Get the type of the node
Args:
node: Node ID
Returns:
Node type
"""
return self._get_node_type(self._graph.nodes[node])
@property
def node_types(self):
"""
Get a list of all node types in the graph.
Returns:
set of types
"""
# TODO: unit test!
        # TODO: create a schema when we generate _node_attribute_arrays and use it?
if len(self._node_attribute_arrays) > 0:
return set(self._node_attribute_arrays.keys())
else:
return {
self._get_node_type(ndata) for n, ndata in self._graph.nodes(data=True)
}
def info(self, show_attributes=True, sample=None):
"""
Return an information string summarizing information on the current graph.
This includes node and edge type information and their attributes.
Note: This requires processing all nodes and edges and could take a long
time for a large graph.
Args:
sample (int): To speed up the graph analysis, use only a random sample of
this many nodes and edges.
Returns:
An information string.
"""
directed_str = "Directed" if self.is_directed() else "Undirected"
s = "{}: {} multigraph\n".format(type(self).__name__, directed_str)
s += " Nodes: {}, Edges: {}\n".format(
self.number_of_nodes(), self.number_of_edges()
)
# Sample the nodes for our analysis
if sample:
all_nodes = list(self._graph.nodes)
snodes = random.sample(all_nodes, sample)
else:
snodes = None
gs = self.create_graph_schema(create_type_maps=False, nodes=snodes)
def is_of_edge_type(e, edge_type):
et2 = (
self._get_node_type(self._graph.nodes[e[0]]),
self._get_edge_type(self._graph.edges[e]),
self._get_node_type(self._graph.nodes[e[1]]),
)
return et2 == edge_type
# Go over all node types
s += "\n Node types:\n"
for nt in gs.node_types:
# Filter nodes by type
nt_nodes = [
ndata
for n, ndata in self._graph.nodes(data=True)
if self._get_node_type(ndata) == nt
]
s += " {}: [{}]\n".format(nt, len(nt_nodes))
# Get the attributes for this node type
attrs = set(it.chain(*[ndata.keys() for ndata in nt_nodes]))
attrs.discard(self._node_type_attr)
if show_attributes and len(attrs) > 0:
s += " Attributes: {}\n".format(attrs)
s += " Edge types: "
s += ", ".join(["{}-{}->{}".format(*e) for e in gs.schema[nt]]) + "\n"
s += "\n Edge types:\n"
for et in gs.edge_types:
# Filter edges by type
et_edges = [
e[3]
for e in self._graph.edges(keys=True, data=True)
if is_of_edge_type(e[:3], et)
]
if len(et_edges) > 0:
s += " {et[0]}-{et[1]}->{et[2]}: [{len}]\n".format(
et=et, len=len(et_edges)
)
# Get the attributes for this edge type
attrs = set(it.chain(*[edata.keys() for edata in et_edges]))
attrs.discard(self._edge_type_attr)
if show_attributes and len(attrs) > 0:
s += " Attributes: {}\n".format(attrs)
return s
def create_graph_schema(self, create_type_maps=True, nodes=None):
"""
Create graph schema in dict of dict format from current graph.
Note the assumption we make that there is only one
edge of a particular edge type per node pair.
This means that specifying an edge by node0, node1 and edge type
is unique.
Arguments:
create_type_maps (bool): If True quick lookup of node/edge types is
created in the schema. This can be slow.
nodes (list): A list of node IDs to use to build schema. This must
represent all node types and all edge types in the graph.
If specified, `create_type_maps` must be False.
If not specified, all nodes and edges in the graph are used.
Returns:
GraphSchema object.
"""
if nodes is None:
nodes = self.nodes()
edges = self.edges(triple=True)
elif create_type_maps is False:
edges = (
(src, dst, self._get_edge_type(data))
for src, dst, data in self._graph.edges(nodes, data=True)
)
else:
raise ValueError("Creating type maps for subsampled nodes is not supported")
# Create node type index list
node_types = sorted({self.node_type(n) for n in nodes}, key=str)
graph_schema = {nt: set() for nt in node_types}
# Create edge type index list
edge_types = set()
for n1, n2, edge_type in edges:
# Edge type tuple
node_type_1 = self.node_type(n1)
node_type_2 = self.node_type(n2)
# Add edge type to node_type_1 data
edge_type_tri = EdgeType(node_type_1, edge_type, node_type_2)
edge_types.add(edge_type_tri)
graph_schema[node_type_1].add(edge_type_tri)
# Also add type to node_2 data if not digraph
if not self.is_directed():
edge_type_tri = EdgeType(node_type_2, edge_type, node_type_1)
edge_types.add(edge_type_tri)
graph_schema[node_type_2].add(edge_type_tri)
# Create ordered list of edge_types
edge_types = sorted(edge_types)
# Create keys for node and edge types
schema = {
node_label: [
edge_types[einx]
for einx in sorted([edge_types.index(et) for et in list(node_data)])
]
for node_label, node_data in graph_schema.items()
}
# Create quick type lookups for nodes and edges.
# Note: we encode the type index, in the assumption it will take
# less storage.
if create_type_maps:
node_type_map = {
n: node_types.index(self.node_type(n)) for n in self.nodes()
}
edge_type_map = {
(src, tgt, key): edge_types.index(
EdgeType(
node_types[node_type_map[src]],
self._get_edge_type(data),
node_types[node_type_map[tgt]],
)
)
for src, tgt, key, data in self._graph.edges(keys=True, data=True)
}
else:
node_type_map = edge_type_map = None
return GraphSchema(
self.is_directed(),
node_types,
edge_types,
schema,
node_type_map,
edge_type_map,
)
######################################################################
# Generic graph interface:
def is_directed(self) -> bool:
return self._graph.is_directed()
def number_of_nodes(self) -> int:
return self._graph.number_of_nodes()
def number_of_edges(self) -> int:
return self._graph.number_of_edges()
def nodes(self) -> Iterable[Any]:
return self._graph.nodes()
def edges(self, triple=False) -> Iterable[Any]:
if triple:
# returns triples of format (node 1, node 2, edge type)
return (
(src, dst, self._get_edge_type(data))
for src, dst, data in self._graph.edges(data=True)
)
else:
# returns pairs of format (node 1, node 2)
return self._graph.edges()
def has_node(self, node: Any) -> bool:
return self._graph.__contains__(node)
def _transform_edges(self, edges, get_node, include_edge_weight, edge_types):
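        # Map raw (u, v, data) edge tuples to neighbour nodes (optionally paired with
        # their edge weight), keeping only edges whose type passes the edge_types filter.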
edge_types_set = set(edge_types) if edge_types is not None else None
def get(e):
if include_edge_weight:
return NeighbourWithWeight(
get_node(e), e[2].get(self._edge_weight_label)
)
return get_node(e)
def is_correct_type(e):
return (
edge_types_set is None
or e[2].get(self._edge_type_attr) in edge_types_set
)
return [get(e) for e in edges if is_correct_type(e)]
def _in(self, node, include_edge_weight, edge_types):
return self._transform_edges(
self._graph.in_edges(node, data=True),
lambda e: e[0],
include_edge_weight,
edge_types,
)
def _out(self, node, include_edge_weight, edge_types):
return self._transform_edges(
self._graph.out_edges(node, data=True),
lambda e: e[1],
include_edge_weight,
edge_types,
)
def neighbors(
self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
if self.is_directed():
in_nodes = self._in(node, include_edge_weight, edge_types)
out_nodes = self._out(node, include_edge_weight, edge_types)
return in_nodes + out_nodes
return self._transform_edges(
self._graph.edges(node, data=True),
lambda e: e[1],
include_edge_weight,
edge_types,
)
def in_nodes(
self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
if self.is_directed():
return self._in(node, include_edge_weight, edge_types)
return self.neighbors(node, include_edge_weight, edge_types)
def out_nodes(
self, node: Any, include_edge_weight=False, edge_types=None
) -> Iterable[Any]:
if self.is_directed():
return self._out(node, include_edge_weight, edge_types)
return self.neighbors(node, include_edge_weight, edge_types)
########################################################################
# Heavy duty methods:
def node_degrees(self) -> Mapping[Any, int]:
return self._graph.degree()
def to_adjacency_matrix(self, nodes: Optional[Iterable] = None):
if nodes is not None:
return nx.adjacency_matrix(self._graph.subgraph(nodes))
return nx.to_scipy_sparse_matrix(
self._graph, dtype="float32", weight=self._edge_weight_label, format="coo"
)
def to_networkx(self):
# Despite this class using NetworkX, this implementation does not directly use that
# representation, so that it can be reused as we move away from being NetworkX-based.
if self.is_directed():
graph = nx.MultiDiGraph()
else:
graph = nx.MultiGraph()
types = self.node_types
for ty in types:
node_ids = self.nodes_of_type(ty)
ty_dict = {self._node_type_attr: ty}
if ty in self._node_attribute_arrays:
# has features!
features = self.node_features(node_ids, node_type=ty)
for node_id, node_features in zip(node_ids, features):
graph.add_node(
node_id, **ty_dict, **{self._feature_attr: node_features},
)
else:
# no features, so just add the type
graph.add_nodes_from(node_ids, **ty_dict)
graph.add_edges_from(self._graph.edges(data=True))
return graph
# XXX This has not yet been standardised in the interface.
def adjacency_types(self, graph_schema: GraphSchema):
"""
Obtains the edges in the form of the typed mapping:
{edge_type_triple: {source_node: [target_node, ...]}}
Args:
graph_schema: The graph schema.
Returns:
The edge types mapping.
"""
edge_types = graph_schema.edge_types
adj = {et: defaultdict(lambda: [None]) for et in edge_types}
for n1, nbrdict in self._graph.adjacency():
for et in edge_types:
neigh_et = [
n2
for n2, nkeys in nbrdict.items()
for k in nkeys
if graph_schema.is_of_edge_type((n1, n2, k), et)
]
# Create adjacency list in lexicographical order
# Otherwise sampling methods will not be deterministic
# even when the seed is set.
adj[et][n1] = sorted(neigh_et, key=str)
return adj
# XXX This has not yet been standardised in the interface.
def edge_weights(self, source_node: Any, target_node: Any) -> List[Any]:
"""
Obtains the weights of edges between the given pair of nodes.
Args:
source_node (any): The source node.
target_node (any): The target node.
Returns:
list: The edge weights.
"""
edge_weight_label = self._edge_weight_label
return [
v.get(edge_weight_label)
for v in self._graph[source_node][target_node].values()
]
# XXX This has not yet been standardised in the interface.
def node_attributes(self, node: Any) -> Set[Any]:
"""
Obtains the names of any (non-standard) node attributes that are
available in the user data.
Args:
node (any): The node of interest.
Returns:
set: The collection of node attributes.
"""
attrs = set(self._graph.nodes[node].keys())
# Don't use node type as attribute:
attrs.discard(self._node_type_attr)
return attrs
|
[
"random.sample",
"collections.namedtuple",
"networkx.MultiDiGraph",
"stellargraph.core.schema.EdgeType",
"numpy.asarray",
"networkx.MultiGraph",
"numpy.zeros",
"numpy.empty",
"networkx.to_scipy_sparse_matrix",
"numpy.vstack",
"collections.defaultdict"
] |
[((1144, 1197), 'collections.namedtuple', 'namedtuple', (['"""NeighbourWithWeight"""', "['node', 'weight']"], {}), "('NeighbourWithWeight', ['node', 'weight'])\n", (1154, 1197), False, 'from collections import defaultdict, namedtuple\n'), ((4335, 4354), 'numpy.zeros', 'np.zeros', (['data_size'], {}), '(data_size)\n', (4343, 4354), True, 'import numpy as np\n'), ((4420, 4492), 'numpy.asarray', 'np.asarray', (['[(x if x is not None else default_value) for x in attr_data]'], {}), '([(x if x is not None else default_value) for x in attr_data])\n', (4430, 4492), True, 'import numpy as np\n'), ((28793, 28899), 'networkx.to_scipy_sparse_matrix', 'nx.to_scipy_sparse_matrix', (['self._graph'], {'dtype': '"""float32"""', 'weight': 'self._edge_weight_label', 'format': '"""coo"""'}), "(self._graph, dtype='float32', weight=self.\n _edge_weight_label, format='coo')\n", (28818, 28899), True, 'import networkx as nx\n'), ((7587, 7614), 'numpy.zeros', 'np.zeros', (['data_arr.shape[1]'], {}), '(data_arr.shape[1])\n', (7595, 7614), True, 'import numpy as np\n'), ((7645, 7681), 'numpy.vstack', 'np.vstack', (['[data_arr, default_value]'], {}), '([data_arr, default_value])\n', (7654, 7681), True, 'import numpy as np\n'), ((16430, 16457), 'numpy.empty', 'np.empty', (['(0, feature_size)'], {}), '((0, feature_size))\n', (16438, 16457), True, 'import numpy as np\n'), ((19826, 19858), 'random.sample', 'random.sample', (['all_nodes', 'sample'], {}), '(all_nodes, sample)\n', (19839, 19858), False, 'import random\n'), ((23551, 23596), 'stellargraph.core.schema.EdgeType', 'EdgeType', (['node_type_1', 'edge_type', 'node_type_2'], {}), '(node_type_1, edge_type, node_type_2)\n', (23559, 23596), False, 'from stellargraph.core.schema import EdgeType\n'), ((29182, 29199), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (29197, 29199), True, 'import networkx as nx\n'), ((29234, 29249), 'networkx.MultiGraph', 'nx.MultiGraph', ([], {}), '()\n', (29247, 29249), True, 'import networkx as nx\n'), ((30430, 30458), 'collections.defaultdict', 'defaultdict', (['(lambda : [None])'], {}), '(lambda : [None])\n', (30441, 30458), False, 'from collections import defaultdict, namedtuple\n'), ((9443, 9465), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', (['graph'], {}), '(graph)\n', (9458, 9465), True, 'import networkx as nx\n'), ((9557, 9577), 'networkx.MultiGraph', 'nx.MultiGraph', (['graph'], {}), '(graph)\n', (9570, 9577), True, 'import networkx as nx\n'), ((23826, 23871), 'stellargraph.core.schema.EdgeType', 'EdgeType', (['node_type_2', 'edge_type', 'node_type_1'], {}), '(node_type_2, edge_type, node_type_1)\n', (23834, 23871), False, 'from stellargraph.core.schema import EdgeType\n'), ((7305, 7324), 'numpy.vstack', 'np.vstack', (['data_arr'], {}), '(data_arr)\n', (7314, 7324), True, 'import numpy as np\n')]
|
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
from keras.models import load_model
from keras.applications.vgg16 import VGG16, decode_predictions, preprocess_input
import cv2
import numpy as np
img_width, img_height = 224, 224
train_data_dir = "train"
validation_data_dir = "validation"
nb_train_samples = 300
nb_validation_samples = 100
batch_size = 16
epochs = 50
model = applications.VGG16(weights="imagenet", include_top=False, input_shape=(img_width, img_height, 3))
# Freeze the layers which you don't want to train. Here I am freezing the first 5 layers.
for layer in model.layers[:5]:
layer.trainable = False
# Adding custom Layers
x = model.output
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)
# Creating the final model
model_final = Model(input=model.input, output=predictions)
# compile the model
model_final.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
metrics=["accuracy"])
# Initiate the train and test generators with data Augmentation
train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True, fill_mode='nearest', zoom_range=0.3,
width_shift_range=0.3, height_shift_range=0.3, rotation_range=30)
test_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True, fill_mode='nearest', zoom_range=0.3,
width_shift_range=0.3, height_shift_range=0.3, rotation_range=30)
# Now we will generate the new augmented data
train_generator = train_datagen.flow_from_directory(
train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode="categorical")
validation_generator = test_datagen.flow_from_directory(validation_data_dir, target_size=(img_height, img_width),
class_mode="categorical")
# Save the model according to the conditions
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False,
mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
# It's time to fit the new final layers for the model:
model_final.fit_generator(train_generator, samples_per_epoch=nb_train_samples, nb_epoch=epochs,
validation_data=validation_generator, nb_val_samples=nb_validation_samples,
callbacks=[checkpoint, early])
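# Load a single test image, resize it to the network input size and print the predicted class probabilities.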
im = cv2.resize(cv2.imread('test/gaff2.jpg'), (img_width, img_height))
im = np.expand_dims(im, axis=0).astype(float)
im = preprocess_input(im)
out = model_final.predict(im)
print(out)
print(np.argmax(out))
|
[
"cv2.imread",
"keras.layers.Flatten",
"keras.callbacks.ModelCheckpoint",
"numpy.argmax",
"keras.preprocessing.image.ImageDataGenerator",
"keras.optimizers.SGD",
"keras.applications.vgg16.preprocess_input",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"numpy.expand_dims",
"keras.layers.Dense",
"keras.layers.Dropout",
"keras.applications.VGG16"
] |
[((688, 790), 'keras.applications.VGG16', 'applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(img_width, img_height, 3)'}), "(weights='imagenet', include_top=False, input_shape=(\n img_width, img_height, 3))\n", (706, 790), False, 'from keras import applications\n'), ((1181, 1225), 'keras.models.Model', 'Model', ([], {'input': 'model.input', 'output': 'predictions'}), '(input=model.input, output=predictions)\n', (1186, 1225), False, 'from keras.models import Sequential, Model\n'), ((1474, 1646), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""', 'zoom_range': '(0.3)', 'width_shift_range': '(0.3)', 'height_shift_range': '(0.3)', 'rotation_range': '(30)'}), "(rescale=1.0 / 255, horizontal_flip=True, fill_mode=\n 'nearest', zoom_range=0.3, width_shift_range=0.3, height_shift_range=\n 0.3, rotation_range=30)\n", (1492, 1646), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1687, 1859), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""', 'zoom_range': '(0.3)', 'width_shift_range': '(0.3)', 'height_shift_range': '(0.3)', 'rotation_range': '(30)'}), "(rescale=1.0 / 255, horizontal_flip=True, fill_mode=\n 'nearest', zoom_range=0.3, width_shift_range=0.3, height_shift_range=\n 0.3, rotation_range=30)\n", (1705, 1859), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2345, 2478), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""vgg16_1.h5"""'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""auto"""', 'period': '(1)'}), "('vgg16_1.h5', monitor='val_acc', verbose=1, save_best_only=\n True, save_weights_only=False, mode='auto', period=1)\n", (2360, 2478), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping\n'), ((2511, 2598), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode=\n 'auto')\n", (2524, 2598), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping\n'), ((3032, 3052), 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['im'], {}), '(im)\n', (3048, 3052), False, 'from keras.applications.vgg16 import VGG16, decode_predictions, preprocess_input\n'), ((982, 991), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (989, 991), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((999, 1029), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (1004, 1029), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((1037, 1049), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1044, 1049), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((1057, 1087), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (1062, 1087), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((1105, 1135), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, 
activation='softmax')\n", (1110, 1135), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2923, 2976), 'cv2.imread', 'cv2.imread', (['"""test/gaff2.jpg"""', '(img_width, img_height)'], {}), "('test/gaff2.jpg', (img_width, img_height))\n", (2933, 2976), False, 'import cv2\n'), ((3102, 3116), 'numpy.argmax', 'np.argmax', (['out'], {}), '(out)\n', (3111, 3116), True, 'import numpy as np\n'), ((1310, 1349), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.0001)', 'momentum': '(0.9)'}), '(lr=0.0001, momentum=0.9)\n', (1324, 1349), False, 'from keras import optimizers\n'), ((2983, 3009), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (2997, 3009), True, 'import numpy as np\n')]
|
"""Model and evaluate."""
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import LeaveOneGroupOut, permutation_test_score
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR
def fit_model(model, X, y, runs, scoring, n_permutations=100):
"""Fit model using leave-one-run-out CV and permutation testing."""
logo = LeaveOneGroupOut()
splits = logo.split(X, y, runs)
score, permutation_scores, p_value = permutation_test_score(
model, X, y, groups=runs, cv=splits, scoring=scoring,
n_permutations=n_permutations, n_jobs=-1)
print(f"CV score real data: {score}")
print(f"Mean CV score scrambled data: {np.mean(permutation_scores)}")
print(f"P-value = {p_value}")
return(score, permutation_scores, p_value)
def construct_model(estimator_name, hyperparameters, standardize):
"""Instantiate model from estimator name and set hyperparameters."""
scaler = StandardScaler()
estimator = get_estimator(estimator_name)
set_hyperparameters(estimator, hyperparameters)
if standardize:
pipe = [
('preproc', scaler),
('estimator', estimator)
]
else:
pipe = [
('estimator', estimator)
]
return Pipeline(pipe)
def get_estimator(name):
"""Import and return specified estimator class."""
if name == "SVC":
return SVC()
if name == "SVR":
return SVR()
if name == "LinearSVC":
return LinearSVC()
if name == "LinearSVR":
return LinearSVR()
raise ValueError(f"Option {name} is not implemented.")
def set_hyperparameters(model, hyperparameters):
"""Set set of hyperparamters to the specified model."""
model.set_params(**hyperparameters)
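
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how construct_model and fit_model compose. The synthetic
# data, the "LinearSVC" estimator, its C value and the "accuracy" scoring below
# are assumptions made purely for demonstration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(60, 5))      # 60 samples, 5 features
    y_demo = rng.integers(0, 2, size=60)   # binary labels
    runs_demo = np.repeat([1, 2, 3], 20)       # three runs for leave-one-run-out CV
    demo_model = construct_model("LinearSVC", {"C": 1.0}, standardize=True)
    fit_model(demo_model, X_demo, y_demo, runs_demo,
              scoring="accuracy", n_permutations=25)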
|
[
"sklearn.model_selection.permutation_test_score",
"numpy.mean",
"sklearn.model_selection.LeaveOneGroupOut",
"sklearn.svm.LinearSVR",
"sklearn.svm.LinearSVC",
"sklearn.preprocessing.StandardScaler",
"sklearn.pipeline.Pipeline",
"sklearn.svm.SVR",
"sklearn.svm.SVC"
] |
[((412, 430), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (428, 430), False, 'from sklearn.model_selection import LeaveOneGroupOut, permutation_test_score\n'), ((509, 631), 'sklearn.model_selection.permutation_test_score', 'permutation_test_score', (['model', 'X', 'y'], {'groups': 'runs', 'cv': 'splits', 'scoring': 'scoring', 'n_permutations': 'n_permutations', 'n_jobs': '(-1)'}), '(model, X, y, groups=runs, cv=splits, scoring=scoring,\n n_permutations=n_permutations, n_jobs=-1)\n', (531, 631), False, 'from sklearn.model_selection import LeaveOneGroupOut, permutation_test_score\n'), ((999, 1015), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1013, 1015), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1318, 1332), 'sklearn.pipeline.Pipeline', 'Pipeline', (['pipe'], {}), '(pipe)\n', (1326, 1332), False, 'from sklearn.pipeline import Pipeline\n'), ((1452, 1457), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (1455, 1457), False, 'from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR\n'), ((1495, 1500), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (1498, 1500), False, 'from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR\n'), ((1544, 1555), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (1553, 1555), False, 'from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR\n'), ((1599, 1610), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {}), '()\n', (1608, 1610), False, 'from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR\n'), ((731, 758), 'numpy.mean', 'np.mean', (['permutation_scores'], {}), '(permutation_scores)\n', (738, 758), True, 'import numpy as np\n')]
|
from synapse.nn.layers import Layer
from synapse.core.tensor import Tensor
from synapse.core.differentiable import Differentiable
import numpy as np
from typing import Callable
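# Each *Backward function receives the upstream gradient and the forward-pass input
# tensor and returns the gradient of the loss with respect to that input (chain rule).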
def tanhBackward(grad: Tensor, t1: Tensor) -> Tensor:
data = grad.data * (1 - np.tanh(t1.data) ** 2)
return Tensor(data)
@Differentiable(tanhBackward)
def Tanh(t1: Tensor) -> Tensor:
data = np.tanh(t1.data)
requires_grad = t1.requires_grad
return Tensor(data, requires_grad)
def reluBackward(grad: Tensor, t1: Tensor) -> Tensor:
data = grad.data * np.where(t1.data > 0, 1, 0)
return Tensor(data)
@Differentiable(reluBackward)
def ReLU(t1: Tensor) -> Tensor:
data = np.maximum(0, t1.data, t1.data) # Use in place operation
return Tensor(data, t1.requires_grad)
class Softmax():
def forward(self, t1: Tensor) -> Tensor:
expData = np.exp(t1.data)
data = expData / np.sum(expData, axis=0)
requires_grad = t1.requires_grad
        return Tensor(data, requires_grad)  # return the normalized probabilities, not the raw exponentials
def gradFn(self, t1: Tensor) -> Callable[[np.ndarray], Tensor]:
def SoftmaxBackward(grad: np.ndarray) -> Tensor:
"""TODO"""
pass
        return SoftmaxBackward
|
[
"numpy.where",
"numpy.tanh",
"synapse.core.differentiable.Differentiable",
"numpy.exp",
"numpy.sum",
"synapse.core.tensor.Tensor",
"numpy.maximum"
] |
[((313, 341), 'synapse.core.differentiable.Differentiable', 'Differentiable', (['tanhBackward'], {}), '(tanhBackward)\n', (327, 341), False, 'from synapse.core.differentiable import Differentiable\n'), ((611, 639), 'synapse.core.differentiable.Differentiable', 'Differentiable', (['reluBackward'], {}), '(reluBackward)\n', (625, 639), False, 'from synapse.core.differentiable import Differentiable\n'), ((298, 310), 'synapse.core.tensor.Tensor', 'Tensor', (['data'], {}), '(data)\n', (304, 310), False, 'from synapse.core.tensor import Tensor\n'), ((385, 401), 'numpy.tanh', 'np.tanh', (['t1.data'], {}), '(t1.data)\n', (392, 401), True, 'import numpy as np\n'), ((451, 478), 'synapse.core.tensor.Tensor', 'Tensor', (['data', 'requires_grad'], {}), '(data, requires_grad)\n', (457, 478), False, 'from synapse.core.tensor import Tensor\n'), ((596, 608), 'synapse.core.tensor.Tensor', 'Tensor', (['data'], {}), '(data)\n', (602, 608), False, 'from synapse.core.tensor import Tensor\n'), ((683, 714), 'numpy.maximum', 'np.maximum', (['(0)', 't1.data', 't1.data'], {}), '(0, t1.data, t1.data)\n', (693, 714), True, 'import numpy as np\n'), ((751, 781), 'synapse.core.tensor.Tensor', 'Tensor', (['data', 't1.requires_grad'], {}), '(data, t1.requires_grad)\n', (757, 781), False, 'from synapse.core.tensor import Tensor\n'), ((557, 584), 'numpy.where', 'np.where', (['(t1.data > 0)', '(1)', '(0)'], {}), '(t1.data > 0, 1, 0)\n', (565, 584), True, 'import numpy as np\n'), ((864, 879), 'numpy.exp', 'np.exp', (['t1.data'], {}), '(t1.data)\n', (870, 879), True, 'import numpy as np\n'), ((985, 1015), 'synapse.core.tensor.Tensor', 'Tensor', (['expData', 'requires_grad'], {}), '(expData, requires_grad)\n', (991, 1015), False, 'from synapse.core.tensor import Tensor\n'), ((905, 928), 'numpy.sum', 'np.sum', (['expData'], {'axis': '(0)'}), '(expData, axis=0)\n', (911, 928), True, 'import numpy as np\n'), ((264, 280), 'numpy.tanh', 'np.tanh', (['t1.data'], {}), '(t1.data)\n', (271, 280), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Lasso, LassoCV, Ridge, RidgeCV
from sklearn.metrics import r2_score, mean_squared_error
"""
1. get_Xy(df): Separate features and target variable
2. get_score(X_train,X_val,y_train,y_val)
3. categorical(X_train,X_val,X_test,cat_variable)
"""
def get_Xy(df):
df = df.dropna()
target = 'opening_weekend_usa'
all_column = df.columns.values.tolist()
all_column.remove(target)
y = df[target]
X = df[all_column]
return X, y
def get_score(X_train,X_val,y_train,y_val):
# fit linear regression to training data
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
y_pred = lr_model.predict(X_val)
# score fit model on validation data
train_score = lr_model.score(X_train, y_train)
val_score = lr_model.score(X_val, y_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
# report results
print('\nTrain R^2 score was:', train_score)
print('Validation R^2 score was:', val_score)
print(f'RMSE: {rmse:.2f} \n')
# print('Feature coefficient results:')
# for feature, coef in zip(X.columns, lr_model.coef_):
# print(feature, ':', f'{coef:.2f}')
# Visualization
fig, ax = plt.subplots(1, 1)
plt.scatter(y_val, y_pred, alpha=0.4)
ax.set_xlabel('Opening weekend revenue ($ in millions)',fontsize=20)
ax.set_ylabel('Prediction ($ in millions)',fontsize=20)
ax.set_title('R$^2$: %0.2f' % val_score, fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
x=np.linspace(0,0.7e2,50)
# x=np.linspace(4,9,50)
y=x
plt.plot(x,y,color='firebrick',linewidth=3,alpha=0.6)
plt.ylim(0,)
plt.xlim(0,)
return fig, lr_model, y_pred
def categorical_multilabel(X_train,X_val,X_test,cat_variable):
"""
    Input: X_train, X_val, X_test, cat_variable
    Processing: preprocess the three sets separately:
        1. Separate continuous and categorical variables
        2. Scale + polynomial-expand the continuous variables and get_dummies the categorical variable
        3. Recombine the continuous and categorical data
    Return: transformed X_train, X_val, X_test (a short usage sketch follows this function)
"""
scaler = StandardScaler()
poly = PolynomialFeatures(degree=2,interaction_only = False)
# Train set
# Convert genre to dummies
X_train_genre = X_train[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
known_columns = X_train_genre.columns
# Scaling continuous variables
X_train_con = X_train[con_feature]
X_train_con_scaled = scaler.fit_transform(X_train_con)
X_train_con_scaled_df = pd.DataFrame(X_train_con_scaled, columns=X_train_con.columns, index=X_train_con.index)
X_train_poly = poly.fit_transform(X_train_con_scaled)
X_train_poly_df = pd.DataFrame(X_train_poly, columns=poly.get_feature_names(X_train_con.columns), index=X_train_con.index)
#Combine
# X_train = pd.concat([X_train_genre,X_train_con_scaled_df],axis=1)
X_train = pd.concat([X_train_genre,X_train_poly_df],axis=1)
# Val set
# Convert genre to dummies
X_val_genre = X_val[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
val_columns = X_val_genre.columns
X_val_genre = X_val_genre[[x for x in val_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in val_columns] }
X_val_genre = X_val_genre.assign(**fill_dict)
# Scaling continuous variables
X_val_con = X_val[con_feature]
X_val_con_scaled = scaler.transform(X_val_con)
X_val_con_scaled_df = pd.DataFrame(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index)
X_val_poly = poly.transform(X_val_con_scaled)
X_val_poly_df = pd.DataFrame(X_val_poly, columns=poly.get_feature_names(X_val_con.columns), index=X_val_con.index)
#Combine
# X_val = pd.concat([X_val_genre,X_val_con_scaled_df],axis=1)
X_val = pd.concat([X_val_genre,X_val_poly_df],axis=1)
# Test set
# Convert genre to dummies
X_test_genre = X_test[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
    test_columns = X_test_genre.columns  # compare the dummy columns, not the raw test columns
X_test_genre = X_test_genre[[x for x in test_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in test_columns] }
X_test_genre = X_test_genre.assign(**fill_dict)
# Scaling continuous variables
X_test_con = X_test[con_feature]
X_test_con_scaled = scaler.transform(X_test_con)
X_test_con_scaled_df = pd.DataFrame(X_test_con_scaled, columns=X_test_con.columns, index=X_test_con.index)
X_test_poly = poly.transform(X_test_con_scaled)
X_test_poly_df = pd.DataFrame(X_test_poly, columns=poly.get_feature_names(X_test_con.columns), index=X_test_con.index)
#Combine
# X_test = pd.concat([X_test_genre,X_test_con_scaled_df],axis=1)
X_test = pd.concat([X_test_genre,X_test_poly_df],axis=1)
return X_train,X_val,X_test
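# Usage sketch for the multilabel variant above (illustrative, not part of the original
# module). It assumes a module-level `con_feature` list naming the continuous columns and
# a categorical column whose cells are *lists* of labels, so that
# .str.join(sep='*').str.get_dummies(sep='*') expands each row into one 0/1 column per label:
#
#   con_feature = ['budget', 'runtime']                          # assumed continuous columns
#   X_train['genre'] = [['Action'], ['Action', 'Comedy'], ...]   # list-valued cells
#   X_train, X_val, X_test = categorical_multilabel(X_train, X_val, X_test, 'genre')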
def categorical_singlelabel(X_train,X_val,X_test,cat_variable):
"""
    Input: X_train, X_val, X_test, cat_variable
    Processing: preprocess the three sets separately:
        1. Separate continuous and categorical variables
        2. Scale + polynomial-expand the continuous variables and get_dummies the categorical variable
        3. Recombine the continuous and categorical data
    Return: transformed X_train, X_val, X_test
"""
scaler = StandardScaler()
poly = PolynomialFeatures(degree=2,interaction_only = False)
# Train set
# Convert genre to dummies
X_train_genre = pd.get_dummies(X_train[cat_variable])
known_columns = X_train_genre.columns
# Scaling continuous variables
X_train_con = X_train[con_feature]
X_train_con_scaled = scaler.fit_transform(X_train_con)
X_train_con_scaled_df = pd.DataFrame(X_train_con_scaled, columns=X_train_con.columns, index=X_train_con.index)
X_train_poly = poly.fit_transform(X_train_con_scaled)
X_train_poly_df = pd.DataFrame(X_train_poly, columns=poly.get_feature_names(X_train_con.columns), index=X_train_con.index)
#Combine
X_train = pd.concat([X_train_genre,X_train_con_scaled_df],axis=1)
# X_train = pd.concat([X_train_genre,X_train_poly_df],axis=1)
# Val set
# Convert genre to dummies
X_val_genre = pd.get_dummies(X_val[cat_variable])
val_columns = X_val_genre.columns
X_val_genre = X_val_genre[[x for x in val_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in val_columns] }
X_val_genre = X_val_genre.assign(**fill_dict)
# Scaling continuous variables
X_val_con = X_val[con_feature]
X_val_con_scaled = scaler.transform(X_val_con)
X_val_con_scaled_df = pd.DataFrame(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index)
X_val_poly = poly.transform(X_val_con_scaled)
X_val_poly_df = pd.DataFrame(X_val_poly, columns=poly.get_feature_names(X_val_con.columns), index=X_val_con.index)
#Combine
X_val = pd.concat([X_val_genre,X_val_con_scaled_df],axis=1)
# X_val = pd.concat([X_val_genre,X_val_poly_df],axis=1)
# Test set
# Convert genre to dummies
X_test_genre = pd.get_dummies(X_test[cat_variable])
    test_columns = X_test_genre.columns  # compare the dummy columns, not the raw test columns
X_test_genre = X_test_genre[[x for x in test_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in test_columns] }
X_test_genre = X_test_genre.assign(**fill_dict)
# Scaling continuous variables
X_test_con = X_test[con_feature]
X_test_con_scaled = scaler.transform(X_test_con)
X_test_con_scaled_df = pd.DataFrame(X_test_con_scaled, columns=X_test_con.columns, index=X_test_con.index)
X_test_poly = poly.transform(X_test_con_scaled)
X_test_poly_df = pd.DataFrame(X_test_poly, columns=poly.get_feature_names(X_test_con.columns), index=X_test_con.index)
#Combine
X_test = pd.concat([X_test_genre,X_test_con_scaled_df],axis=1)
# X_test = pd.concat([X_test_genre,X_test_poly_df],axis=1)
return X_train,X_val,X_test
def opt_cat_number(df, cat_variable):
"""
Decide how many categories to keep for the categorical variable.
"""
score = []
    for i in range(0, 100):
        top = df[cat_variable].value_counts().index.tolist()[:i]
discard = list(set(df[cat_variable].unique()).difference(set(top)))
# The rest will go to "Other"
df['new_cat_variable'] = df[cat_variable].replace(discard,'Other')
# Get the data from all_df with both continuous and selected categorical variable
X, y = get_Xy(df)
# train_test_split
X_, X_test, y_, y_test = train_test_split(X, y, test_size=.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_, y_, test_size=.25, random_state=3)
X_train,X_val,X_test = categorical_singlelabel(X_train,X_val,X_test,'new_cat_variable')
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
y_pred = lr_model.predict(X_val)
val_score = lr_model.score(X_val, y_val)
score.append(round(val_score,3))
best_score = max(score)
num = score.index(best_score)
print('Optimal number of categories to keep is', num)
print('Best score is', best_score)
return num, best_score
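# End-to-end usage sketch (illustrative; assumes a cleaned movie DataFrame `df` with an
# 'opening_weekend_usa' target, a module-level `con_feature` list, and a single-label
# categorical column such as 'genre'):
#
#   X, y = get_Xy(df)
#   X_, X_test, y_, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
#   X_train, X_val, y_train, y_val = train_test_split(X_, y_, test_size=0.25, random_state=3)
#   X_train, X_val, X_test = categorical_singlelabel(X_train, X_val, X_test, 'genre')
#   fig, lr_model, y_pred = get_score(X_train, X_val, y_train, y_val)
#   num, best_score = opt_cat_number(df, 'genre')   # how many genres are worth keeping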
|
[
"seaborn.set",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xticks",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.plot",
"pandas.get_dummies",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.mean_squared_error",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"pandas.concat",
"matplotlib.pyplot.subplots"
] |
[((93, 102), 'seaborn.set', 'sns.set', ([], {}), '()\n', (100, 102), True, 'import seaborn as sns\n'), ((1065, 1083), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1081, 1083), False, 'from sklearn.linear_model import LinearRegression, Lasso, LassoCV, Ridge, RidgeCV\n'), ((1713, 1731), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1725, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1773), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_val', 'y_pred'], {'alpha': '(0.4)'}), '(y_val, y_pred, alpha=0.4)\n', (1747, 1773), True, 'import matplotlib.pyplot as plt\n'), ((1971, 1994), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (1981, 1994), True, 'import matplotlib.pyplot as plt\n'), ((1999, 2022), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (2009, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2055), 'numpy.linspace', 'np.linspace', (['(0)', '(70.0)', '(50)'], {}), '(0, 70.0, 50)\n', (2042, 2055), True, 'import numpy as np\n'), ((2100, 2157), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""firebrick"""', 'linewidth': '(3)', 'alpha': '(0.6)'}), "(x, y, color='firebrick', linewidth=3, alpha=0.6)\n", (2108, 2157), True, 'import matplotlib.pyplot as plt\n'), ((2158, 2169), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)'], {}), '(0)\n', (2166, 2169), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2186), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)'], {}), '(0)\n', (2183, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2704), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2702, 2704), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder\n'), ((2716, 2768), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)', 'interaction_only': '(False)'}), '(degree=2, interaction_only=False)\n', (2734, 2768), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder\n'), ((3111, 3202), 'pandas.DataFrame', 'pd.DataFrame', (['X_train_con_scaled'], {'columns': 'X_train_con.columns', 'index': 'X_train_con.index'}), '(X_train_con_scaled, columns=X_train_con.columns, index=\n X_train_con.index)\n', (3123, 3202), True, 'import pandas as pd\n'), ((3484, 3535), 'pandas.concat', 'pd.concat', (['[X_train_genre, X_train_poly_df]'], {'axis': '(1)'}), '([X_train_genre, X_train_poly_df], axis=1)\n', (3493, 3535), True, 'import pandas as pd\n'), ((4063, 4148), 'pandas.DataFrame', 'pd.DataFrame', (['X_val_con_scaled'], {'columns': 'X_val_con.columns', 'index': 'X_val_con.index'}), '(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index\n )\n', (4075, 4148), True, 'import pandas as pd\n'), ((4406, 4453), 'pandas.concat', 'pd.concat', (['[X_val_genre, X_val_poly_df]'], {'axis': '(1)'}), '([X_val_genre, X_val_poly_df], axis=1)\n', (4415, 4453), True, 'import pandas as pd\n'), ((4996, 5084), 'pandas.DataFrame', 'pd.DataFrame', (['X_test_con_scaled'], {'columns': 'X_test_con.columns', 'index': 'X_test_con.index'}), '(X_test_con_scaled, columns=X_test_con.columns, index=\n X_test_con.index)\n', (5008, 5084), True, 'import pandas as pd\n'), ((5356, 5405), 'pandas.concat', 'pd.concat', (['[X_test_genre, X_test_poly_df]'], {'axis': '(1)'}), '([X_test_genre, X_test_poly_df], axis=1)\n', (5365, 5405), True, 'import pandas as pd\n'), ((5909, 5925), 
'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5923, 5925), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder\n'), ((5937, 5989), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)', 'interaction_only': '(False)'}), '(degree=2, interaction_only=False)\n', (5955, 5989), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder\n'), ((6063, 6100), 'pandas.get_dummies', 'pd.get_dummies', (['X_train[cat_variable]'], {}), '(X_train[cat_variable])\n', (6077, 6100), True, 'import pandas as pd\n'), ((6305, 6396), 'pandas.DataFrame', 'pd.DataFrame', (['X_train_con_scaled'], {'columns': 'X_train_con.columns', 'index': 'X_train_con.index'}), '(X_train_con_scaled, columns=X_train_con.columns, index=\n X_train_con.index)\n', (6317, 6396), True, 'import pandas as pd\n'), ((6606, 6663), 'pandas.concat', 'pd.concat', (['[X_train_genre, X_train_con_scaled_df]'], {'axis': '(1)'}), '([X_train_genre, X_train_con_scaled_df], axis=1)\n', (6615, 6663), True, 'import pandas as pd\n'), ((6794, 6829), 'pandas.get_dummies', 'pd.get_dummies', (['X_val[cat_variable]'], {}), '(X_val[cat_variable])\n', (6808, 6829), True, 'import pandas as pd\n'), ((7230, 7315), 'pandas.DataFrame', 'pd.DataFrame', (['X_val_con_scaled'], {'columns': 'X_val_con.columns', 'index': 'X_val_con.index'}), '(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index\n )\n', (7242, 7315), True, 'import pandas as pd\n'), ((7507, 7560), 'pandas.concat', 'pd.concat', (['[X_val_genre, X_val_con_scaled_df]'], {'axis': '(1)'}), '([X_val_genre, X_val_con_scaled_df], axis=1)\n', (7516, 7560), True, 'import pandas as pd\n'), ((7691, 7727), 'pandas.get_dummies', 'pd.get_dummies', (['X_test[cat_variable]'], {}), '(X_test[cat_variable])\n', (7705, 7727), True, 'import pandas as pd\n'), ((8136, 8224), 'pandas.DataFrame', 'pd.DataFrame', (['X_test_con_scaled'], {'columns': 'X_test_con.columns', 'index': 'X_test_con.index'}), '(X_test_con_scaled, columns=X_test_con.columns, index=\n X_test_con.index)\n', (8148, 8224), True, 'import pandas as pd\n'), ((8427, 8482), 'pandas.concat', 'pd.concat', (['[X_test_genre, X_test_con_scaled_df]'], {'axis': '(1)'}), '([X_test_genre, X_test_con_scaled_df], axis=1)\n', (8436, 8482), True, 'import pandas as pd\n'), ((1317, 1350), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (1335, 1350), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((9234, 9288), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (9250, 9288), False, 'from sklearn.model_selection import train_test_split\n'), ((9329, 9385), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_', 'y_'], {'test_size': '(0.25)', 'random_state': '(3)'}), '(X_, y_, test_size=0.25, random_state=3)\n', (9345, 9385), False, 'from sklearn.model_selection import train_test_split\n'), ((9503, 9521), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9519, 9521), False, 'from sklearn.linear_model import LinearRegression, Lasso, LassoCV, Ridge, RidgeCV\n')]
|
import plotly.graph_objects as go
from pcutils.kitti_util import compute_box_3d
from PIL import Image
import numpy as np
ptc_layout_config={
'title': {
'text': 'test vis LiDAR',
'font': {
'size': 20,
'color': 'rgb(150,150,150)',
},
'xanchor': 'left',
'yanchor': 'top'},
'paper_bgcolor': 'rgb(0,0,0)',
'width' : 900,
'height' : 600,
'margin' : {
'l': 20,
'r': 20,
'b': 20,
't': 20
},
'legend': {
'font':{
'size':20,
'color': 'rgb(150,150,150)',
},
'itemsizing': 'constant'
},
"hoverlabel": {
"namelength": -1,
},
'showlegend': False,
'scene': {
'aspectmode': 'manual',
'aspectratio': {'x': 0.75, 'y': 0.25, 'z': 0.05},
'camera': {'eye': {'x': 0, 'y': 0, 'z': 0.5}},
'xaxis': {'color': 'rgb(150,150,150)',
'dtick': 10,
'gridcolor': 'rgb(100,100,100)',
'range': [-150, 150],
'showbackground': False,
'showgrid': True,
'showline': False,
'showticklabels': True,
'tickmode': 'linear',
'tickprefix': 'x:'},
'yaxis': {'color': 'rgb(150,150,150)',
'dtick': 10,
'gridcolor': 'rgb(100,100,100)',
'range': [-50, 50],
'showbackground': False,
'showgrid': True,
'showline': False,
'showticklabels': True,
'tickmode': 'linear',
'tickprefix': 'y:'},
'zaxis': {'color': 'rgb(150,150,150)',
'dtick': 10,
'gridcolor': 'rgb(100,100,100)',
'range': [-10, 10],
'showbackground': False,
'showgrid': True,
'showline': False,
'showticklabels': True,
'tickmode': 'linear',
'tickprefix': 'z:'}},
}
def showimg(img, labels=None, predictions=None, predictions2=None, classes=['Car', 'Truck', 'Van'], scale_factor=0.7):
# Create figure
fig = go.Figure()
# Constants
img_width = img.shape[1]
img_height = img.shape[0]
# Add invisible scatter trace.
# This trace is added to help the autoresize logic work.
fig.add_trace(
go.Scatter(
x=[0, img_width * scale_factor],
y=[0, img_height * scale_factor],
mode="markers",
marker_opacity=0
)
)
# Configure axes
fig.update_xaxes(
visible=False,
range=[0, img_width * scale_factor]
)
fig.update_yaxes(
visible=False,
range=[0, img_height * scale_factor],
# the scaleanchor attribute ensures that the aspect ratio stays constant
scaleanchor="x"
)
# Add image
fig.add_layout_image(
dict(
x=0,
sizex=img_width * scale_factor,
y=img_height * scale_factor,
sizey=img_height * scale_factor,
xref="x",
yref="y",
opacity=1.0,
layer="below",
sizing="stretch",
source=Image.fromarray(img[:, :, [2,1,0]]))
)
if labels is not None:
for label in labels:
if label.cls_type in classes:
fig.add_shape(
# unfilled Rectangle
type="rect",
x0=label.xmin * scale_factor,
y0=(img_height-label.ymin) * scale_factor,
x1=label.xmax * scale_factor,
y1=(img_height-label.ymax) * scale_factor,
line=dict(
color="LightGreen", width=2
),
)
if predictions is not None:
for label in predictions:
if label.cls_type in classes:
fig.add_shape(
# unfilled Rectangle
type="rect",
x0=label.xmin * scale_factor,
y0=(img_height-label.ymin) * scale_factor,
x1=label.xmax * scale_factor,
y1=(img_height-label.ymax) * scale_factor,
line=dict(
color="Red", width=2
),
name=label.cls_type
)
if predictions2 is not None:
for label in predictions2:
if label.cls_type in classes:
fig.add_shape(
# unfilled Rectangle
type="rect",
x0=label.xmin * scale_factor,
y0=(img_height-label.ymin) * scale_factor,
x1=label.xmax * scale_factor,
y1=(img_height-label.ymax) * scale_factor,
line=dict(
color="Yellow", width=2
),
name=label.cls_type
)
# Configure other layout
fig.update_layout(
width=img_width * scale_factor,
height=img_height * scale_factor,
margin={"l": 0, "r": 0, "t": 0, "b": 0},
)
# Disable the autosize on double click because it adds unwanted margins around the image
# More detail: https://plot.ly/python/configuration-options/
fig.show(config={'doubleClick': 'reset'})
return fig
def get_linemarks(obj, calib):
_, corners = compute_box_3d(obj, calib.P)
corners = calib.project_rect_to_velo(corners)
mid_front = (corners[0] + corners[1]) / 2
mid_left = (corners[0] + corners[3]) / 2
mid_right = (corners[1] + corners[2]) / 2
corners = np.vstack(
(corners, np.vstack([mid_front, mid_left, mid_right])))
idx = [0,8,9,10,8,1,2,3,0,4,5,1,5,6,2,6,7,3,7,4]
return corners[idx, :]
def get_bbox(obj, calib, name='bbox', color='yellow', width=3):
markers = get_linemarks(obj, calib)
return go.Scatter3d(
mode='lines',
x=markers[:, 0],
y=markers[:, 1],
z=markers[:, 2],
line=dict(color=color, width=width),
name=name)
def get_lidar(ptc, name='LiDAR', size=0.8):
return [go.Scatter3d(
x=ptc[:,0],
y=ptc[:,1],
z=ptc[:,2],
mode='markers',
marker_size=size,
name=name)]
def showvelo(lidar, calib, labels=None, predictions=None, classes=['Car', 'Truck', 'Van'], size=0.8):
gt_bboxes = [] if labels is None else [get_bbox(obj, calib, name='gt_bbox', color='lightgreen') for obj in labels if obj.cls_type in classes]
pred_bboxes = [] if predictions is None else [get_bbox(obj, calib, name='pred_bbox', color='red') for obj in predictions if obj.cls_type in classes]
fig = go.Figure(data=get_lidar(lidar, size=size) +
gt_bboxes + pred_bboxes, layout=ptc_layout_config)
fig.show()
return fig
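# --- Minimal runnable sketch (not part of the original module) ---
# Real usage passes a KITTI-style `calib` plus label/prediction objects (with cls_type,
# xmin/ymin/xmax/ymax fields) to showimg()/showvelo(); here we only exercise get_lidar(),
# which needs nothing beyond an (N, 3) point array, to preview the layout defined above.
if __name__ == '__main__':
    demo_ptc = np.random.uniform(-20, 20, size=(2048, 3)).astype(np.float32)
    demo_fig = go.Figure(data=get_lidar(demo_ptc, size=1.0), layout=ptc_layout_config)
    # demo_fig.show()  # uncomment to open the interactive viewer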
|
[
"PIL.Image.fromarray",
"pcutils.kitti_util.compute_box_3d",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Scatter",
"plotly.graph_objects.Scatter3d",
"numpy.vstack"
] |
[((2323, 2334), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2332, 2334), True, 'import plotly.graph_objects as go\n'), ((5634, 5662), 'pcutils.kitti_util.compute_box_3d', 'compute_box_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (5648, 5662), False, 'from pcutils.kitti_util import compute_box_3d\n'), ((2535, 2651), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[0, img_width * scale_factor]', 'y': '[0, img_height * scale_factor]', 'mode': '"""markers"""', 'marker_opacity': '(0)'}), "(x=[0, img_width * scale_factor], y=[0, img_height * scale_factor\n ], mode='markers', marker_opacity=0)\n", (2545, 2651), True, 'import plotly.graph_objects as go\n'), ((6367, 6467), 'plotly.graph_objects.Scatter3d', 'go.Scatter3d', ([], {'x': 'ptc[:, 0]', 'y': 'ptc[:, 1]', 'z': 'ptc[:, 2]', 'mode': '"""markers"""', 'marker_size': 'size', 'name': 'name'}), "(x=ptc[:, 0], y=ptc[:, 1], z=ptc[:, 2], mode='markers',\n marker_size=size, name=name)\n", (6379, 6467), True, 'import plotly.graph_objects as go\n'), ((5893, 5936), 'numpy.vstack', 'np.vstack', (['[mid_front, mid_left, mid_right]'], {}), '([mid_front, mid_left, mid_right])\n', (5902, 5936), True, 'import numpy as np\n'), ((3380, 3417), 'PIL.Image.fromarray', 'Image.fromarray', (['img[:, :, [2, 1, 0]]'], {}), '(img[:, :, [2, 1, 0]])\n', (3395, 3417), False, 'from PIL import Image\n')]
|
# -*- coding: utf-8 -*-
"""
This module is a work in progress, as such concepts are subject to change.
MAIN IDEA:
`MultiTaskSamples` serves as a structure to contain and manipulate a set of
samples with potentially many different types of labels and features.
"""
import logging
import utool as ut
import ubelt as ub
import numpy as np
from wbia import dtool as dt
import pandas as pd
import sklearn
import sklearn.metrics
import sklearn.ensemble
import sklearn.impute
import sklearn.pipeline
import sklearn.neural_network
from wbia.algo.verif import sklearn_utils
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class XValConfig(dt.Config):
_param_info_list = [
# ut.ParamInfo('type', 'StratifiedKFold'),
ut.ParamInfo('type', 'StratifiedGroupKFold'),
ut.ParamInfo('n_splits', 3),
ut.ParamInfo(
'shuffle', True, hideif=lambda cfg: cfg['type'] == 'StratifiedGroupKFold'
),
ut.ParamInfo(
'random_state',
3953056901,
hideif=lambda cfg: cfg['type'] == 'StratifiedGroupKFold',
),
]
@ut.reloadable_class
class ClfProblem(ut.NiceRepr):
def __init__(pblm):
pblm.deploy_task_clfs = None
pblm.eval_task_clfs = None
pblm.xval_kw = XValConfig()
pblm.eval_task_clfs = None
pblm.task_combo_res = None
pblm.verbose = True
def set_pandas_options(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def set_pandas_options_low(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 5
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def set_pandas_options_normal(pblm):
# pd.options.display.max_rows = 10
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
def learn_evaluation_classifiers(pblm, task_keys=None, clf_keys=None, data_keys=None):
"""
Evaluates by learning classifiers using cross validation.
Do not use this to learn production classifiers.
        CommandLine:
            python -m wbia.algo.verif.vsone evaluate_classifiers --db PZ_PB_RF_TRAIN --show
            python -m clf_helpers learn_evaluation_classifiers

        Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> pblm = IrisProblem()
>>> pblm.setup()
>>> pblm.verbose = True
>>> pblm.eval_clf_keys = ['Logit', 'RF']
>>> pblm.eval_task_keys = ['iris']
>>> pblm.eval_data_keys = ['learn(all)']
>>> result = pblm.learn_evaluation_classifiers()
>>> res = pblm.task_combo_res['iris']['Logit']['learn(all)']
>>> res.print_report()
>>> res = pblm.task_combo_res['iris']['RF']['learn(all)']
>>> res.print_report()
>>> print(result)
"""
pblm.eval_task_clfs = ut.AutoVivification()
pblm.task_combo_res = ut.AutoVivification()
if task_keys is None:
task_keys = pblm.eval_task_keys
if data_keys is None:
data_keys = pblm.eval_data_keys
if clf_keys is None:
clf_keys = pblm.eval_clf_keys
if task_keys is None:
task_keys = [pblm.primary_task_key]
if data_keys is None:
data_keys = [pblm.default_data_key]
if clf_keys is None:
clf_keys = [pblm.default_clf_key]
if pblm.verbose:
ut.cprint('[pblm] learn_evaluation_classifiers', color='blue')
ut.cprint('[pblm] task_keys = {}'.format(task_keys))
ut.cprint('[pblm] data_keys = {}'.format(data_keys))
ut.cprint('[pblm] clf_keys = {}'.format(clf_keys))
Prog = ut.ProgPartial(freq=1, adjust=False, prehack='%s')
task_prog = Prog(task_keys, label='Task')
for task_key in task_prog:
dataset_prog = Prog(data_keys, label='Data')
for data_key in dataset_prog:
clf_prog = Prog(clf_keys, label='CLF')
for clf_key in clf_prog:
pblm._ensure_evaluation_clf(task_key, data_key, clf_key)
def _ensure_evaluation_clf(pblm, task_key, data_key, clf_key, use_cache=True):
"""
Learns and caches an evaluation (cross-validated) classifier and tests
and caches the results.
data_key = 'learn(sum,glob)'
clf_key = 'RF'
"""
# TODO: add in params used to construct features into the cfgstr
if hasattr(pblm.samples, 'sample_hashid'):
ibs = pblm.infr.ibs
sample_hashid = pblm.samples.sample_hashid()
feat_dims = pblm.samples.X_dict[data_key].columns.values.tolist()
# cfg_prefix = sample_hashid + pblm.qreq_.get_cfgstr() + feat_cfgstr
est_kw1, est_kw2 = pblm._estimator_params(clf_key)
param_id = ut.get_dict_hashid(est_kw1)
xval_id = pblm.xval_kw.get_cfgstr()
cfgstr = '_'.join(
[
sample_hashid,
param_id,
xval_id,
task_key,
data_key,
clf_key,
ut.hashid_arr(feat_dims, 'feats'),
]
)
fname = 'eval_clfres_' + ibs.dbname
else:
fname = 'foo'
feat_dims = None
cfgstr = 'bar'
use_cache = False
# TODO: ABI class should not be caching
cacher_kw = dict(appname='vsone_rf_train', enabled=use_cache, verbose=1)
cacher_clf = ub.Cacher(fname, cfgstr=cfgstr, meta=[feat_dims], **cacher_kw)
data = cacher_clf.tryload()
if not data:
data = pblm._train_evaluation_clf(task_key, data_key, clf_key)
cacher_clf.save(data)
clf_list, res_list = data
labels = pblm.samples.subtasks[task_key]
combo_res = ClfResult.combine_results(res_list, labels)
pblm.eval_task_clfs[task_key][clf_key][data_key] = clf_list
pblm.task_combo_res[task_key][clf_key][data_key] = combo_res
def _train_evaluation_clf(pblm, task_key, data_key, clf_key, feat_dims=None):
"""
Learns a cross-validated classifier on the dataset
Ignore:
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem()
>>> pblm.load_features()
>>> pblm.load_samples()
>>> data_key = 'learn(all)'
>>> task_key = 'photobomb_state'
>>> clf_key = 'RF-OVR'
>>> task_key = 'match_state'
>>> data_key = pblm.default_data_key
>>> clf_key = pblm.default_clf_key
"""
X_df = pblm.samples.X_dict[data_key]
labels = pblm.samples.subtasks[task_key]
assert np.all(labels.encoded_df.index == X_df.index)
clf_partial = pblm._get_estimator(clf_key)
xval_kw = pblm.xval_kw.asdict()
clf_list = []
res_list = []
skf_list = pblm.samples.stratified_kfold_indices(**xval_kw)
skf_prog = ut.ProgIter(skf_list, label='skf-train-eval')
for train_idx, test_idx in skf_prog:
X_df_train = X_df.iloc[train_idx]
assert X_df_train.index.tolist() == ut.take(pblm.samples.index, train_idx)
# train_uv = X_df.iloc[train_idx].index
# X_train = X_df.loc[train_uv]
# y_train = labels.encoded_df.loc[train_uv]
if feat_dims is not None:
X_df_train = X_df_train[feat_dims]
X_train = X_df_train.values
y_train = labels.encoded_df.iloc[train_idx].values.ravel()
clf = clf_partial()
clf.fit(X_train, y_train)
# Note: There is a corner case where one fold doesn't get any
# labels of a certain class. Because y_train is an encoded integer,
# the clf.classes_ attribute will cause predictions to agree with
# other classifiers trained on the same labels.
# Evaluate results
res = ClfResult.make_single(
clf, X_df, test_idx, labels, data_key, feat_dims=feat_dims
)
clf_list.append(clf)
res_list.append(res)
return clf_list, res_list
def _external_classifier_result(
pblm, clf, task_key, data_key, feat_dims=None, test_idx=None
):
"""
Given an external classifier (ensure its trained on disjoint data)
evaluate all data on it.
Args:
test_idx (list): subset of this classifier to test on
(defaults to all if None)
"""
X_df = pblm.samples.X_dict[data_key]
if test_idx is None:
test_idx = np.arange(len(X_df))
labels = pblm.samples.subtasks[task_key]
res = ClfResult.make_single(
clf, X_df, test_idx, labels, data_key, feat_dims=feat_dims
)
return res
def learn_deploy_classifiers(pblm, task_keys=None, clf_key=None, data_key=None):
"""
Learns on data without any train/validation split
"""
if pblm.verbose > 0:
ut.cprint('[pblm] learn_deploy_classifiers', color='blue')
if clf_key is None:
clf_key = pblm.default_clf_key
if data_key is None:
data_key = pblm.default_data_key
if task_keys is None:
task_keys = list(pblm.samples.supported_tasks())
if pblm.deploy_task_clfs is None:
pblm.deploy_task_clfs = ut.AutoVivification()
Prog = ut.ProgPartial(freq=1, adjust=False, prehack='%s')
task_prog = Prog(task_keys, label='Task')
task_clfs = {}
for task_key in task_prog:
clf = pblm._train_deploy_clf(task_key, data_key, clf_key)
task_clfs[task_key] = clf
pblm.deploy_task_clfs[task_key][clf_key][data_key] = clf
return task_clfs
def _estimator_params(pblm, clf_key):
est_type = clf_key.split('-')[0]
if est_type in {'RF', 'RandomForest'}:
est_kw1 = {
# 'max_depth': 4,
'bootstrap': True,
'class_weight': None,
'criterion': 'entropy',
'max_features': 'sqrt',
# 'max_features': None,
'min_samples_leaf': 5,
'min_samples_split': 2,
# 'n_estimators': 64,
'n_estimators': 256,
}
# Hack to only use missing values if we have the right sklearn
if 'missing_values' in ut.get_func_kwargs(
sklearn.ensemble.RandomForestClassifier.__init__
):
est_kw1['missing_values'] = np.nan
est_kw2 = {
'random_state': 3915904814,
'verbose': 0,
'n_jobs': -1,
}
elif est_type in {'SVC', 'SVM'}:
est_kw1 = dict(kernel='linear')
est_kw2 = {}
elif est_type in {'Logit', 'LogisticRegression'}:
est_kw1 = {}
est_kw2 = {}
elif est_type in {'MLP'}:
est_kw1 = dict(
activation='relu',
alpha=1e-05,
batch_size='auto',
beta_1=0.9,
beta_2=0.999,
early_stopping=False,
epsilon=1e-08,
hidden_layer_sizes=(10, 10),
learning_rate='constant',
learning_rate_init=0.001,
max_iter=200,
momentum=0.9,
nesterovs_momentum=True,
power_t=0.5,
random_state=3915904814,
shuffle=True,
solver='lbfgs',
tol=0.0001,
validation_fraction=0.1,
warm_start=False,
)
est_kw2 = dict(verbose=False)
else:
raise KeyError('Unknown Estimator')
return est_kw1, est_kw2
def _get_estimator(pblm, clf_key):
"""
        Returns a factory callable that builds the sklearn classifier named by `clf_key`
        (see the naming-convention sketch after this class)
"""
tup = clf_key.split('-')
wrap_type = None if len(tup) == 1 else tup[1]
est_type = tup[0]
multiclass_wrapper = {
None: ut.identity,
'OVR': sklearn.multiclass.OneVsRestClassifier,
'OVO': sklearn.multiclass.OneVsOneClassifier,
}[wrap_type]
est_class = {
'RF': sklearn.ensemble.RandomForestClassifier,
'SVC': sklearn.svm.SVC,
'Logit': sklearn.linear_model.LogisticRegression,
'MLP': sklearn.neural_network.MLPClassifier,
}[est_type]
est_kw1, est_kw2 = pblm._estimator_params(est_type)
est_params = ut.merge_dicts(est_kw1, est_kw2)
# steps = []
# steps.append((est_type, est_class(**est_params)))
# if wrap_type is not None:
# steps.append((wrap_type, multiclass_wrapper))
if est_type == 'MLP':
def clf_partial():
pipe = sklearn.pipeline.Pipeline(
[
('inputer', sklearn.impute.SimpleImputer(strategy='mean')),
# ('scale', sklearn.preprocessing.StandardScaler),
('est', est_class(**est_params)),
]
)
return multiclass_wrapper(pipe)
elif est_type == 'Logit':
def clf_partial():
pipe = sklearn.pipeline.Pipeline(
[
('inputer', sklearn.impute.SimpleImputer(strategy='mean')),
('est', est_class(**est_params)),
]
)
return multiclass_wrapper(pipe)
else:
def clf_partial():
return multiclass_wrapper(est_class(**est_params))
return clf_partial
def _train_deploy_clf(pblm, task_key, data_key, clf_key):
X_df = pblm.samples.X_dict[data_key]
labels = pblm.samples.subtasks[task_key]
assert np.all(labels.encoded_df.index == X_df.index)
clf_partial = pblm._get_estimator(clf_key)
logger.info(
'Training deployment {} classifier on {} for {}'.format(
clf_key, data_key, task_key
)
)
clf = clf_partial()
index = X_df.index
X = X_df.loc[index].values
y = labels.encoded_df.loc[index].values.ravel()
clf.fit(X, y)
return clf
def _optimize_rf_hyperparams(pblm, data_key=None, task_key=None):
"""
helper script I've only run interactively
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.algo.verif.vsone import * # NOQA
>>> pblm = OneVsOneProblem.from_empty('PZ_PB_RF_TRAIN')
#>>> pblm = OneVsOneProblem.from_empty('GZ_Master1')
>>> pblm.load_samples()
>>> pblm.load_features()
>>> pblm.build_feature_subsets()
>>> data_key=None
>>> task_key=None
"""
from sklearn.model_selection import RandomizedSearchCV # NOQA
from sklearn.model_selection import GridSearchCV # NOQA
from sklearn.ensemble import RandomForestClassifier
from wbia.algo.verif import sklearn_utils
if data_key is None:
data_key = pblm.default_data_key
if task_key is None:
task_key = pblm.primary_task_key
# Load data
X = pblm.samples.X_dict[data_key].values
y = pblm.samples.subtasks[task_key].y_enc
groups = pblm.samples.group_ids
# Define estimator and parameter search space
grid = {
'bootstrap': [True, False],
'class_weight': [None, 'balanced'],
'criterion': ['entropy', 'gini'],
# 'max_features': ['sqrt', 'log2'],
'max_features': ['sqrt'],
'min_samples_leaf': list(range(2, 11)),
'min_samples_split': list(range(2, 11)),
'n_estimators': [8, 64, 128, 256, 512, 1024],
}
est = RandomForestClassifier(missing_values=np.nan)
if False:
# debug
params = ut.util_dict.all_dict_combinations(grid)[0]
est.set_params(verbose=10, n_jobs=1, **params)
est.fit(X=X, y=y)
cv = sklearn_utils.StratifiedGroupKFold(n_splits=3)
if True:
n_iter = 25
SearchCV = ut.partial(RandomizedSearchCV, n_iter=n_iter)
else:
n_iter = ut.prod(map(len, grid.values()))
SearchCV = GridSearchCV
search = SearchCV(est, grid, cv=cv, verbose=10)
n_cpus = ut.num_cpus()
thresh = n_cpus * 1.5
n_jobs_est = 1
n_jobs_ser = min(n_cpus, n_iter)
if n_iter < thresh:
n_jobs_est = int(max(1, thresh / n_iter))
est.set_params(n_jobs=n_jobs_est)
search.set_params(n_jobs=n_jobs_ser)
search.fit(X=X, y=y, groups=groups)
res = search.cv_results_.copy()
alias = ut.odict(
[
('rank_test_score', 'rank'),
('mean_test_score', 'μ-test'),
('std_test_score', 'σ-test'),
('mean_train_score', 'μ-train'),
('std_train_score', 'σ-train'),
('mean_fit_time', 'fit_time'),
('params', 'params'),
]
)
res = ut.dict_subset(res, alias.keys())
cvresult_df = pd.DataFrame(res).rename(columns=alias)
cvresult_df = cvresult_df.sort_values('rank').reset_index(drop=True)
params = pd.DataFrame.from_dict(cvresult_df['params'].values.tolist())
logger.info('Varied params:')
logger.info(ut.repr4(ut.map_vals(set, params.to_dict('list'))))
logger.info('Ranked Params')
logger.info(params)
logger.info('Ranked scores on development set:')
logger.info(cvresult_df)
logger.info('Best parameters set found on hyperparam set:')
logger.info('best_params_ = %s' % (ut.repr4(search.best_params_),))
logger.info('Fastest params')
cvresult_df.loc[cvresult_df['fit_time'].idxmin()]['params']
def _dev_calib(pblm):
"""
interactive script only
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import calibration_curve
from sklearn.metrics import log_loss, brier_score_loss
# Load data
data_key = pblm.default_data_key
task_key = pblm.primary_task_key
X = pblm.samples.X_dict[data_key].values
y = pblm.samples.subtasks[task_key].y_enc
groups = pblm.samples.group_ids
# Split into test/train/valid
cv = sklearn_utils.StratifiedGroupKFold(n_splits=2)
test_idx, train_idx = next(cv.split(X, y, groups))
# valid_idx = train_idx[0::2]
# train_idx = train_idx[1::2]
# train_valid_idx = np.hstack([train_idx, valid_idx])
# Train Uncalibrated RF
est_kw = pblm._estimator_params('RF')[0]
uncal_clf = RandomForestClassifier(**est_kw)
uncal_clf.fit(X[train_idx], y[train_idx])
uncal_probs = uncal_clf.predict_proba(X[test_idx]).T[1]
uncal_score = log_loss(y[test_idx] == 1, uncal_probs)
uncal_brier = brier_score_loss(y[test_idx] == 1, uncal_probs)
# Train Calibrated RF
method = 'isotonic' if len(test_idx) > 2000 else 'sigmoid'
precal_clf = RandomForestClassifier(**est_kw)
# cv = sklearn_utils.StratifiedGroupKFold(n_splits=3)
cal_clf = CalibratedClassifierCV(precal_clf, cv=2, method=method)
cal_clf.fit(X[train_idx], y[train_idx])
cal_probs = cal_clf.predict_proba(X[test_idx]).T[1]
cal_score = log_loss(y[test_idx] == 1, cal_probs)
cal_brier = brier_score_loss(y[test_idx] == 1, cal_probs)
logger.info('cal_brier = %r' % (cal_brier,))
logger.info('uncal_brier = %r' % (uncal_brier,))
logger.info('uncal_score = %r' % (uncal_score,))
logger.info('cal_score = %r' % (cal_score,))
import wbia.plottool as pt
ut.qtensure()
pt.figure()
ax = pt.gca()
y_test = y[test_idx] == 1
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, uncal_probs, n_bins=10
)
ax.plot([0, 1], [0, 1], 'k:', label='Perfectly calibrated')
ax.plot(
mean_predicted_value,
fraction_of_positives,
's-',
label='%s (%1.3f)' % ('uncal-RF', uncal_brier),
)
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, cal_probs, n_bins=10
)
ax.plot(
mean_predicted_value,
fraction_of_positives,
's-',
label='%s (%1.3f)' % ('cal-RF', cal_brier),
)
pt.legend()
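# Illustrative helper (not part of the original class): the clf_key strings accepted by
# ClfProblem._get_estimator follow an '<estimator>' or '<estimator>-<wrapper>' convention,
# e.g. 'RF', 'RF-OVR', 'Logit', 'MLP', where an optional OVR/OVO suffix selects a
# sklearn multiclass wrapper around the base estimator.
def _clf_key_convention_sketch(clf_key):
    est_type, _, wrap_type = clf_key.partition('-')
    return est_type, (wrap_type or None)
# _clf_key_convention_sketch('RF-OVR') -> ('RF', 'OVR')
# _clf_key_convention_sketch('Logit')  -> ('Logit', None)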
@ut.reloadable_class
class ClfResult(ut.NiceRepr):
r"""
Handles evaluation statistics for a multiclass classifier trained on a
specific dataset with specific labels.
"""
# Attributes that identify the task and data the classifier is evaluated on
_key_attrs = ['task_key', 'data_key', 'class_names']
# Attributes about results and labels of individual samples
_datafame_attrs = ['probs_df', 'probhats_df', 'target_bin_df', 'target_enc_df']
def __init__(res):
pass
def __nice__(res):
return '{}, {}, {}'.format(res.task_key, res.data_key, len(res.index))
@property
def index(res):
return res.probs_df.index
@classmethod
def make_single(ClfResult, clf, X_df, test_idx, labels, data_key, feat_dims=None):
"""
        Make a result for a single cross-validation subset
"""
X_df_test = X_df.iloc[test_idx]
if feat_dims is not None:
X_df_test = X_df_test[feat_dims]
index = X_df_test.index
# clf_probs = clf.predict_proba(X_df_test)
# index = pd.Series(test_idx, name='test_idx')
# Ensure shape corresponds with all classes
def align_cols(arr, arr_cols, target_cols):
import utool as ut
alignx = ut.list_alignment(arr_cols, target_cols, missing=True)
aligned_arrT = ut.none_take(arr.T, alignx)
aligned_arrT = ut.replace_nones(aligned_arrT, np.zeros(len(arr)))
aligned_arr = np.vstack(aligned_arrT).T
return aligned_arr
res = ClfResult()
res.task_key = labels.task_name
res.data_key = data_key
res.class_names = ut.lmap(str, labels.class_names)
res.feat_dims = feat_dims
res.probs_df = sklearn_utils.predict_proba_df(clf, X_df_test, res.class_names)
res.target_bin_df = labels.indicator_df.iloc[test_idx]
res.target_enc_df = labels.encoded_df.iloc[test_idx]
if hasattr(clf, 'estimators_') and labels.n_classes > 2:
# The n-th estimator in the OVR classifier predicts the prob of the
# n-th class (as label 1).
probs_hat = np.hstack(
[est.predict_proba(X_df_test)[:, 1:2] for est in clf.estimators_]
)
res.probhats_df = pd.DataFrame(
align_cols(probs_hat, clf.classes_, labels.classes_),
index=index,
columns=res.class_names,
)
# In the OVR-case, ideally things will sum to 1, but when they
# don't normalization happens. An Z-value of more than 1 means
# overconfidence, and under 0 means underconfidence.
res.confidence_ratio = res.probhats_df.sum(axis=1)
else:
res.probhats_df = None
return res
def compress(res, flags):
res2 = ClfResult()
res2.task_key = res.task_key
res2.data_key = res.data_key
res2.class_names = res.class_names
res2.probs_df = res.probs_df[flags]
res2.target_bin_df = res.target_bin_df[flags]
res2.target_enc_df = res.target_enc_df[flags]
if res.probhats_df is None:
res2.probhats_df = None
else:
res2.probhats_df = res.probhats_df[flags]
# res2.confidence_ratio = res.confidence_ratio[flags]
return res2
@classmethod
def combine_results(ClfResult, res_list, labels=None):
"""
Combine results from cross validation runs into a single result
representing the performance of the entire dataset
"""
# Ensure that res_lists are not overlapping
for r1, r2 in ut.combinations(res_list, 2):
assert (
len(r1.index.intersection(r2.index)) == 0
), 'ClfResult dataframes must be disjoint'
# sanity check
for r in res_list:
assert np.all(r.index == r.probs_df.index)
assert np.all(r.index == r.target_bin_df.index)
assert np.all(r.index == r.target_enc_df.index)
# Combine them with pandas
res = ClfResult()
res0 = res_list[0]
# Transfer single attributes (which should all be the same)
for attr in ClfResult._key_attrs:
val = getattr(res0, attr)
setattr(res, attr, val)
assert all(
[getattr(r, attr) == val for r in res_list]
), 'ClfResult with different key attributes are incompatible'
# Combine dataframe properties (which should all have disjoint indices)
for attr in ClfResult._datafame_attrs:
if getattr(res0, attr) is not None:
combo_attr = pd.concat([getattr(r, attr) for r in res_list])
setattr(res, attr, combo_attr)
else:
setattr(res, attr, None)
for attr in ClfResult._datafame_attrs:
val = getattr(res, attr)
if val is not None:
assert np.all(res.index == val.index), 'index got weird'
return res
def hardness_analysis(res, samples, infr=None, method='argmax'):
"""
samples = pblm.samples
# TODO MWE with sklearn data
# ClfResult.make_single(ClfResult, clf, X_df, test_idx, labels,
# data_key, feat_dims=None):
import sklearn.datasets
iris = sklearn.datasets.load_iris()
# TODO: make this setup simpler
pblm = ClfProblem()
task_key, clf_key, data_key = 'iris', 'RF', 'learn(all)'
X_df = pd.DataFrame(iris.data, columns=iris.feature_names)
samples = MultiTaskSamples(X_df.index)
samples.apply_indicators({'iris': {name: iris.target == idx
for idx, name in enumerate(iris.target_names)}})
samples.X_dict = {'learn(all)': X_df}
pblm.samples = samples
pblm.xval_kw['type'] = 'StratifiedKFold'
clf_list, res_list = pblm._train_evaluation_clf(
task_key, data_key, clf_key)
labels = pblm.samples.subtasks[task_key]
res = ClfResult.combine_results(res_list, labels)
res.get_thresholds('mcc', 'maximize')
predict_method = 'argmax'
"""
meta = {}
easiness = ut.ziptake(res.probs_df.values, res.target_enc_df.values)
# pred = sklearn_utils.predict_from_probs(res.probs_df, predict_method)
if method == 'max-mcc':
method = res.get_thresholds('mcc', 'maximize')
pred = sklearn_utils.predict_from_probs(res.probs_df, method, force=True)
meta['easiness'] = np.array(easiness).ravel()
meta['hardness'] = 1 - meta['easiness']
meta['aid1'] = res.probs_df.index.get_level_values(0)
meta['aid2'] = res.probs_df.index.get_level_values(1)
# meta['aid1'] = samples.aid_pairs.T[0].take(res.probs_df.index.values)
# meta['aid2'] = samples.aid_pairs.T[1].take(res.probs_df.index.values)
# meta['pred'] = res.probs_df.values.argmax(axis=1)
meta['pred'] = pred.values
meta['real'] = res.target_enc_df.values.ravel()
meta['failed'] = meta['pred'] != meta['real']
meta = pd.DataFrame(meta)
meta = meta.set_index(['aid1', 'aid2'], drop=False)
if infr is not None:
ibs = infr.ibs
edges = list(meta.index.tolist())
conf_dict = infr.get_edge_attrs(
'confidence',
edges,
on_missing='filter',
default=ibs.const.CONFIDENCE.CODE.UNKNOWN,
)
conf_df = pd.DataFrame.from_dict(conf_dict, orient='index')
conf_df = conf_df[0].map(ibs.const.CONFIDENCE.CODE_TO_INT)
meta = meta.assign(real_conf=conf_df)
meta['real_conf'] = np.nan_to_num(meta['real_conf']).astype(np.int)
meta = meta.sort_values('hardness', ascending=False)
res.meta = meta
return res.meta
def missing_classes(res):
# Find classes that were never predicted
unique_predictions = np.unique(res.probs_df.values.argmax(axis=1))
n_classes = len(res.class_names)
missing_classes = ut.index_complement(unique_predictions, n_classes)
return missing_classes
def augment_if_needed(res):
"""
Adds in dummy values for missing classes
"""
missing_classes = res.missing_classes()
n_classes = len(res.class_names)
y_test_enc_aug = res.target_enc_df.values
y_test_bin_aug = res.target_bin_df.values
clf_probs_aug = res.probs_df.values
sample_weight = np.ones(len(y_test_enc_aug))
n_missing = len(missing_classes)
if res.probhats_df is not None:
clf_probhats_aug = res.probhats_df.values
else:
clf_probhats_aug = None
# Check if augmentation is necessary
if n_missing > 0:
missing_bin = np.zeros((n_missing, n_classes))
missing_bin[(np.arange(n_missing), missing_classes)] = 1.0
missing_enc = np.array(missing_classes)[:, None]
y_test_enc_aug = np.vstack([y_test_enc_aug, missing_enc])
y_test_bin_aug = np.vstack([y_test_bin_aug, missing_bin])
clf_probs_aug = np.vstack([clf_probs_aug, missing_bin])
# make sample weights where dummies have no weight
sample_weight = np.hstack([sample_weight, np.full(n_missing, 0)])
if res.probhats_df is not None:
clf_probhats_aug = np.vstack([clf_probhats_aug, missing_bin])
res.clf_probs = clf_probs_aug
res.clf_probhats = clf_probhats_aug
res.y_test_enc = y_test_enc_aug
res.y_test_bin = y_test_bin_aug
res.sample_weight = sample_weight
def extended_clf_report(res, verbose=True):
res.augment_if_needed()
pred_enc = res.clf_probs.argmax(axis=1)
y_pred = pred_enc
y_true = res.y_test_enc
sample_weight = res.sample_weight
target_names = res.class_names
report = sklearn_utils.classification_report2(
y_true,
y_pred,
target_names=target_names,
sample_weight=sample_weight,
verbose=verbose,
)
return report
def print_report(res):
res.augment_if_needed()
pred_enc = res.clf_probs.argmax(axis=1)
res.extended_clf_report()
report = sklearn.metrics.classification_report(
y_true=res.y_test_enc,
y_pred=pred_enc,
target_names=res.class_names,
sample_weight=res.sample_weight,
)
logger.info('Precision/Recall Report:')
logger.info(report)
def get_thresholds(res, metric='mcc', value='maximize'):
"""
get_metric = 'thresholds'
at_metric = metric = 'mcc'
at_value = value = 'maximize'
a = []
b = []
for x in np.linspace(0, 1, 1000):
a += [cfms.get_metric_at_metric('thresholds', 'fpr', x, subindex=True)]
b += [cfms.get_thresh_at_metric('fpr', x)]
a = np.array(a)
b = np.array(b)
d = (a - b)
logger.info((d.min(), d.max()))
"""
threshes = {}
for class_name in res.class_names:
cfms = res.confusions(class_name)
thresh = cfms.get_metric_at_metric('thresh', metric, value)
threshes[class_name] = thresh
return threshes
@profile
def get_pos_threshes(
res,
metric='fpr',
value=1e-4,
maximize=False,
warmup=200,
priors=None,
min_thresh=0.5,
):
"""
Finds a threshold that achieves the desired `value` for the desired
metric, while maximizing or minimizing the threshold.
For positive classification you want to minimize the threshold.
Priors can be passed in to augment probabilities depending on support.
By default a class prior is 1 for threshold minimization and 0 for
        maximization. (A worked numeric sketch of the warmup interpolation appears after this class.)
"""
pos_threshes = {}
if priors is None:
priors = {name: float(not maximize) for name in res.class_names}
for class_name in res.class_names:
cfms = res.confusions(class_name)
learned_thresh = cfms.get_metric_at_metric('thresh', metric, value)
# learned_thresh = cfms.get_thresh_at_metric(
# metric, value, maximize=maximize)
prior_thresh = priors[class_name]
n_support = cfms.n_pos
if warmup is not None:
"""
python -m wbia.plottool.draw_func2 plot_func --show --range=0,1 \
--func="lambda x: np.maximum(0, (x - .6) / (1 - .6))"
"""
# If n_support < warmup: then interpolate to learned thresh
nmax = warmup if isinstance(warmup, int) else warmup[class_name]
# alpha varies from 0 to 1
alpha = min(nmax, n_support) / nmax
# transform alpha through nonlinear function (similar to ReLU)
p = 0.6 # transition point
alpha = max(0, (alpha - p) / (1 - p))
thresh = prior_thresh * (1 - alpha) + learned_thresh * (alpha)
else:
thresh = learned_thresh
pos_threshes[class_name] = max(min_thresh, thresh)
return pos_threshes
def report_thresholds(res, warmup=200):
# import vtool as vt
ut.cprint('Threshold Report', 'yellow')
y_test_bin = res.target_bin_df.values
# y_test_enc = y_test_bin.argmax(axis=1)
# clf_probs = res.probs_df.values
# The maximum allowed false positive rate
# We expect that we will make 1 error every 1,000 decisions
# thresh_df['foo'] = [1, 2, 3]
# thresh_df['foo'][res.class_names[k]] = 1
# for k in [2, 0, 1]:
choice_mv = ut.odict(
[
('@fpr=.01', ('fpr', 0.01)),
('@fpr=.001', ('fpr', 0.001)),
('@fpr=.0001', ('fpr', 1e-4)),
('@fpr=.0000', ('fpr', 0)),
('@max(mcc)', ('mcc', 'max')),
# (class_name + '@max(acc)', ('acc', 'max')),
# (class_name + '@max(mk)', ('mk', 'max')),
# (class_name + '@max(bm)', ('bm', 'max')),
]
)
for k in range(y_test_bin.shape[1]):
thresh_dict = ut.odict()
class_name = res.class_names[k]
cfms = res.confusions(class_name)
# probs, labels = clf_probs.T[k], y_test_bin.T[k]
# cfms = vt.ConfusionMetrics().fit(probs, labels)
            for suffix, mv in choice_mv.items():
                metric, value = mv
                idx = cfms.get_index_at_metric(metric, value)
                key = class_name + suffix
                thresh_dict[key] = ut.odict()
for metric in ['thresh', 'fpr', 'tpr', 'tpa', 'bm', 'mk', 'mcc']:
thresh_dict[key][metric] = cfms.get_metric_at_index(metric, idx)
thresh_df = pd.DataFrame.from_dict(thresh_dict, orient='index')
thresh_df = thresh_df.loc[list(thresh_dict.keys())]
if cfms.n_pos > 0 and cfms.n_neg > 0:
logger.info('Raw 1vR {} Thresholds'.format(class_name))
logger.info(ut.indent(thresh_df.to_string(float_format='{:.4f}'.format)))
# chosen_type = class_name + '@fpr=0'
# pos_threshes[class_name] = thresh_df.loc[chosen_type]['thresh']
for choice_k, choice_mv in iter(choice_mv.items()):
metric, value = choice_mv
pos_threshes = res.get_pos_threshes(metric, value, warmup=warmup)
logger.info('Choosing threshold based on %s' % (choice_k,))
res.report_auto_thresholds(pos_threshes)
def report_auto_thresholds(res, threshes, verbose=True):
report_lines = []
print_ = report_lines.append
print_(
'Chosen thresholds = %s'
% (ut.repr2(threshes, nl=1, precision=4, align=True),)
)
res.augment_if_needed()
target_names = res.class_names
sample_weight = res.sample_weight
y_true = res.y_test_enc.ravel()
y_pred, can_autodecide = sklearn_utils.predict_from_probs(
res.clf_probs,
threshes,
res.class_names,
force=False,
multi=False,
return_flags=True,
)
can_autodecide[res.sample_weight == 0] = False
auto_pred = y_pred[can_autodecide].astype(np.int)
auto_true = y_true[can_autodecide].ravel()
auto_probs = res.clf_probs[can_autodecide]
total_cases = int(sample_weight.sum())
print_('Will autodecide for %r/%r cases' % (can_autodecide.sum(), (total_cases)))
        def frac_str(a, b):
            return '{:}/{:} = {:.2f}%'.format(int(a), int(b), 100 * a / b if b else 0)
y_test_bin = res.target_bin_df.values
supported_class_idxs = [k for k, y in enumerate(y_test_bin.T) if y.sum() > 0]
print_(' * Auto-Decide Per-Class Summary')
for k in supported_class_idxs:
# Look at fail/succs in threshold
name = res.class_names[k]
# number of times this class appears overall
n_total_k = (y_test_bin.T[k]).sum()
# get the cases where this class was predicted
auto_true_k = auto_true == k
auto_pred_k = auto_pred == k
# number of cases auto predicted
n_pred_k = auto_pred_k.sum()
# number of times auto was right
n_tp = (auto_true_k & auto_pred_k).sum()
# number of times auto was wrong
n_fp = (~auto_true_k & auto_pred_k).sum()
fail_str = frac_str(n_fp, n_pred_k)
pass_str = frac_str(n_tp, n_total_k)
fmtstr = '\n'.join(
[
'{name}:',
' {n_total_k} samples existed, and did {n_pred_k} auto predictions',
' got {pass_str} right',
' made {fail_str} errors',
]
)
print_(ut.indent(fmtstr.format(**locals())))
report = sklearn_utils.classification_report2(
y_true,
y_pred,
target_names=target_names,
sample_weight=can_autodecide.astype(np.float),
verbose=False,
)
print_(' * Auto-Decide Confusion')
print_(ut.indent(str(report['confusion'])))
print_(' * Auto-Decide Metrics')
print_(ut.indent(str(report['metrics'])))
if 'mcc' in report:
print_(ut.indent(str(report['mcc'])))
try:
auto_truth_bin = res.y_test_bin[can_autodecide]
for k in supported_class_idxs:
auto_truth_k = auto_truth_bin.T[k]
auto_probs_k = auto_probs.T[k]
if auto_probs_k.sum():
auc = sklearn.metrics.roc_auc_score(auto_truth_k, auto_probs_k)
print_(
' * Auto AUC(Macro): {:.4f} for class={}'.format(
auc, res.class_names[k]
)
)
except ValueError:
pass
report = '\n'.join(report_lines)
if verbose:
logger.info(report)
return report
def confusions(res, class_name):
import vtool as vt
y_test_bin = res.target_bin_df.values
clf_probs = res.probs_df.values
k = res.class_names.index(class_name)
probs, labels = clf_probs.T[k], y_test_bin.T[k]
confusions = vt.ConfusionMetrics().fit(probs, labels)
return confusions
def ishow_roc(res):
import vtool as vt
import wbia.plottool as pt
ut.qtensure()
y_test_bin = res.target_bin_df.values
# The maximum allowed false positive rate
# We expect that we will make 1 error every 1,000 decisions
# thresh_df['foo'] = [1, 2, 3]
# thresh_df['foo'][res.class_names[k]] = 1
# for k in [2, 0, 1]:
for k in range(y_test_bin.shape[1]):
if y_test_bin.shape[1] == 2 and k == 0:
# only show one in the binary case
continue
class_name = res.class_names[k]
confusions = res.confusions(class_name)
ROCInteraction = vt.interact_roc_factory(
confusions, show_operating_point=True
)
fnum = pt.ensure_fnum(k)
# ROCInteraction.static_plot(fnum, None, name=class_name)
inter = ROCInteraction(fnum=fnum, pnum=None, name=class_name)
inter.start()
# if False:
# X = probs
# y = labels
# encoder = vt.ScoreNormalizer()
# encoder.fit(probs, labels)
# learn_thresh = encoder.learn_threshold2()
# encoder.inverse_normalize(learn_thresh)
# encoder.visualize(fnum=k)
pass
def show_roc(res, class_name, **kwargs):
import vtool as vt
labels = res.target_bin_df[class_name].values
probs = res.probs_df[class_name].values
confusions = vt.ConfusionMetrics().fit(probs, labels)
confusions.draw_roc_curve(**kwargs)
def roc_scores_ovr_hat(res):
res.augment_if_needed()
for k in range(len(res.class_names)):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.probhats_df.values.T[k]
auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
yield auc
def roc_scores_ovr(res):
res.augment_if_needed()
for k in range(res.y_test_bin.shape[1]):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.clf_probs.T[k]
auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
yield auc
def confusions_ovr(res):
# one_vs_rest confusions
import vtool as vt
res.augment_if_needed()
for k in range(res.y_test_bin.shape[1]):
class_k_truth = res.y_test_bin.T[k]
class_k_probs = res.clf_probs.T[k]
cfms = vt.ConfusionMetrics().fit(class_k_probs, class_k_truth)
# auc = sklearn.metrics.roc_auc_score(class_k_truth, class_k_probs)
yield res.class_names[k], cfms
def roc_score(res):
res.augment_if_needed()
auc_learn = sklearn.metrics.roc_auc_score(res.y_test_bin, res.clf_probs)
return auc_learn
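# Worked numeric sketch (not part of the original module) of the warmup interpolation
# described in ClfResult.get_pos_threshes: with little class support the auto-decide
# threshold stays at the prior, then moves linearly toward the learned threshold once
# the support fraction passes the transition point p (the real method also clamps the
# result to min_thresh).
def _warmup_thresh_sketch(prior_thresh, learned_thresh, n_support, nmax=200, p=0.6):
    alpha = min(nmax, n_support) / nmax        # support fraction in [0, 1]
    alpha = max(0, (alpha - p) / (1 - p))      # ReLU-like transition at p
    return prior_thresh * (1 - alpha) + learned_thresh * alpha
# _warmup_thresh_sketch(1.0, 0.7, 50)  == 1.0   # 50/200 = 0.25 < 0.6 -> keep the prior
# _warmup_thresh_sketch(1.0, 0.7, 200) == 0.7   # fully warmed up -> learned threshold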
@ut.reloadable_class
class MultiTaskSamples(ut.NiceRepr):
"""
Handles samples (i.e. feature-label pairs) with a combination of
non-mutually exclusive subclassification labels
CommandLine:
python -m wbia.algo.verif.clf_helpers MultiTaskSamples
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> samples = MultiTaskSamples([0, 1, 2, 3])
>>> tasks_to_indicators = ut.odict([
>>> ('task1', ut.odict([
>>> ('state1', [0, 0, 0, 1]),
>>> ('state2', [0, 0, 1, 0]),
>>> ('state3', [1, 1, 0, 0]),
>>> ])),
>>> ('task2', ut.odict([
>>> ('state4', [0, 0, 0, 1]),
>>> ('state5', [1, 1, 1, 0]),
>>> ]))
>>> ])
>>> samples.apply_indicators(tasks_to_indicators)
"""
def __init__(samples, index):
samples.index = index
samples.subtasks = ut.odict()
# def set_simple_scores(samples, simple_scores):
# if simple_scores is not None:
# edges = ut.emap(tuple, samples.aid_pairs.tolist())
# assert (edges == simple_scores.index.tolist())
# samples.simple_scores = simple_scores
# def set_feats(samples, X_dict):
# if X_dict is not None:
# edges = ut.emap(tuple, samples.aid_pairs.tolist())
# for X in X_dict.values():
# assert np.all(edges == X.index.tolist())
# samples.X_dict = X_dict
def supported_tasks(samples):
for task_key, labels in samples.subtasks.items():
labels = samples.subtasks[task_key]
if labels.has_support():
yield task_key
def apply_indicators(samples, tasks_to_indicators):
"""
Adds labels for a specific task
Args:
tasks_to_indicators (dict): takes the form:
{
`my_task_name1' {
'class1': [list of bools indicating class membership]
...
'classN': [list of bools indicating class membership]
}
...
`my_task_nameN': ...
}
"""
n_samples = None
samples.n_tasks = len(tasks_to_indicators)
for task_name, indicator in tasks_to_indicators.items():
labels = MultiClassLabels.from_indicators(
indicator, task_name=task_name, index=samples.index
)
samples.subtasks[task_name] = labels
if n_samples is None:
n_samples = labels.n_samples
elif n_samples != labels.n_samples:
                raise ValueError('number of samples is different')
samples.n_samples = n_samples
def apply_encoded_labels(samples, y_enc, class_names, task_name):
"""
Adds labels for a specific task. Alternative to `apply_indicators`
Args:
y_enc (list): integer label indicating the class for each sample
class_names (list): list of strings indicating the class-domain
task_name (str): key for denoting this specific task
"""
# convert to indicator structure and use that
tasks_to_indicators = ut.odict(
[
(
task_name,
ut.odict(
[
(name, np.array(y_enc) == i)
for i, name in enumerate(class_names)
]
),
)
]
)
samples.apply_indicators(tasks_to_indicators)
# @ut.memoize
def encoded_2d(samples):
encoded_2d = pd.concat([v.encoded_df for k, v in samples.items()], axis=1)
return encoded_2d
def class_name_basis(samples):
"""corresponds with indexes returned from encoded1d"""
class_name_basis = [
t[::-1]
for t in ut.product(*[v.class_names for k, v in samples.items()][::-1])
]
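        # e.g. (hypothetical) tasks {'A': ['a1', 'a2'], 'B': ['b1', 'b2']} yield
        # [('a1', 'b1'), ('a2', 'b1'), ('a1', 'b2'), ('a2', 'b2')]; the first task varies
        # fastest, matching the integer ids produced by encoded_1d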
# class_name_basis = [(b, a) for a, b in ut.product(*[
# v.class_names for k, v in samples.items()][::-1])]
return class_name_basis
def class_idx_basis_2d(samples):
"""2d-index version of class_name_basis"""
class_idx_basis_2d = [
(b, a)
for a, b in ut.product(
*[range(v.n_classes) for k, v in samples.items()][::-1]
)
]
return class_idx_basis_2d
def class_idx_basis_1d(samples):
"""1d-index version of class_name_basis"""
n_states = np.prod([v.n_classes for k, v in samples.items()])
class_idx_basis_1d = np.arange(n_states, dtype=np.int)
return class_idx_basis_1d
# @ut.memoize
def encoded_1d(samples):
"""Returns a unique label for each combination of samples"""
# from sklearn.preprocessing import MultiLabelBinarizer
encoded_2d = samples.encoded_2d()
class_space = [v.n_classes for k, v in samples.items()]
offsets = np.array([1] + np.cumprod(class_space).tolist()[:-1])[None, :]
encoded_1d = (offsets * encoded_2d).sum(axis=1)
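        # mixed-radix encoding: e.g. (hypothetical sizes) two tasks with 3 and 2 classes give
        # offsets = [1, 3], so an encoded_2d row (2, 1) maps to 2*1 + 1*3 = 5, unique within [0, 5]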
# e = MultiLabelBinarizer()
# bin_coeff = e.fit_transform(encoded_2d)
# bin_basis = (2 ** np.arange(bin_coeff.shape[1]))[None, :]
# # encoded_1d = (bin_coeff * bin_basis).sum(axis=1)
# encoded_1d = (bin_coeff * bin_basis[::-1]).sum(axis=1)
# # vt.unique_rows(sklearn.preprocessing.MultiLabelBinarizer().fit_transform(encoded_2d))
# [v.encoded_df.values for k, v in samples.items()]
# encoded_df_1d = pd.concat([v.encoded_df for k, v in samples.items()], axis=1)
return encoded_1d
def __nice__(samples):
return 'nS=%r, nT=%r' % (len(samples), samples.n_tasks)
def __getitem__(samples, task_key):
return samples.subtasks[task_key]
def __len__(samples):
return samples.n_samples
def print_info(samples):
for task_name, labels in samples.items():
labels.print_info()
logger.info('hist(all) = %s' % (ut.repr4(samples.make_histogram())))
logger.info('len(all) = %s' % (len(samples)))
def make_histogram(samples):
"""label histogram"""
class_name_basis = samples.class_name_basis()
class_idx_basis_1d = samples.class_idx_basis_1d()
# logger.info('class_idx_basis_1d = %r' % (class_idx_basis_1d,))
# logger.info(samples.encoded_1d())
multi_task_idx_hist = ut.dict_hist(
samples.encoded_1d().values, labels=class_idx_basis_1d
)
multi_task_hist = ut.map_keys(lambda k: class_name_basis[k], multi_task_idx_hist)
return multi_task_hist
def items(samples):
for task_name, labels in samples.subtasks.items():
yield task_name, labels
# def take(samples, idxs):
# mask = ut.index_to_boolmask(idxs, len(samples))
# return samples.compress(mask)
@property
def group_ids(samples):
return None
def stratified_kfold_indices(samples, **xval_kw):
"""
TODO: check xval label frequency
"""
from sklearn import model_selection
X = np.empty((len(samples), 0))
y = samples.encoded_1d().values
groups = samples.group_ids
type_ = xval_kw.pop('type', 'StratifiedGroupKFold')
if type_ == 'StratifiedGroupKFold':
assert groups is not None
# FIXME: The StratifiedGroupKFold could be implemented better.
splitter = sklearn_utils.StratifiedGroupKFold(**xval_kw)
skf_list = list(splitter.split(X=X, y=y, groups=groups))
elif type_ == 'StratifiedKFold':
splitter = model_selection.StratifiedKFold(**xval_kw)
skf_list = list(splitter.split(X=X, y=y))
return skf_list
def subsplit_indices(samples, subset_idx, **xval_kw):
"""split an existing set"""
from sklearn import model_selection
X = np.empty((len(subset_idx), 0))
y = samples.encoded_1d().values[subset_idx]
groups = samples.group_ids[subset_idx]
xval_kw_ = xval_kw.copy()
if 'n_splits' not in xval_kw_:
xval_kw_['n_splits'] = 3
type_ = xval_kw_.pop('type', 'StratifiedGroupKFold')
if type_ == 'StratifiedGroupKFold':
assert groups is not None
# FIXME: The StratifiedGroupKFold could be implemented better.
splitter = sklearn_utils.StratifiedGroupKFold(**xval_kw_)
rel_skf_list = list(splitter.split(X=X, y=y, groups=groups))
elif type_ == 'StratifiedKFold':
splitter = model_selection.StratifiedKFold(**xval_kw_)
rel_skf_list = list(splitter.split(X=X, y=y))
# map back into original coords
skf_list = [
(subset_idx[rel_idx1], subset_idx[rel_idx2])
for rel_idx1, rel_idx2 in rel_skf_list
]
for idx1, idx2 in skf_list:
assert len(np.intersect1d(subset_idx, idx1)) == len(idx1)
assert len(np.intersect1d(subset_idx, idx2)) == len(idx2)
# assert
return skf_list
@ut.reloadable_class
class MultiClassLabels(ut.NiceRepr):
"""
Used by samples to encode a single set of mutually exclusive labels. These
can either be binary or multiclass.
import pandas as pd
pd.options.display.max_rows = 10
# pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
"""
def __init__(labels):
# Helper Info
labels.task_name = None
labels.n_samples = None
labels.n_classes = None
labels.class_names = None
labels.classes_ = None
# Core data
labels.indicator_df = None
labels.encoded_df = None
labels.default_class = None
def has_support(labels):
return len(labels.make_histogram()) > 1
def lookup_class_idx(labels, class_name):
return ut.dzip(labels.class_names, labels.classes_)[class_name]
@classmethod
def from_indicators(MultiClassLabels, indicator, index=None, task_name=None):
labels = MultiClassLabels()
n_samples = len(next(iter(indicator.values())))
# if index is None:
# index = pd.Series(np.arange(n_samples), name='index')
indicator_df = pd.DataFrame(indicator, index=index)
assert np.all(
indicator_df.sum(axis=1).values
), 'states in the same task must be mutually exclusive'
labels.indicator_df = indicator_df
labels.class_names = indicator_df.columns.values
labels.encoded_df = pd.DataFrame(
indicator_df.values.argmax(axis=1), columns=[task_name], index=index
)
labels.task_name = task_name
labels.n_samples = n_samples
labels.n_classes = len(labels.class_names)
if labels.n_classes == 1:
labels.n_classes = 2 # 1 column means binary case
labels.classes_ = np.arange(labels.n_classes)
labels.default_class_name = labels.class_names[1]
return labels
@property
def target_type(labels):
return sklearn.utils.multiclass.type_of_target(labels.y_enc)
def one_vs_rest_task_names(labels):
return [
labels.task_name + '(' + labels.class_names[k] + '-v-rest)'
for k in range(labels.n_classes)
]
def gen_one_vs_rest_labels(labels):
"""
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> indicator = ut.odict([
>>> ('state1', [0, 0, 0, 1]),
>>> ('state2', [0, 0, 1, 0]),
>>> ('state3', [1, 1, 0, 0]),
>>> ])
>>> labels = MultiClassLabels.from_indicators(indicator, task_name='task1')
>>> sublabels = list(labels.gen_one_vs_rest_labels())
>>> sublabel = sublabels[0]
"""
if labels.target_type == 'binary':
yield labels
return
task_names_1vR = labels.one_vs_rest_task_names()
for k in range(labels.n_classes):
class_name = labels.class_names[k]
task_name = task_names_1vR[k]
index = labels.indicator_df.index
indicator_df = pd.DataFrame()
indicator_df['not-' + class_name] = 1 - labels.indicator_df[class_name]
indicator_df[class_name] = labels.indicator_df[class_name]
indicator_df.index = index
# indicator = labels.encoded_df == k
# indicator.rename(columns={indicator.columns[0]: class_name}, inplace=True)
n_samples = len(indicator_df)
sublabel = MultiClassLabels()
sublabel.indicator_df = indicator_df
sublabel.class_names = indicator_df.columns.values
# if len(indicator_df.columns) == 1:
# sublabel.encoded_df = pd.DataFrame(
# indicator_df.values.T[0],
# columns=[task_name]
# )
# else:
sublabel.encoded_df = pd.DataFrame(
indicator_df.values.argmax(axis=1), columns=[task_name], index=index
)
sublabel.task_name = task_name
sublabel.n_samples = n_samples
sublabel.n_classes = len(sublabel.class_names)
# if sublabel.n_classes == 1:
# sublabel.n_classes = 2 # 1 column means binary case
sublabel.classes_ = np.arange(sublabel.n_classes)
# sublabel = MultiClassLabels.from_indicators(indicator,
# task_name=subname, index=samples.index)
yield sublabel
@property
def y_bin(labels):
return labels.indicator_df.values
@property
def y_enc(labels):
return labels.encoded_df.values.ravel()
def __nice__(labels):
parts = []
if labels.task_name is not None:
parts.append(labels.task_name)
parts.append('nD=%r' % (labels.n_samples))
parts.append('nC=%r' % (labels.n_classes))
return ' '.join(parts)
def __len__(labels):
return labels.n_samples
def make_histogram(labels):
class_idx_hist = ut.dict_hist(labels.y_enc)
class_hist = ut.map_keys(lambda idx: labels.class_names[idx], class_idx_hist)
return class_hist
def print_info(labels):
logger.info(
'hist(%s) = %s' % (labels.task_name, ut.repr4(labels.make_histogram()))
)
logger.info('len(%s) = %s' % (labels.task_name, len(labels)))
class IrisProblem(ClfProblem):
"""
Simple demo using the abstract clf problem to work on the iris dataset.
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.verif.clf_helpers import * # NOQA
>>> pblm = IrisProblem()
>>> pblm.setup()
>>> pblm.samples
"""
def setup(pblm):
import sklearn.datasets
iris = sklearn.datasets.load_iris()
pblm.primary_task_key = 'iris'
pblm.default_data_key = 'learn(all)'
pblm.default_clf_key = 'RF'
X_df = pd.DataFrame(iris.data, columns=iris.feature_names)
samples = MultiTaskSamples(X_df.index)
samples.apply_indicators(
{
'iris': {
name: iris.target == idx for idx, name in enumerate(iris.target_names)
}
}
)
samples.X_dict = {'learn(all)': X_df}
pblm.samples = samples
pblm.xval_kw['type'] = 'StratifiedKFold'
|
[
"logging.getLogger",
"utool.list_alignment",
"sklearn.metrics.classification_report",
"utool.index_complement",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.StratifiedKFold",
"utool.take",
"numpy.array",
"sklearn.metrics.log_loss",
"utool.qtensure",
"sklearn.calibration.CalibratedClassifierCV",
"utool.lmap",
"utool.hashid_arr",
"numpy.arange",
"utool.ProgPartial",
"utool.dzip",
"utool.ProgIter",
"utool.repr2",
"sklearn.metrics.brier_score_loss",
"pandas.DataFrame.from_dict",
"numpy.vstack",
"utool.dict_hist",
"pandas.DataFrame",
"wbia.algo.verif.sklearn_utils.predict_proba_df",
"utool.map_keys",
"wbia.algo.verif.sklearn_utils.predict_from_probs",
"wbia.plottool.figure",
"sklearn.datasets.load_iris",
"utool.partial",
"utool.none_take",
"wbia.algo.verif.sklearn_utils.StratifiedGroupKFold",
"utool.util_dict.all_dict_combinations",
"wbia.plottool.gca",
"ubelt.Cacher",
"utool.AutoVivification",
"sklearn.ensemble.RandomForestClassifier",
"wbia.plottool.legend",
"vtool.ConfusionMetrics",
"utool.num_cpus",
"utool.odict",
"utool.get_dict_hashid",
"sklearn.calibration.calibration_curve",
"utool.combinations",
"utool.get_func_kwargs",
"wbia.algo.verif.sklearn_utils.classification_report2",
"utool.inject2",
"numpy.intersect1d",
"numpy.cumprod",
"utool.cprint",
"utool.ziptake",
"utool.repr4",
"numpy.zeros",
"sklearn.utils.multiclass.type_of_target",
"utool.ParamInfo",
"utool.merge_dicts",
"wbia.plottool.ensure_fnum",
"sklearn.impute.SimpleImputer",
"numpy.full",
"numpy.all",
"vtool.interact_roc_factory",
"numpy.nan_to_num"
] |
[((597, 617), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (607, 617), True, 'import utool as ut\n'), ((627, 652), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (644, 652), False, 'import logging\n'), ((768, 812), 'utool.ParamInfo', 'ut.ParamInfo', (['"""type"""', '"""StratifiedGroupKFold"""'], {}), "('type', 'StratifiedGroupKFold')\n", (780, 812), True, 'import utool as ut\n'), ((822, 849), 'utool.ParamInfo', 'ut.ParamInfo', (['"""n_splits"""', '(3)'], {}), "('n_splits', 3)\n", (834, 849), True, 'import utool as ut\n'), ((859, 950), 'utool.ParamInfo', 'ut.ParamInfo', (['"""shuffle"""', '(True)'], {'hideif': "(lambda cfg: cfg['type'] == 'StratifiedGroupKFold')"}), "('shuffle', True, hideif=lambda cfg: cfg['type'] ==\n 'StratifiedGroupKFold')\n", (871, 950), True, 'import utool as ut\n'), ((978, 1080), 'utool.ParamInfo', 'ut.ParamInfo', (['"""random_state"""', '(3953056901)'], {'hideif': "(lambda cfg: cfg['type'] == 'StratifiedGroupKFold')"}), "('random_state', 3953056901, hideif=lambda cfg: cfg['type'] ==\n 'StratifiedGroupKFold')\n", (990, 1080), True, 'import utool as ut\n'), ((3366, 3387), 'utool.AutoVivification', 'ut.AutoVivification', ([], {}), '()\n', (3385, 3387), True, 'import utool as ut\n'), ((3418, 3439), 'utool.AutoVivification', 'ut.AutoVivification', ([], {}), '()\n', (3437, 3439), True, 'import utool as ut\n'), ((4202, 4252), 'utool.ProgPartial', 'ut.ProgPartial', ([], {'freq': '(1)', 'adjust': '(False)', 'prehack': '"""%s"""'}), "(freq=1, adjust=False, prehack='%s')\n", (4216, 4252), True, 'import utool as ut\n'), ((6070, 6132), 'ubelt.Cacher', 'ub.Cacher', (['fname'], {'cfgstr': 'cfgstr', 'meta': '[feat_dims]'}), '(fname, cfgstr=cfgstr, meta=[feat_dims], **cacher_kw)\n', (6079, 6132), True, 'import ubelt as ub\n'), ((7308, 7353), 'numpy.all', 'np.all', (['(labels.encoded_df.index == X_df.index)'], {}), '(labels.encoded_df.index == X_df.index)\n', (7314, 7353), True, 'import numpy as np\n'), ((7578, 7623), 'utool.ProgIter', 'ut.ProgIter', (['skf_list'], {'label': '"""skf-train-eval"""'}), "(skf_list, label='skf-train-eval')\n", (7589, 7623), True, 'import utool as ut\n'), ((10077, 10127), 'utool.ProgPartial', 'ut.ProgPartial', ([], {'freq': '(1)', 'adjust': '(False)', 'prehack': '"""%s"""'}), "(freq=1, adjust=False, prehack='%s')\n", (10091, 10127), True, 'import utool as ut\n'), ((13263, 13295), 'utool.merge_dicts', 'ut.merge_dicts', (['est_kw1', 'est_kw2'], {}), '(est_kw1, est_kw2)\n', (13277, 13295), True, 'import utool as ut\n'), ((14596, 14641), 'numpy.all', 'np.all', (['(labels.encoded_df.index == X_df.index)'], {}), '(labels.encoded_df.index == X_df.index)\n', (14602, 14641), True, 'import numpy as np\n'), ((16640, 16685), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'missing_values': 'np.nan'}), '(missing_values=np.nan)\n', (16662, 16685), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((16892, 16938), 'wbia.algo.verif.sklearn_utils.StratifiedGroupKFold', 'sklearn_utils.StratifiedGroupKFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (16926, 16938), False, 'from wbia.algo.verif import sklearn_utils\n'), ((17229, 17242), 'utool.num_cpus', 'ut.num_cpus', ([], {}), '()\n', (17240, 17242), True, 'import utool as ut\n'), ((17608, 17840), 'utool.odict', 'ut.odict', (["[('rank_test_score', 'rank'), ('mean_test_score', 'μ-test'), (\n 'std_test_score', 'σ-test'), ('mean_train_score', 'μ-train'), (\n 'std_train_score', 'σ-train'), ('mean_fit_time', 'fit_time'), 
('params',\n 'params')]"], {}), "([('rank_test_score', 'rank'), ('mean_test_score', 'μ-test'), (\n 'std_test_score', 'σ-test'), ('mean_train_score', 'μ-train'), (\n 'std_train_score', 'σ-train'), ('mean_fit_time', 'fit_time'), ('params',\n 'params')])\n", (17616, 17840), True, 'import utool as ut\n'), ((19379, 19425), 'wbia.algo.verif.sklearn_utils.StratifiedGroupKFold', 'sklearn_utils.StratifiedGroupKFold', ([], {'n_splits': '(2)'}), '(n_splits=2)\n', (19413, 19425), False, 'from wbia.algo.verif import sklearn_utils\n'), ((19725, 19757), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**est_kw)\n', (19747, 19757), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((19894, 19933), 'sklearn.metrics.log_loss', 'log_loss', (['(y[test_idx] == 1)', 'uncal_probs'], {}), '(y[test_idx] == 1, uncal_probs)\n', (19902, 19933), False, 'from sklearn.metrics import log_loss, brier_score_loss\n'), ((19956, 20003), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['(y[test_idx] == 1)', 'uncal_probs'], {}), '(y[test_idx] == 1, uncal_probs)\n', (19972, 20003), False, 'from sklearn.metrics import log_loss, brier_score_loss\n'), ((20123, 20155), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**est_kw)\n', (20145, 20155), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((20236, 20291), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['precal_clf'], {'cv': '(2)', 'method': 'method'}), '(precal_clf, cv=2, method=method)\n', (20258, 20291), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((20420, 20457), 'sklearn.metrics.log_loss', 'log_loss', (['(y[test_idx] == 1)', 'cal_probs'], {}), '(y[test_idx] == 1, cal_probs)\n', (20428, 20457), False, 'from sklearn.metrics import log_loss, brier_score_loss\n'), ((20478, 20523), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['(y[test_idx] == 1)', 'cal_probs'], {}), '(y[test_idx] == 1, cal_probs)\n', (20494, 20523), False, 'from sklearn.metrics import log_loss, brier_score_loss\n'), ((20791, 20804), 'utool.qtensure', 'ut.qtensure', ([], {}), '()\n', (20802, 20804), True, 'import utool as ut\n'), ((20813, 20824), 'wbia.plottool.figure', 'pt.figure', ([], {}), '()\n', (20822, 20824), True, 'import wbia.plottool as pt\n'), ((20838, 20846), 'wbia.plottool.gca', 'pt.gca', ([], {}), '()\n', (20844, 20846), True, 'import wbia.plottool as pt\n'), ((20936, 20985), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_test', 'uncal_probs'], {'n_bins': '(10)'}), '(y_test, uncal_probs, n_bins=10)\n', (20953, 20985), False, 'from sklearn.calibration import calibration_curve\n'), ((21307, 21354), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_test', 'cal_probs'], {'n_bins': '(10)'}), '(y_test, cal_probs, n_bins=10)\n', (21324, 21354), False, 'from sklearn.calibration import calibration_curve\n'), ((21555, 21566), 'wbia.plottool.legend', 'pt.legend', ([], {}), '()\n', (21564, 21566), True, 'import wbia.plottool as pt\n'), ((23252, 23284), 'utool.lmap', 'ut.lmap', (['str', 'labels.class_names'], {}), '(str, labels.class_names)\n', (23259, 23284), True, 'import utool as ut\n'), ((23343, 23406), 'wbia.algo.verif.sklearn_utils.predict_proba_df', 'sklearn_utils.predict_proba_df', (['clf', 'X_df_test', 'res.class_names'], {}), '(clf, X_df_test, res.class_names)\n', (23373, 23406), False, 'from wbia.algo.verif import sklearn_utils\n'), ((25250, 25278), 'utool.combinations', 'ut.combinations', 
(['res_list', '(2)'], {}), '(res_list, 2)\n', (25265, 25278), True, 'import utool as ut\n'), ((27899, 27956), 'utool.ziptake', 'ut.ziptake', (['res.probs_df.values', 'res.target_enc_df.values'], {}), '(res.probs_df.values, res.target_enc_df.values)\n', (27909, 27956), True, 'import utool as ut\n'), ((28144, 28210), 'wbia.algo.verif.sklearn_utils.predict_from_probs', 'sklearn_utils.predict_from_probs', (['res.probs_df', 'method'], {'force': '(True)'}), '(res.probs_df, method, force=True)\n', (28176, 28210), False, 'from wbia.algo.verif import sklearn_utils\n'), ((28818, 28836), 'pandas.DataFrame', 'pd.DataFrame', (['meta'], {}), '(meta)\n', (28830, 28836), True, 'import pandas as pd\n'), ((29813, 29863), 'utool.index_complement', 'ut.index_complement', (['unique_predictions', 'n_classes'], {}), '(unique_predictions, n_classes)\n', (29832, 29863), True, 'import utool as ut\n'), ((31698, 31828), 'wbia.algo.verif.sklearn_utils.classification_report2', 'sklearn_utils.classification_report2', (['y_true', 'y_pred'], {'target_names': 'target_names', 'sample_weight': 'sample_weight', 'verbose': 'verbose'}), '(y_true, y_pred, target_names=\n target_names, sample_weight=sample_weight, verbose=verbose)\n', (31734, 31828), False, 'from wbia.algo.verif import sklearn_utils\n'), ((32076, 32221), 'sklearn.metrics.classification_report', 'sklearn.metrics.classification_report', ([], {'y_true': 'res.y_test_enc', 'y_pred': 'pred_enc', 'target_names': 'res.class_names', 'sample_weight': 'res.sample_weight'}), '(y_true=res.y_test_enc, y_pred=\n pred_enc, target_names=res.class_names, sample_weight=res.sample_weight)\n', (32113, 32221), False, 'import sklearn\n'), ((35183, 35222), 'utool.cprint', 'ut.cprint', (['"""Threshold Report"""', '"""yellow"""'], {}), "('Threshold Report', 'yellow')\n", (35192, 35222), True, 'import utool as ut\n'), ((35620, 35792), 'utool.odict', 'ut.odict', (["[('@fpr=.01', ('fpr', 0.01)), ('@fpr=.001', ('fpr', 0.001)), ('@fpr=.0001',\n ('fpr', 0.0001)), ('@fpr=.0000', ('fpr', 0)), ('@max(mcc)', ('mcc', 'max'))\n ]"], {}), "([('@fpr=.01', ('fpr', 0.01)), ('@fpr=.001', ('fpr', 0.001)), (\n '@fpr=.0001', ('fpr', 0.0001)), ('@fpr=.0000', ('fpr', 0)), (\n '@max(mcc)', ('mcc', 'max'))])\n", (35628, 35792), True, 'import utool as ut\n'), ((38000, 38123), 'wbia.algo.verif.sklearn_utils.predict_from_probs', 'sklearn_utils.predict_from_probs', (['res.clf_probs', 'threshes', 'res.class_names'], {'force': '(False)', 'multi': '(False)', 'return_flags': '(True)'}), '(res.clf_probs, threshes, res.class_names,\n force=False, multi=False, return_flags=True)\n', (38032, 38123), False, 'from wbia.algo.verif import sklearn_utils\n'), ((41581, 41594), 'utool.qtensure', 'ut.qtensure', ([], {}), '()\n', (41592, 41594), True, 'import utool as ut\n'), ((44237, 44297), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['res.y_test_bin', 'res.clf_probs'], {}), '(res.y_test_bin, res.clf_probs)\n', (44266, 44297), False, 'import sklearn\n'), ((45314, 45324), 'utool.odict', 'ut.odict', ([], {}), '()\n', (45322, 45324), True, 'import utool as ut\n'), ((49095, 49128), 'numpy.arange', 'np.arange', (['n_states'], {'dtype': 'np.int'}), '(n_states, dtype=np.int)\n', (49104, 49128), True, 'import numpy as np\n'), ((51057, 51120), 'utool.map_keys', 'ut.map_keys', (['(lambda k: class_name_basis[k])', 'multi_task_idx_hist'], {}), '(lambda k: class_name_basis[k], multi_task_idx_hist)\n', (51068, 51120), True, 'import utool as ut\n'), ((54844, 54880), 'pandas.DataFrame', 'pd.DataFrame', (['indicator'], 
{'index': 'index'}), '(indicator, index=index)\n', (54856, 54880), True, 'import pandas as pd\n'), ((55493, 55520), 'numpy.arange', 'np.arange', (['labels.n_classes'], {}), '(labels.n_classes)\n', (55502, 55520), True, 'import numpy as np\n'), ((55660, 55713), 'sklearn.utils.multiclass.type_of_target', 'sklearn.utils.multiclass.type_of_target', (['labels.y_enc'], {}), '(labels.y_enc)\n', (55699, 55713), False, 'import sklearn\n'), ((58773, 58799), 'utool.dict_hist', 'ut.dict_hist', (['labels.y_enc'], {}), '(labels.y_enc)\n', (58785, 58799), True, 'import utool as ut\n'), ((58821, 58885), 'utool.map_keys', 'ut.map_keys', (['(lambda idx: labels.class_names[idx])', 'class_idx_hist'], {}), '(lambda idx: labels.class_names[idx], class_idx_hist)\n', (58832, 58885), True, 'import utool as ut\n'), ((59534, 59562), 'sklearn.datasets.load_iris', 'sklearn.datasets.load_iris', ([], {}), '()\n', (59560, 59562), False, 'import sklearn\n'), ((59700, 59751), 'pandas.DataFrame', 'pd.DataFrame', (['iris.data'], {'columns': 'iris.feature_names'}), '(iris.data, columns=iris.feature_names)\n', (59712, 59751), True, 'import pandas as pd\n'), ((3930, 3992), 'utool.cprint', 'ut.cprint', (['"""[pblm] learn_evaluation_classifiers"""'], {'color': '"""blue"""'}), "('[pblm] learn_evaluation_classifiers', color='blue')\n", (3939, 3992), True, 'import utool as ut\n'), ((5350, 5377), 'utool.get_dict_hashid', 'ut.get_dict_hashid', (['est_kw1'], {}), '(est_kw1)\n', (5368, 5377), True, 'import utool as ut\n'), ((9665, 9723), 'utool.cprint', 'ut.cprint', (['"""[pblm] learn_deploy_classifiers"""'], {'color': '"""blue"""'}), "('[pblm] learn_deploy_classifiers', color='blue')\n", (9674, 9723), True, 'import utool as ut\n'), ((10039, 10060), 'utool.AutoVivification', 'ut.AutoVivification', ([], {}), '()\n', (10058, 10060), True, 'import utool as ut\n'), ((17004, 17049), 'utool.partial', 'ut.partial', (['RandomizedSearchCV'], {'n_iter': 'n_iter'}), '(RandomizedSearchCV, n_iter=n_iter)\n', (17014, 17049), True, 'import utool as ut\n'), ((22856, 22910), 'utool.list_alignment', 'ut.list_alignment', (['arr_cols', 'target_cols'], {'missing': '(True)'}), '(arr_cols, target_cols, missing=True)\n', (22873, 22910), True, 'import utool as ut\n'), ((22938, 22965), 'utool.none_take', 'ut.none_take', (['arr.T', 'alignx'], {}), '(arr.T, alignx)\n', (22950, 22965), True, 'import utool as ut\n'), ((25483, 25518), 'numpy.all', 'np.all', (['(r.index == r.probs_df.index)'], {}), '(r.index == r.probs_df.index)\n', (25489, 25518), True, 'import numpy as np\n'), ((25538, 25578), 'numpy.all', 'np.all', (['(r.index == r.target_bin_df.index)'], {}), '(r.index == r.target_bin_df.index)\n', (25544, 25578), True, 'import numpy as np\n'), ((25598, 25638), 'numpy.all', 'np.all', (['(r.index == r.target_enc_df.index)'], {}), '(r.index == r.target_enc_df.index)\n', (25604, 25638), True, 'import numpy as np\n'), ((29230, 29279), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['conf_dict'], {'orient': '"""index"""'}), "(conf_dict, orient='index')\n", (29252, 29279), True, 'import pandas as pd\n'), ((30571, 30603), 'numpy.zeros', 'np.zeros', (['(n_missing, n_classes)'], {}), '((n_missing, n_classes))\n', (30579, 30603), True, 'import numpy as np\n'), ((30765, 30805), 'numpy.vstack', 'np.vstack', (['[y_test_enc_aug, missing_enc]'], {}), '([y_test_enc_aug, missing_enc])\n', (30774, 30805), True, 'import numpy as np\n'), ((30835, 30875), 'numpy.vstack', 'np.vstack', (['[y_test_bin_aug, missing_bin]'], {}), '([y_test_bin_aug, missing_bin])\n', (30844, 
30875), True, 'import numpy as np\n'), ((30904, 30943), 'numpy.vstack', 'np.vstack', (['[clf_probs_aug, missing_bin]'], {}), '([clf_probs_aug, missing_bin])\n', (30913, 30943), True, 'import numpy as np\n'), ((36151, 36161), 'utool.odict', 'ut.odict', ([], {}), '()\n', (36159, 36161), True, 'import utool as ut\n'), ((36792, 36843), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['thresh_dict'], {'orient': '"""index"""'}), "(thresh_dict, orient='index')\n", (36814, 36843), True, 'import pandas as pd\n'), ((42177, 42239), 'vtool.interact_roc_factory', 'vt.interact_roc_factory', (['confusions'], {'show_operating_point': '(True)'}), '(confusions, show_operating_point=True)\n', (42200, 42239), True, 'import vtool as vt\n'), ((42289, 42306), 'wbia.plottool.ensure_fnum', 'pt.ensure_fnum', (['k'], {}), '(k)\n', (42303, 42306), True, 'import wbia.plottool as pt\n'), ((43307, 43366), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['class_k_truth', 'class_k_probs'], {}), '(class_k_truth, class_k_probs)\n', (43336, 43366), False, 'import sklearn\n'), ((43613, 43672), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['class_k_truth', 'class_k_probs'], {}), '(class_k_truth, class_k_probs)\n', (43642, 43672), False, 'import sklearn\n'), ((51988, 52033), 'wbia.algo.verif.sklearn_utils.StratifiedGroupKFold', 'sklearn_utils.StratifiedGroupKFold', ([], {}), '(**xval_kw)\n', (52022, 52033), False, 'from wbia.algo.verif import sklearn_utils\n'), ((52922, 52968), 'wbia.algo.verif.sklearn_utils.StratifiedGroupKFold', 'sklearn_utils.StratifiedGroupKFold', ([], {}), '(**xval_kw_)\n', (52956, 52968), False, 'from wbia.algo.verif import sklearn_utils\n'), ((54476, 54520), 'utool.dzip', 'ut.dzip', (['labels.class_names', 'labels.classes_'], {}), '(labels.class_names, labels.classes_)\n', (54483, 54520), True, 'import utool as ut\n'), ((56834, 56848), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (56846, 56848), True, 'import pandas as pd\n'), ((58047, 58076), 'numpy.arange', 'np.arange', (['sublabel.n_classes'], {}), '(sublabel.n_classes)\n', (58056, 58076), True, 'import numpy as np\n'), ((7763, 7801), 'utool.take', 'ut.take', (['pblm.samples.index', 'train_idx'], {}), '(pblm.samples.index, train_idx)\n', (7770, 7801), True, 'import utool as ut\n'), ((11099, 11167), 'utool.get_func_kwargs', 'ut.get_func_kwargs', (['sklearn.ensemble.RandomForestClassifier.__init__'], {}), '(sklearn.ensemble.RandomForestClassifier.__init__)\n', (11117, 11167), True, 'import utool as ut\n'), ((16745, 16785), 'utool.util_dict.all_dict_combinations', 'ut.util_dict.all_dict_combinations', (['grid'], {}), '(grid)\n', (16779, 16785), True, 'import utool as ut\n'), ((18046, 18063), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (18058, 18063), True, 'import pandas as pd\n'), ((23070, 23093), 'numpy.vstack', 'np.vstack', (['aligned_arrT'], {}), '(aligned_arrT)\n', (23079, 23093), True, 'import numpy as np\n'), ((26568, 26598), 'numpy.all', 'np.all', (['(res.index == val.index)'], {}), '(res.index == val.index)\n', (26574, 26598), True, 'import numpy as np\n'), ((28239, 28257), 'numpy.array', 'np.array', (['easiness'], {}), '(easiness)\n', (28247, 28257), True, 'import numpy as np\n'), ((30701, 30726), 'numpy.array', 'np.array', (['missing_classes'], {}), '(missing_classes)\n', (30709, 30726), True, 'import numpy as np\n'), ((31165, 31207), 'numpy.vstack', 'np.vstack', (['[clf_probhats_aug, missing_bin]'], {}), '([clf_probhats_aug, missing_bin])\n', (31174, 31207), True, 
'import numpy as np\n'), ((36590, 36600), 'utool.odict', 'ut.odict', ([], {}), '()\n', (36598, 36600), True, 'import utool as ut\n'), ((41418, 41439), 'vtool.ConfusionMetrics', 'vt.ConfusionMetrics', ([], {}), '()\n', (41437, 41439), True, 'import vtool as vt\n'), ((42988, 43009), 'vtool.ConfusionMetrics', 'vt.ConfusionMetrics', ([], {}), '()\n', (43007, 43009), True, 'import vtool as vt\n'), ((52167, 52209), 'sklearn.model_selection.StratifiedKFold', 'model_selection.StratifiedKFold', ([], {}), '(**xval_kw)\n', (52198, 52209), False, 'from sklearn import model_selection\n'), ((53106, 53149), 'sklearn.model_selection.StratifiedKFold', 'model_selection.StratifiedKFold', ([], {}), '(**xval_kw_)\n', (53137, 53149), False, 'from sklearn import model_selection\n'), ((5678, 5711), 'utool.hashid_arr', 'ut.hashid_arr', (['feat_dims', '"""feats"""'], {}), "(feat_dims, 'feats')\n", (5691, 5711), True, 'import utool as ut\n'), ((18618, 18647), 'utool.repr4', 'ut.repr4', (['search.best_params_'], {}), '(search.best_params_)\n', (18626, 18647), True, 'import utool as ut\n'), ((29433, 29465), 'numpy.nan_to_num', 'np.nan_to_num', (["meta['real_conf']"], {}), "(meta['real_conf'])\n", (29446, 29465), True, 'import numpy as np\n'), ((30629, 30649), 'numpy.arange', 'np.arange', (['n_missing'], {}), '(n_missing)\n', (30638, 30649), True, 'import numpy as np\n'), ((31061, 31082), 'numpy.full', 'np.full', (['n_missing', '(0)'], {}), '(n_missing, 0)\n', (31068, 31082), True, 'import numpy as np\n'), ((37751, 37800), 'utool.repr2', 'ut.repr2', (['threshes'], {'nl': '(1)', 'precision': '(4)', 'align': '(True)'}), '(threshes, nl=1, precision=4, align=True)\n', (37759, 37800), True, 'import utool as ut\n'), ((40724, 40781), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['auto_truth_k', 'auto_probs_k'], {}), '(auto_truth_k, auto_probs_k)\n', (40753, 40781), False, 'import sklearn\n'), ((43981, 44002), 'vtool.ConfusionMetrics', 'vt.ConfusionMetrics', ([], {}), '()\n', (44000, 44002), True, 'import vtool as vt\n'), ((53448, 53480), 'numpy.intersect1d', 'np.intersect1d', (['subset_idx', 'idx1'], {}), '(subset_idx, idx1)\n', (53462, 53480), True, 'import numpy as np\n'), ((53518, 53550), 'numpy.intersect1d', 'np.intersect1d', (['subset_idx', 'idx2'], {}), '(subset_idx, idx2)\n', (53532, 53550), True, 'import numpy as np\n'), ((13644, 13689), 'sklearn.impute.SimpleImputer', 'sklearn.impute.SimpleImputer', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (13672, 13689), False, 'import sklearn\n'), ((14088, 14133), 'sklearn.impute.SimpleImputer', 'sklearn.impute.SimpleImputer', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (14116, 14133), False, 'import sklearn\n'), ((49483, 49506), 'numpy.cumprod', 'np.cumprod', (['class_space'], {}), '(class_space)\n', (49493, 49506), True, 'import numpy as np\n'), ((47810, 47825), 'numpy.array', 'np.array', (['y_enc'], {}), '(y_enc)\n', (47818, 47825), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Clip burn depth and environmental variables
(tree cover, tree species, elevation, slope, topsoil carbon content)
to fire perimeters and assign class overwinter/other to each perimeter
based on the id
Requirements:
- list of overwintering fire ids (output of algorithm.R)
- and parent fires (extracted from nndist)
- fire perimeters (i.e. rasterised AKFED burned area with unique IDs for each perimeter)
- AKFED burn depth data
- tree cover data (i.e. from MODIS continuous fields)
- other geographic data (i.e. here tree species, elevation, slope, soil carbon)
Output: csv file of mean values of each geographic variable for each fire perimeter
@author: RCScholten
"""
# import required modules
import rasterio
import rasterio.mask  # the mask submodule is not imported implicitly by 'import rasterio'
import fiona
import numpy as np
import pandas as pd
# inputs (partly set below...)
outfile = 'spatial.csv'
overwinter_ids = 'overwintering_ids.txt' # list of overwintering ids (output from algorithm)
nndist = 'nndist.csv'
depth_path = 'depth_20012018.tif' # burn depth raster for clipping
tc_path = 'tc_500_20002017.tif' # take the tree cover of the year before the fire
#%% main
# load overwintering ids
with open(overwinter_ids, 'r') as f:
ho_ids = f.read().splitlines()
ho_ids = [int(hoid) for hoid in ho_ids[1:]]
# extract list of their original fire perimeter ID
id2fireid = pd.read_csv(nndist)
fire_ids = list(set(id2fireid[id2fireid['ID'].isin(ho_ids)].FireID_tgt))
# start writing csv file
f = open(outfile, "w")
f.write('FireID,Year,ShortNm,ign2,depth_mean,n_depth,tc,n_tc,sp_bs,sp_ws,sp_dc,sp_gs,sp_nv,sp_p,n_ts,Elev,n,slope,n,soil30,soil100,soil200,soil300,n\n')
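# columns prefixed with n_ (or the bare n) record the number of valid pixels behind the preceding mean(s)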
# loop through regions
for region in ['NT', 'AK']:
# inputs and outputs according to region (these are specific to each region)
if region == 'NT':
shape_path = 'perimeters_NT.shp'
img_paths = ['treespecies_nwt.tif',
'arcticdem_NWT.tif',
'arcticdem_slope_NWT.tif',
'ncscd_ytnt.tif']
elif region == 'AK':
shape_path = 'perimeters_AK.shp'
img_paths = ['treespecies_ak.tif',
'arcticdem_AK.tif',
'arcticdem_slope_AK.tif',
'ncscd_ak.tif']
# start clipping
# open images with rasterio
depth_img = rasterio.open(depth_path)
tc_img = rasterio.open(tc_path)
# iteration through features
with fiona.open(shape_path, "r") as shapefile1:
for feat1 in shapefile1:
# extract relevant information from feature
geom = feat1["geometry"]
obj_id = feat1["properties"]["FireID"]
year = int(feat1["properties"]["Year"])
# determine whether the feature produced an overwintering fire or not
ign2 = 'overwinter' if obj_id in fire_ids else 'other'
# year 2018 has to be skipped since we don't have burned area for 2019
# and consequently don't know if fire scars in 2018 caused holdovers in 2019
if year > 2017:
continue
# extract tiff values masking with feature
arr_depth, trans = rasterio.mask.mask(depth_img, [geom], crop=True) #, all_touched=True)
arr_tc, trans = rasterio.mask.mask(tc_img, [geom], crop=True, filled=False) #, all_touched=True)
# convert NoData to NAN
arr_depth = np.where(arr_depth == 0.0, np.nan, arr_depth)
# calculate stats on target year
mean_depth = np.nanmean(arr_depth[year-2001, :, :]) # 2001 = year0
n_depth = np.count_nonzero(~np.isnan(arr_depth[year-2001, :, :]))
# rasterio assigns 0 as nodataval if the nodatavalue is missing in a raster
# since 0 is a valid value for our rasters we need a workaround
# we return the masked array (l 108, filled = False)
# and count the number of 'False' = not masked values in the mask for n
# if the whole array is masked (geom does not hit the center point of a raster pixel)
# it cannot calculate the nanmean, therefore the if-clause
if False in arr_tc.mask:
mean_tc = np.nanmean(arr_tc[year-2001, :, :]) # 2000 = year0
n_tc = np.count_nonzero(~arr_tc.mask[year-2001, :, :])
else:
mean_tc = np.nan
n_tc = 0
# add all these to a list
out = [obj_id, year, region, ign2, mean_depth, n_depth, mean_tc, n_tc]
            # loop through other images and add to list
for img_path in img_paths:
img1 = rasterio.open(img_path)
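                # if the raster declares no nodata value (or declares it as 0), fall back to the
                # masked-array route described above; otherwise mask the declared nodata manually below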
if not img1.nodata:
arr1, trans = rasterio.mask.mask(img1, [geom], crop=True, filled=False)
if False in arr_tc.mask:
mean = np.nanmean(arr1, axis=(1, 2))
n = np.count_nonzero(~arr1.mask[0, :, :])
else:
mean = [np.nan] * arr1.shape[0]
n = 0
else:
arr1, trans = rasterio.mask.mask(img1, [geom], crop=True)
arr1 = np.where(arr1 == img1.nodata, np.nan, arr1)
mean = np.nanmean(arr1, axis=(1, 2))
n = np.count_nonzero(~np.isnan(arr1[0, :, :]))
out.extend(mean)
out.append(n)
# write data into csv
out = tuple(out)
line = '%s,%i,%s,%s,%f,%i,%f,%i,%f,%f,%f,%f,%f,%f,%i,%f,%i,%f,%i,%f,%f,%f,%f,%i\n' %out
f.write(line)
# close file
f.close()
|
[
"pandas.read_csv",
"numpy.where",
"rasterio.open",
"numpy.count_nonzero",
"numpy.nanmean",
"numpy.isnan",
"fiona.open",
"rasterio.mask.mask"
] |
[((1354, 1373), 'pandas.read_csv', 'pd.read_csv', (['nndist'], {}), '(nndist)\n', (1365, 1373), True, 'import pandas as pd\n'), ((2325, 2350), 'rasterio.open', 'rasterio.open', (['depth_path'], {}), '(depth_path)\n', (2338, 2350), False, 'import rasterio\n'), ((2364, 2386), 'rasterio.open', 'rasterio.open', (['tc_path'], {}), '(tc_path)\n', (2377, 2386), False, 'import rasterio\n'), ((2430, 2457), 'fiona.open', 'fiona.open', (['shape_path', '"""r"""'], {}), "(shape_path, 'r')\n", (2440, 2457), False, 'import fiona\n'), ((3164, 3212), 'rasterio.mask.mask', 'rasterio.mask.mask', (['depth_img', '[geom]'], {'crop': '(True)'}), '(depth_img, [geom], crop=True)\n', (3182, 3212), False, 'import rasterio\n'), ((3262, 3321), 'rasterio.mask.mask', 'rasterio.mask.mask', (['tc_img', '[geom]'], {'crop': '(True)', 'filled': '(False)'}), '(tc_img, [geom], crop=True, filled=False)\n', (3280, 3321), False, 'import rasterio\n'), ((3404, 3449), 'numpy.where', 'np.where', (['(arr_depth == 0.0)', 'np.nan', 'arr_depth'], {}), '(arr_depth == 0.0, np.nan, arr_depth)\n', (3412, 3449), True, 'import numpy as np\n'), ((3521, 3561), 'numpy.nanmean', 'np.nanmean', (['arr_depth[year - 2001, :, :]'], {}), '(arr_depth[year - 2001, :, :])\n', (3531, 3561), True, 'import numpy as np\n'), ((4202, 4239), 'numpy.nanmean', 'np.nanmean', (['arr_tc[year - 2001, :, :]'], {}), '(arr_tc[year - 2001, :, :])\n', (4212, 4239), True, 'import numpy as np\n'), ((4286, 4335), 'numpy.count_nonzero', 'np.count_nonzero', (['(~arr_tc.mask[year - 2001, :, :])'], {}), '(~arr_tc.mask[year - 2001, :, :])\n', (4302, 4335), True, 'import numpy as np\n'), ((4650, 4673), 'rasterio.open', 'rasterio.open', (['img_path'], {}), '(img_path)\n', (4663, 4673), False, 'import rasterio\n'), ((3619, 3657), 'numpy.isnan', 'np.isnan', (['arr_depth[year - 2001, :, :]'], {}), '(arr_depth[year - 2001, :, :])\n', (3627, 3657), True, 'import numpy as np\n'), ((4744, 4801), 'rasterio.mask.mask', 'rasterio.mask.mask', (['img1', '[geom]'], {'crop': '(True)', 'filled': '(False)'}), '(img1, [geom], crop=True, filled=False)\n', (4762, 4801), False, 'import rasterio\n'), ((5142, 5185), 'rasterio.mask.mask', 'rasterio.mask.mask', (['img1', '[geom]'], {'crop': '(True)'}), '(img1, [geom], crop=True)\n', (5160, 5185), False, 'import rasterio\n'), ((5213, 5256), 'numpy.where', 'np.where', (['(arr1 == img1.nodata)', 'np.nan', 'arr1'], {}), '(arr1 == img1.nodata, np.nan, arr1)\n', (5221, 5256), True, 'import numpy as np\n'), ((5284, 5313), 'numpy.nanmean', 'np.nanmean', (['arr1'], {'axis': '(1, 2)'}), '(arr1, axis=(1, 2))\n', (5294, 5313), True, 'import numpy as np\n'), ((4878, 4907), 'numpy.nanmean', 'np.nanmean', (['arr1'], {'axis': '(1, 2)'}), '(arr1, axis=(1, 2))\n', (4888, 4907), True, 'import numpy as np\n'), ((4936, 4973), 'numpy.count_nonzero', 'np.count_nonzero', (['(~arr1.mask[0, :, :])'], {}), '(~arr1.mask[0, :, :])\n', (4952, 4973), True, 'import numpy as np\n'), ((5356, 5379), 'numpy.isnan', 'np.isnan', (['arr1[0, :, :]'], {}), '(arr1[0, :, :])\n', (5364, 5379), True, 'import numpy as np\n')]
|
import os
import numpy
import numpy as np
import torch
from dg_util.python_utils import misc_util
from torch import nn
numpy.set_printoptions(precision=4)
torch.set_printoptions(precision=4, sci_mode=False)
def batch_norm_layer(channels):
return nn.BatchNorm2d(channels)
def nonlinearity():
return nn.ReLU(inplace=True)
NONLINEARITY = nonlinearity
NORM_LAYER = batch_norm_layer
TIME_STR = misc_util.get_time_str()
BASE_LOG_DIR = "logs"
CHECK_FOR_NEW_DATA = False
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32) * 255
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32) * 255
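# note: the mean/std above are the standard ImageNet channel statistics scaled to the 0-255 pixel range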
COOKIE_PATH = os.path.join(os.path.dirname(__file__), "youtube_scrape", "cookies.txt")
|
[
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.set_printoptions",
"dg_util.python_utils.misc_util.get_time_str",
"numpy.array",
"os.path.dirname",
"numpy.set_printoptions"
] |
[((121, 156), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (143, 156), False, 'import numpy\n'), ((157, 208), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(4)', 'sci_mode': '(False)'}), '(precision=4, sci_mode=False)\n', (179, 208), False, 'import torch\n'), ((405, 429), 'dg_util.python_utils.misc_util.get_time_str', 'misc_util.get_time_str', ([], {}), '()\n', (427, 429), False, 'from dg_util.python_utils import misc_util\n'), ((254, 278), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (268, 278), False, 'from torch import nn\n'), ((312, 333), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (319, 333), False, 'from torch import nn\n'), ((497, 546), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {'dtype': 'np.float32'}), '([0.485, 0.456, 0.406], dtype=np.float32)\n', (505, 546), True, 'import numpy as np\n'), ((568, 617), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {'dtype': 'np.float32'}), '([0.229, 0.224, 0.225], dtype=np.float32)\n', (576, 617), True, 'import numpy as np\n'), ((651, 676), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (666, 676), False, 'import os\n')]
|
import os
import subprocess
import numpy as np
import matplotlib.pyplot as pyplot
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from simtk import unit
import openmmtools
from cg_openmm.utilities.util import set_box_vectors, get_box_vectors
from simtk.openmm.app.pdbfile import PDBFile
from simtk.openmm.app.dcdfile import DCDFile
from mdtraj.formats import PDBTrajectoryFile
from mdtraj import Topology, Trajectory
from pymbar import timeseries
from scipy.special import erf
from scipy.optimize import minimize_scalar
import time
from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler
from openmmtools.multistate import ReplicaExchangeAnalyzer
# quiet down some citation spam
MultiStateSampler._global_citation_silence = True
kB = (unit.MOLAR_GAS_CONSTANT_R).in_units_of(unit.kilojoule / (unit.kelvin * unit.mole))
def make_replica_dcd_files(
topology, timestep=5*unit.femtosecond, time_interval=200,
output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
frame_begin=0, frame_stride=1):
"""
Make dcd files from replica exchange simulation trajectory data.
:param topology: OpenMM Topology
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param timestep: Time step used in the simulation (default=5*unit.femtosecond)
:type timestep: `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>` float * simtk.unit
:param time_interval: frequency, in number of time steps, at which positions were recorded (default=200)
:type time_interval: int
:param output_dir: path to which we will write the output (default='output')
:type output_dir: str
:param output_data: name of output .nc data file (default='output.nc')
:type output_data: str
:param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
:type checkpoint_data: str
:param frame_begin: Frame at which to start writing the dcd trajectory (default=0)
:type frame_begin: int
:param frame_stride: advance by this many time intervals when writing dcd trajectories (default=1)
:type frame_stride: int
"""
file_list = []
output_data_path = os.path.join(output_dir, output_data)
# Get number of replicas:
reporter = MultiStateReporter(output_data_path, open_mode="r")
states = reporter.read_thermodynamic_states()[0]
n_replicas=len(states)
sampler_states = reporter.read_sampler_states(iteration=0)
xunit = sampler_states[0].positions[0].unit
for replica_index in range(n_replicas):
replica_positions = extract_trajectory(topology, replica_index=replica_index,
output_data=output_data_path, checkpoint_data=checkpoint_data,
frame_begin=frame_begin, frame_stride=frame_stride)
n_frames_tot = replica_positions.shape[0]
# Determine simulation time (in ps) for each frame:
time_delta_ps = (timestep*time_interval).value_in_unit(unit.picosecond)
traj_times = np.linspace(
frame_begin*time_delta_ps,
(frame_begin+frame_stride*(n_frames_tot-1))*time_delta_ps,
num=n_frames_tot,
)
file_name = f"{output_dir}/replica_{replica_index+1}.dcd"
# Trajectories are written in nanometers:
replica_traj = Trajectory(
replica_positions,
Topology.from_openmm(topology),
time=traj_times,
)
Trajectory.save_dcd(replica_traj,file_name)
return file_list
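# Illustrative usage sketch (hypothetical file names, not part of this module):
#   pdb = PDBFile("initial_structure.pdb")
#   make_replica_dcd_files(pdb.topology, output_dir="output", output_data="output.nc",
#                          checkpoint_data="output_checkpoint.nc")
# writes one replica_<i>.dcd trajectory per replica found in output/output.nc.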
def make_replica_pdb_files(
topology, output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
frame_begin=0, frame_stride=1):
"""
Make pdb files from replica exchange simulation trajectory data.
:param topology: OpenMM Topology
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param output_dir: path to which we will write the output (default='output')
:type output_dir: str
:param output_data: name of output .nc data file (default='output.nc')
:type output_data: str
:param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
:type checkpoint_data: str
:param frame_begin: Frame at which to start writing the pdb trajectory (default=0)
:type frame_begin: int
:param frame_stride: advance by this many frames when writing pdb trajectories (default=1)
:type frame_stride: int
:returns:
- file_list ( List( str ) ) - A list of names for the files that were written
"""
file_list = []
output_data_path = os.path.join(output_dir, output_data)
# Get number of replicas:
reporter = MultiStateReporter(output_data_path, open_mode="r")
states = reporter.read_thermodynamic_states()[0]
n_replicas = len(states)
sampler_states = reporter.read_sampler_states(iteration=0)
xunit = sampler_states[0].positions[0].unit
for replica_index in range(n_replicas):
replica_positions = extract_trajectory(topology, replica_index=replica_index,
output_data=output_data_path, checkpoint_data=checkpoint_data,
frame_begin=frame_begin, frame_stride=frame_stride)
file_name = f"{output_dir}/replica_{replica_index+1}.pdb"
# Trajectories are written in nanometers:
replica_traj = Trajectory(
replica_positions,
Topology.from_openmm(topology),
)
Trajectory.save_pdb(replica_traj,file_name)
return file_list
def make_state_dcd_files(
topology, timestep=5*unit.femtosecond, time_interval=200,
output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
frame_begin=0, frame_stride=1, center=True):
"""
Make dcd files by state from replica exchange simulation trajectory data.
Note: these are discontinuous trajectories with constant temperature state.
:param topology: OpenMM Topology
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param timestep: Time step used in the simulation (default=5*unit.femtosecond)
:type timestep: `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>` float * simtk.unit
:param time_interval: frequency, in number of time steps, at which positions were recorded (default=200)
:type time_interval: int
:param output_dir: path to which we will write the output (default='output')
:type output_dir: str
:param output_data: name of output .nc data file (default='output.nc')
:type output_data: str
:param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
:type checkpoint_data: str
:param frame_begin: Frame at which to start writing the dcd trajectory (default=0)
:type frame_begin: int
:param frame_stride: advance by this many time intervals when writing dcd trajectories (default=1)
:type frame_stride: int
:param center: align the center of mass of each structure in the discontinuous state trajectory (default=True)
:type center: Boolean
"""
file_list = []
output_data_path = os.path.join(output_dir, output_data)
# Get number of states:
reporter = MultiStateReporter(output_data_path, open_mode="r")
states = reporter.read_thermodynamic_states()[0]
sampler_states = reporter.read_sampler_states(iteration=0)
xunit = sampler_states[0].positions[0].unit
for state_index in range(len(states)):
state_positions = extract_trajectory(topology, state_index=state_index,
output_data=output_data_path, checkpoint_data=checkpoint_data,
frame_begin=frame_begin, frame_stride=frame_stride)
n_frames_tot = state_positions.shape[0]
# Determine simulation time (in ps) for each frame:
time_delta_ps = (timestep*time_interval).value_in_unit(unit.picosecond)
traj_times = np.linspace(
frame_begin*time_delta_ps,
(frame_begin+frame_stride*(n_frames_tot-1))*time_delta_ps,
num=n_frames_tot,
)
file_name = f"{output_dir}/state_{state_index+1}.dcd"
# Trajectories are written in nanometers:
state_traj = Trajectory(
state_positions,
Topology.from_openmm(topology),
time=traj_times,
)
if center:
ref_traj = state_traj[0]
state_traj.superpose(ref_traj)
# This rewrites to state_traj
Trajectory.save_dcd(state_traj,file_name)
return file_list
def make_state_pdb_files(
topology, output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
frame_begin=0, frame_stride=1, center=True):
"""
Make pdb files by state from replica exchange simulation trajectory data.
Note: these are discontinuous trajectories with constant temperature state.
:param topology: OpenMM Topology
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param output_dir: path to which we will write the output (default='output')
:type output_dir: str
:param output_data: name of output .nc data file (default='output.nc')
:type output_data: str
:param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
:type checkpoint_data: str
:param frame_begin: Frame at which to start writing the pdb trajectory (default=0)
:type frame_begin: int
:param frame_stride: advance by this many frames when writing pdb trajectories (default=1)
:type frame_stride: int
:param center: align the center of mass of each structure in the discontinuous state trajectory (default=True)
:type center: Boolean
:returns:
- file_list ( List( str ) ) - A list of names for the files that were written
"""
file_list = []
output_data_path = os.path.join(output_dir, output_data)
# Get number of states:
reporter = MultiStateReporter(output_data_path, open_mode="r")
states = reporter.read_thermodynamic_states()[0]
sampler_states = reporter.read_sampler_states(iteration=0)
xunit = sampler_states[0].positions[0].unit
for state_index in range(len(states)):
state_positions = extract_trajectory(topology, state_index=state_index,
output_data=output_data_path, checkpoint_data=checkpoint_data,
frame_begin=frame_begin, frame_stride=frame_stride)
file_name = f"{output_dir}/state_{state_index+1}.pdb"
# Trajectories are written in nanometers:
state_traj = Trajectory(
state_positions,
Topology.from_openmm(topology),
)
if center:
ref_traj = state_traj[0]
state_traj.superpose(ref_traj)
# This rewrites to state_traj
Trajectory.save_pdb(state_traj,file_name)
return file_list
def extract_trajectory(
topology, output_data="output/output.nc", checkpoint_data="output_checkpoint.nc",
state_index=None, replica_index=None,
frame_begin=0, frame_stride=1, frame_end=-1):
"""
    Internal function for extracting a trajectory (replica or state) from a .nc file,
Based on YANK extract_trajectory code.
"""
reporter = MultiStateReporter(output_data, open_mode='r', checkpoint_storage=checkpoint_data)
# Get dimensions
trajectory_storage = reporter._storage_checkpoint
n_iterations = reporter.read_last_iteration()
n_frames = trajectory_storage.variables['positions'].shape[0]
n_atoms = trajectory_storage.variables['positions'].shape[2]
# Determine frames to extract.
# Convert negative indices to last indices.
if frame_begin < 0:
frame_begin = n_frames + frame_begin
if frame_end < 0:
frame_end = n_frames + frame_end + 1
frame_indices = range(frame_begin, frame_end, frame_stride)
if len(frame_indices) == 0:
raise ValueError('No frames selected')
# Determine the number of frames that the trajectory will have.
if state_index is None:
n_trajectory_frames = len(frame_indices)
else:
# With SAMS, an iteration can have 0 or more replicas in a given state.
# Deconvolute state indices.
state_indices = [None for _ in frame_indices]
for i, iteration in enumerate(frame_indices):
replica_indices = reporter._storage_analysis.variables['states'][iteration, :]
state_indices[i] = np.where(replica_indices == state_index)[0]
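            # e.g. if at this iteration the replicas occupy states [3, 0, 2, 1] and
            # state_index == 3, state_indices[i] becomes array([0]): replica 0 holds that state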
n_trajectory_frames = sum(len(x) for x in state_indices)
# Initialize positions and box vectors arrays.
# MDTraj Cython code expects float32 positions.
positions = np.zeros((n_trajectory_frames, n_atoms, 3), dtype=np.float32)
# Extract state positions and box vectors.
if state_index is not None:
# Extract state positions
frame_idx = 0
for i, iteration in enumerate(frame_indices):
for replica_index in state_indices[i]:
positions[frame_idx, :, :] = trajectory_storage.variables['positions'][iteration, replica_index, :, :].astype(np.float32)
frame_idx += 1
else: # Extract replica positions
for i, iteration in enumerate(frame_indices):
positions[i, :, :] = trajectory_storage.variables['positions'][iteration, replica_index, :, :].astype(np.float32)
return positions
def process_replica_exchange_data(
output_data="output/output.nc", output_directory="output", series_per_page=4,
write_data_file=True, plot_production_only=False, print_timing=False,
equil_nskip=1, frame_begin=0, frame_end=-1,
):
"""
    Read replica exchange simulation data, detect the equilibration point and decorrelation time, and plot replica exchange results.
:param output_data: path to output .nc file from replica exchange simulation, (default='output/output.nc')
:type output_data: str
:param output_directory: path to which output files will be written (default='output')
    :type output_directory: str
:param series_per_page: number of replica data series to plot per pdf page (default=4)
:type series_per_page: int
:param write_data_file: Option to write a text data file containing the state_energies array (default=True)
:type write_data_file: Boolean
:param plot_production_only: Option to plot only the production region, as determined from pymbar detectEquilibration (default=False)
:type plot_production_only: Boolean
    :param equil_nskip: skip this number of frames to sparsify the energy timeseries for pymbar detectEquilibration (default=1) - this is used only when frame_begin=0 and the trajectory has fewer than 40000 frames.
    :type equil_nskip: int
:param frame_begin: analyze starting from this frame, discarding all prior as equilibration period (default=0)
:type frame_begin: int
:param frame_end: analyze up to this frame only, discarding the rest (default=-1).
:type frame_end: int
:returns:
- replica_energies ( `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>`_ ( np.float( [number_replicas,number_simulation_steps] ), simtk.unit ) ) - The potential energies for all replicas at all (printed) time steps
- replica_state_indices ( np.int64( [number_replicas,number_simulation_steps] ), simtk.unit ) - The thermodynamic state assignments for all replicas at all (printed) time steps
        - production_start ( int ) - The frame at which the production region begins for all replicas, as determined from pymbar detectEquilibration
        - sample_spacing ( int ) - The number of frames between uncorrelated state energies, estimated using a heuristic algorithm
- n_transit ( np.float( [number_replicas] ) ) - Number of half-transitions between state 0 and n for each replica
- mixing_stats ( tuple ( np.float( [number_replicas x number_replicas] ) , np.float( [ number_replicas ] ) , float( statistical inefficiency ) ) ) - transition matrix, corresponding eigenvalues, and statistical inefficiency
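    :Example:
    (Illustrative usage sketch; the output paths below are assumptions, not requirements of this function.)
    >>> from cg_openmm.simulation.rep_exch import *
    >>> (replica_energies, replica_state_indices, production_start,
    ...  sample_spacing, n_transit, mixing_stats) = process_replica_exchange_data(
    ...     output_data="output/output.nc", output_directory="output")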
"""
t1 = time.perf_counter()
# Read the simulation coordinates for individual temperature replicas
reporter = MultiStateReporter(output_data, open_mode="r")
t2 = time.perf_counter()
if print_timing:
print(f"open data time: {t2-t1}")
    # Figure out the time between output frames.
    # We assume all moves use the same time step (which we believe is required).
mcmove = reporter.read_mcmc_moves()[0]
time_interval = mcmove.n_steps*mcmove.timestep
t3 = time.perf_counter()
if print_timing:
print(f"read_mcmc_moves time: {t3-t2}")
# figure out what the temperature list is
states = reporter.read_thermodynamic_states()[0]
t4 = time.perf_counter()
if print_timing:
print(f"read_thermodynamics_states time: {t4-t3}")
temperature_list = []
for s in states:
temperature_list.append(s.temperature)
analyzer = ReplicaExchangeAnalyzer(reporter)
t5 = time.perf_counter()
(
replica_energies,
unsampled_state_energies,
neighborhoods,
replica_state_indices,
) = analyzer.read_energies()
# Truncate output of read_energies() to last frame of interest
if frame_end > 0:
# Use frames from frame_begin to frame_end
replica_energies = replica_energies[:,:,:frame_end]
unsampled_state_energies = unsampled_state_energies[:,:,:frame_end]
neighborhoods = neighborhoods[:,:,:frame_end]
replica_state_indices = replica_state_indices[:,:frame_end]
t6 = time.perf_counter()
if print_timing:
print(f"read_energies time: {t6-t5}")
n_particles = np.shape(reporter.read_sampler_states(iteration=0)[0].positions)[0]
temps = np.array([temp._value for temp in temperature_list])
beta_k = 1 / (kB * temps)
n_replicas = len(temperature_list)
for k in range(n_replicas):
replica_energies[:, k, :] *= beta_k[k] ** (-1)
t7 = time.perf_counter()
if print_timing:
print(f"reduce replica energies time: {t7-t6}")
total_steps = len(replica_energies[0][0])
state_energies = np.zeros([n_replicas, total_steps])
t8 = time.perf_counter()
    # There is likely a more efficient way to do this with a list comprehension or vectorized indexing.
for step in range(total_steps):
for state in range(n_replicas):
state_energies[state, step] = replica_energies[
np.where(replica_state_indices[:, step] == state)[0], 0, step
]
t9 = time.perf_counter()
if print_timing:
print(f"assign state energies time: {t9-t8}")
    # Physical validation checks can be run on these state_energies.
    # Use the pymbar timeseries module to detect the production period.
t10 = time.perf_counter()
# Start of equilibrated data:
t0 = np.zeros((n_replicas))
# Statistical inefficiency:
g = np.zeros((n_replicas))
subsample_indices = {}
# If sufficiently large, discard the first 20000 frames as equilibration period and use
# subsampleCorrelatedData to get the energy decorrelation time.
if total_steps >= 40000 or frame_begin > 0:
if frame_begin > 0:
# If specified, use frame_begin as the start of the production region
production_start=frame_begin
else:
# Otherwise, use frame 20000
production_start=20000
for state in range(n_replicas):
subsample_indices[state] = timeseries.subsampleCorrelatedData(
state_energies[state][production_start:],
conservative=True,
)
g[state] = subsample_indices[state][1]-subsample_indices[state][0]
else:
# For small trajectories, use detectEquilibration
for state in range(n_replicas):
t0[state], g[state], Neff_max = timeseries.detectEquilibration(state_energies[state], nskip=equil_nskip)
# Choose the latest equil timestep to apply to all states
production_start = int(np.max(t0))
# Assume a normal distribution (very rough approximation), and use mean plus
# the number of standard deviations which leads to (n_replica-1)/n_replica coverage
# For 12 replicas this should be the mean + 1.7317 standard deviations
# x standard deviations is the solution to (n_replica-1)/n_replica = erf(x/sqrt(2))
# This is equivalent to a target of 23/24 CDF value
print(f"g: {g.astype(int)}")
def erf_fun(x):
return np.power((erf(x/np.sqrt(2))-(n_replicas-1)/n_replicas),2)
# x must be larger than zero
opt_g_results = minimize_scalar(
erf_fun,
bounds=(0,10)
)
if not opt_g_results.success:
print("Error solving for correlation time, exiting...")
print(f"erf opt results: {opt_g_results}")
exit()
sample_spacing = int(np.ceil(np.mean(g)+opt_g_results.x*np.std(g)))
t11 = time.perf_counter()
if print_timing:
print(f"detect equil and subsampling time: {t11-t10}")
print("state mean energies variance")
for state in range(n_replicas):
state_mean = np.mean(state_energies[state,production_start::sample_spacing])
state_std = np.std(state_energies[state,production_start::sample_spacing])
print(
f" {state:4d} {state_mean:10.6f} {state_std:10.6f}"
)
t12 = time.perf_counter()
    if write_data_file:
f = open(os.path.join(output_directory, "replica_energies.dat"), "w")
for step in range(total_steps):
f.write(f"{step:10d}")
for replica_index in range(n_replicas):
f.write(f"{replica_energies[replica_index,replica_index,step]:12.6f}")
f.write("\n")
f.close()
t13 = time.perf_counter()
if print_timing:
print(f"Optionally write .dat file: {t13-t12}")
t14 = time.perf_counter()
    if plot_production_only:
plot_replica_exchange_energies(
state_energies[:,production_start:],
temperature_list,
series_per_page,
time_interval=time_interval,
time_shift=production_start*time_interval,
file_name=f"{output_directory}/rep_ex_ener.pdf",
)
plot_replica_exchange_energy_histograms(
state_energies[:,production_start:],
temperature_list,
file_name=f"{output_directory}/rep_ex_ener_hist.pdf",
)
plot_replica_exchange_summary(
replica_state_indices[:,production_start:],
temperature_list,
series_per_page,
time_interval=time_interval,
time_shift=production_start*time_interval,
file_name=f"{output_directory}/rep_ex_states.pdf",
)
plot_replica_state_matrix(
replica_state_indices[:,production_start:],
file_name=f"{output_directory}/state_probability_matrix.pdf",
)
else:
plot_replica_exchange_energies(
state_energies,
temperature_list,
series_per_page,
time_interval=time_interval,
file_name=f"{output_directory}/rep_ex_ener.pdf",
)
plot_replica_exchange_energy_histograms(
state_energies,
temperature_list,
file_name=f"{output_directory}/rep_ex_ener_hist.pdf",
)
plot_replica_exchange_summary(
replica_state_indices,
temperature_list,
series_per_page,
time_interval=time_interval,
file_name=f"{output_directory}/rep_ex_states.pdf",
)
plot_replica_state_matrix(
replica_state_indices,
file_name=f"{output_directory}/state_probability_matrix.pdf",
)
t15 = time.perf_counter()
if print_timing:
print(f"plotting time: {t15-t14}")
# Analyze replica exchange state transitions
# For each replica, how many times does the thermodynamic state go between state 0 and state n
# For consistency with the other mixing statistics, use only the production region here
replica_state_indices_prod = replica_state_indices[:,production_start:]
# Number of one-way transitions from states 0 to n or states n to 0
n_transit = np.zeros((n_replicas,1))
# Replica_state_indices is [n_replicas x n_iterations]
for rep in range(n_replicas):
last_bound = None
for i in range(replica_state_indices_prod.shape[1]):
if replica_state_indices_prod[rep,i] == 0 or replica_state_indices_prod[rep,i] == (n_replicas-1):
if last_bound is None:
# This is the first time state 0 or n is visited
pass
else:
if last_bound != replica_state_indices_prod[rep,i]:
# This is a completed transition from 0 to n or n to 0
n_transit[rep] += 1
last_bound = replica_state_indices_prod[rep,i]
t16 = time.perf_counter()
if print_timing:
print(f"replica transition analysis: {t16-t15}")
# Compute transition matrix from the analyzer
mixing_stats = analyzer.generate_mixing_statistics(number_equilibrated=production_start)
t17 = time.perf_counter()
if print_timing:
print(f"compute transition matrix: {t17-t16}")
print(f"total time elapsed: {t17-t1}")
return (replica_energies, replica_state_indices, production_start, sample_spacing, n_transit, mixing_stats)
def run_replica_exchange(
topology,
system,
positions,
total_simulation_time=1.0 * unit.picosecond,
simulation_time_step=None,
temperature_list=None,
friction=1.0 / unit.picosecond,
minimize=True,
exchange_frequency=1000,
output_data="output/output.nc",
):
"""
    Run an OpenMMTools replica exchange simulation using an OpenMM coarse grained model.
:param topology: OpenMM Topology
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param system: OpenMM System()
:type system: `System() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1openmm_1_1System.html>`_
:param positions: Positions array for the model we would like to test
:type positions: `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>`_ ( np.array( [cgmodel.num_beads,3] ), simtk.unit )
:param total_simulation_time: Total run time for individual simulations
:type total_simulation_time: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param simulation_time_step: Simulation integration time step
:type simulation_time_step: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param temperature_list: List of temperatures for which to perform replica exchange simulations, default = None
    :type temperature_list: List( float * simtk.unit.temperature )
:param friction: Langevin thermostat friction coefficient, default = 1 / ps
:type friction: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param minimize: Whether minimization is done before running the simulation
:type minimize: bool
:param output_data: Name of NETCDF file where we will write simulation data
:type output_data: string
    :param exchange_frequency: Number of time steps between replica exchange attempts (default = 1000)
    :type exchange_frequency: int
:returns:
- replica_energies ( `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>`_ ( np.float( [number_replicas,number_simulation_steps] ), simtk.unit ) ) - The potential energies for all replicas at all (printed) time steps
- replica_positions ( `Quantity() <http://docs.openmm.org/development/api-python/generated/simtk.unit.quantity.Quantity.html>`_ ( np.float( [number_replicas,number_simulation_steps,cgmodel.num_beads,3] ), simtk.unit ) ) - The positions for all replicas at all (printed) time steps
- replica_state_indices ( np.int64( [number_replicas,number_simulation_steps] ), simtk.unit ) - The thermodynamic state assignments for all replicas at all (printed) time steps
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> from cg_openmm.simulation.rep_exch import *
>>> cgmodel = CGModel()
>>> replica_energies,replica_positions,replica_state_indices = run_replica_exchange(cgmodel.topology,cgmodel.system,cgmodel.positions)
"""
simulation_steps = int(np.floor(total_simulation_time / simulation_time_step))
exchange_attempts = int(np.floor(simulation_steps / exchange_frequency))
if temperature_list is None:
temperature_list = [((300.0 + i) * unit.kelvin) for i in range(-50, 50, 10)]
num_replicas = len(temperature_list)
sampler_states = list()
thermodynamic_states = list()
# Define thermodynamic states.
# box_vectors = system.getDefaultPeriodicBoxVectors()
for temperature in temperature_list:
thermodynamic_state = openmmtools.states.ThermodynamicState(
system=system, temperature=temperature
)
thermodynamic_states.append(thermodynamic_state)
sampler_states.append(
openmmtools.states.SamplerState(positions)
) # no box vectors, non-periodic system.
# Create and configure simulation object.
move = openmmtools.mcmc.LangevinDynamicsMove(
timestep=simulation_time_step,
collision_rate=friction,
n_steps=exchange_frequency,
reassign_velocities=False,
)
simulation = ReplicaExchangeSampler(
mcmc_moves=move,
number_of_iterations=exchange_attempts,
replica_mixing_scheme='swap-neighbors',
)
if os.path.exists(output_data):
os.remove(output_data)
reporter = MultiStateReporter(output_data, checkpoint_interval=1)
simulation.create(thermodynamic_states, sampler_states, reporter)
if minimize:
simulation.minimize()
print("Running OpenMM replica exchange simulation...")
print(f"Time step: {simulation_time_step}")
print(f"Iterations: {exchange_attempts}")
try:
simulation.run()
except BaseException:
print("Replica exchange simulation failed, try verifying your model/simulation settings.")
exit()
return
def restart_replica_exchange(
total_simulation_time=1*unit.nanosecond,
simulation_time_step=5*unit.picosecond,
exchange_frequency=200,
output_data="output/output.nc",
):
"""
    Restart an OpenMMTools replica exchange simulation using an OpenMM coarse grained model and
    the output .nc files from the previous segment of the simulation.
:param total_simulation_time: Total run time to add to the original simulation (default=1*unit.nanosecond)
:type total_simulation_time: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param simulation_time_step: Simulation integration time step (default=5*unit.picosecond)
:type simulation_time_step: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
:param exchange_frequency: Number of time steps between replica exchange attempts (default=200)
:type exchange_frequency: int
:param output_data: Path to the NETCDF file for previous segment of simulation - this will be appended to (default="output/output.nc")
:type output_data: str
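    :Example:
    (Illustrative usage sketch; the time settings and path below are assumptions and simply mirror the defaults.)
    >>> from simtk import unit
    >>> from cg_openmm.simulation.rep_exch import *
    >>> restart_replica_exchange(
    ...     total_simulation_time=1 * unit.nanosecond,
    ...     simulation_time_step=5 * unit.picosecond,
    ...     exchange_frequency=200,
    ...     output_data="output/output.nc")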
"""
simulation_steps = int(np.floor(total_simulation_time / simulation_time_step))
exchange_attempts = int(np.floor(simulation_steps / exchange_frequency))
# Load in the reporter from the original simulation:
reporter = MultiStateReporter(output_data, open_mode="r+")
simulation = ReplicaExchangeSampler.from_storage(reporter)
print("Running OpenMM replica exchange simulation...")
print(f"Time step: {simulation_time_step}")
print(f"Iterations: {exchange_attempts}")
simulation.extend(n_iterations=exchange_attempts)
return
def get_minimum_energy_ensemble(
topology, replica_energies, replica_positions, ensemble_size=5, file_name=None
):
"""
Get an ensemble of low (potential) energy poses, and write the lowest energy structure to a PDB file if a file_name is provided.
:param topology: OpenMM Topology()
:type topology: `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_
:param replica_energies: List of dimension num_replicas X simulation_steps, which gives the energies for all replicas at all simulation steps
:type replica_energies: List( List( float * simtk.unit.energy for simulation_steps ) for num_replicas )
:param replica_positions: List of positions for all output frames for all replicas
:type replica_positions: np.array( ( float * simtk.unit.positions for num_beads ) for simulation_steps )
:param file_name: Output destination for PDB coordinates of minimum energy pose, Default = None
:returns:
- ensemble ( List() ) - A list of poses that are in the minimum energy ensemble.
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> from cg_openmm.simulation.rep_exch import *
>>> cgmodel = CGModel()
>>> replica_energies,replica_positions,replica_state_indices = run_replica_exchange(cgmodel.topology,cgmodel.system,cgmodel.positions)
>>> ensemble_size = 5
>>> file_name = "minimum.pdb"
>>> minimum_energy_ensemble = get_minimum_energy_ensemble(cgmodel.topology,replica_energies,replica_positions,ensemble_size=ensemble_size,file_name=file_name)
"""
# Get the minimum energy structure sampled during the simulation
ensemble = []
ensemble_energies = []
for replica in range(len(replica_energies)):
energies = np.array([energy for energy in replica_energies[replica][replica]])
for energy in range(len(energies)):
if len(ensemble) < ensemble_size:
ensemble.append(replica_positions[replica][energy])
ensemble_energies.append(energies[energy])
else:
for comparison in range(len(ensemble_energies)):
if energies[energy] < ensemble_energies[comparison]:
ensemble_energies[comparison] = energies[energy]
ensemble[comparison] = replica_positions[replica][energy]
    if file_name is None:
        index = 1
        for pose in ensemble:
            with open(str("re_min_" + str(index) + ".pdb"), "w") as file:
                PDBFile.writeFile(topology, pose, file=file)
            index += 1
    else:
        with open(file_name, "w") as file:
            for pose in ensemble:
                PDBFile.writeFile(topology, pose, file=file)
return ensemble
def plot_replica_exchange_energies(
state_energies,
temperature_list,
series_per_page,
time_interval=1.0 * unit.picosecond,
time_shift=0.0 * unit.picosecond,
file_name="rep_ex_ener.pdf",
legend=True,
):
"""
Plot the potential energies for a batch of replica exchange trajectories
:param state_energies: List of dimension num_replicas X simulation_steps, which gives the energies for all replicas at all simulation steps
:type state_energies: List( List( float * simtk.unit.energy for simulation_steps ) for num_replicas )
    :param temperature_list: List of temperatures corresponding to the plotted state energies
    :type temperature_list: List( float * simtk.unit.temperature )
    :param time_interval: Interval of simulation time between energy exchanges.
    :type time_interval: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
    :param time_shift: Amount of time before the production period by which to shift the time axis (default = 0)
    :type time_shift: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
    :param file_name: The pathname of the output file for plotting results, default = "rep_ex_ener.pdf"
    :type file_name: str
:param legend: Controls whether a legend is added to the plot
:type legend: Logical
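    :Example:
    (Illustrative usage sketch; state_energies and temperature_list are assumed to already exist from a processed replica exchange run.)
    >>> plot_replica_exchange_energies(
    ...     state_energies, temperature_list, series_per_page=4,
    ...     file_name="output/rep_ex_ener.pdf")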
"""
simulation_times = np.array(
[
step * time_interval.value_in_unit(unit.picosecond)
for step in range(len(state_energies[0]))
]
)
simulation_times += time_shift.value_in_unit(unit.picosecond)
# To improve pdf render speed, sparsify data to display less than 2000 data points
n_xdata = len(simulation_times)
if n_xdata <= 1000:
plot_stride = 1
else:
plot_stride = int(np.floor(n_xdata/1000))
# If more than series_per_page replicas, split into separate pages for better visibility
nmax = series_per_page
npage = int(np.ceil(len(temperature_list)/nmax))
with PdfPages(file_name) as pdf:
page_num=1
plotted_per_page=0
pyplot.figure()
for state in range(len(temperature_list)):
if plotted_per_page <= (nmax):
pyplot.plot(
simulation_times[::plot_stride],
state_energies[state,::plot_stride],
alpha=0.5,
linewidth=1,
)
plotted_per_page += 1
if (plotted_per_page >= nmax) or (state==(len(temperature_list)-1)):
# Save and close previous page
pyplot.xlabel("Simulation Time ( Picoseconds )")
pyplot.ylabel("Potential Energy ( kJ / mol )")
pyplot.title("Replica Exchange Simulation")
if legend:
pyplot.legend(
[round(temperature.value_in_unit(unit.kelvin), 1) for temperature in temperature_list[(0+(page_num-1)*nmax):(page_num*nmax)]],
loc="center left",
bbox_to_anchor=(1, 0.5),
title="T (K)",
)
pdf.savefig(bbox_inches="tight") # Save current fig to pdf page
pyplot.close()
plotted_per_page = 0
page_num += 1
return
def plot_replica_exchange_energy_histograms(
state_energies,
temperature_list,
file_name="rep_ex_ener_hist.pdf",
legend=True,
):
"""
Plot the potential energies for a batch of replica exchange trajectories
:param state_energies: List of dimension num_replicas X simulation_steps, which gives the energies for all replicas at all simulation steps
:type state_energies: List( List( float * simtk.unit.energy for simulation_steps ) for num_replicas )
    :param temperature_list: List of temperatures corresponding to the plotted state energies
    :type temperature_list: List( float * simtk.unit.temperature )
    :param file_name: The pathname of the output file for plotting results, default = "rep_ex_ener_hist.pdf"
    :type file_name: str
:param legend: Controls whether a legend is added to the plot
:type legend: Logical
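    :Example:
    (Illustrative usage sketch; state_energies and temperature_list are assumed to already exist from a processed replica exchange run.)
    >>> plot_replica_exchange_energy_histograms(
    ...     state_energies, temperature_list,
    ...     file_name="output/rep_ex_ener_hist.pdf")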
"""
figure = pyplot.figure(figsize=(8.5,11))
for state in range(len(temperature_list)):
n_out, bin_edges_out = np.histogram(
state_energies[state,:],bins=20,density=True,
)
bin_centers = np.zeros((len(bin_edges_out)-1,1))
for i in range(len(bin_edges_out)-1):
bin_centers[i] = (bin_edges_out[i]+bin_edges_out[i+1])/2
pyplot.plot(bin_centers,n_out,'o-',alpha=0.5,linewidth=1,markersize=6)
pyplot.xlabel("Potential Energy ( kJ / mol )")
pyplot.ylabel("Probability")
pyplot.title("Replica Exchange Energy Histogram")
if legend:
pyplot.legend(
[round(temperature._value, 1) for temperature in temperature_list],
loc="center left",
bbox_to_anchor=(1, 0.5),
title="T (K)",
)
pyplot.savefig(file_name, bbox_inches="tight")
pyplot.close()
return
def plot_replica_state_matrix(
replica_state_indices,
file_name='state_probability_matrix.pdf'
):
# Plot a matrix of replica vs. state, coloring each box in the grid by normalized frequency
# For each replica, histogram the state indices data
# Then normalize the data and create [n_replica x n_state] patch graph
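    # Illustrative usage sketch (replica_state_indices is assumed to come from
    # process_replica_exchange_data):
    #     hist_all = plot_replica_state_matrix(
    #         replica_state_indices, file_name="output/state_probability_matrix.pdf")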
n_replicas = replica_state_indices.shape[0]
hist_all = np.zeros((n_replicas, n_replicas))
state_bin_edges = np.linspace(-0.5,n_replicas-0.5,n_replicas+1)
state_bin_centers = 0.5+state_bin_edges[0:n_replicas]
for rep in range(n_replicas):
hist_all[rep,:], bin_edges = np.histogram(
replica_state_indices[rep,:],bins=state_bin_edges,density=True,
)
# No need for global normalization, since each replica's state probabilities must sum to 1
hist_norm = np.zeros_like(hist_all)
for rep in range(n_replicas):
for state in range(n_replicas):
hist_norm[rep,state] = hist_all[rep,state]/np.max(hist_all[rep,:])
mean_score = np.mean(hist_norm)
min_score = np.amin(hist_norm)
ax = pyplot.subplot(111)
cmap=pyplot.get_cmap('nipy_spectral')
norm=Normalize(vmin=0,vmax=1)
ax.imshow(hist_norm,cmap=cmap,norm=norm)
ax.set_aspect('equal', 'box')
# Append colorbar axis to right side
divider = make_axes_locatable(ax)
cax = divider.append_axes("right",size="5%",pad=0.20)
pyplot.colorbar(
cm.ScalarMappable(cmap=cmap,norm=norm),
cax=cax,
label='normalized frequency',
)
ax.set_xlabel("State")
ax.set_ylabel("Replica")
pyplot.suptitle(f"Replica exchange state probabilities\n(Mean: {mean_score:.4f} Min: {min_score:.4f})")
pyplot.savefig(file_name)
pyplot.close()
return hist_all
def plot_replica_exchange_summary(
replica_states,
temperature_list,
series_per_page,
time_interval=1.0 * unit.picosecond,
time_shift=0.0 * unit.picosecond,
file_name="rep_ex_states.pdf",
legend=True,
):
"""
Plot the thermodynamic state assignments for individual temperature replicas as a function of the simulation time, in order to obtain a visual summary of the replica exchanges from a OpenMM simulation.
:param replica_states: List of dimension num_replicas X simulation_steps, which gives the thermodynamic state indices for all replicas at all simulation steps
:type replica_states: List( List( float * simtk.unit.energy for simulation_steps ) for num_replicas )
    :param temperature_list: List of temperatures corresponding to the replica exchange states
    :type temperature_list: List( float * simtk.unit.temperature )
    :param time_interval: Interval of simulation time between energy exchanges.
    :type time_interval: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
    :param time_shift: Amount of time before the production period by which to shift the time axis (default = 0)
    :type time_shift: `SIMTK <https://simtk.org/>`_ `Unit() <http://docs.openmm.org/7.1.0/api-python/generated/simtk.unit.unit.Unit.html>`_
    :param file_name: The pathname of the output file for plotting results, default = "rep_ex_states.pdf"
    :type file_name: str
:param legend: Controls whether a legend is added to the plot
:type legend: Logical
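    :Example:
    (Illustrative usage sketch; replica_state_indices and temperature_list are assumed to already exist from a processed replica exchange run.)
    >>> plot_replica_exchange_summary(
    ...     replica_state_indices, temperature_list, series_per_page=4,
    ...     file_name="output/rep_ex_states.pdf")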
"""
simulation_times = np.array(
[
step * time_interval.value_in_unit(unit.picosecond)
for step in range(len(replica_states[0]))
]
)
simulation_times += time_shift.value_in_unit(unit.picosecond)
# To improve pdf render speed, sparsify data to display less than 2000 data points
n_xdata = len(simulation_times)
if n_xdata <= 1000:
plot_stride = 1
else:
plot_stride = int(np.floor(n_xdata/1000))
# If more than series_per_page replicas, split into separate pages for better visibility
nmax = series_per_page
npage = int(np.ceil(len(temperature_list)/nmax))
with PdfPages(file_name) as pdf:
page_num=1
plotted_per_page=0
pyplot.figure()
for replica in range(len(replica_states)):
state_indices = np.array([int(round(state)) for state in replica_states[replica]])
if plotted_per_page <= (nmax):
pyplot.plot(
simulation_times[::plot_stride],
state_indices[::plot_stride],
alpha=0.5,
linewidth=1
)
plotted_per_page += 1
if (plotted_per_page >= nmax) or (replica==(len(replica_states)-1)):
# Save and close previous page
pyplot.xlabel("Simulation Time ( Picoseconds )")
pyplot.ylabel("Thermodynamic State Index")
pyplot.title("State Exchange Summary")
if legend:
pyplot.legend(
[i for i in range((page_num-1)*nmax,page_num*nmax)],
loc="center left",
bbox_to_anchor=(1, 0.5),
title="Replica Index",
)
pdf.savefig(bbox_inches="tight") # Save current fig to pdf page
pyplot.close()
plotted_per_page = 0
page_num += 1
return
|
[
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"openmmtools.multistate.MultiStateReporter",
"numpy.array",
"os.remove",
"os.path.exists",
"numpy.mean",
"numpy.histogram",
"simtk.openmm.app.pdbfile.PDBFile.writeFile",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"time.perf_counter",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"scipy.optimize.minimize_scalar",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"openmmtools.mcmc.LangevinDynamicsMove",
"matplotlib.pyplot.savefig",
"numpy.amin",
"pymbar.timeseries.detectEquilibration",
"pymbar.timeseries.subsampleCorrelatedData",
"numpy.floor",
"mdtraj.Trajectory.save_pdb",
"openmmtools.states.SamplerState",
"openmmtools.multistate.ReplicaExchangeSampler.from_storage",
"matplotlib.pyplot.subplot",
"simtk.unit.MOLAR_GAS_CONSTANT_R.in_units_of",
"openmmtools.multistate.ReplicaExchangeAnalyzer",
"matplotlib.colors.Normalize",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.get_cmap",
"os.path.join",
"openmmtools.multistate.ReplicaExchangeSampler",
"openmmtools.states.ThermodynamicState",
"numpy.zeros",
"matplotlib.pyplot.figure",
"mdtraj.Trajectory.save_dcd",
"numpy.zeros_like",
"mdtraj.Topology.from_openmm"
] |
[((906, 992), 'simtk.unit.MOLAR_GAS_CONSTANT_R.in_units_of', 'unit.MOLAR_GAS_CONSTANT_R.in_units_of', (['(unit.kilojoule / (unit.kelvin * unit.mole))'], {}), '(unit.kilojoule / (unit.kelvin * unit.\n mole))\n', (943, 992), False, 'from simtk import unit\n'), ((2514, 2551), 'os.path.join', 'os.path.join', (['output_dir', 'output_data'], {}), '(output_dir, output_data)\n', (2526, 2551), False, 'import os\n'), ((2602, 2653), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data_path'], {'open_mode': '"""r"""'}), "(output_data_path, open_mode='r')\n", (2620, 2653), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((5086, 5123), 'os.path.join', 'os.path.join', (['output_dir', 'output_data'], {}), '(output_dir, output_data)\n', (5098, 5123), False, 'import os\n'), ((5174, 5225), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data_path'], {'open_mode': '"""r"""'}), "(output_data_path, open_mode='r')\n", (5192, 5225), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((7816, 7853), 'os.path.join', 'os.path.join', (['output_dir', 'output_data'], {}), '(output_dir, output_data)\n', (7828, 7853), False, 'import os\n'), ((7902, 7953), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data_path'], {'open_mode': '"""r"""'}), "(output_data_path, open_mode='r')\n", (7920, 7953), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((10740, 10777), 'os.path.join', 'os.path.join', (['output_dir', 'output_data'], {}), '(output_dir, output_data)\n', (10752, 10777), False, 'import os\n'), ((10826, 10877), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data_path'], {'open_mode': '"""r"""'}), "(output_data_path, open_mode='r')\n", (10844, 10877), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((12166, 12253), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data'], {'open_mode': '"""r"""', 'checkpoint_storage': 'checkpoint_data'}), "(output_data, open_mode='r', checkpoint_storage=\n checkpoint_data)\n", (12184, 12253), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((13643, 13704), 'numpy.zeros', 'np.zeros', (['(n_trajectory_frames, n_atoms, 3)'], {'dtype': 'np.float32'}), '((n_trajectory_frames, n_atoms, 3), dtype=np.float32)\n', (13651, 13704), True, 'import numpy as np\n'), ((17111, 17130), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (17128, 17130), False, 'import time\n'), ((17225, 17271), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data'], {'open_mode': '"""r"""'}), "(output_data, open_mode='r')\n", (17243, 17271), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((17282, 17301), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (17299, 17301), False, 'import time\n'), ((17600, 17619), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (17617, 17619), False, 'import time\n'), ((17807, 17826), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (17824, 17826), False, 'import time\n'), ((18022, 18055), 'openmmtools.multistate.ReplicaExchangeAnalyzer', 'ReplicaExchangeAnalyzer', (['reporter'], {}), '(reporter)\n', (18045, 18055), False, 
'from openmmtools.multistate import ReplicaExchangeAnalyzer\n'), ((18070, 18089), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18087, 18089), False, 'import time\n'), ((18665, 18684), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18682, 18684), False, 'import time\n'), ((18851, 18903), 'numpy.array', 'np.array', (['[temp._value for temp in temperature_list]'], {}), '([temp._value for temp in temperature_list])\n', (18859, 18903), True, 'import numpy as np\n'), ((19070, 19089), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19087, 19089), False, 'import time\n'), ((19243, 19278), 'numpy.zeros', 'np.zeros', (['[n_replicas, total_steps]'], {}), '([n_replicas, total_steps])\n', (19251, 19278), True, 'import numpy as np\n'), ((19289, 19308), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19306, 19308), False, 'import time\n'), ((19629, 19648), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19646, 19648), False, 'import time\n'), ((19870, 19889), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19887, 19889), False, 'import time\n'), ((19938, 19958), 'numpy.zeros', 'np.zeros', (['n_replicas'], {}), '(n_replicas)\n', (19946, 19958), True, 'import numpy as np\n'), ((20001, 20021), 'numpy.zeros', 'np.zeros', (['n_replicas'], {}), '(n_replicas)\n', (20009, 20021), True, 'import numpy as np\n'), ((21780, 21820), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['erf_fun'], {'bounds': '(0, 10)'}), '(erf_fun, bounds=(0, 10))\n', (21795, 21820), False, 'from scipy.optimize import minimize_scalar\n'), ((22107, 22126), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22124, 22126), False, 'import time\n'), ((22582, 22601), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22599, 22601), False, 'import time\n'), ((22986, 23005), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23003, 23005), False, 'import time\n'), ((23109, 23128), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23126, 23128), False, 'import time\n'), ((25083, 25102), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (25100, 25102), False, 'import time\n'), ((25594, 25619), 'numpy.zeros', 'np.zeros', (['(n_replicas, 1)'], {}), '((n_replicas, 1))\n', (25602, 25619), True, 'import numpy as np\n'), ((26378, 26397), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (26395, 26397), False, 'import time\n'), ((26648, 26667), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (26665, 26667), False, 'import time\n'), ((31185, 31341), 'openmmtools.mcmc.LangevinDynamicsMove', 'openmmtools.mcmc.LangevinDynamicsMove', ([], {'timestep': 'simulation_time_step', 'collision_rate': 'friction', 'n_steps': 'exchange_frequency', 'reassign_velocities': '(False)'}), '(timestep=simulation_time_step,\n collision_rate=friction, n_steps=exchange_frequency,\n reassign_velocities=False)\n', (31222, 31341), False, 'import openmmtools\n'), ((31391, 31515), 'openmmtools.multistate.ReplicaExchangeSampler', 'ReplicaExchangeSampler', ([], {'mcmc_moves': 'move', 'number_of_iterations': 'exchange_attempts', 'replica_mixing_scheme': '"""swap-neighbors"""'}), "(mcmc_moves=move, number_of_iterations=\n exchange_attempts, replica_mixing_scheme='swap-neighbors')\n", (31413, 31515), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((31550, 31577), 'os.path.exists', 'os.path.exists', (['output_data'], {}), '(output_data)\n', (31564, 31577), False, 'import 
os\n'), ((31626, 31680), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data'], {'checkpoint_interval': '(1)'}), '(output_data, checkpoint_interval=1)\n', (31644, 31680), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((33579, 33626), 'openmmtools.multistate.MultiStateReporter', 'MultiStateReporter', (['output_data'], {'open_mode': '"""r+"""'}), "(output_data, open_mode='r+')\n", (33597, 33626), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((33644, 33689), 'openmmtools.multistate.ReplicaExchangeSampler.from_storage', 'ReplicaExchangeSampler.from_storage', (['reporter'], {}), '(reporter)\n', (33679, 33689), False, 'from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler\n'), ((41289, 41321), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(8.5, 11)'}), '(figsize=(8.5, 11))\n', (41302, 41321), True, 'import matplotlib.pyplot as pyplot\n'), ((41769, 41815), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Potential Energy ( kJ / mol )"""'], {}), "('Potential Energy ( kJ / mol )')\n", (41782, 41815), True, 'import matplotlib.pyplot as pyplot\n'), ((41820, 41848), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (41833, 41848), True, 'import matplotlib.pyplot as pyplot\n'), ((41853, 41902), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Replica Exchange Energy Histogram"""'], {}), "('Replica Exchange Energy Histogram')\n", (41865, 41902), True, 'import matplotlib.pyplot as pyplot\n'), ((42136, 42182), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {'bbox_inches': '"""tight"""'}), "(file_name, bbox_inches='tight')\n", (42150, 42182), True, 'import matplotlib.pyplot as pyplot\n'), ((42187, 42201), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (42199, 42201), True, 'import matplotlib.pyplot as pyplot\n'), ((42642, 42676), 'numpy.zeros', 'np.zeros', (['(n_replicas, n_replicas)'], {}), '((n_replicas, n_replicas))\n', (42650, 42676), True, 'import numpy as np\n'), ((42704, 42755), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(n_replicas - 0.5)', '(n_replicas + 1)'], {}), '(-0.5, n_replicas - 0.5, n_replicas + 1)\n', (42715, 42755), True, 'import numpy as np\n'), ((43109, 43132), 'numpy.zeros_like', 'np.zeros_like', (['hist_all'], {}), '(hist_all)\n', (43122, 43132), True, 'import numpy as np\n'), ((43312, 43330), 'numpy.mean', 'np.mean', (['hist_norm'], {}), '(hist_norm)\n', (43319, 43330), True, 'import numpy as np\n'), ((43347, 43365), 'numpy.amin', 'np.amin', (['hist_norm'], {}), '(hist_norm)\n', (43354, 43365), True, 'import numpy as np\n'), ((43380, 43399), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(111)'], {}), '(111)\n', (43394, 43399), True, 'import matplotlib.pyplot as pyplot\n'), ((43414, 43446), 'matplotlib.pyplot.get_cmap', 'pyplot.get_cmap', (['"""nipy_spectral"""'], {}), "('nipy_spectral')\n", (43429, 43446), True, 'import matplotlib.pyplot as pyplot\n'), ((43457, 43482), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (43466, 43482), False, 'from matplotlib.colors import Normalize\n'), ((43627, 43650), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (43646, 43650), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((43916, 44032), 'matplotlib.pyplot.suptitle', 
'pyplot.suptitle', (['f"""Replica exchange state probabilities\n(Mean: {mean_score:.4f} Min: {min_score:.4f})"""'], {}), '(\n f"""Replica exchange state probabilities\n(Mean: {mean_score:.4f} Min: {min_score:.4f})"""\n )\n', (43931, 44032), True, 'import matplotlib.pyplot as pyplot\n'), ((44031, 44056), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {}), '(file_name)\n', (44045, 44056), True, 'import matplotlib.pyplot as pyplot\n'), ((44061, 44075), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (44073, 44075), True, 'import matplotlib.pyplot as pyplot\n'), ((3357, 3487), 'numpy.linspace', 'np.linspace', (['(frame_begin * time_delta_ps)', '((frame_begin + frame_stride * (n_frames_tot - 1)) * time_delta_ps)'], {'num': 'n_frames_tot'}), '(frame_begin * time_delta_ps, (frame_begin + frame_stride * (\n n_frames_tot - 1)) * time_delta_ps, num=n_frames_tot)\n', (3368, 3487), True, 'import numpy as np\n'), ((3812, 3856), 'mdtraj.Trajectory.save_dcd', 'Trajectory.save_dcd', (['replica_traj', 'file_name'], {}), '(replica_traj, file_name)\n', (3831, 3856), False, 'from mdtraj import Topology, Trajectory\n'), ((5962, 6006), 'mdtraj.Trajectory.save_pdb', 'Trajectory.save_pdb', (['replica_traj', 'file_name'], {}), '(replica_traj, file_name)\n', (5981, 6006), False, 'from mdtraj import Topology, Trajectory\n'), ((8629, 8759), 'numpy.linspace', 'np.linspace', (['(frame_begin * time_delta_ps)', '((frame_begin + frame_stride * (n_frames_tot - 1)) * time_delta_ps)'], {'num': 'n_frames_tot'}), '(frame_begin * time_delta_ps, (frame_begin + frame_stride * (\n n_frames_tot - 1)) * time_delta_ps, num=n_frames_tot)\n', (8640, 8759), True, 'import numpy as np\n'), ((9222, 9264), 'mdtraj.Trajectory.save_dcd', 'Trajectory.save_dcd', (['state_traj', 'file_name'], {}), '(state_traj, file_name)\n', (9241, 9264), False, 'from mdtraj import Topology, Trajectory\n'), ((11728, 11770), 'mdtraj.Trajectory.save_pdb', 'Trajectory.save_pdb', (['state_traj', 'file_name'], {}), '(state_traj, file_name)\n', (11747, 11770), False, 'from mdtraj import Topology, Trajectory\n'), ((22331, 22395), 'numpy.mean', 'np.mean', (['state_energies[state, production_start::sample_spacing]'], {}), '(state_energies[state, production_start::sample_spacing])\n', (22338, 22395), True, 'import numpy as np\n'), ((22415, 22478), 'numpy.std', 'np.std', (['state_energies[state, production_start::sample_spacing]'], {}), '(state_energies[state, production_start::sample_spacing])\n', (22421, 22478), True, 'import numpy as np\n'), ((30312, 30366), 'numpy.floor', 'np.floor', (['(total_simulation_time / simulation_time_step)'], {}), '(total_simulation_time / simulation_time_step)\n', (30320, 30366), True, 'import numpy as np\n'), ((30397, 30444), 'numpy.floor', 'np.floor', (['(simulation_steps / exchange_frequency)'], {}), '(simulation_steps / exchange_frequency)\n', (30405, 30444), True, 'import numpy as np\n'), ((30834, 30911), 'openmmtools.states.ThermodynamicState', 'openmmtools.states.ThermodynamicState', ([], {'system': 'system', 'temperature': 'temperature'}), '(system=system, temperature=temperature)\n', (30871, 30911), False, 'import openmmtools\n'), ((31587, 31609), 'os.remove', 'os.remove', (['output_data'], {}), '(output_data)\n', (31596, 31609), False, 'import os\n'), ((33373, 33427), 'numpy.floor', 'np.floor', (['(total_simulation_time / simulation_time_step)'], {}), '(total_simulation_time / simulation_time_step)\n', (33381, 33427), True, 'import numpy as np\n'), ((33457, 33504), 'numpy.floor', 'np.floor', 
(['(simulation_steps / exchange_frequency)'], {}), '(simulation_steps / exchange_frequency)\n', (33465, 33504), True, 'import numpy as np\n'), ((35751, 35818), 'numpy.array', 'np.array', (['[energy for energy in replica_energies[replica][replica]]'], {}), '([energy for energy in replica_energies[replica][replica]])\n', (35759, 35818), True, 'import numpy as np\n'), ((38900, 38919), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['file_name'], {}), '(file_name)\n', (38908, 38919), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((38982, 38997), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (38995, 38997), True, 'import matplotlib.pyplot as pyplot\n'), ((41400, 41461), 'numpy.histogram', 'np.histogram', (['state_energies[state, :]'], {'bins': '(20)', 'density': '(True)'}), '(state_energies[state, :], bins=20, density=True)\n', (41412, 41461), True, 'import numpy as np\n'), ((41680, 41755), 'matplotlib.pyplot.plot', 'pyplot.plot', (['bin_centers', 'n_out', '"""o-"""'], {'alpha': '(0.5)', 'linewidth': '(1)', 'markersize': '(6)'}), "(bin_centers, n_out, 'o-', alpha=0.5, linewidth=1, markersize=6)\n", (41691, 41755), True, 'import matplotlib.pyplot as pyplot\n'), ((42884, 42963), 'numpy.histogram', 'np.histogram', (['replica_state_indices[rep, :]'], {'bins': 'state_bin_edges', 'density': '(True)'}), '(replica_state_indices[rep, :], bins=state_bin_edges, density=True)\n', (42896, 42963), True, 'import numpy as np\n'), ((43745, 43784), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (43762, 43784), True, 'import matplotlib.cm as cm\n'), ((46471, 46490), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['file_name'], {}), '(file_name)\n', (46479, 46490), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((46553, 46568), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (46566, 46568), True, 'import matplotlib.pyplot as pyplot\n'), ((3720, 3750), 'mdtraj.Topology.from_openmm', 'Topology.from_openmm', (['topology'], {}), '(topology)\n', (3740, 3750), False, 'from mdtraj import Topology, Trajectory\n'), ((5899, 5929), 'mdtraj.Topology.from_openmm', 'Topology.from_openmm', (['topology'], {}), '(topology)\n', (5919, 5929), False, 'from mdtraj import Topology, Trajectory\n'), ((8980, 9010), 'mdtraj.Topology.from_openmm', 'Topology.from_openmm', (['topology'], {}), '(topology)\n', (9000, 9010), False, 'from mdtraj import Topology, Trajectory\n'), ((11515, 11545), 'mdtraj.Topology.from_openmm', 'Topology.from_openmm', (['topology'], {}), '(topology)\n', (11535, 11545), False, 'from mdtraj import Topology, Trajectory\n'), ((20603, 20702), 'pymbar.timeseries.subsampleCorrelatedData', 'timeseries.subsampleCorrelatedData', (['state_energies[state][production_start:]'], {'conservative': '(True)'}), '(state_energies[state][production_start:],\n conservative=True)\n', (20637, 20702), False, 'from pymbar import timeseries\n'), ((20982, 21054), 'pymbar.timeseries.detectEquilibration', 'timeseries.detectEquilibration', (['state_energies[state]'], {'nskip': 'equil_nskip'}), '(state_energies[state], nskip=equil_nskip)\n', (21012, 21054), False, 'from pymbar import timeseries\n'), ((22656, 22710), 'os.path.join', 'os.path.join', (['output_directory', '"""replica_energies.dat"""'], {}), "(output_directory, 'replica_energies.dat')\n", (22668, 22710), False, 'import os\n'), ((31034, 31076), 'openmmtools.states.SamplerState', 'openmmtools.states.SamplerState', 
(['positions'], {}), '(positions)\n', (31065, 31076), False, 'import openmmtools\n'), ((36501, 36545), 'simtk.openmm.app.pdbfile.PDBFile.writeFile', 'PDBFile.writeFile', (['topology', 'pose'], {'file': 'file'}), '(topology, pose, file=file)\n', (36518, 36545), False, 'from simtk.openmm.app.pdbfile import PDBFile\n'), ((36634, 36678), 'simtk.openmm.app.pdbfile.PDBFile.writeFile', 'PDBFile.writeFile', (['topology', 'pose'], {'file': 'file'}), '(topology, pose, file=file)\n', (36651, 36678), False, 'from simtk.openmm.app.pdbfile import PDBFile\n'), ((38684, 38708), 'numpy.floor', 'np.floor', (['(n_xdata / 1000)'], {}), '(n_xdata / 1000)\n', (38692, 38708), True, 'import numpy as np\n'), ((46251, 46275), 'numpy.floor', 'np.floor', (['(n_xdata / 1000)'], {}), '(n_xdata / 1000)\n', (46259, 46275), True, 'import numpy as np\n'), ((13398, 13438), 'numpy.where', 'np.where', (['(replica_indices == state_index)'], {}), '(replica_indices == state_index)\n', (13406, 13438), True, 'import numpy as np\n'), ((21167, 21177), 'numpy.max', 'np.max', (['t0'], {}), '(t0)\n', (21173, 21177), True, 'import numpy as np\n'), ((22053, 22063), 'numpy.mean', 'np.mean', (['g'], {}), '(g)\n', (22060, 22063), True, 'import numpy as np\n'), ((39108, 39219), 'matplotlib.pyplot.plot', 'pyplot.plot', (['simulation_times[::plot_stride]', 'state_energies[state, ::plot_stride]'], {'alpha': '(0.5)', 'linewidth': '(1)'}), '(simulation_times[::plot_stride], state_energies[state, ::\n plot_stride], alpha=0.5, linewidth=1)\n', (39119, 39219), True, 'import matplotlib.pyplot as pyplot\n'), ((39512, 39560), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Simulation Time ( Picoseconds )"""'], {}), "('Simulation Time ( Picoseconds )')\n", (39525, 39560), True, 'import matplotlib.pyplot as pyplot\n'), ((39577, 39623), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Potential Energy ( kJ / mol )"""'], {}), "('Potential Energy ( kJ / mol )')\n", (39590, 39623), True, 'import matplotlib.pyplot as pyplot\n'), ((39640, 39683), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Replica Exchange Simulation"""'], {}), "('Replica Exchange Simulation')\n", (39652, 39683), True, 'import matplotlib.pyplot as pyplot\n'), ((40182, 40196), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (40194, 40196), True, 'import matplotlib.pyplot as pyplot\n'), ((43262, 43286), 'numpy.max', 'np.max', (['hist_all[rep, :]'], {}), '(hist_all[rep, :])\n', (43268, 43286), True, 'import numpy as np\n'), ((46804, 46906), 'matplotlib.pyplot.plot', 'pyplot.plot', (['simulation_times[::plot_stride]', 'state_indices[::plot_stride]'], {'alpha': '(0.5)', 'linewidth': '(1)'}), '(simulation_times[::plot_stride], state_indices[::plot_stride],\n alpha=0.5, linewidth=1)\n', (46815, 46906), True, 'import matplotlib.pyplot as pyplot\n'), ((47200, 47248), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Simulation Time ( Picoseconds )"""'], {}), "('Simulation Time ( Picoseconds )')\n", (47213, 47248), True, 'import matplotlib.pyplot as pyplot\n'), ((47265, 47307), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Thermodynamic State Index"""'], {}), "('Thermodynamic State Index')\n", (47278, 47307), True, 'import matplotlib.pyplot as pyplot\n'), ((47324, 47362), 'matplotlib.pyplot.title', 'pyplot.title', (['"""State Exchange Summary"""'], {}), "('State Exchange Summary')\n", (47336, 47362), True, 'import matplotlib.pyplot as pyplot\n'), ((47793, 47807), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (47805, 47807), True, 'import matplotlib.pyplot as 
pyplot\n'), ((22080, 22089), 'numpy.std', 'np.std', (['g'], {}), '(g)\n', (22086, 22089), True, 'import numpy as np\n'), ((19531, 19580), 'numpy.where', 'np.where', (['(replica_state_indices[:, step] == state)'], {}), '(replica_state_indices[:, step] == state)\n', (19539, 19580), True, 'import numpy as np\n'), ((21672, 21682), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21679, 21682), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import math
import serial
import time
ser = serial.Serial('/dev/ttyACM0', baudrate = 9600, timeout = 1)
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
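# HSV lower/upper bounds used to mask out the path color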
path_lower = np.array([0,80,0])
path_upper = np.array([179,255,255])
font = cv2.FONT_HERSHEY_COMPLEX
kernel = np.ones((5,5),np.uint8)
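# Scaling constant (in pixels) used below to convert the lateral pixel error into an angle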
f_dist = 2*400
while True:
ret, frame = cap.read()
if not ret:
cap = cv2.VideoCapture(0)
continue
(h, w) = frame.shape[:2]
blur = cv2.GaussianBlur(frame,(5,5),cv2.BORDER_DEFAULT)
hsvvid = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
path_mask = cv2.inRange(hsvvid, path_lower, path_upper)
opening = cv2.morphologyEx(path_mask, cv2.MORPH_OPEN, kernel)
erosion = cv2.erode(opening,kernel,iterations = 1)
dilation = cv2.dilate(erosion,kernel,iterations = 5)
path_contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(frame, path_contours, -1, (0,255,0), 3)
if len(path_contours) > 0:
largest = max(path_contours, key = cv2.contourArea)
x_2, y_2, w_2, h_2 = cv2.boundingRect(largest)
cv2.rectangle(frame, (x_2, y_2), (x_2 + w_2, y_2 + h_2), (0, 0, 255), 3)
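        # Horizontal offset (in pixels) of the bounding-box center from the image center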
error = x_2 + (w_2/2) - w/2
#cv2.putText(frame, str(error), (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
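        # Fit a rotated rectangle to the largest contour and normalize its angle so that
        # 0 degrees corresponds to a path running straight ahead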
blackbox = cv2.minAreaRect(largest)
(x_min, y_min), (w_min, h_min), ang = blackbox
if ang > 45:
ang = ang - 90
if w_min < h_min and ang < 0:
ang = 90 + ang
if w_min > h_min and ang > 0:
ang = ang - 90
ang = int(ang)
box = cv2.boxPoints(blackbox)
box = np.int0(box)
cv2.drawContours(frame, [box], 0, (0,0,255), 3)
#cv2.putText(frame, str(ang), (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
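        # Convert the lateral pixel offset into an approximate steering angle in degrees,
        # keeping the sign of the original error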
if error != 0:
error_angle = abs((180/math.pi)*math.asin(abs(error)/f_dist)/error)*error
else:
error_angle = 0
tot_angle = ang + error_angle
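        # Send a single-character steering command ('l', 'r', or 'f') over the serial connection based on tot_angle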
if tot_angle < -10:
i = 'l'
ser.write(i.encode())
print('go left')
left_text = 'Go left'
cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
time.sleep(0.05)
elif tot_angle > 10:
i = 'r'
ser.write(i.encode())
print('go right')
right_text = 'Go right'
cv2.putText(frame, right_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
time.sleep(0.05)
else:
i = 'f'
ser.write(i.encode())
print('go straight')
straight_text = 'Go straight'
cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
time.sleep(0.175)
else:
i = 'r'
ser.write(i.encode())
print('looking for path')
straight_text = 'looking for path'
cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
time.sleep(0.05)
cv2.imshow('path video', frame)
key = cv2.waitKey(1)
if key == 27: #press esc to exit
break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"time.sleep",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.erode",
"cv2.minAreaRect",
"cv2.waitKey",
"cv2.drawContours",
"numpy.ones",
"cv2.boxPoints",
"numpy.int0",
"cv2.putText",
"cv2.morphologyEx",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.inRange",
"serial.Serial",
"cv2.VideoCapture",
"cv2.findContours",
"cv2.dilate",
"cv2.boundingRect"
] |
[((81, 136), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""'], {'baudrate': '(9600)', 'timeout': '(1)'}), "('/dev/ttyACM0', baudrate=9600, timeout=1)\n", (94, 136), False, 'import serial\n'), ((150, 169), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (166, 169), False, 'import cv2\n'), ((219, 239), 'numpy.array', 'np.array', (['[0, 80, 0]'], {}), '([0, 80, 0])\n', (227, 239), True, 'import numpy as np\n'), ((252, 277), 'numpy.array', 'np.array', (['[179, 255, 255]'], {}), '([179, 255, 255])\n', (260, 277), True, 'import numpy as np\n'), ((321, 346), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (328, 346), True, 'import numpy as np\n'), ((3320, 3343), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3341, 3343), False, 'import cv2\n'), ((519, 570), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(5, 5)', 'cv2.BORDER_DEFAULT'], {}), '(frame, (5, 5), cv2.BORDER_DEFAULT)\n', (535, 570), False, 'import cv2\n'), ((582, 619), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2HSV'], {}), '(blur, cv2.COLOR_BGR2HSV)\n', (594, 619), False, 'import cv2\n'), ((639, 682), 'cv2.inRange', 'cv2.inRange', (['hsvvid', 'path_lower', 'path_upper'], {}), '(hsvvid, path_lower, path_upper)\n', (650, 682), False, 'import cv2\n'), ((698, 749), 'cv2.morphologyEx', 'cv2.morphologyEx', (['path_mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(path_mask, cv2.MORPH_OPEN, kernel)\n', (714, 749), False, 'import cv2\n'), ((765, 805), 'cv2.erode', 'cv2.erode', (['opening', 'kernel'], {'iterations': '(1)'}), '(opening, kernel, iterations=1)\n', (774, 805), False, 'import cv2\n'), ((822, 863), 'cv2.dilate', 'cv2.dilate', (['erosion', 'kernel'], {'iterations': '(5)'}), '(erosion, kernel, iterations=5)\n', (832, 863), False, 'import cv2\n'), ((896, 966), 'cv2.findContours', 'cv2.findContours', (['dilation', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (912, 966), False, 'import cv2\n'), ((972, 1030), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'path_contours', '(-1)', '(0, 255, 0)', '(3)'], {}), '(frame, path_contours, -1, (0, 255, 0), 3)\n', (988, 1030), False, 'import cv2\n'), ((3191, 3222), 'cv2.imshow', 'cv2.imshow', (['"""path video"""', 'frame'], {}), "('path video', frame)\n", (3201, 3222), False, 'import cv2\n'), ((3234, 3248), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3245, 3248), False, 'import cv2\n'), ((439, 458), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (455, 458), False, 'import cv2\n'), ((1154, 1179), 'cv2.boundingRect', 'cv2.boundingRect', (['largest'], {}), '(largest)\n', (1170, 1179), False, 'import cv2\n'), ((1189, 1261), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x_2, y_2)', '(x_2 + w_2, y_2 + h_2)', '(0, 0, 255)', '(3)'], {}), '(frame, (x_2, y_2), (x_2 + w_2, y_2 + h_2), (0, 0, 255), 3)\n', (1202, 1261), False, 'import cv2\n'), ((1409, 1433), 'cv2.minAreaRect', 'cv2.minAreaRect', (['largest'], {}), '(largest)\n', (1424, 1433), False, 'import cv2\n'), ((1713, 1736), 'cv2.boxPoints', 'cv2.boxPoints', (['blackbox'], {}), '(blackbox)\n', (1726, 1736), False, 'import cv2\n'), ((1752, 1764), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (1759, 1764), True, 'import numpy as np\n'), ((1774, 1823), 'cv2.drawContours', 'cv2.drawContours', (['frame', '[box]', '(0)', '(0, 0, 255)', '(3)'], {}), '(frame, [box], 0, (0, 0, 255), 3)\n', (1790, 1823), False, 'import cv2\n'), ((3077, 3162), 'cv2.putText', 'cv2.putText', 
(['frame', 'straight_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA\n )\n', (3088, 3162), False, 'import cv2\n'), ((3167, 3183), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3177, 3183), False, 'import time\n'), ((2271, 2347), 'cv2.putText', 'cv2.putText', (['frame', 'left_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\n', (2282, 2347), False, 'import cv2\n'), ((2361, 2377), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (2371, 2377), False, 'import time\n'), ((2547, 2624), 'cv2.putText', 'cv2.putText', (['frame', 'right_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, right_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\n', (2558, 2624), False, 'import cv2\n'), ((2638, 2654), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (2648, 2654), False, 'import time\n'), ((2818, 2903), 'cv2.putText', 'cv2.putText', (['frame', 'straight_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA\n )\n', (2829, 2903), False, 'import cv2\n'), ((2912, 2929), 'time.sleep', 'time.sleep', (['(0.175)'], {}), '(0.175)\n', (2922, 2929), False, 'import time\n')]
|
import os
import re
import ast
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
package_name = "omicexperiment"
# version parsing from __init__ pulled from scikit-bio
# https://github.com/biocore/scikit-bio/blob/master/setup.py
# which is itself based off Flask's setup.py https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
# Bootstrap setup.py with numpy
# from the solution by coldfix http://stackoverflow.com/a/21621689/579416
class build_ext_numpy(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
import builtins
builtins.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
with open('omicexperiment/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.md')).read()
try:
import pypandoc
long_description = pypandoc.convert(README + '\n\n' + CHANGES, 'rst', format='md')
except ImportError:
long_description= README + '\n\n' + CHANGES
setup_requires = [
'numpy >= 1.10.4'
]
install_requires = [
'numpy >= 1.10.4',
'scipy>=0.16.1',
'pandas >= 0.17.1',
'biom-format >= 2.1.5',
'lxml>=3.5.0',
'pygal >= 2.1.1',
'scikit-bio==0.4.2',
'pyyaml',
'bokeh==0.13.0']
setup(name=package_name,
version=version,
license='BSD',
description="For analysis of omic experiments.",
long_description=long_description,
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
cmdclass={'build_ext': build_ext_numpy},
author='<NAME>',
author_email='<EMAIL>',
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url='https://github.com/bassio/omicexperiment',
download_url = 'https://github.com/bassio/omicexperiment/tarball/' + version,
keywords='bioinformatics',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='omicexperiment.tests',
install_requires=install_requires,
setup_requires=setup_requires,
entry_points="""\
""",
)
|
[
"pypandoc.convert",
"re.compile",
"setuptools.find_packages",
"os.path.join",
"ast.literal_eval",
"os.path.dirname",
"numpy.get_include",
"setuptools.command.build_ext.build_ext.finalize_options"
] |
[((406, 444), 're.compile', 're.compile', (['"""__version__\\\\s+=\\\\s+(.*)"""'], {}), "('__version__\\\\s+=\\\\s+(.*)')\n", (416, 444), False, 'import re\n'), ((1057, 1082), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1072, 1082), False, 'import os\n'), ((1244, 1307), 'pypandoc.convert', 'pypandoc.convert', (["(README + '\\n\\n' + CHANGES)", '"""rst"""'], {'format': '"""md"""'}), "(README + '\\n\\n' + CHANGES, 'rst', format='md')\n", (1260, 1307), False, 'import pypandoc\n'), ((627, 660), 'setuptools.command.build_ext.build_ext.finalize_options', '_build_ext.finalize_options', (['self'], {}), '(self)\n', (654, 660), True, 'from setuptools.command.build_ext import build_ext as _build_ext\n'), ((1009, 1030), 'ast.literal_eval', 'ast.literal_eval', (['hit'], {}), '(hit)\n', (1025, 1030), False, 'import ast\n'), ((2343, 2358), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2356, 2358), False, 'from setuptools import setup, find_packages\n'), ((852, 871), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (869, 871), False, 'import numpy\n'), ((1099, 1130), 'os.path.join', 'os.path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (1111, 1130), False, 'import os\n'), ((1154, 1186), 'os.path.join', 'os.path.join', (['here', '"""CHANGES.md"""'], {}), "(here, 'CHANGES.md')\n", (1166, 1186), False, 'import os\n')]
|
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import division
from builtins import zip
from builtins import next
from builtins import range
from ....pipeline import engine as pe
from ....interfaces import utility as niu
from ....interfaces import fsl
from ....interfaces import ants
def cleanup_edge_pipeline(name='Cleanup'):
"""
Perform some de-spiking filtering to clean up the edge of the fieldmap
(copied from fsl_prepare_fieldmap)
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_mask']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']),
name='outputnode')
fugue = pe.Node(fsl.FUGUE(save_fmap=True, despike_2dfilter=True,
despike_threshold=2.1), name='Despike')
erode = pe.Node(fsl.maths.MathsCommand(
nan2zeros=True, args='-kernel 2D -ero'), name='MskErode')
newmsk = pe.Node(fsl.MultiImageMaths(op_string='-sub %s -thr 0.5 -bin'),
name='NewMask')
applymsk = pe.Node(fsl.ApplyMask(nan2zeros=True), name='ApplyMask')
join = pe.Node(niu.Merge(2), name='Merge')
addedge = pe.Node(fsl.MultiImageMaths(op_string='-mas %s -add %s'),
name='AddEdge')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, fugue, [('in_file', 'fmap_in_file'),
('in_mask', 'mask_file')]),
(inputnode, erode, [('in_mask', 'in_file')]),
(inputnode, newmsk, [('in_mask', 'in_file')]),
(erode, newmsk, [('out_file', 'operand_files')]),
(fugue, applymsk, [('fmap_out_file', 'in_file')]),
(newmsk, applymsk, [('out_file', 'mask_file')]),
(erode, join, [('out_file', 'in1')]),
(applymsk, join, [('out_file', 'in2')]),
(inputnode, addedge, [('in_file', 'in_file')]),
(join, addedge, [('out', 'operand_files')]),
(addedge, outputnode, [('out_file', 'out_file')])
])
return wf
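# Illustrative usage sketch (not part of the original module): how the Cleanup
# workflow above could be wired up and run. The file names are hypothetical.
def _example_cleanup_edge(fmap_file='fieldmap_rads.nii.gz',
                          mask_file='brain_mask.nii.gz'):
    wf = cleanup_edge_pipeline(name='Cleanup')
    wf.inputs.inputnode.in_file = fmap_file
    wf.inputs.inputnode.in_mask = mask_file
    wf.base_dir = '.'
    return wf.run()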
def vsm2warp(name='Shiftmap2Warping'):
"""
Converts a voxel shift map (vsm) to a displacements field (warp).
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_vsm', 'in_ref', 'scaling', 'enc_dir']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_warp']),
name='outputnode')
fixhdr = pe.Node(niu.Function(
input_names=['in_file', 'in_file_hdr'], output_names=['out_file'],
function=copy_hdr), name='Fix_hdr')
vsm = pe.Node(fsl.maths.BinaryMaths(operation='mul'), name='ScaleField')
vsm2dfm = pe.Node(fsl.ConvertWarp(relwarp=True, out_relwarp=True),
name='vsm2dfm')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, fixhdr, [('in_vsm', 'in_file'),
('in_ref', 'in_file_hdr')]),
(inputnode, vsm, [('scaling', 'operand_value')]),
(fixhdr, vsm, [('out_file', 'in_file')]),
(vsm, vsm2dfm, [('out_file', 'shift_in_file')]),
(inputnode, vsm2dfm, [('in_ref', 'reference'),
('enc_dir', 'shift_direction')]),
(vsm2dfm, outputnode, [('out_file', 'out_warp')])
])
return wf
def dwi_flirt(name='DWICoregistration', excl_nodiff=False,
flirt_param={}):
"""
Generates a workflow for linear registration of dwi volumes
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['reference', 'in_file', 'ref_mask', 'in_xfms', 'in_bval']),
name='inputnode')
initmat = pe.Node(niu.Function(
input_names=['in_bval', 'in_xfms', 'excl_nodiff'],
output_names=['init_xfms'], function=_checkinitxfm), name='InitXforms')
initmat.inputs.excl_nodiff = excl_nodiff
dilate = pe.Node(fsl.maths.MathsCommand(
nan2zeros=True, args='-kernel sphere 5 -dilM'), name='MskDilate')
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
pick_ref = pe.Node(niu.Select(), name='Pick_b0')
n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias')
enhb0 = pe.Node(niu.Function(
input_names=['in_file', 'in_mask', 'clip_limit'],
output_names=['out_file'], function=enhance), name='B0Equalize')
enhb0.inputs.clip_limit = 0.015
enhdw = pe.MapNode(niu.Function(
input_names=['in_file', 'in_mask'], output_names=['out_file'],
function=enhance), name='DWEqualize', iterfield=['in_file'])
flirt = pe.MapNode(fsl.FLIRT(**flirt_param), name='CoRegistration',
iterfield=['in_file', 'in_matrix_file'])
thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
name='RemoveNegative')
merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_file', 'out_xfms']), name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, split, [('in_file', 'in_file')]),
(inputnode, dilate, [('ref_mask', 'in_file')]),
(inputnode, enhb0, [('ref_mask', 'in_mask')]),
(inputnode, initmat, [('in_xfms', 'in_xfms'),
('in_bval', 'in_bval')]),
(inputnode, n4, [('reference', 'input_image'),
('ref_mask', 'mask_image')]),
(dilate, flirt, [('out_file', 'ref_weight'),
('out_file', 'in_weight')]),
(n4, enhb0, [('output_image', 'in_file')]),
(split, enhdw, [('out_files', 'in_file')]),
(dilate, enhdw, [('out_file', 'in_mask')]),
(enhb0, flirt, [('out_file', 'reference')]),
(enhdw, flirt, [('out_file', 'in_file')]),
(initmat, flirt, [('init_xfms', 'in_matrix_file')]),
(flirt, thres, [('out_file', 'in_file')]),
(thres, merge, [('out_file', 'in_files')]),
(merge, outputnode, [('merged_file', 'out_file')]),
(flirt, outputnode, [('out_matrix_file', 'out_xfms')])
])
return wf
def apply_all_corrections(name='UnwarpArtifacts'):
"""
Combines two lists of linear transforms with the deformation field
map obtained typically after the SDC process.
Additionally, computes the corresponding bspline coefficients and
the map of determinants of the jacobian.
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_sdc', 'in_hmc', 'in_ecc', 'in_dwi']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_file', 'out_warp', 'out_coeff', 'out_jacobian']),
name='outputnode')
warps = pe.MapNode(fsl.ConvertWarp(relwarp=True),
iterfield=['premat', 'postmat'],
name='ConvertWarp')
selref = pe.Node(niu.Select(index=[0]), name='Reference')
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
unwarp = pe.MapNode(fsl.ApplyWarp(), iterfield=['in_file', 'field_file'],
name='UnwarpDWIs')
coeffs = pe.MapNode(fsl.WarpUtils(out_format='spline'),
iterfield=['in_file'], name='CoeffComp')
jacobian = pe.MapNode(fsl.WarpUtils(write_jacobian=True),
iterfield=['in_file'], name='JacobianComp')
jacmult = pe.MapNode(fsl.MultiImageMaths(op_string='-mul %s'),
iterfield=['in_file', 'operand_files'],
name='ModulateDWIs')
thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
name='RemoveNegative')
merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, warps, [('in_sdc', 'warp1'),
('in_hmc', 'premat'),
('in_ecc', 'postmat'),
('in_dwi', 'reference')]),
(inputnode, split, [('in_dwi', 'in_file')]),
(split, selref, [('out_files', 'inlist')]),
(warps, unwarp, [('out_file', 'field_file')]),
(split, unwarp, [('out_files', 'in_file')]),
(selref, unwarp, [('out', 'ref_file')]),
(selref, coeffs, [('out', 'reference')]),
(warps, coeffs, [('out_file', 'in_file')]),
(selref, jacobian, [('out', 'reference')]),
(coeffs, jacobian, [('out_file', 'in_file')]),
(unwarp, jacmult, [('out_file', 'in_file')]),
(jacobian, jacmult, [('out_jacobian', 'operand_files')]),
(jacmult, thres, [('out_file', 'in_file')]),
(thres, merge, [('out_file', 'in_files')]),
(warps, outputnode, [('out_file', 'out_warp')]),
(coeffs, outputnode, [('out_file', 'out_coeff')]),
(jacobian, outputnode, [('out_jacobian', 'out_jacobian')]),
(merge, outputnode, [('merged_file', 'out_file')])
])
return wf
def extract_bval(in_dwi, in_bval, b=0, out_file=None):
"""
Writes an image containing only the volumes with b-value specified at
input
"""
import numpy as np
import nibabel as nb
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_dwi))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_tsoi%s" % (fname, ext))
im = nb.load(in_dwi)
dwidata = im.get_data()
bvals = np.loadtxt(in_bval)
if b == 'diff':
selection = np.where(bvals != 0)
elif b == 'nodiff':
selection = np.where(bvals == 0)
else:
selection = np.where(bvals == b)
extdata = np.squeeze(dwidata.take(selection, axis=3))
hdr = im.header.copy()
hdr.set_data_shape(extdata.shape)
nb.Nifti1Image(extdata, im.affine, hdr).to_filename(out_file)
return out_file
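# Illustrative usage sketch (not part of the original module): keeping only the
# b=0 volumes of a DWI series. The file names are hypothetical.
def _example_extract_b0(in_dwi='dwi.nii.gz', in_bval='dwi.bval'):
    # b='nodiff' selects the volumes whose b-value equals 0
    return extract_bval(in_dwi, in_bval, b='nodiff', out_file='dwi_b0_only.nii.gz')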
def hmc_split(in_file, in_bval, ref_num=0, lowbval=5.0):
"""
Selects the reference and moving volumes from a dwi dataset
for the purpose of HMC.
"""
import numpy as np
import nibabel as nb
import os.path as op
from nipype.interfaces.base import isdefined
im = nb.load(in_file)
data = im.get_data()
hdr = im.header.copy()
bval = np.loadtxt(in_bval)
lowbs = np.where(bval <= lowbval)[0]
volid = lowbs[0]
if (isdefined(ref_num) and (ref_num < len(lowbs))):
volid = ref_num
if volid == 0:
data = data[..., 1:]
bval = bval[1:]
elif volid == (data.shape[-1] - 1):
data = data[..., :-1]
bval = bval[:-1]
else:
data = np.concatenate((data[..., :volid], data[..., (volid + 1):]),
axis=3)
bval = np.hstack((bval[:volid], bval[(volid + 1):]))
out_ref = op.abspath('hmc_ref.nii.gz')
out_mov = op.abspath('hmc_mov.nii.gz')
out_bval = op.abspath('bval_split.txt')
refdata = data[..., volid]
hdr.set_data_shape(refdata.shape)
nb.Nifti1Image(refdata, im.affine, hdr).to_filename(out_ref)
hdr.set_data_shape(data.shape)
nb.Nifti1Image(data, im.affine, hdr).to_filename(out_mov)
np.savetxt(out_bval, bval)
return [out_ref, out_mov, out_bval, volid]
def remove_comp(in_file, in_bval, volid=0, out_file=None):
"""
Removes the volume ``volid`` from the 4D nifti file
"""
import numpy as np
import nibabel as nb
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_extract%s" % (fname, ext))
im = nb.load(in_file)
data = im.get_data()
hdr = im.header.copy()
bval = np.loadtxt(in_bval)
if volid == 0:
data = data[..., 1:]
bval = bval[1:]
elif volid == (data.shape[-1] - 1):
data = data[..., :-1]
bval = bval[:-1]
else:
data = np.concatenate((data[..., :volid], data[..., (volid + 1):]),
axis=3)
bval = np.hstack((bval[:volid], bval[(volid + 1):]))
hdr.set_data_shape(data.shape)
nb.Nifti1Image(data, im.affine, hdr).to_filename(out_file)
out_bval = op.abspath('bval_extract.txt')
np.savetxt(out_bval, bval)
return out_file, out_bval
def insert_mat(inlist, volid=0):
import numpy as np
import os.path as op
idfname = op.abspath('identity.mat')
out = inlist
np.savetxt(idfname, np.eye(4))
out.insert(volid, idfname)
return out
def recompose_dwi(in_dwi, in_bval, in_corrected, out_file=None):
"""
    Recompose the dMRI data according to the b-values table after
    eddy-current (EC) correction
"""
import numpy as np
import nibabel as nb
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_dwi))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_eccorrect%s" % (fname, ext))
im = nb.load(in_dwi)
dwidata = im.get_data()
bvals = np.loadtxt(in_bval)
dwis = np.where(bvals != 0)[0].tolist()
if len(dwis) != len(in_corrected):
        raise RuntimeError(('Length of DWIs in b-values table and after '
                            'correction should match'))
for bindex, dwi in zip(dwis, in_corrected):
dwidata[..., bindex] = nb.load(dwi).get_data()
nb.Nifti1Image(dwidata, im.affine, im.header).to_filename(out_file)
return out_file
def recompose_xfm(in_bval, in_xfms):
"""
Insert identity transformation matrices in b0 volumes to build up a list
"""
import numpy as np
import os.path as op
bvals = np.loadtxt(in_bval)
out_matrix = np.array([np.eye(4)] * len(bvals))
xfms = iter([np.loadtxt(xfm) for xfm in in_xfms])
out_files = []
for i, b in enumerate(bvals):
if b == 0.0:
mat = np.eye(4)
else:
mat = next(xfms)
out_name = op.abspath('eccor_%04d.mat' % i)
out_files.append(out_name)
np.savetxt(out_name, mat)
return out_files
def time_avg(in_file, index=[0], out_file=None):
"""
Average the input time-series, selecting the indices given in index
.. warning:: time steps should be already registered (corrected for
head motion artifacts).
"""
import numpy as np
import nibabel as nb
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_baseline%s" % (fname, ext))
index = np.atleast_1d(index).tolist()
imgs = np.array(nb.four_to_three(nb.load(in_file)))[index]
if len(index) == 1:
data = imgs[0].get_data().astype(np.float32)
else:
data = np.average(np.array([im.get_data().astype(np.float32)
for im in imgs]), axis=0)
hdr = imgs[0].header.copy()
hdr.set_data_shape(data.shape)
hdr.set_xyzt_units('mm')
hdr.set_data_dtype(np.float32)
nb.Nifti1Image(data, imgs[0].affine, hdr).to_filename(out_file)
return out_file
def b0_indices(in_bval, max_b=10.0):
"""
    Extract the indices of the volumes in a b-values file with a low b value
"""
import numpy as np
bval = np.loadtxt(in_bval)
return np.argwhere(bval <= max_b).flatten().tolist()
def b0_average(in_dwi, in_bval, max_b=10.0, out_file=None):
"""
A function that averages the *b0* volumes from a DWI dataset.
As current dMRI data are being acquired with all b-values > 0.0,
the *lowb* volumes are selected by specifying the parameter max_b.
.. warning:: *b0* should be already registered (head motion artifact should
be corrected).
"""
import numpy as np
import nibabel as nb
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_dwi))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_avg_b0%s" % (fname, ext))
imgs = np.array(nb.four_to_three(nb.load(in_dwi)))
bval = np.loadtxt(in_bval)
index = np.argwhere(bval <= max_b).flatten().tolist()
b0s = [im.get_data().astype(np.float32)
for im in imgs[index]]
b0 = np.average(np.array(b0s), axis=0)
hdr = imgs[0].header.copy()
hdr.set_data_shape(b0.shape)
hdr.set_xyzt_units('mm')
hdr.set_data_dtype(np.float32)
nb.Nifti1Image(b0, imgs[0].affine, hdr).to_filename(out_file)
return out_file
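# Illustrative usage sketch (not part of the original module): averaging the
# low-b volumes of a motion-corrected DWI series. File names are hypothetical.
def _example_b0_average(in_dwi='dwi_hmc.nii.gz', in_bval='dwi.bval'):
    return b0_average(in_dwi, in_bval, max_b=10.0, out_file='b0_mean.nii.gz')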
def rotate_bvecs(in_bvec, in_matrix):
"""
    Rotates the input bvec file according to a list of affine matrices.
.. note:: the input affine matrix transforms points in the destination
image to their corresponding coordinates in the original image.
Therefore, this matrix should be inverted first, as we want to know
the target position of :math:`\\vec{r}`.
"""
import os
import numpy as np
name, fext = os.path.splitext(os.path.basename(in_bvec))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('%s_rotated.bvec' % name)
bvecs = np.loadtxt(in_bvec).T
new_bvecs = []
if len(bvecs) != len(in_matrix):
raise RuntimeError(('Number of b-vectors (%d) and rotation '
'matrices (%d) should match.') % (len(bvecs),
len(in_matrix)))
for bvec, mat in zip(bvecs, in_matrix):
if np.all(bvec == 0.0):
new_bvecs.append(bvec)
else:
invrot = np.linalg.inv(np.loadtxt(mat))[:3, :3]
newbvec = invrot.dot(bvec)
new_bvecs.append((newbvec / np.linalg.norm(newbvec)))
np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
return out_file
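# Illustrative usage sketch (not part of the original module): rotating a
# b-vector table with the per-volume affine (.mat) files produced by a linear
# registration such as dwi_flirt above. File names are hypothetical and there
# must be exactly one matrix per b-vector.
def _example_rotate_bvecs(in_bvec='dwi.bvec',
                          in_matrix=('vol0000.mat', 'vol0001.mat')):
    return rotate_bvecs(in_bvec, list(in_matrix))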
def eddy_rotate_bvecs(in_bvec, eddy_params):
"""
    Rotates the input bvec file according to a list of parameters sourced
from ``eddy``, as explained `here
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/EDDY/Faq#Will_eddy_rotate_my_bevcs_for_me.3F>`_.
"""
import os
import numpy as np
from math import sin, cos
name, fext = os.path.splitext(os.path.basename(in_bvec))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('%s_rotated.bvec' % name)
bvecs = np.loadtxt(in_bvec).T
new_bvecs = []
params = np.loadtxt(eddy_params)
if len(bvecs) != len(params):
raise RuntimeError(('Number of b-vectors and rotation '
'matrices should match.'))
for bvec, row in zip(bvecs, params):
if np.all(bvec == 0.0):
new_bvecs.append(bvec)
else:
ax = row[3]
ay = row[4]
az = row[5]
Rx = np.array([[1.0, 0.0, 0.0],
[0.0, cos(ax), -sin(ax)],
[0.0, sin(ax), cos(ax)]])
Ry = np.array([[cos(ay), 0.0, sin(ay)],
[0.0, 1.0, 0.0],
[-sin(ay), 0.0, cos(ay)]])
Rz = np.array([[cos(az), -sin(az), 0.0],
[sin(az), cos(az), 0.0],
[0.0, 0.0, 1.0]])
R = Rx.dot(Ry).dot(Rz)
invrot = np.linalg.inv(R)
newbvec = invrot.dot(bvec)
new_bvecs.append(newbvec / np.linalg.norm(newbvec))
np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
return out_file
def compute_readout(params):
"""
Computes readout time from epi params (see `eddy documentation
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/EDDY/Faq#How_do_I_know_what_to_put_into_my_--acqp_file.3F>`_).
.. warning:: ``params['echospacing']`` should be in *sec* units.
"""
epi_factor = 1.0
acc_factor = 1.0
try:
if params['epi_factor'] > 1:
epi_factor = float(params['epi_factor'] - 1)
    except Exception:
pass
try:
if params['acc_factor'] > 1:
acc_factor = 1.0 / params['acc_factor']
    except Exception:
pass
return acc_factor * epi_factor * params['echospacing']
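# Worked example (not part of the original module) with hypothetical acquisition
# parameters: epi_factor=96, acc_factor=2 and echospacing=0.00077 s give
# readout = (1/2) * (96 - 1) * 0.00077 ~= 0.0366 s.
def _example_compute_readout():
    params = {'epi_factor': 96, 'acc_factor': 2, 'echospacing': 0.00077}
    return compute_readout(params)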
def siemens2rads(in_file, out_file=None):
"""
Converts input phase difference map to rads
"""
import numpy as np
import nibabel as nb
import os.path as op
import math
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, _ = op.splitext(fname)
out_file = op.abspath('./%s_rads.nii.gz' % fname)
in_file = np.atleast_1d(in_file).tolist()
im = nb.load(in_file[0])
data = im.get_data().astype(np.float32)
hdr = im.header.copy()
if len(in_file) == 2:
data = nb.load(in_file[1]).get_data().astype(np.float32) - data
elif (data.ndim == 4) and (data.shape[-1] == 2):
data = np.squeeze(data[..., 1] - data[..., 0])
hdr.set_data_shape(data.shape[:3])
imin = data.min()
imax = data.max()
data = (2.0 * math.pi * (data - imin) / (imax - imin)) - math.pi
hdr.set_data_dtype(np.float32)
hdr.set_xyzt_units('mm')
hdr['datatype'] = 16
nb.Nifti1Image(data, im.affine, hdr).to_filename(out_file)
return out_file
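# Illustrative usage sketch (not part of the original module): the rescaling
# above maps the input intensity range [imin, imax] linearly onto [-pi, pi],
# e.g. Siemens phase-difference maps stored as integers in [0, 4095].
# The file name is hypothetical.
def _example_siemens2rads(in_file='phasediff.nii.gz'):
    return siemens2rads(in_file)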
def rads2radsec(in_file, delta_te, out_file=None):
"""
    Converts input phase map in rads to rads per second (dividing by delta_te)
"""
import numpy as np
import nibabel as nb
import os.path as op
import math
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, _ = op.splitext(fname)
out_file = op.abspath('./%s_radsec.nii.gz' % fname)
im = nb.load(in_file)
data = im.get_data().astype(np.float32) * (1.0 / delta_te)
nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file)
return out_file
def demean_image(in_file, in_mask=None, out_file=None):
"""
Demean image data inside mask
"""
import numpy as np
import nibabel as nb
import os.path as op
import math
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, _ = op.splitext(fname)
out_file = op.abspath('./%s_demean.nii.gz' % fname)
im = nb.load(in_file)
data = im.get_data().astype(np.float32)
msk = np.ones_like(data)
if in_mask is not None:
msk = nb.load(in_mask).get_data().astype(np.float32)
msk[msk > 0] = 1.0
msk[msk < 1] = 0.0
mean = np.median(data[msk == 1].reshape(-1))
data[msk == 1] = data[msk == 1] - mean
nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file)
return out_file
def add_empty_vol(in_file, out_file=None):
"""
    Adds an empty volume to the phase difference image
"""
import nibabel as nb
import os.path as op
import numpy as np
import math
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, _ = op.splitext(fname)
out_file = op.abspath('./%s_4D.nii.gz' % fname)
im = nb.load(in_file)
zim = nb.Nifti1Image(np.zeros_like(im.get_data()), im.affine,
im.header)
nb.funcs.concat_images([im, zim]).to_filename(out_file)
return out_file
def reorient_bvecs(in_dwi, old_dwi, in_bvec):
"""
Checks reorientations of ``in_dwi`` w.r.t. ``old_dwi`` and
reorients the in_bvec table accordingly.
"""
import os
import numpy as np
import nibabel as nb
name, fext = os.path.splitext(os.path.basename(in_bvec))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('%s_reorient.bvec' % name)
bvecs = np.loadtxt(in_bvec).T
new_bvecs = []
N = nb.load(in_dwi).affine
O = nb.load(old_dwi).affine
RS = N.dot(np.linalg.inv(O))[:3, :3]
sc_idx = np.where((np.abs(RS) != 1) & (RS != 0))
S = np.ones_like(RS)
S[sc_idx] = RS[sc_idx]
R = RS / S
new_bvecs = [R.dot(b) for b in bvecs]
np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
return out_file
def copy_hdr(in_file, in_file_hdr, out_file=None):
import numpy as np
import nibabel as nb
import os.path as op
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, _ = op.splitext(fname)
out_file = op.abspath('./%s_fixhdr.nii.gz' % fname)
imref = nb.load(in_file_hdr)
hdr = imref.header.copy()
hdr.set_data_dtype(np.float32)
vsm = nb.load(in_file).get_data().astype(np.float32)
hdr.set_data_shape(vsm.shape)
hdr.set_xyzt_units('mm')
nii = nb.Nifti1Image(vsm, imref.affine, hdr)
nii.to_filename(out_file)
return out_file
def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None):
import numpy as np
import nibabel as nb
import os.path as op
from skimage import exposure, img_as_int
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, _ = op.splitext(fname)
out_file = op.abspath('./%s_enh.nii.gz' % fname)
im = nb.load(in_file)
imdata = im.get_data()
imshape = im.shape
if in_mask is not None:
msk = nb.load(in_mask).get_data()
msk[msk > 0] = 1
msk[msk < 1] = 0
imdata = imdata * msk
immin = imdata.min()
imdata = (imdata - immin).astype(np.uint16)
adapted = exposure.equalize_adapthist(imdata.reshape(imshape[0], -1),
clip_limit=clip_limit)
nb.Nifti1Image(adapted.reshape(imshape), im.affine,
im.header).to_filename(out_file)
return out_file
def _checkinitxfm(in_bval, excl_nodiff, in_xfms=None):
from nipype.interfaces.base import isdefined
import numpy as np
import os.path as op
bvals = np.loadtxt(in_bval)
gen_id = ((in_xfms is None) or
(not isdefined(in_xfms)) or
(len(in_xfms) != len(bvals)))
init_xfms = []
if excl_nodiff:
dws = np.where(bvals != 0)[0].tolist()
else:
dws = list(range(len(bvals)))
if gen_id:
for i in dws:
xfm_file = op.abspath('init_%04d.mat' % i)
np.savetxt(xfm_file, np.eye(4))
init_xfms.append(xfm_file)
else:
init_xfms = [in_xfms[i] for i in dws]
return init_xfms
|
[
"builtins.next",
"nibabel.load",
"numpy.hstack",
"nibabel.funcs.concat_images",
"math.cos",
"numpy.array",
"numpy.loadtxt",
"numpy.linalg.norm",
"numpy.where",
"numpy.concatenate",
"numpy.abs",
"numpy.eye",
"os.path.splitext",
"numpy.squeeze",
"builtins.zip",
"numpy.savetxt",
"nibabel.Nifti1Image",
"nipype.interfaces.base.isdefined",
"numpy.atleast_1d",
"numpy.ones_like",
"numpy.linalg.inv",
"numpy.argwhere",
"os.path.basename",
"os.path.abspath",
"numpy.all",
"math.sin"
] |
[((9396, 9411), 'nibabel.load', 'nb.load', (['in_dwi'], {}), '(in_dwi)\n', (9403, 9411), True, 'import nibabel as nb\n'), ((9452, 9471), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (9462, 9471), True, 'import numpy as np\n'), ((10159, 10175), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (10166, 10175), True, 'import nibabel as nb\n'), ((10239, 10258), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (10249, 10258), True, 'import numpy as np\n'), ((10771, 10799), 'os.path.abspath', 'op.abspath', (['"""hmc_ref.nii.gz"""'], {}), "('hmc_ref.nii.gz')\n", (10781, 10799), True, 'import os.path as op\n'), ((10814, 10842), 'os.path.abspath', 'op.abspath', (['"""hmc_mov.nii.gz"""'], {}), "('hmc_mov.nii.gz')\n", (10824, 10842), True, 'import os.path as op\n'), ((10858, 10886), 'os.path.abspath', 'op.abspath', (['"""bval_split.txt"""'], {}), "('bval_split.txt')\n", (10868, 10886), True, 'import os.path as op\n'), ((11124, 11150), 'numpy.savetxt', 'np.savetxt', (['out_bval', 'bval'], {}), '(out_bval, bval)\n', (11134, 11150), True, 'import numpy as np\n'), ((11655, 11671), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (11662, 11671), True, 'import nibabel as nb\n'), ((11735, 11754), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (11745, 11754), True, 'import numpy as np\n'), ((12222, 12252), 'os.path.abspath', 'op.abspath', (['"""bval_extract.txt"""'], {}), "('bval_extract.txt')\n", (12232, 12252), True, 'import os.path as op\n'), ((12257, 12283), 'numpy.savetxt', 'np.savetxt', (['out_bval', 'bval'], {}), '(out_bval, bval)\n', (12267, 12283), True, 'import numpy as np\n'), ((12411, 12437), 'os.path.abspath', 'op.abspath', (['"""identity.mat"""'], {}), "('identity.mat')\n", (12421, 12437), True, 'import os.path as op\n'), ((13032, 13047), 'nibabel.load', 'nb.load', (['in_dwi'], {}), '(in_dwi)\n', (13039, 13047), True, 'import nibabel as nb\n'), ((13088, 13107), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (13098, 13107), True, 'import numpy as np\n'), ((13345, 13368), 'builtins.zip', 'zip', (['dwis', 'in_corrected'], {}), '(dwis, in_corrected)\n', (13348, 13368), False, 'from builtins import zip\n'), ((13711, 13730), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (13721, 13730), True, 'import numpy as np\n'), ((15391, 15410), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (15401, 15410), True, 'import numpy as np\n'), ((16234, 16253), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (16244, 16253), True, 'import numpy as np\n'), ((17220, 17261), 'os.path.abspath', 'os.path.abspath', (["('%s_rotated.bvec' % name)"], {}), "('%s_rotated.bvec' % name)\n", (17235, 17261), False, 'import os\n'), ((17597, 17618), 'builtins.zip', 'zip', (['bvecs', 'in_matrix'], {}), '(bvecs, in_matrix)\n', (17600, 17618), False, 'from builtins import zip\n'), ((18427, 18468), 'os.path.abspath', 'os.path.abspath', (["('%s_rotated.bvec' % name)"], {}), "('%s_rotated.bvec' % name)\n", (18442, 18468), False, 'import os\n'), ((18536, 18559), 'numpy.loadtxt', 'np.loadtxt', (['eddy_params'], {}), '(eddy_params)\n', (18546, 18559), True, 'import numpy as np\n'), ((18736, 18754), 'builtins.zip', 'zip', (['bvecs', 'params'], {}), '(bvecs, params)\n', (18739, 18754), False, 'from builtins import zip\n'), ((20725, 20744), 'nibabel.load', 'nb.load', (['in_file[0]'], {}), '(in_file[0])\n', (20732, 20744), True, 'import nibabel as nb\n'), ((21778, 21794), 'nibabel.load', 'nb.load', 
(['in_file'], {}), '(in_file)\n', (21785, 21794), True, 'import nibabel as nb\n'), ((22364, 22380), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (22371, 22380), True, 'import nibabel as nb\n'), ((22435, 22453), 'numpy.ones_like', 'np.ones_like', (['data'], {}), '(data)\n', (22447, 22453), True, 'import numpy as np\n'), ((23198, 23214), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (23205, 23214), True, 'import nibabel as nb\n'), ((23771, 23813), 'os.path.abspath', 'os.path.abspath', (["('%s_reorient.bvec' % name)"], {}), "('%s_reorient.bvec' % name)\n", (23786, 23813), False, 'import os\n'), ((24033, 24049), 'numpy.ones_like', 'np.ones_like', (['RS'], {}), '(RS)\n', (24045, 24049), True, 'import numpy as np\n'), ((24566, 24586), 'nibabel.load', 'nb.load', (['in_file_hdr'], {}), '(in_file_hdr)\n', (24573, 24586), True, 'import nibabel as nb\n'), ((24782, 24820), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['vsm', 'imref.affine', 'hdr'], {}), '(vsm, imref.affine, hdr)\n', (24796, 24820), True, 'import nibabel as nb\n'), ((25277, 25293), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (25284, 25293), True, 'import nibabel as nb\n'), ((26005, 26024), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (26015, 26024), True, 'import numpy as np\n'), ((9347, 9385), 'os.path.abspath', 'op.abspath', (["('%s_tsoi%s' % (fname, ext))"], {}), "('%s_tsoi%s' % (fname, ext))\n", (9357, 9385), True, 'import os.path as op\n'), ((9513, 9533), 'numpy.where', 'np.where', (['(bvals != 0)'], {}), '(bvals != 0)\n', (9521, 9533), True, 'import numpy as np\n'), ((10272, 10297), 'numpy.where', 'np.where', (['(bval <= lowbval)'], {}), '(bval <= lowbval)\n', (10280, 10297), True, 'import numpy as np\n'), ((10331, 10349), 'nipype.interfaces.base.isdefined', 'isdefined', (['ref_num'], {}), '(ref_num)\n', (10340, 10349), False, 'from nipype.interfaces.base import isdefined\n'), ((11603, 11644), 'os.path.abspath', 'op.abspath', (["('%s_extract%s' % (fname, ext))"], {}), "('%s_extract%s' % (fname, ext))\n", (11613, 11644), True, 'import os.path as op\n'), ((12479, 12488), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12485, 12488), True, 'import numpy as np\n'), ((12978, 13021), 'os.path.abspath', 'op.abspath', (["('%s_eccorrect%s' % (fname, ext))"], {}), "('%s_eccorrect%s' % (fname, ext))\n", (12988, 13021), True, 'import os.path as op\n'), ((14003, 14035), 'os.path.abspath', 'op.abspath', (["('eccor_%04d.mat' % i)"], {}), "('eccor_%04d.mat' % i)\n", (14013, 14035), True, 'import os.path as op\n'), ((14079, 14104), 'numpy.savetxt', 'np.savetxt', (['out_name', 'mat'], {}), '(out_name, mat)\n', (14089, 14104), True, 'import numpy as np\n'), ((14642, 14684), 'os.path.abspath', 'op.abspath', (["('%s_baseline%s' % (fname, ext))"], {}), "('%s_baseline%s' % (fname, ext))\n", (14652, 14684), True, 'import os.path as op\n'), ((16126, 16166), 'os.path.abspath', 'op.abspath', (["('%s_avg_b0%s' % (fname, ext))"], {}), "('%s_avg_b0%s' % (fname, ext))\n", (16136, 16166), True, 'import os.path as op\n'), ((16411, 16424), 'numpy.array', 'np.array', (['b0s'], {}), '(b0s)\n', (16419, 16424), True, 'import numpy as np\n'), ((17115, 17140), 'os.path.basename', 'os.path.basename', (['in_bvec'], {}), '(in_bvec)\n', (17131, 17140), False, 'import os\n'), ((17182, 17204), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (17198, 17204), False, 'import os\n'), ((17274, 17293), 'numpy.loadtxt', 'np.loadtxt', (['in_bvec'], {}), '(in_bvec)\n', (17284, 17293), True, 'import 
numpy as np\n'), ((17631, 17650), 'numpy.all', 'np.all', (['(bvec == 0.0)'], {}), '(bvec == 0.0)\n', (17637, 17650), True, 'import numpy as np\n'), ((18322, 18347), 'os.path.basename', 'os.path.basename', (['in_bvec'], {}), '(in_bvec)\n', (18338, 18347), False, 'import os\n'), ((18389, 18411), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (18405, 18411), False, 'import os\n'), ((18481, 18500), 'numpy.loadtxt', 'np.loadtxt', (['in_bvec'], {}), '(in_bvec)\n', (18491, 18500), True, 'import numpy as np\n'), ((18767, 18786), 'numpy.all', 'np.all', (['(bvec == 0.0)'], {}), '(bvec == 0.0)\n', (18773, 18786), True, 'import numpy as np\n'), ((20630, 20668), 'os.path.abspath', 'op.abspath', (["('./%s_rads.nii.gz' % fname)"], {}), "('./%s_rads.nii.gz' % fname)\n", (20640, 20668), True, 'import os.path as op\n'), ((21727, 21767), 'os.path.abspath', 'op.abspath', (["('./%s_radsec.nii.gz' % fname)"], {}), "('./%s_radsec.nii.gz' % fname)\n", (21737, 21767), True, 'import os.path as op\n'), ((22313, 22353), 'os.path.abspath', 'op.abspath', (["('./%s_demean.nii.gz' % fname)"], {}), "('./%s_demean.nii.gz' % fname)\n", (22323, 22353), True, 'import os.path as op\n'), ((23151, 23187), 'os.path.abspath', 'op.abspath', (["('./%s_4D.nii.gz' % fname)"], {}), "('./%s_4D.nii.gz' % fname)\n", (23161, 23187), True, 'import os.path as op\n'), ((23666, 23691), 'os.path.basename', 'os.path.basename', (['in_bvec'], {}), '(in_bvec)\n', (23682, 23691), False, 'import os\n'), ((23733, 23755), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (23749, 23755), False, 'import os\n'), ((23826, 23845), 'numpy.loadtxt', 'np.loadtxt', (['in_bvec'], {}), '(in_bvec)\n', (23836, 23845), True, 'import numpy as np\n'), ((23876, 23891), 'nibabel.load', 'nb.load', (['in_dwi'], {}), '(in_dwi)\n', (23883, 23891), True, 'import nibabel as nb\n'), ((23907, 23923), 'nibabel.load', 'nb.load', (['old_dwi'], {}), '(old_dwi)\n', (23914, 23923), True, 'import nibabel as nb\n'), ((24512, 24552), 'os.path.abspath', 'op.abspath', (["('./%s_fixhdr.nii.gz' % fname)"], {}), "('./%s_fixhdr.nii.gz' % fname)\n", (24522, 24552), True, 'import os.path as op\n'), ((25229, 25266), 'os.path.abspath', 'op.abspath', (["('./%s_enh.nii.gz' % fname)"], {}), "('./%s_enh.nii.gz' % fname)\n", (25239, 25266), True, 'import os.path as op\n'), ((9208, 9227), 'os.path.basename', 'op.basename', (['in_dwi'], {}), '(in_dwi)\n', (9219, 9227), True, 'import os.path as op\n'), ((9280, 9298), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (9291, 9298), True, 'import os.path as op\n'), ((9578, 9598), 'numpy.where', 'np.where', (['(bvals == 0)'], {}), '(bvals == 0)\n', (9586, 9598), True, 'import numpy as np\n'), ((9629, 9649), 'numpy.where', 'np.where', (['(bvals == b)'], {}), '(bvals == b)\n', (9637, 9649), True, 'import numpy as np\n'), ((9778, 9817), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['extdata', 'im.affine', 'hdr'], {}), '(extdata, im.affine, hdr)\n', (9792, 9817), True, 'import nibabel as nb\n'), ((10596, 10662), 'numpy.concatenate', 'np.concatenate', (['(data[..., :volid], data[..., volid + 1:])'], {'axis': '(3)'}), '((data[..., :volid], data[..., volid + 1:]), axis=3)\n', (10610, 10662), True, 'import numpy as np\n'), ((10710, 10753), 'numpy.hstack', 'np.hstack', (['(bval[:volid], bval[volid + 1:])'], {}), '((bval[:volid], bval[volid + 1:]))\n', (10719, 10753), True, 'import numpy as np\n'), ((10961, 11000), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['refdata', 'im.affine', 'hdr'], {}), '(refdata, im.affine, 
hdr)\n', (10975, 11000), True, 'import nibabel as nb\n'), ((11062, 11098), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im.affine', 'hdr'], {}), '(data, im.affine, hdr)\n', (11076, 11098), True, 'import nibabel as nb\n'), ((11463, 11483), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (11474, 11483), True, 'import os.path as op\n'), ((11536, 11554), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (11547, 11554), True, 'import os.path as op\n'), ((11948, 12014), 'numpy.concatenate', 'np.concatenate', (['(data[..., :volid], data[..., volid + 1:])'], {'axis': '(3)'}), '((data[..., :volid], data[..., volid + 1:]), axis=3)\n', (11962, 12014), True, 'import numpy as np\n'), ((12062, 12105), 'numpy.hstack', 'np.hstack', (['(bval[:volid], bval[volid + 1:])'], {}), '((bval[:volid], bval[volid + 1:]))\n', (12071, 12105), True, 'import numpy as np\n'), ((12147, 12183), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im.affine', 'hdr'], {}), '(data, im.affine, hdr)\n', (12161, 12183), True, 'import nibabel as nb\n'), ((12839, 12858), 'os.path.basename', 'op.basename', (['in_dwi'], {}), '(in_dwi)\n', (12850, 12858), True, 'import os.path as op\n'), ((12911, 12929), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (12922, 12929), True, 'import os.path as op\n'), ((13430, 13475), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['dwidata', 'im.affine', 'im.header'], {}), '(dwidata, im.affine, im.header)\n', (13444, 13475), True, 'import nibabel as nb\n'), ((13800, 13815), 'numpy.loadtxt', 'np.loadtxt', (['xfm'], {}), '(xfm)\n', (13810, 13815), True, 'import numpy as np\n'), ((13930, 13939), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (13936, 13939), True, 'import numpy as np\n'), ((13972, 13982), 'builtins.next', 'next', (['xfms'], {}), '(xfms)\n', (13976, 13982), False, 'from builtins import next\n'), ((14502, 14522), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (14513, 14522), True, 'import os.path as op\n'), ((14575, 14593), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (14586, 14593), True, 'import os.path as op\n'), ((14698, 14718), 'numpy.atleast_1d', 'np.atleast_1d', (['index'], {}), '(index)\n', (14711, 14718), True, 'import numpy as np\n'), ((15146, 15187), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'imgs[0].affine', 'hdr'], {}), '(data, imgs[0].affine, hdr)\n', (15160, 15187), True, 'import nibabel as nb\n'), ((15987, 16006), 'os.path.basename', 'op.basename', (['in_dwi'], {}), '(in_dwi)\n', (15998, 16006), True, 'import os.path as op\n'), ((16059, 16077), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (16070, 16077), True, 'import os.path as op\n'), ((16205, 16220), 'nibabel.load', 'nb.load', (['in_dwi'], {}), '(in_dwi)\n', (16212, 16220), True, 'import nibabel as nb\n'), ((16568, 16607), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['b0', 'imgs[0].affine', 'hdr'], {}), '(b0, imgs[0].affine, hdr)\n', (16582, 16607), True, 'import nibabel as nb\n'), ((17892, 17911), 'numpy.array', 'np.array', (['new_bvecs'], {}), '(new_bvecs)\n', (17900, 17911), True, 'import numpy as np\n'), ((19417, 19433), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (19430, 19433), True, 'import numpy as np\n'), ((19563, 19582), 'numpy.array', 'np.array', (['new_bvecs'], {}), '(new_bvecs)\n', (19571, 19582), True, 'import numpy as np\n'), ((20521, 20541), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (20532, 20541), True, 'import os.path as op\n'), ((20592, 
20610), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (20603, 20610), True, 'import os.path as op\n'), ((20684, 20706), 'numpy.atleast_1d', 'np.atleast_1d', (['in_file'], {}), '(in_file)\n', (20697, 20706), True, 'import numpy as np\n'), ((20983, 21022), 'numpy.squeeze', 'np.squeeze', (['(data[..., 1] - data[..., 0])'], {}), '(data[..., 1] - data[..., 0])\n', (20993, 21022), True, 'import numpy as np\n'), ((21273, 21309), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im.affine', 'hdr'], {}), '(data, im.affine, hdr)\n', (21287, 21309), True, 'import nibabel as nb\n'), ((21618, 21638), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (21629, 21638), True, 'import os.path as op\n'), ((21689, 21707), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (21700, 21707), True, 'import os.path as op\n'), ((21862, 21904), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im.affine', 'im.header'], {}), '(data, im.affine, im.header)\n', (21876, 21904), True, 'import nibabel as nb\n'), ((22204, 22224), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (22215, 22224), True, 'import os.path as op\n'), ((22275, 22293), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (22286, 22293), True, 'import os.path as op\n'), ((22695, 22737), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im.affine', 'im.header'], {}), '(data, im.affine, im.header)\n', (22709, 22737), True, 'import nibabel as nb\n'), ((23042, 23062), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (23053, 23062), True, 'import os.path as op\n'), ((23113, 23131), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (23124, 23131), True, 'import os.path as op\n'), ((23321, 23354), 'nibabel.funcs.concat_images', 'nb.funcs.concat_images', (['[im, zim]'], {}), '([im, zim])\n', (23343, 23354), True, 'import nibabel as nb\n'), ((23946, 23962), 'numpy.linalg.inv', 'np.linalg.inv', (['O'], {}), '(O)\n', (23959, 23962), True, 'import numpy as np\n'), ((24160, 24179), 'numpy.array', 'np.array', (['new_bvecs'], {}), '(new_bvecs)\n', (24168, 24179), True, 'import numpy as np\n'), ((24403, 24423), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (24414, 24423), True, 'import os.path as op\n'), ((24474, 24492), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (24485, 24492), True, 'import os.path as op\n'), ((25120, 25140), 'os.path.basename', 'op.basename', (['in_file'], {}), '(in_file)\n', (25131, 25140), True, 'import os.path as op\n'), ((25191, 25209), 'os.path.splitext', 'op.splitext', (['fname'], {}), '(fname)\n', (25202, 25209), True, 'import os.path as op\n'), ((26080, 26098), 'nipype.interfaces.base.isdefined', 'isdefined', (['in_xfms'], {}), '(in_xfms)\n', (26089, 26098), False, 'from nipype.interfaces.base import isdefined\n'), ((26343, 26374), 'os.path.abspath', 'op.abspath', (["('init_%04d.mat' % i)"], {}), "('init_%04d.mat' % i)\n", (26353, 26374), True, 'import os.path as op\n'), ((13119, 13139), 'numpy.where', 'np.where', (['(bvals != 0)'], {}), '(bvals != 0)\n', (13127, 13139), True, 'import numpy as np\n'), ((13401, 13413), 'nibabel.load', 'nb.load', (['dwi'], {}), '(dwi)\n', (13408, 13413), True, 'import nibabel as nb\n'), ((13758, 13767), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (13764, 13767), True, 'import numpy as np\n'), ((14766, 14782), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (14773, 14782), True, 'import nibabel as nb\n'), ((23995, 24005), 
'numpy.abs', 'np.abs', (['RS'], {}), '(RS)\n', (24001, 24005), True, 'import numpy as np\n'), ((25387, 25403), 'nibabel.load', 'nb.load', (['in_mask'], {}), '(in_mask)\n', (25394, 25403), True, 'import nibabel as nb\n'), ((26408, 26417), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (26414, 26417), True, 'import numpy as np\n'), ((15422, 15448), 'numpy.argwhere', 'np.argwhere', (['(bval <= max_b)'], {}), '(bval <= max_b)\n', (15433, 15448), True, 'import numpy as np\n'), ((16266, 16292), 'numpy.argwhere', 'np.argwhere', (['(bval <= max_b)'], {}), '(bval <= max_b)\n', (16277, 16292), True, 'import numpy as np\n'), ((17736, 17751), 'numpy.loadtxt', 'np.loadtxt', (['mat'], {}), '(mat)\n', (17746, 17751), True, 'import numpy as np\n'), ((17840, 17863), 'numpy.linalg.norm', 'np.linalg.norm', (['newbvec'], {}), '(newbvec)\n', (17854, 17863), True, 'import numpy as np\n'), ((19512, 19535), 'numpy.linalg.norm', 'np.linalg.norm', (['newbvec'], {}), '(newbvec)\n', (19526, 19535), True, 'import numpy as np\n'), ((24662, 24678), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (24669, 24678), True, 'import nibabel as nb\n'), ((26201, 26221), 'numpy.where', 'np.where', (['(bvals != 0)'], {}), '(bvals != 0)\n', (26209, 26221), True, 'import numpy as np\n'), ((18987, 18994), 'math.cos', 'cos', (['ax'], {}), '(ax)\n', (18990, 18994), False, 'from math import sin, cos\n'), ((19040, 19047), 'math.sin', 'sin', (['ax'], {}), '(ax)\n', (19043, 19047), False, 'from math import sin, cos\n'), ((19049, 19056), 'math.cos', 'cos', (['ax'], {}), '(ax)\n', (19052, 19056), False, 'from math import sin, cos\n'), ((19088, 19095), 'math.cos', 'cos', (['ay'], {}), '(ay)\n', (19091, 19095), False, 'from math import sin, cos\n'), ((19102, 19109), 'math.sin', 'sin', (['ay'], {}), '(ay)\n', (19105, 19109), False, 'from math import sin, cos\n'), ((19199, 19206), 'math.cos', 'cos', (['ay'], {}), '(ay)\n', (19202, 19206), False, 'from math import sin, cos\n'), ((19238, 19245), 'math.cos', 'cos', (['az'], {}), '(az)\n', (19241, 19245), False, 'from math import sin, cos\n'), ((19291, 19298), 'math.sin', 'sin', (['az'], {}), '(az)\n', (19294, 19298), False, 'from math import sin, cos\n'), ((19300, 19307), 'math.cos', 'cos', (['az'], {}), '(az)\n', (19303, 19307), False, 'from math import sin, cos\n'), ((22497, 22513), 'nibabel.load', 'nb.load', (['in_mask'], {}), '(in_mask)\n', (22504, 22513), True, 'import nibabel as nb\n'), ((18997, 19004), 'math.sin', 'sin', (['ax'], {}), '(ax)\n', (19000, 19004), False, 'from math import sin, cos\n'), ((19185, 19192), 'math.sin', 'sin', (['ay'], {}), '(ay)\n', (19188, 19192), False, 'from math import sin, cos\n'), ((19248, 19255), 'math.sin', 'sin', (['az'], {}), '(az)\n', (19251, 19255), False, 'from math import sin, cos\n'), ((20858, 20877), 'nibabel.load', 'nb.load', (['in_file[1]'], {}), '(in_file[1])\n', (20865, 20877), True, 'import nibabel as nb\n')]
|
#!/usr/bin/env python3
from nucleus.dataset import DataSet
from nucleus.constants import NETS
import argparse
import cv2
import os
import numpy as np
def get_args():
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("--net")
parser.add_argument("--data_root")
parser.add_argument("--model_dir")
parser.add_argument("--iters", type=int)
parser.add_argument("--epoch", type=int)
parser.add_argument("--input_height", type=int)
parser.add_argument("--input_width", type=int)
return parser.parse_args()
def iterate_prediction(net, orig_img, iters):
predicted_mask = np.round(net.y.eval(feed_dict={net.x: orig_img})) * 255
for _ in range(iters):
x = orig_img[0].astype("uint16")
x = x + predicted_mask[0]
x = np.clip(x, 0, 255)
x = x.astype("uint8")
x = np.array([x])
predicted_mask = np.round(net.y.eval(feed_dict={net.x: x})) * 255
orig_img = x
return predicted_mask
def predict(net, data_root, model_dir, epoch, iters, input_height, input_width):
data = DataSet((input_height, input_width), data_root)
net = NETS[net](input_height, input_width, init_vars=False)
net.restore_model(model_dir, epoch)
results_dir = os.path.join(model_dir, "results_{}".format(epoch))
if not os.path.exists(results_dir):
os.makedirs(results_dir)
for i, test_img in enumerate(data.test_data):
test_x, test_y = data.get_test_imgs(test_img)
test_x = np.array([test_x])
test_y = np.array([test_y])
result_img = iterate_prediction(net, test_x, iters)
cv2.imwrite(os.path.join(results_dir, '{}_p.jpg'.format(i)), result_img[0])
cv2.imwrite(os.path.join(results_dir, '{}_t.jpg'.format(i)), test_x[0])
if __name__ == "__main__":
args = get_args()
predict(args.net,
args.data_root,
args.model_dir,
args.epoch,
args.iters,
args.input_height,
args.input_width)
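# Example invocation (hypothetical network name and paths, not part of the
# original script):
#   python predict.py --net unet --data_root ./data --model_dir ./models \
#       --epoch 10 --iters 3 --input_height 256 --input_width 256
# Because of fromfile_prefix_chars='@', the same arguments can also be read
# from a file: python predict.py @args.txt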
|
[
"numpy.clip",
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"nucleus.dataset.DataSet",
"numpy.array"
] |
[((183, 233), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'fromfile_prefix_chars': '"""@"""'}), "(fromfile_prefix_chars='@')\n", (206, 233), False, 'import argparse\n'), ((1103, 1150), 'nucleus.dataset.DataSet', 'DataSet', (['(input_height, input_width)', 'data_root'], {}), '((input_height, input_width), data_root)\n', (1110, 1150), False, 'from nucleus.dataset import DataSet\n'), ((810, 828), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (817, 828), True, 'import numpy as np\n'), ((871, 884), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (879, 884), True, 'import numpy as np\n'), ((1339, 1366), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (1353, 1366), False, 'import os\n'), ((1376, 1400), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (1387, 1400), False, 'import os\n'), ((1523, 1541), 'numpy.array', 'np.array', (['[test_x]'], {}), '([test_x])\n', (1531, 1541), True, 'import numpy as np\n'), ((1559, 1577), 'numpy.array', 'np.array', (['[test_y]'], {}), '([test_y])\n', (1567, 1577), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as f
import numpy as np
from layers import GraphAttentionLayer
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
"""Dense version of GAT."""
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in
range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = f.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=-1)
x = f.dropout(x, self.dropout, training=self.training)
x = f.elu(self.out_att(x, adj))
return x
# return F.log_softmax(x, dim=1)
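# Illustrative sketch (not part of the original module): one forward pass through
# the GAT block with random data. The shapes below are hypothetical and assume
# the GraphAttentionLayer in layers.py accepts batched (batch, nodes, features)
# input, as CriticNetwork.forward below implies (5 nodes, i.e. 5 agents).
def _example_gat_forward():
    x = torch.rand(1, 5, 8)    # (batch, nodes, features)
    adj = torch.ones(1, 5, 5)  # fully connected graph
    gat = GAT(nfeat=8, nhid=16, nclass=8, dropout=0.0, alpha=0.1, nheads=4)
    return gat(x, adj)         # -> (1, 5, 8)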
class CriticNetwork(nn.Module):
def __init__(self, input_dim, hidden_gat_dim, hidden_in_dim, hidden_out_dim, output_dim, actor=False):
super(CriticNetwork, self).__init__()
"""self.input_norm = nn.BatchNorm1d(input_dim)
self.input_norm.weight.data.fill_(1)
self.input_norm.bias.data.fill_(0)"""
self.gat = GAT(nfeat=input_dim,
nhid=hidden_gat_dim,
nclass=input_dim,
dropout=0.0,
nheads=4,
alpha=0.1)
self.gat2 = GAT(nfeat=input_dim,
nhid=hidden_gat_dim,
nclass=input_dim,
dropout=0.0,
nheads=4,
alpha=0.1)
# dense_input_dim = input_dim * 5 * 2 # num_agents * res connections
dense_input_dim = input_dim * 5 * 2
self.fc1 = nn.Linear(dense_input_dim, hidden_in_dim)
self.fc2 = nn.Linear(hidden_in_dim, hidden_out_dim)
self.fc3 = nn.Linear(hidden_out_dim, output_dim)
# self.fc3 = nn.Linear(hidden_in_dim, output_dim)
# self.nonlin = f.relu # leaky_relu
self.nonlin = f.leaky_relu
self.actor = actor
# self.reset_parameters()
def reset_parameters(self):
self.gat.weight.data.uniform_(*hidden_init(self.gat))
self.gat2.weight.data.uniform_(*hidden_init(self.gat2))
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-1e-3, 1e-3)
def forward(self, x, adj):
gat = self.gat(x, adj)
gat = self.gat2(gat, adj)
        resgat = torch.cat((x, gat), dim=-1)  # residual connection: keep the raw input alongside the GAT output
        flatten = torch.flatten(resgat, start_dim=1)  # flattened size matches dense_input_dim = input_dim * 5 * 2
h1 = self.nonlin(self.fc1(flatten))
h2 = self.nonlin(self.fc2(h1))
h3 = (self.fc3(h2))
return h3
class ActorNetwork(nn.Module):
def __init__(self, input_dim, hidden_in_dim, hidden_out_dim, output_dim, actor=False):
super(ActorNetwork, self).__init__()
"""self.input_norm = nn.BatchNorm1d(input_dim)
self.input_norm.weight.data.fill_(1)
self.input_norm.bias.data.fill_(0)"""
self.fc1 = nn.Linear(input_dim, hidden_in_dim)
self.fc2 = nn.Linear(hidden_in_dim, hidden_out_dim)
self.fc3 = nn.Linear(hidden_out_dim, output_dim)
self.nonlin = f.relu # leaky_relu
self.actor = actor
# self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-1e-3, 1e-3)
def forward(self, x):
# return a vector of the force
h1 = self.nonlin(self.fc1(x))
h2 = self.nonlin(self.fc2(h1))
h3 = (self.fc3(h2))
norm = torch.norm(h3)
# h3 is a 2D vector (a force that is applied to the agent)
# we bound the norm of the vector to be between 0 and 10
# return 10.0*(torch.tanh(norm))*h3/norm if norm > 0 else 10*h3
return 1.0 * (torch.tanh(norm)) * h3 / norm if norm > 0 else 1 * h3
|
[
"torch.tanh",
"numpy.sqrt",
"torch.nn.functional.dropout",
"torch.flatten",
"torch.norm",
"torch.nn.Linear",
"layers.GraphAttentionLayer",
"torch.cat"
] |
[((207, 222), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (214, 222), True, 'import numpy as np\n'), ((742, 832), 'layers.GraphAttentionLayer', 'GraphAttentionLayer', (['(nhid * nheads)', 'nclass'], {'dropout': 'dropout', 'alpha': 'alpha', 'concat': '(False)'}), '(nhid * nheads, nclass, dropout=dropout, alpha=alpha,\n concat=False)\n', (761, 832), False, 'from layers import GraphAttentionLayer\n'), ((873, 923), 'torch.nn.functional.dropout', 'f.dropout', (['x', 'self.dropout'], {'training': 'self.training'}), '(x, self.dropout, training=self.training)\n', (882, 923), True, 'import torch.nn.functional as f\n'), ((1008, 1058), 'torch.nn.functional.dropout', 'f.dropout', (['x', 'self.dropout'], {'training': 'self.training'}), '(x, self.dropout, training=self.training)\n', (1017, 1058), True, 'import torch.nn.functional as f\n'), ((2060, 2101), 'torch.nn.Linear', 'nn.Linear', (['dense_input_dim', 'hidden_in_dim'], {}), '(dense_input_dim, hidden_in_dim)\n', (2069, 2101), True, 'import torch.nn as nn\n'), ((2121, 2161), 'torch.nn.Linear', 'nn.Linear', (['hidden_in_dim', 'hidden_out_dim'], {}), '(hidden_in_dim, hidden_out_dim)\n', (2130, 2161), True, 'import torch.nn as nn\n'), ((2181, 2218), 'torch.nn.Linear', 'nn.Linear', (['hidden_out_dim', 'output_dim'], {}), '(hidden_out_dim, output_dim)\n', (2190, 2218), True, 'import torch.nn as nn\n'), ((2866, 2893), 'torch.cat', 'torch.cat', (['(x, gat)'], {'dim': '(-1)'}), '((x, gat), dim=-1)\n', (2875, 2893), False, 'import torch\n'), ((2922, 2953), 'torch.flatten', 'torch.flatten', (['gat'], {'start_dim': '(1)'}), '(gat, start_dim=1)\n', (2935, 2953), False, 'import torch\n'), ((3419, 3454), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_in_dim'], {}), '(input_dim, hidden_in_dim)\n', (3428, 3454), True, 'import torch.nn as nn\n'), ((3474, 3514), 'torch.nn.Linear', 'nn.Linear', (['hidden_in_dim', 'hidden_out_dim'], {}), '(hidden_in_dim, hidden_out_dim)\n', (3483, 3514), True, 'import torch.nn as nn\n'), ((3534, 3571), 'torch.nn.Linear', 'nn.Linear', (['hidden_out_dim', 'output_dim'], {}), '(hidden_out_dim, output_dim)\n', (3543, 3571), True, 'import torch.nn as nn\n'), ((4070, 4084), 'torch.norm', 'torch.norm', (['h3'], {}), '(h3)\n', (4080, 4084), False, 'import torch\n'), ((470, 545), 'layers.GraphAttentionLayer', 'GraphAttentionLayer', (['nfeat', 'nhid'], {'dropout': 'dropout', 'alpha': 'alpha', 'concat': '(True)'}), '(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)\n', (489, 545), False, 'from layers import GraphAttentionLayer\n'), ((4312, 4328), 'torch.tanh', 'torch.tanh', (['norm'], {}), '(norm)\n', (4322, 4328), False, 'import torch\n')]
|
import torch
import numpy as np
from core import predict
def sigmoid_threshold(tensor, threshold=0.5):
"""Applies the sigmoid function to the tensor and thresholds the values
out_tensor = sigmoid(tensor) > threshold
Arguments:
tensor (torch.Tensor): the tensor to threshold.
threshold (scalar or array-like): the threshold value or values. Can be a list,
tuple, NumPy ndarray, scalar, and other types. If array-like, the size must
match the size of `tensor`. Default: 0.5.
Returns:
torch.Tensor: same shape as the input with values {0, 1}.
"""
threshold = torch.tensor(threshold, dtype=tensor.dtype).to(tensor.device)
out = torch.sigmoid(tensor)
return out > threshold
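# Illustrative usage (the tensors below are made-up values): `threshold` may be a
# scalar or an array-like with one entry per class, e.g.
#     logits = torch.tensor([[2.0, -1.0, 0.3], [-0.5, 1.5, -2.0]])
#     sigmoid_threshold(logits)                    # scalar threshold of 0.5
#     sigmoid_threshold(logits, [0.3, 0.6, 0.4])   # one threshold per class
# Both calls return boolean tensors with the same shape as `logits`.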
def find_single_threshold(
model,
dataloader,
metric,
device=None,
min_threshold=0.2,
max_threshold=0.8,
num_thresholds=100,
):
"""Searches for the decision threshold that yields the best metric.
Arguments:
model (torch.nn.Module): wrapped model.
dataloader (torch.utils.data.DataLoader): validation set data loader.
metric (metric.Metric): metric to monitor.
device (str or torch.device, optional): a string ("cpu" or "cuda") with an
            optional ordinal for the device type (e.g. "cuda:X", where X is the ordinal).
Alternatively, can be an object representing the device on which the
computation will take place. Default: None, uses the same device as `model`.
min_threshold (float, optional): the lowest threshold to be tested.
Default: 0.2.
max_threshold (float, optional): the highest threshold to be tested.
Default: 0.8.
num_thresholds (int, optional): the number of thresholds to test between
            ``min_threshold`` and ``max_threshold``. Default: 100.
Returns:
float: the best threshold value.
"""
# Instead of generating predictions for every threshold, we'll get the logits and
# targets from the predict function; then the thresholds are applied to the logits
logits, targets = predict(model, dataloader, device=device, ret_targets=True)
# thresholds is a vector that contains all the thresholds to be tested.
thresholds = np.linspace(min_threshold, max_threshold, num_thresholds)
best_metric = None
# If several thresholds yield the highest metric, then they are stored in
# highscore_thresholds and the final threshold is the median of highscore_thresholds
highscore_thresholds = []
for idx, th in enumerate(thresholds):
outputs = sigmoid_threshold(logits, threshold=th)
metric.reset()
metric.add(outputs, targets)
if idx == 0 or metric.value() > best_metric:
best_metric = metric.value()
highscore_thresholds = [th]
elif metric.value() == best_metric:
highscore_thresholds.append(th)
return np.median(highscore_thresholds)
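# Illustrative call (`model`, `val_loader` and `metric` are hypothetical objects
# from this project's training setup):
#     best_th = find_single_threshold(model, val_loader, metric, device="cuda:0")
#     predictions = sigmoid_threshold(logits, threshold=best_th)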
def find_class_threshold(
model,
dataloader,
metric,
device=None,
min_threshold=0.2,
max_threshold=0.8,
num_thresholds=100,
):
"""Searches for the decision threshold that yields the best metric for each class.
Arguments:
model (torch.nn.Module): wrapped model.
dataloader (torch.utils.data.DataLoader): validation set data loader.
metric (metric.Metric): metric to monitor.
device (str or torch.device, optional): a string ("cpu" or "cuda") with an
            optional ordinal for the device type (e.g. "cuda:X", where X is the ordinal).
Alternatively, can be an object representing the device on which the
computation will take place. Default: None, uses the same device as `model`.
min_threshold (float, optional): the lowest threshold to be tested.
Default: 0.2.
max_threshold (float, optional): the highest threshold to be tested.
Default: 0.8.
num_thresholds (int, optional): the number of thresholds to test between
            ``min_threshold`` and ``max_threshold``. Default: 100.
Returns:
list: the best threshold value per class. Same length as the number of classes
"""
# Instead of generating predictions for every threshold, we'll get the logits and
# targets from the predict function; then the thresholds are applied to the logits
logits, targets = predict(model, dataloader, device=device, ret_targets=True)
num_classes = targets.size(1)
# thresholds is a vector that contains all the thresholds to be tested. Best
# thresholds is an array that stores the best threshold found for each class
thresholds = np.linspace(min_threshold, max_threshold, num_thresholds)
best_thresholds = np.zeros((num_classes,))
for class_idx in range(num_classes):
# For each class all thresholds are tested. The threshold that yields the
# highest metric is stored in best_thresholds.
best_metric = None
class_thresholds = best_thresholds.copy()
# If several thresholds yield the highest metric, then they are stored in
# highscore_thresholds and the final threshold that is added to best_thresholds
# is the median of highscore_thresholds.
highscore_thresholds = []
for idx, th in enumerate(thresholds):
# th is the current threshold value for this class; class_thresholds is an
# array that contains the threshold value for all classes
class_thresholds[class_idx] = th
outputs = sigmoid_threshold(logits, threshold=class_thresholds)
metric.reset()
metric.add(outputs, targets)
if idx == 0 or metric.value() > best_metric:
best_metric = metric.value()
highscore_thresholds = [th]
elif metric.value() == best_metric:
highscore_thresholds.append(th)
best_thresholds[class_idx] = np.median(highscore_thresholds)
return best_thresholds.tolist()
def multi_find_threshold(models, dataloaders, metric, device=None, num_thresholds=1000):
"""Searches for the best single and per-class decision thresholds for each model
    Wrapper function around ``find_single_threshold`` and ``find_class_threshold``
built to handle multiple models and return the single and per-class best decision
thresholds for each pair in the array-like objects ``models`` and ``dataloaders``.
Arguments:
models (array-like of torch.nn.Module): an array of models.
        dataloaders (array-like of torch.utils.data.DataLoader): an array of dataloaders
for validation sets.
metric (metric.Metric): metric to monitor.
device (str or torch.device, optional): a string ("cpu" or "cuda") with an
            optional ordinal for the device type (e.g. "cuda:X", where X is the ordinal).
Alternatively, can be an object representing the device on which the
computation will take place. Default: None, uses the same device as `model`.
num_thresholds (int, optional): the number of thresholds to test between 0
and 1. Default: 1000.
Returns:
Generator object that yields:
list: the best single decision threshold value for each model. Same length
as ``models``.
list: the best per-class decision thresholds for each model. Same length as
``models``.
"""
for model, loader in zip(models, dataloaders):
# Single threshold
single_threshold = find_single_threshold(
model, loader, metric, device=device, num_thresholds=num_thresholds
)
# Per-class
class_thresholds = find_class_threshold(
model, loader, metric, device=device, num_thresholds=num_thresholds
)
yield single_threshold, class_thresholds
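# Illustrative consumption of the generator (models and loaders are hypothetical):
#     for single_th, class_ths in multi_find_threshold([m1, m2], [dl1, dl2], metric):
#         print(single_th, class_ths)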
|
[
"numpy.median",
"core.predict",
"torch.sigmoid",
"torch.tensor",
"numpy.linspace",
"numpy.zeros"
] |
[((705, 726), 'torch.sigmoid', 'torch.sigmoid', (['tensor'], {}), '(tensor)\n', (718, 726), False, 'import torch\n'), ((2131, 2190), 'core.predict', 'predict', (['model', 'dataloader'], {'device': 'device', 'ret_targets': '(True)'}), '(model, dataloader, device=device, ret_targets=True)\n', (2138, 2190), False, 'from core import predict\n'), ((2285, 2342), 'numpy.linspace', 'np.linspace', (['min_threshold', 'max_threshold', 'num_thresholds'], {}), '(min_threshold, max_threshold, num_thresholds)\n', (2296, 2342), True, 'import numpy as np\n'), ((2958, 2989), 'numpy.median', 'np.median', (['highscore_thresholds'], {}), '(highscore_thresholds)\n', (2967, 2989), True, 'import numpy as np\n'), ((4426, 4485), 'core.predict', 'predict', (['model', 'dataloader'], {'device': 'device', 'ret_targets': '(True)'}), '(model, dataloader, device=device, ret_targets=True)\n', (4433, 4485), False, 'from core import predict\n'), ((4700, 4757), 'numpy.linspace', 'np.linspace', (['min_threshold', 'max_threshold', 'num_thresholds'], {}), '(min_threshold, max_threshold, num_thresholds)\n', (4711, 4757), True, 'import numpy as np\n'), ((4780, 4804), 'numpy.zeros', 'np.zeros', (['(num_classes,)'], {}), '((num_classes,))\n', (4788, 4804), True, 'import numpy as np\n'), ((5986, 6017), 'numpy.median', 'np.median', (['highscore_thresholds'], {}), '(highscore_thresholds)\n', (5995, 6017), True, 'import numpy as np\n'), ((633, 676), 'torch.tensor', 'torch.tensor', (['threshold'], {'dtype': 'tensor.dtype'}), '(threshold, dtype=tensor.dtype)\n', (645, 676), False, 'import torch\n')]
|
# coding=utf-8
from __future__ import absolute_import, print_function
import os
import json
import torch
from time import time
import numpy as np
import pandas as pd
from sklearn import metrics
from glob import glob
from DataSet.dataset import get_iwildcam_loader, data_prefetcher
from Utils.train_utils import cross_entropy, focal_loss, get_optimizer
from Utils.train_utils import mixup_data, mixup_criterion
from Models.model_factory import create_model
import warnings
warnings.filterwarnings("ignore")
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device:', device)
def evaluate(model, data_loader, criterion, use_onehot=True):
y_pred, y_true, losses = [], [], []
with torch.no_grad():
inputs, labels, ids = data_loader.next()
while inputs is not None:
if use_onehot:
targets = np.argmax(labels.cpu().detach().numpy(), axis=1)
else:
targets = labels.cpu().detach().numpy()
y_true.extend(targets)
output = model(inputs)
loss = criterion(output, labels)
y_pred.extend(np.argmax(output.cpu().detach().numpy(), axis=1))
losses.append(loss.cpu().detach().numpy())
inputs, labels, ids = data_loader.next()
acc = metrics.accuracy_score(y_true, y_pred)
f1 = metrics.f1_score(y_true, y_pred, average='macro')
loss_val = np.mean(losses)
return loss_val, acc, f1
def train(params):
if params['init_model'] is not None:
model = torch.load(params['init_model'])
print('load model', params['init_model'])
else:
model = create_model(
params['Net'],
pretrained=params['pretrained'],
num_classes=params['num_classes'],
drop_rate=params['drop_rate'],
global_pool='avg',
bn_tf=False,
bn_momentum=0.99,
bn_eps=1e-3,
checkpoint_path=params['init_model'],
in_chans=3)
optimizer = get_optimizer(params, model)
param_num = sum([p.data.nelement() for p in model.parameters()])
print("Number of model parameters: {} M".format(param_num / 1024 / 1024))
model = model.to(device)
model.train()
if params['lr_schedule']:
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=params['lr_decay_epochs'], gamma=0.2)
if params['loss'] == 'ce' or params['loss'] == 'cross_entropy':
criterion = cross_entropy().to(device)
label_type = 'float'
elif params['loss'] == 'focal':
criterion = focal_loss(gamma=1.0, alpha=1.0).to(device)
label_type = 'long'
else:
        raise ValueError('unsupported loss: %s' % params['loss'])
train_data_loader, dev_data_loader = get_iwildcam_loader(
params, mode=params['mode'])
train_log = []
dev_log = []
best_acc, best_f1, best_epoch = 0, 0, 0
t1 = time()
print('begin to train')
use_onehot = params['loss'] != 'focal'
for epoch in range(params['epochs']):
train_loader = data_prefetcher(train_data_loader, label_type)
inputs, labels, ids = train_loader.next()
i = 0
while inputs is not None:
mixup_now = np.random.random() < params['aug_proba']
if params['mixup'] and mixup_now:
inputs, labels_a, labels_b, lam = mixup_data(inputs, labels,
params['mixup_alpha'])
optimizer.zero_grad()
output = model(inputs)
if params['mixup'] and mixup_now:
loss = mixup_criterion(
criterion, output, labels_a, labels_b, lam)
else:
loss = criterion(output, labels)
loss.backward()
optimizer.step()
if i % params['print_step'] == 0:
preds = np.argmax(output.cpu().detach().numpy(), axis=1)
if use_onehot:
targets = np.argmax(labels.cpu().detach().numpy(), axis=1)
else:
targets = labels.cpu().detach().numpy()
acc = metrics.accuracy_score(targets, preds)
loss_val = loss.cpu().detach().numpy()
f1 = metrics.f1_score(targets, preds, average='macro')
train_log.append([epoch, i, loss_val, acc, f1])
print("epoch: %d, iter: %d, train_loss: %.4f, train_acc: %.4f, train_f1: %.4f, time_cost_per_iter: %.4f s" % (
epoch, i, loss_val, acc, f1, (time() - t1)/params['print_step']))
with open(params['log_dir'] + 'train.tsv', 'a') as f:
f.write('%05d\t%05d\t%f\t%f\t%f\n' %
(epoch, i, loss_val, acc, f1))
t1 = time()
if (i+1) % params['save_step'] == 0:
save_model_path = os.path.join(
params['save_dir'], 'model_%d_%d.pkl' % (epoch, i))
torch.save(model, save_model_path)
print('save model to', save_model_path)
if (i+1) % params['eval_step'] == 0:
t2 = time()
model.eval()
data_loader = data_prefetcher(dev_data_loader, label_type)
loss_val, acc, f1 = evaluate(
model, data_loader, criterion, use_onehot)
model.train()
dev_log.append([epoch, i, loss_val, acc, f1])
if f1 > best_f1:
best_acc, best_f1, best_epoch = acc, f1, epoch
print('[Evaluation] -------------------------------')
print("epoch: %d, test acc: %.4f, f1-score: %.4f, loss: %.4f, best-f1-score: %.4f, eval_time: %.4f s" % (
epoch, acc, f1, loss_val, best_f1, time()-t2))
print('[Evaluation] -------------------------------')
with open(params['log_dir'] + 'eval.tsv', 'a') as f:
f.write('%05d\t%05d\t%f\t%f\t%f\n' %
(epoch, i, loss_val, acc, f1))
inputs, labels, ids = train_loader.next()
i += 1
if params['lr_schedule']:
scheduler.step(epoch)
return model
def get_params():
params = {
'mode': 'train_val',
# ['data/bbox/cropped_image/','data/']
'data_dir': 'data/bbox/cropped_image/',
'CCT': True,
'iNat': True,
'save_dir': 'final_output/output_0/',
'init_model': None, # 'output_1/resnet_101_3_3427.pkl',
'Net': 'tf_efficientnet_b0', # 'resnet','wideresnet','tf_efficientnet_b0'
'pretrained': True,
'drop_rate': 0.2,
'batch_size': 32,
'eval_batch_size': 32,
'num_classes': 23,
'epochs': 6,
'print_per_epoch': 500,
'eval_per_epoch': 4,
'save_per_epoch': 4,
'loss': 'ce', # ['ce','focal']
'lr_schedule': True,
'lr': 5e-3,
'weight_decay': 1e-6,
'optim': 'adam',
'lr_decay_epochs': [2, 4],
'clahe': True,
'clahe_prob': 0.2,
'gray': True,
'gray_prob': 0.01,
'aug_proba': 0.5,
'cut_size': 8,
'label_smooth': 0.01,
'mixup': True,
'mixup_alpha': 1,
'height': 64, # 380,#224 resnet, 300
'width': 64,
'threads': 2,
}
params['log_dir'] = os.path.join(params['save_dir'], 'log/')
if not os.path.exists(params['save_dir']):
os.mkdir(params['save_dir'])
if not os.path.exists(params['log_dir']):
os.mkdir(params['log_dir'])
with open(params['log_dir'] + 'eval.tsv', 'a') as f:
f.write('Epoch\tStep\tLoss\tAccuracy\tF1-Score\n')
with open(params['log_dir'] + 'train.tsv', 'a') as f:
f.write('Epoch\tStep\tLoss\tAccuracy\tF1-Score\n')
root = params['data_dir']
params['train_data_size'] = len(pd.read_csv(root + 'train_file.csv'))
params['dev_data_size'] = len(pd.read_csv(root + 'dev_file.csv'))
params['step_per_epoch'] = params['train_data_size'] // params['batch_size']
params['print_step'] = max(
1, params['step_per_epoch']//params['print_per_epoch'])
params['eval_step'] = max(
1, params['step_per_epoch']//params['eval_per_epoch'])
params['save_step'] = max(
1, params['step_per_epoch']//params['save_per_epoch'])
json.dump(obj=params, fp=open(params['log_dir'] + 'parameters.json', 'w'))
print(params)
return params
def load_params(save_dir):
params_path = save_dir + 'log/parameters.json'
print('load params form', params_path)
params = json.load(fp=open(params_path, 'r'))
ckpts = glob(save_dir+'*.pkl')
if len(ckpts) > 0:
ckpts = sorted(ckpts, key=lambda x: eval(
x.split('/')[-1].split('.')[0].split('_')[-1]))
params['init_model'] = ckpts[-1]
print(params)
return params
def main():
params = get_params()
train(params)
if __name__ == '__main__':
main()
|
[
"torch.optim.lr_scheduler.MultiStepLR",
"pandas.read_csv",
"Utils.train_utils.get_optimizer",
"torch.cuda.is_available",
"Models.model_factory.create_model",
"numpy.mean",
"os.path.exists",
"Utils.train_utils.mixup_data",
"numpy.random.random",
"os.mkdir",
"glob.glob",
"DataSet.dataset.get_iwildcam_loader",
"DataSet.dataset.data_prefetcher",
"torch.save",
"time.time",
"warnings.filterwarnings",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"torch.load",
"os.path.join",
"Utils.train_utils.cross_entropy",
"Utils.train_utils.mixup_criterion",
"torch.no_grad",
"Utils.train_utils.focal_loss"
] |
[((474, 507), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (497, 507), False, 'import warnings\n'), ((1395, 1433), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1417, 1433), False, 'from sklearn import metrics\n'), ((1443, 1492), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (1459, 1492), False, 'from sklearn import metrics\n'), ((1508, 1523), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (1515, 1523), True, 'import numpy as np\n'), ((2119, 2147), 'Utils.train_utils.get_optimizer', 'get_optimizer', (['params', 'model'], {}), '(params, model)\n', (2132, 2147), False, 'from Utils.train_utils import cross_entropy, focal_loss, get_optimizer\n'), ((2873, 2921), 'DataSet.dataset.get_iwildcam_loader', 'get_iwildcam_loader', (['params'], {'mode': "params['mode']"}), "(params, mode=params['mode'])\n", (2892, 2921), False, 'from DataSet.dataset import get_iwildcam_loader, data_prefetcher\n'), ((3021, 3027), 'time.time', 'time', ([], {}), '()\n', (3025, 3027), False, 'from time import time\n'), ((7526, 7566), 'os.path.join', 'os.path.join', (["params['save_dir']", '"""log/"""'], {}), "(params['save_dir'], 'log/')\n", (7538, 7566), False, 'import os\n'), ((8823, 8847), 'glob.glob', 'glob', (["(save_dir + '*.pkl')"], {}), "(save_dir + '*.pkl')\n", (8827, 8847), False, 'from glob import glob\n'), ((632, 657), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (655, 657), False, 'import torch\n'), ((808, 823), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (821, 823), False, 'import torch\n'), ((1632, 1664), 'torch.load', 'torch.load', (["params['init_model']"], {}), "(params['init_model'])\n", (1642, 1664), False, 'import torch\n'), ((1741, 1995), 'Models.model_factory.create_model', 'create_model', (["params['Net']"], {'pretrained': "params['pretrained']", 'num_classes': "params['num_classes']", 'drop_rate': "params['drop_rate']", 'global_pool': '"""avg"""', 'bn_tf': '(False)', 'bn_momentum': '(0.99)', 'bn_eps': '(0.001)', 'checkpoint_path': "params['init_model']", 'in_chans': '(3)'}), "(params['Net'], pretrained=params['pretrained'], num_classes=\n params['num_classes'], drop_rate=params['drop_rate'], global_pool='avg',\n bn_tf=False, bn_momentum=0.99, bn_eps=0.001, checkpoint_path=params[\n 'init_model'], in_chans=3)\n", (1753, 1995), False, 'from Models.model_factory import create_model\n'), ((2393, 2494), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': "params['lr_decay_epochs']", 'gamma': '(0.2)'}), "(optimizer, milestones=params[\n 'lr_decay_epochs'], gamma=0.2)\n", (2429, 2494), False, 'import torch\n'), ((3164, 3210), 'DataSet.dataset.data_prefetcher', 'data_prefetcher', (['train_data_loader', 'label_type'], {}), '(train_data_loader, label_type)\n', (3179, 3210), False, 'from DataSet.dataset import get_iwildcam_loader, data_prefetcher\n'), ((7578, 7612), 'os.path.exists', 'os.path.exists', (["params['save_dir']"], {}), "(params['save_dir'])\n", (7592, 7612), False, 'import os\n'), ((7622, 7650), 'os.mkdir', 'os.mkdir', (["params['save_dir']"], {}), "(params['save_dir'])\n", (7630, 7650), False, 'import os\n'), ((7662, 7695), 'os.path.exists', 'os.path.exists', (["params['log_dir']"], {}), "(params['log_dir'])\n", (7676, 7695), False, 'import os\n'), ((7705, 7732), 'os.mkdir', 'os.mkdir', 
(["params['log_dir']"], {}), "(params['log_dir'])\n", (7713, 7732), False, 'import os\n'), ((8048, 8084), 'pandas.read_csv', 'pd.read_csv', (["(root + 'train_file.csv')"], {}), "(root + 'train_file.csv')\n", (8059, 8084), True, 'import pandas as pd\n'), ((8120, 8154), 'pandas.read_csv', 'pd.read_csv', (["(root + 'dev_file.csv')"], {}), "(root + 'dev_file.csv')\n", (8131, 8154), True, 'import pandas as pd\n'), ((2591, 2606), 'Utils.train_utils.cross_entropy', 'cross_entropy', ([], {}), '()\n', (2604, 2606), False, 'from Utils.train_utils import cross_entropy, focal_loss, get_optimizer\n'), ((3333, 3351), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3349, 3351), True, 'import numpy as np\n'), ((3470, 3519), 'Utils.train_utils.mixup_data', 'mixup_data', (['inputs', 'labels', "params['mixup_alpha']"], {}), "(inputs, labels, params['mixup_alpha'])\n", (3480, 3519), False, 'from Utils.train_utils import mixup_data, mixup_criterion\n'), ((3720, 3779), 'Utils.train_utils.mixup_criterion', 'mixup_criterion', (['criterion', 'output', 'labels_a', 'labels_b', 'lam'], {}), '(criterion, output, labels_a, labels_b, lam)\n', (3735, 3779), False, 'from Utils.train_utils import mixup_data, mixup_criterion\n'), ((4259, 4297), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['targets', 'preds'], {}), '(targets, preds)\n', (4281, 4297), False, 'from sklearn import metrics\n'), ((4374, 4423), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['targets', 'preds'], {'average': '"""macro"""'}), "(targets, preds, average='macro')\n", (4390, 4423), False, 'from sklearn import metrics\n'), ((4908, 4914), 'time.time', 'time', ([], {}), '()\n', (4912, 4914), False, 'from time import time\n'), ((4999, 5063), 'os.path.join', 'os.path.join', (["params['save_dir']", "('model_%d_%d.pkl' % (epoch, i))"], {}), "(params['save_dir'], 'model_%d_%d.pkl' % (epoch, i))\n", (5011, 5063), False, 'import os\n'), ((5101, 5135), 'torch.save', 'torch.save', (['model', 'save_model_path'], {}), '(model, save_model_path)\n', (5111, 5135), False, 'import torch\n'), ((5263, 5269), 'time.time', 'time', ([], {}), '()\n', (5267, 5269), False, 'from time import time\n'), ((5329, 5373), 'DataSet.dataset.data_prefetcher', 'data_prefetcher', (['dev_data_loader', 'label_type'], {}), '(dev_data_loader, label_type)\n', (5344, 5373), False, 'from DataSet.dataset import get_iwildcam_loader, data_prefetcher\n'), ((2703, 2735), 'Utils.train_utils.focal_loss', 'focal_loss', ([], {'gamma': '(1.0)', 'alpha': '(1.0)'}), '(gamma=1.0, alpha=1.0)\n', (2713, 2735), False, 'from Utils.train_utils import cross_entropy, focal_loss, get_optimizer\n'), ((5923, 5929), 'time.time', 'time', ([], {}), '()\n', (5927, 5929), False, 'from time import time\n'), ((4665, 4671), 'time.time', 'time', ([], {}), '()\n', (4669, 4671), False, 'from time import time\n')]
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import cv2
import numpy as np
import argparse
import geojson
from geojson import Polygon, Feature, FeatureCollection
from utils import Raster, Timer
def _gt_convert(x, y, geotf):
x_geo = geotf[0] + x * geotf[1] + y * geotf[2]
y_geo = geotf[3] + x * geotf[4] + y * geotf[5]
return x_geo, y_geo
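# The affine geotransform above follows the GDAL convention
# geotf = (x0, dx, rx, y0, ry, dy); a worked example with made-up coefficients:
#     geotf = (100.0, 0.5, 0.0, 200.0, 0.0, -0.5)
#     _gt_convert(10, 20, geotf)   # -> (105.0, 190.0)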
@Timer
def convert_data(mask_path, save_path, epsilon=0):
raster = Raster(mask_path)
img = raster.getArray()
geo_writer = codecs.open(save_path, "w", encoding="utf-8")
clas = np.unique(img)
cv2_v = (cv2.__version__.split(".")[0] == "3")
feats = []
if not isinstance(epsilon, (int, float)):
epsilon = 0
for iclas in range(1, len(clas)):
tmp = np.zeros_like(img).astype("uint8")
        tmp[img == clas[iclas]] = 1
# TODO: Detect internal and external contour
results = cv2.findContours(tmp, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_TC89_KCOS)
contours = results[1] if cv2_v else results[0]
# hierarchys = results[2] if cv2_v else results[1]
if len(contours) == 0:
continue
for contour in contours:
contour = cv2.approxPolyDP(contour, epsilon, True)
polys = []
for point in contour:
x, y = point[0]
xg, yg = _gt_convert(x, y, raster.geot)
polys.append((xg, yg))
polys.append(polys[0])
feat = Feature(
                geometry=Polygon([polys]), properties={"class": int(clas[iclas])})
feats.append(feat)
gjs = FeatureCollection(feats)
geo_writer.write(geojson.dumps(gjs))
geo_writer.close()
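# Minimal sketch of the geojson objects assembled above, with made-up coordinates:
#     ring = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0)]   # closed by repeating the first vertex
#     feat = Feature(geometry=Polygon([ring]), properties={"class": 1})
#     geojson.dumps(FeatureCollection([feat]))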
parser = argparse.ArgumentParser(description="input parameters")
parser.add_argument("--mask_path", type=str, required=True, \
help="The path of mask tif.")
parser.add_argument("--save_path", type=str, required=True, \
help="The path to save the results, file suffix is `*.json`.")
parser.add_argument("--epsilon", type=float, default=0, \
help="The CV2 simplified parameters, `0` is the default.")
if __name__ == "__main__":
args = parser.parse_args()
convert_data(args.mask_path, args.save_path, args.epsilon)
|
[
"geojson.dumps",
"cv2.__version__.split",
"geojson.FeatureCollection",
"numpy.unique",
"argparse.ArgumentParser",
"utils.Raster",
"geojson.Polygon",
"cv2.approxPolyDP",
"cv2.findContours",
"codecs.open",
"numpy.zeros_like"
] |
[((2355, 2410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""input parameters"""'}), "(description='input parameters')\n", (2378, 2410), False, 'import argparse\n'), ((1035, 1052), 'utils.Raster', 'Raster', (['mask_path'], {}), '(mask_path)\n', (1041, 1052), False, 'from utils import Raster, Timer\n'), ((1100, 1145), 'codecs.open', 'codecs.open', (['save_path', '"""w"""'], {'encoding': '"""utf-8"""'}), "(save_path, 'w', encoding='utf-8')\n", (1111, 1145), False, 'import codecs\n'), ((1158, 1172), 'numpy.unique', 'np.unique', (['img'], {}), '(img)\n', (1167, 1172), True, 'import numpy as np\n'), ((2250, 2274), 'geojson.FeatureCollection', 'FeatureCollection', (['feats'], {}), '(feats)\n', (2267, 2274), False, 'from geojson import Polygon, Feature, FeatureCollection\n'), ((1502, 1570), 'cv2.findContours', 'cv2.findContours', (['tmp', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)\n', (1518, 1570), False, 'import cv2\n'), ((2297, 2315), 'geojson.dumps', 'geojson.dumps', (['gjs'], {}), '(gjs)\n', (2310, 2315), False, 'import geojson\n'), ((1187, 1213), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (1208, 1213), False, 'import cv2\n'), ((1834, 1874), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['contour', 'epsilon', '(True)'], {}), '(contour, epsilon, True)\n', (1850, 1874), False, 'import cv2\n'), ((1363, 1381), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (1376, 1381), True, 'import numpy as np\n'), ((2155, 2171), 'geojson.Polygon', 'Polygon', (['[polys]'], {}), '([polys])\n', (2162, 2171), False, 'from geojson import Polygon, Feature, FeatureCollection\n')]
|
#!/usr/bin/env python3
import numpy as np
import cv2
# import tensorflow as tf
import sys
sys.path.append("/home/oyster/Tensorflow/Monk_Object_Detection/13_tf_obj_2/lib/")
# from infer_detector_nano import Infer
# from bag_detection.msg import FlipPos, PathPos
def get_rectangles(mask, threshold_area):
"""
    Extract contours from a binary mask and return bounding-rectangle coordinates of sufficiently large contours
Input:
mask: Binary Image
threshold_area: int
Output:
list of 1x4 tuples (x, y, w, h) of color blobs
"""
contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# print(contours)
rectangles = []
for contour in contours:
if cv2.contourArea(contour) > threshold_area:
rect = cv2.boundingRect(contour)
rectangles.append(rect)
return rectangles
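# Quick check of get_rectangles on a synthetic mask (illustrative values only):
#     mask = np.zeros((100, 100), dtype=np.uint8)
#     mask[20:60, 30:80] = 255                     # a single 50x40 blob
#     get_rectangles(mask, threshold_area=500)     # -> [(30, 20, 50, 40)]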
def get_rectangles_new(mask, threshold_area):
"""
    Extract contours from a binary mask and return bounding-rectangle coordinates of contours in a hard-coded area range (threshold_area is currently unused)
Input:
mask: Binary Image
threshold_area: int
Output:
list of 1x4 tuples (x, y, w, h) of color blobs
"""
contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# print(contours)
# stop = 0
rectangles = []
for contour in contours:
# maskcopy = mask.copy()
# maskcopy = cv2.cvtColor(maskcopy, cv2.COLOR_GRAY2BGR)
print(cv2.contourArea(contour))
if 1000 > cv2.contourArea(contour) > 200:
rect = cv2.boundingRect(contour)
rectangles.append(rect)
# x, y, w, h = rect
# cv2.rectangle(maskcopy,(x,y),(x+w,y+h),(0, 255, 0),3)
# cv2.drawContours(maskcopy, [contour], -1, (255, 0, 0), 15)
# cv2.imshow("Image", maskcopy)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# cv2.waitKey(1)
# stop += 1
# if stop > 100:
# break
return rectangles
def get_contours(mask, threshold_area):
"""
    Return contours larger than the threshold area from a binary mask (UNUSED)
    Input:
        mask: Binary Image
threshold_area: int
Output:
list of openCV contours
"""
contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
return [x for x in contours if cv2.contourArea(x) > threshold_area], hierarchy
def color_segmentation(image, lower, upper):
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
return mask
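# Example call with illustrative HSV bounds for a red-ish hue:
#     mask = color_segmentation(bgr_image, lower=(0, 120, 70), upper=(10, 255, 255))
#     pixels = get_mask_pixels(mask)   # Nx2 array of (row, col) indices of mask pixels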
def get_mask_pixels(mask):
return np.transpose((mask>0).nonzero())
def get_avg_depth(depth_img, pixels, low_thres=0, high_thres=1000):
avg_depth = 0
i = 0
for x,y in pixels:
depth = depth_img[x][y]
# print(depth)
if depth > low_thres and depth < high_thres:
avg_depth += depth
i += 1
    # avoid division by zero when no pixels fall inside the depth thresholds
    return avg_depth / i if i > 0 else 0
# def get_region_box(smask, area=100, side='bottom', image=None):
# left = mask.shape[1]
# right = 0
# top = mask.shape[0]
# bot = 0
# box = None
# contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for contour in contours:
# if cv2.contourArea(contour) > area:
# rect = cv2.boundingRect(contour)
# if image:
# tl = (rect[0], rect[1])
# br = (rect[0]+rect[2], rect[1]+rect[3])
# cv.rectangle(image, tl, br, (255,0,0), 2)
# if side == 'left':
# if rect[0] < left:
# left = rect[0]
# box = rect
# elif side == 'right':
# if rect[0] > right:
# right = rect[0]
# box = rect
# elif side == 'top':
# if rect[1] < top:
# top = rect[1]
# box = rect
# else:
# if rect[1] > bot:
# bot = rect[1]
# box = rect
# if image:
# cv.rectangle(image, (box[0], box[1]), (box[0]+box[2], box[1]+box[3]), (0,0,255), 2)
# return box
# def get_tf2_detect_fn(path):
# detect_fn=tf.saved_model.load(path)
# return detect_fn
# def detect_objects(detect_fn, image, width=1280, height=720, min_score_thres=0.5):
# image_np = np.array(image)
# input_tensor=tf.convert_to_tensor(image_np)
# input_tensor=input_tensor[tf.newaxis, ...]
# detections=detect_fn(input_tensor)
# print(type(detections))
# # This is the way I'm getting my coordinates
# boxes = detections['detection_boxes'][0]
# # print(boxes)
# # get all boxes from an array
# max_boxes_to_draw = boxes.shape[0]
# # get scores to get a threshold
# scores = detections['detection_scores'][0]
# # print(scores)
# # this is set as a default but feel free to adjust it to your needs
# # iterate over all objects found
# objects = []
# for i in range(min(max_boxes_to_draw, boxes.shape[0])):
# if scores is None or scores[i] > min_score_thresh:
# class_name = detections['detection_classes'][0][i].numpy()
# y_min, x_min, y_max, x_max = boxes[i].numpy()
# tl, br = ((int(x_min*width), int(y_min*height)), (int(x_max*width), int(y_max*height)))
# detection = {'class':class_name, 'box': (tl, br)}
# objects.append(detection)
# return objects
# def get_gtf():
# gtf = Infer();
# print("GTFF INITIALIZEDDDDDDDDDDDDDDDDDDDDDDDDDDD")
# gtf.set_dataset_params(class_list_file = '/home/oyster/Tensorflow/oyster_bag/classes.txt')
# print("DATA SET PARAMMMS SETTTTTT")
# gtf.set_model_params(exported_model_dir = '/home/oyster/Tensorflow/trt_fp16_dir')
# return gtf
# def gtf_detect_objects(gtf, image_np, min_score_thres=0.5, width=1280, height=720):
# input_tensor = tf.convert_to_tensor(image_np)
# input_tensor = input_tensor[tf.newaxis, ...]
# scores, bboxes, labels = gtf.infer_on_tensor(input_tensor, thresh=0.8);
# return bboxes
def get_element(dilation_size, dilation_shape=cv2.MORPH_RECT):
return cv2.getStructuringElement(dilation_shape, (2 * dilation_size + 1, 2 * dilation_size + 1),
(dilation_size, dilation_size))
def canny(img, thres1=90, thres2=180, aperture=3):
    # the positional slot after thres2 is cv2.Canny's output array, so pass the Sobel aperture by keyword
    return cv2.Canny(img, thres1, thres2, apertureSize=aperture)
def dilate_bag_row(edges, element):
return cv2.morphologyEx(edges, cv2.MORPH_CLOSE, element)
def directional_shear(closed, element, vertical=1, shearing_factor=50, shape=cv2.MORPH_RECT):
# dims = closed.shape[1]
size = (closed.shape[1] // shearing_factor, 1)
if (vertical):
# dims = closed.shape[0]
size = (1, closed.shape[0] // shearing_factor)
structure = cv2.getStructuringElement(shape, size)
closed = cv2.erode(closed, structure)
closed = cv2.dilate(closed, structure)
return cv2.morphologyEx(closed, cv2.MORPH_CLOSE, element)
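# Sketch of the morphology helpers above on a dummy edge map (values are illustrative):
#     element = get_element(dilation_size=9)        # 19x19 rectangular kernel
#     edges = np.zeros((100, 200), dtype=np.uint8)
#     edges[40:42, 20:180] = 255                    # a thin horizontal streak
#     closed = dilate_bag_row(edges, element)       # close small gaps
#     closed = directional_shear(closed, element, vertical=0)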
def bag_rect_detection(img, vertical=1, threshold_area_prop = 0.025, dilation_size=9, dilation_shape=cv2.MORPH_RECT, thres1=100, thres2=200, aperture=3, shearing_factor=50):
element = get_element(dilation_size, dilation_shape)
edges = canny(img, thres1, thres2, aperture)
closed = dilate_bag_row(edges, element)
closed = directional_shear(closed, element, vertical, shearing_factor, dilation_shape)
h, w = img.shape[:2]
threshold_area = threshold_area_prop*h*w
c_rects = get_rectangles(closed, threshold_area)
return c_rects
# def angle_cos(p0, p1, p2):
# d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
# return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
# def find_squares(img):
# img = cv2.GaussianBlur(img, (5, 5), 0)
# squares = []
# for gray in cv2.split(img):
# for thrs in range(0, 255, 26):
# if thrs == 0:
# bina = cv2.Canny(gray, 0, 50, apertureSize=5)
# bina = cv2.dilate(bina, None)
# else:
# retval, bina = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
# contours, hierarchy = cv2.findContours(bina, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# for cnt in contours:
# cnt_len = cv2.arcLength(cnt, True)
# cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
# if len(cnt) == 4 and cv2.contourArea(cnt) > 100 and cv2.isContourConvex(cnt):
# cnt = cnt.reshape(-1, 2)
# max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in range(4)])
# if max_cos < 0.1:
# squares.append(cnt)
# return squares
# def create_flip_pos_msg(top=False, bot=False):
# msg = FlipPos()
# msg.top = top
# msg.bot = bot
# msg.top_x = float('inf')
# msg.top_y = float('inf')
# msg.bot_x = float('inf')
# msg.bot_y = float('inf')
# return msg
|
[
"cv2.erode",
"cv2.contourArea",
"cv2.morphologyEx",
"numpy.array",
"cv2.cvtColor",
"cv2.findContours",
"sys.path.append",
"cv2.dilate",
"cv2.Canny",
"cv2.getStructuringElement",
"cv2.boundingRect"
] |
[((92, 178), 'sys.path.append', 'sys.path.append', (['"""/home/oyster/Tensorflow/Monk_Object_Detection/13_tf_obj_2/lib/"""'], {}), "(\n '/home/oyster/Tensorflow/Monk_Object_Detection/13_tf_obj_2/lib/')\n", (107, 178), False, 'import sys\n'), ((595, 661), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (611, 661), False, 'import cv2\n'), ((1222, 1288), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1238, 1288), False, 'import cv2\n'), ((2455, 2521), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2471, 2521), False, 'import cv2\n'), ((2662, 2700), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (2674, 2700), False, 'import cv2\n'), ((6431, 6556), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['dilation_shape', '(2 * dilation_size + 1, 2 * dilation_size + 1)', '(dilation_size, dilation_size)'], {}), '(dilation_shape, (2 * dilation_size + 1, 2 *\n dilation_size + 1), (dilation_size, dilation_size))\n', (6456, 6556), False, 'import cv2\n'), ((6651, 6691), 'cv2.Canny', 'cv2.Canny', (['img', 'thres1', 'thres2', 'aperture'], {}), '(img, thres1, thres2, aperture)\n', (6660, 6691), False, 'import cv2\n'), ((6741, 6790), 'cv2.morphologyEx', 'cv2.morphologyEx', (['edges', 'cv2.MORPH_CLOSE', 'element'], {}), '(edges, cv2.MORPH_CLOSE, element)\n', (6757, 6790), False, 'import cv2\n'), ((7092, 7130), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['shape', 'size'], {}), '(shape, size)\n', (7117, 7130), False, 'import cv2\n'), ((7144, 7172), 'cv2.erode', 'cv2.erode', (['closed', 'structure'], {}), '(closed, structure)\n', (7153, 7172), False, 'import cv2\n'), ((7186, 7215), 'cv2.dilate', 'cv2.dilate', (['closed', 'structure'], {}), '(closed, structure)\n', (7196, 7215), False, 'import cv2\n'), ((7227, 7277), 'cv2.morphologyEx', 'cv2.morphologyEx', (['closed', 'cv2.MORPH_CLOSE', 'element'], {}), '(closed, cv2.MORPH_CLOSE, element)\n', (7243, 7277), False, 'import cv2\n'), ((2729, 2744), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (2737, 2744), True, 'import numpy as np\n'), ((2746, 2761), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (2754, 2761), True, 'import numpy as np\n'), ((742, 766), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (757, 766), False, 'import cv2\n'), ((804, 829), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (820, 829), False, 'import cv2\n'), ((1484, 1508), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1499, 1508), False, 'import cv2\n'), ((1528, 1552), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1543, 1552), False, 'import cv2\n'), ((1579, 1604), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (1595, 1604), False, 'import cv2\n'), ((2556, 2574), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (2571, 2574), False, 'import cv2\n')]
|
import os
import imageio
import numpy as np
import torch
from scipy.spatial.distance import pdist
from src.parser.visualize import parser
from src.utils.bvh_export import save_generated_motion
from src.utils.get_model_and_data import get_model_and_data
from src.visualize.anim import plot_3d_motion
class VisualizeLatentSpace:
def __init__(self, model, dataset, base_path, reconstruct_pool, synth_to_visualize):
self._model = model
self._dataset = dataset
self._base_path = base_path
self.reconstruct_pool = reconstruct_pool
self.synth_to_visualize = synth_to_visualize
def _visualize_real_image(self, data, idx=0):
# Get xyz for the real ones
data["output_xyz"] = self._model.rot2xyz(data["output"], data["mask"])
motion = data["output_xyz"]
batch, joints, _, length = motion.shape
save_path = os.path.join(self._base_path, 'real.gif')
params = {"pose_rep": 'xyz'}
plot_3d_motion(motion[idx], length, save_path, params, title='real')
return save_path
def _visualize_reconstructed_image(self, reconstructions, idx=0):
# Reconstruction of the real data
        self._model(reconstructions['ntf'])  # update reconstruction dicts in place
reconstruction = reconstructions[list(reconstructions.keys())[0]]
motion = reconstruction['output_xyz']
batch, joints, _, length = motion.shape
save_path = os.path.join(self._base_path, 'reconstruction.gif')
params = {"pose_rep": 'xyz'}
plot_3d_motion(motion[idx], length, save_path, params, title='reconstruction')
return save_path, reconstruction['z']
def _generate_new_motion(self, z=None):
if z is None:
nspa = self.synth_to_visualize
z = torch.randn(nspa, 256, device=self._model.device)
else:
nspa, latent_space = z.shape
duration = torch.as_tensor([200])
durations = duration.repeat((nspa, 1))
y = torch.as_tensor([0]).to(self._model.device).repeat(nspa)
lengths = durations.to(self._model.device).reshape(y.shape)
mask = torch.ones((nspa, 200)).type(torch.bool)
batch = {"z": z, "y": y, "mask": mask, "lengths": lengths}
batch = self._model.decoder(batch)
batch['output_xyz'] = self._model.rot2xyz(batch["output"], batch["mask"])
return batch
def save_bvh_motion(self, generated_motion):
params = {'translation': True,
'dataset': 'datagen',
"pose_rep": 'rot6d'}
bvh_base_path = os.path.join(self._base_path, 'gen_bvh')
save_generated_motion(generated_motion['output'], generated_motion['mask'], bvh_base_path, params)
def _visualize_synthetic(self, generated_motion):
params = {'translation': True,
'dataset': 'datagen',
"pose_rep": 'xyz'}
batch_size, joints, _, frames = generated_motion['output_xyz'].shape
motion = generated_motion['output_xyz']
paths = []
indexes = np.random.randint(0, batch_size, self.synth_to_visualize)
for i in indexes:
gif_path = os.path.join(self._base_path, f'gen_{i}.gif')
plot_3d_motion(motion[i], frames, gif_path, params, title='gen')
paths.append(gif_path)
return paths
def explore_latent_space(self):
classes = torch.as_tensor(np.zeros(self.reconstruct_pool), dtype=torch.int64)
        real_samples, mask_real, real_lengths = self._dataset.get_label_sample_batch(classes.numpy())
        real = {"x": real_samples.to(self._model.device),
                "y": classes.to(self._model.device),
                "mask": mask_real.to(self._model.device),
                "lengths": real_lengths.to(self._model.device),
                "output": real_samples.to(self._model.device)}
        reconstructions = {'ntf': {"x": real_samples.to(self._model.device),
                                   "y": classes.to(self._model.device),
                                   "lengths": real_lengths.to(self._model.device),
                                   "mask": mask_real.to(self._model.device),
                                   "teacher_force": 'ntf' == "tf"}}
        self._model.eval()
with torch.no_grad():
real_path = self._visualize_real_image(real)
recon_path, recon_z = self._visualize_reconstructed_image(reconstructions)
z = self._explore_z(recon_z)
generated_data = self._generate_new_motion(z=z)
self.save_bvh_motion(generated_data)
generated_path = self._visualize_synthetic(generated_data)
# self.compute_diversity(motion)
all_path = [real_path] + [recon_path] + generated_path
visualize_gen_data(200, all_path, os.path.join(self._base_path, 'output.gif'))
@staticmethod
def compute_diversity(pred, *args):
if pred.shape[0] == 1:
return 0.0
dist = pdist(pred.reshape(pred.shape[0], -1))
diversity = dist.mean().item()
return diversity
def _explore_z(self, rec_z):
z = torch.randn(self.reconstruct_pool, 256, device=self._model.device)
for i in range(self.reconstruct_pool):
indices = np.random.randint(0, self.reconstruct_pool, 4)
a, b, c, d = indices
z[i] = torch.cat((rec_z[d][0:64], rec_z[b][64:128], rec_z[c][128:192], rec_z[a][192:256]), 0)
return z
def visualize_gen_data(number_of_frames, gen_path, output_path):
# Create reader object for the gif
gif_readers = [imageio.get_reader(path) for path in gen_path]
# Create writer object
new_gif = imageio.get_writer(output_path)
for frame_number in range(number_of_frames):
imgs = []
for reader in gif_readers:
img = reader.get_next_data()
imgs.append(img)
# here is the magic
new_image = np.hstack(imgs)
new_gif.append_data(new_image)
[reader.close() for reader in gif_readers]
new_gif.close()
if __name__ == '__main__':
base_path = '/Users/paul.yudkin/Datagen/Applications/visualize_latent_space'
# parse options
parameters, folder, checkpointname, epoch = parser()
parameters['pose_rep'] = 'rot6d'
parameters['num_frames'] = 200
parameters['decoder_test'] = "new"
parameters["noise_same_action"] = 'random'
parameters['fps'] = 10
parameters['dataset'] = 'datagen'
parameters['jointstype'] = 'datagen_skeleton'
parameters["num_actions_to_sample"] = 1
model, datasets = get_model_and_data(parameters)
dataset = datasets["train"]
checkpointpath = os.path.join(folder, checkpointname)
state_dict = torch.load(checkpointpath, map_location=parameters["device"])
model.load_state_dict(state_dict)
v = VisualizeLatentSpace(model, dataset, base_path, reconstruct_pool=100, synth_to_visualize=6)
v.explore_latent_space()
|
[
"src.visualize.anim.plot_3d_motion",
"torch.as_tensor",
"src.parser.visualize.parser",
"torch.ones",
"numpy.hstack",
"torch.load",
"os.path.join",
"imageio.get_reader",
"torch.cat",
"src.utils.bvh_export.save_generated_motion",
"numpy.random.randint",
"numpy.zeros",
"torch.no_grad",
"torch.randn",
"imageio.get_writer",
"src.utils.get_model_and_data.get_model_and_data"
] |
[((5632, 5663), 'imageio.get_writer', 'imageio.get_writer', (['output_path'], {}), '(output_path)\n', (5650, 5663), False, 'import imageio\n'), ((6186, 6194), 'src.parser.visualize.parser', 'parser', ([], {}), '()\n', (6192, 6194), False, 'from src.parser.visualize import parser\n'), ((6537, 6567), 'src.utils.get_model_and_data.get_model_and_data', 'get_model_and_data', (['parameters'], {}), '(parameters)\n', (6555, 6567), False, 'from src.utils.get_model_and_data import get_model_and_data\n'), ((6622, 6658), 'os.path.join', 'os.path.join', (['folder', 'checkpointname'], {}), '(folder, checkpointname)\n', (6634, 6658), False, 'import os\n'), ((6676, 6737), 'torch.load', 'torch.load', (['checkpointpath'], {'map_location': "parameters['device']"}), "(checkpointpath, map_location=parameters['device'])\n", (6686, 6737), False, 'import torch\n'), ((889, 930), 'os.path.join', 'os.path.join', (['self._base_path', '"""real.gif"""'], {}), "(self._base_path, 'real.gif')\n", (901, 930), False, 'import os\n'), ((977, 1045), 'src.visualize.anim.plot_3d_motion', 'plot_3d_motion', (['motion[idx]', 'length', 'save_path', 'params'], {'title': '"""real"""'}), "(motion[idx], length, save_path, params, title='real')\n", (991, 1045), False, 'from src.visualize.anim import plot_3d_motion\n'), ((1442, 1493), 'os.path.join', 'os.path.join', (['self._base_path', '"""reconstruction.gif"""'], {}), "(self._base_path, 'reconstruction.gif')\n", (1454, 1493), False, 'import os\n'), ((1539, 1617), 'src.visualize.anim.plot_3d_motion', 'plot_3d_motion', (['motion[idx]', 'length', 'save_path', 'params'], {'title': '"""reconstruction"""'}), "(motion[idx], length, save_path, params, title='reconstruction')\n", (1553, 1617), False, 'from src.visualize.anim import plot_3d_motion\n'), ((1916, 1938), 'torch.as_tensor', 'torch.as_tensor', (['[200]'], {}), '([200])\n', (1931, 1938), False, 'import torch\n'), ((2587, 2627), 'os.path.join', 'os.path.join', (['self._base_path', '"""gen_bvh"""'], {}), "(self._base_path, 'gen_bvh')\n", (2599, 2627), False, 'import os\n'), ((2636, 2738), 'src.utils.bvh_export.save_generated_motion', 'save_generated_motion', (["generated_motion['output']", "generated_motion['mask']", 'bvh_base_path', 'params'], {}), "(generated_motion['output'], generated_motion['mask'],\n bvh_base_path, params)\n", (2657, 2738), False, 'from src.utils.bvh_export import save_generated_motion\n'), ((3068, 3125), 'numpy.random.randint', 'np.random.randint', (['(0)', 'batch_size', 'self.synth_to_visualize'], {}), '(0, batch_size, self.synth_to_visualize)\n', (3085, 3125), True, 'import numpy as np\n'), ((5078, 5144), 'torch.randn', 'torch.randn', (['self.reconstruct_pool', '(256)'], {'device': 'self._model.device'}), '(self.reconstruct_pool, 256, device=self._model.device)\n', (5089, 5144), False, 'import torch\n'), ((5543, 5567), 'imageio.get_reader', 'imageio.get_reader', (['path'], {}), '(path)\n', (5561, 5567), False, 'import imageio\n'), ((5885, 5900), 'numpy.hstack', 'np.hstack', (['imgs'], {}), '(imgs)\n', (5894, 5900), True, 'import numpy as np\n'), ((1791, 1840), 'torch.randn', 'torch.randn', (['nspa', '(256)'], {'device': 'self._model.device'}), '(nspa, 256, device=self._model.device)\n', (1802, 1840), False, 'import torch\n'), ((3175, 3220), 'os.path.join', 'os.path.join', (['self._base_path', 'f"""gen_{i}.gif"""'], {}), "(self._base_path, f'gen_{i}.gif')\n", (3187, 3220), False, 'import os\n'), ((3233, 3297), 'src.visualize.anim.plot_3d_motion', 'plot_3d_motion', (['motion[i]', 'frames', 'gif_path', 'params'], 
{'title': '"""gen"""'}), "(motion[i], frames, gif_path, params, title='gen')\n", (3247, 3297), False, 'from src.visualize.anim import plot_3d_motion\n'), ((3426, 3457), 'numpy.zeros', 'np.zeros', (['self.reconstruct_pool'], {}), '(self.reconstruct_pool)\n', (3434, 3457), True, 'import numpy as np\n'), ((4230, 4245), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4243, 4245), False, 'import torch\n'), ((4756, 4799), 'os.path.join', 'os.path.join', (['self._base_path', '"""output.gif"""'], {}), "(self._base_path, 'output.gif')\n", (4768, 4799), False, 'import os\n'), ((5215, 5261), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.reconstruct_pool', '(4)'], {}), '(0, self.reconstruct_pool, 4)\n', (5232, 5261), True, 'import numpy as np\n'), ((5314, 5405), 'torch.cat', 'torch.cat', (['(rec_z[d][0:64], rec_z[b][64:128], rec_z[c][128:192], rec_z[a][192:256])', '(0)'], {}), '((rec_z[d][0:64], rec_z[b][64:128], rec_z[c][128:192], rec_z[a][\n 192:256]), 0)\n', (5323, 5405), False, 'import torch\n'), ((2138, 2161), 'torch.ones', 'torch.ones', (['(nspa, 200)'], {}), '((nspa, 200))\n', (2148, 2161), False, 'import torch\n'), ((1998, 2018), 'torch.as_tensor', 'torch.as_tensor', (['[0]'], {}), '([0])\n', (2013, 2018), False, 'import torch\n')]
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torchvision.utils import make_grid
import matplotlib.image
from src.logger import create_logger
from src.loader import load_images, DataSampler
from src.utils import bool_flag
# parse parameters
parser = argparse.ArgumentParser(description='Attributes swapping')
parser.add_argument("--model_path", type=str, default="",
help="Trained model path")
parser.add_argument("--n_images", type=int, default=10,
help="Number of images to modify")
parser.add_argument("--offset", type=int, default=0,
help="First image index")
parser.add_argument("--n_interpolations", type=int, default=10,
help="Number of interpolations per image")
parser.add_argument("--alpha_min", type=float, default=1,
help="Min interpolation value")
parser.add_argument("--alpha_max", type=float, default=1,
help="Max interpolation value")
parser.add_argument("--plot_size", type=int, default=5,
help="Size of images in the grid")
parser.add_argument("--row_wise", type=bool_flag, default=True,
help="Represent image interpolations horizontally")
parser.add_argument("--output_path", type=str, default="output.png",
help="Output path")
params = parser.parse_args()
# check parameters
assert os.path.isfile(params.model_path)
assert params.n_images >= 1 and params.n_interpolations >= 2
# create logger / load trained model
logger = create_logger(None)
ae = torch.load(params.model_path).eval()
# restore main parameters
params.debug = True
params.batch_size = 32
params.v_flip = False
params.h_flip = False
params.img_sz = ae.img_sz
params.attr = ae.attr
params.n_attr = ae.n_attr
if not (len(params.attr) == 1 and params.n_attr == 2):
raise Exception("The model must use a single boolean attribute only.")
# load dataset
data, attributes = load_images(params)
test_data = DataSampler(data[2], attributes[2], params)
def get_interpolations(ae, images, attributes, params):
"""
Reconstruct images / create interpolations
"""
assert len(images) == len(attributes)
enc_outputs = ae.encode(images)
# interpolation values
alphas = np.linspace(1 - params.alpha_min, params.alpha_max, params.n_interpolations)
alphas = [torch.FloatTensor([1 - alpha, alpha]) for alpha in alphas]
# original image / reconstructed image / interpolations
outputs = []
outputs.append(images)
outputs.append(ae.decode(enc_outputs, attributes)[-1])
for alpha in alphas:
alpha = Variable(alpha.unsqueeze(0).expand((len(images), 2)).cuda())
outputs.append(ae.decode(enc_outputs, alpha)[-1])
# return stacked images
return torch.cat([x.unsqueeze(1) for x in outputs], 1).data.cpu()
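# How the interpolation weights above behave (made-up settings): with
# alpha_min = alpha_max = 1 and n_interpolations = 3, np.linspace(0, 1, 3) yields
# alphas [0.0, 0.5, 1.0], i.e. attribute vectors [1, 0], [0.5, 0.5] and [0, 1],
# sweeping the swapped attribute from fully off to fully on.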
interpolations = []
for k in range(0, params.n_images, 100):
i = params.offset + k
j = params.offset + min(params.n_images, k + 100)
images, attributes = test_data.eval_batch(i, j)
interpolations.append(get_interpolations(ae, images, attributes, params))
interpolations = torch.cat(interpolations, 0)
assert interpolations.size() == (params.n_images, 2 + params.n_interpolations,
3, params.img_sz, params.img_sz)
def get_grid(images, row_wise, plot_size=5):
"""
Create a grid with all images.
"""
n_images, n_columns, img_fm, img_sz, _ = images.size()
if not row_wise:
images = images.transpose(0, 1).contiguous()
images = images.view(n_images * n_columns, img_fm, img_sz, img_sz)
images.add_(1).div_(2.0)
return make_grid(images, nrow=(n_columns if row_wise else n_images))
# generate the grid / save it to a PNG file
grid = get_grid(interpolations, params.row_wise, params.plot_size)
matplotlib.image.imsave(params.output_path, grid.numpy().transpose((1, 2, 0)))
|
[
"src.loader.DataSampler",
"src.loader.load_images",
"argparse.ArgumentParser",
"torch.load",
"os.path.isfile",
"numpy.linspace",
"src.logger.create_logger",
"torchvision.utils.make_grid",
"torch.FloatTensor",
"torch.cat"
] |
[((499, 557), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Attributes swapping"""'}), "(description='Attributes swapping')\n", (522, 557), False, 'import argparse\n'), ((1632, 1665), 'os.path.isfile', 'os.path.isfile', (['params.model_path'], {}), '(params.model_path)\n', (1646, 1665), False, 'import os\n'), ((1774, 1793), 'src.logger.create_logger', 'create_logger', (['None'], {}), '(None)\n', (1787, 1793), False, 'from src.logger import create_logger\n'), ((2189, 2208), 'src.loader.load_images', 'load_images', (['params'], {}), '(params)\n', (2200, 2208), False, 'from src.loader import load_images, DataSampler\n'), ((2221, 2264), 'src.loader.DataSampler', 'DataSampler', (['data[2]', 'attributes[2]', 'params'], {}), '(data[2], attributes[2], params)\n', (2232, 2264), False, 'from src.loader import load_images, DataSampler\n'), ((3370, 3398), 'torch.cat', 'torch.cat', (['interpolations', '(0)'], {}), '(interpolations, 0)\n', (3379, 3398), False, 'import torch\n'), ((2505, 2581), 'numpy.linspace', 'np.linspace', (['(1 - params.alpha_min)', 'params.alpha_max', 'params.n_interpolations'], {}), '(1 - params.alpha_min, params.alpha_max, params.n_interpolations)\n', (2516, 2581), True, 'import numpy as np\n'), ((3886, 3945), 'torchvision.utils.make_grid', 'make_grid', (['images'], {'nrow': '(n_columns if row_wise else n_images)'}), '(images, nrow=n_columns if row_wise else n_images)\n', (3895, 3945), False, 'from torchvision.utils import make_grid\n'), ((1799, 1828), 'torch.load', 'torch.load', (['params.model_path'], {}), '(params.model_path)\n', (1809, 1828), False, 'import torch\n'), ((2596, 2633), 'torch.FloatTensor', 'torch.FloatTensor', (['[1 - alpha, alpha]'], {}), '([1 - alpha, alpha])\n', (2613, 2633), False, 'import torch\n')]
|
"""
https://docs.opencv.org/master/d8/d19/tutorial_stitcher.html
Stitching sample (advanced)
===========================
Show how to use Stitcher API from python.
"""
# Python 2/3 compatibility
from __future__ import print_function
import argparse
from collections import OrderedDict
from imutils import paths
import cv2 as cv
import numpy as np
EXPOS_COMP_CHOICES = OrderedDict()
EXPOS_COMP_CHOICES['gain_blocks'] = cv.detail.ExposureCompensator_GAIN_BLOCKS
EXPOS_COMP_CHOICES['gain'] = cv.detail.ExposureCompensator_GAIN
EXPOS_COMP_CHOICES['channel'] = cv.detail.ExposureCompensator_CHANNELS
EXPOS_COMP_CHOICES['channel_blocks'] = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
EXPOS_COMP_CHOICES['no'] = cv.detail.ExposureCompensator_NO
BA_COST_CHOICES = OrderedDict()
BA_COST_CHOICES['ray'] = cv.detail_BundleAdjusterRay
BA_COST_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj
BA_COST_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial
BA_COST_CHOICES['no'] = cv.detail_NoBundleAdjuster
FEATURES_FIND_CHOICES = OrderedDict()
try:
FEATURES_FIND_CHOICES['surf'] = cv.xfeatures2d_SURF.create
except AttributeError:
print("SURF not available")
# if SURF not available, ORB is default
FEATURES_FIND_CHOICES['orb'] = cv.ORB.create
try:
FEATURES_FIND_CHOICES['sift'] = cv.xfeatures2d_SIFT.create
except AttributeError:
print("SIFT not available")
try:
FEATURES_FIND_CHOICES['brisk'] = cv.BRISK_create
except AttributeError:
print("BRISK not available")
try:
FEATURES_FIND_CHOICES['akaze'] = cv.AKAZE_create
except AttributeError:
print("AKAZE not available")
SEAM_FIND_CHOICES = OrderedDict()
SEAM_FIND_CHOICES['gc_color'] = cv.detail_GraphCutSeamFinder('COST_COLOR')
SEAM_FIND_CHOICES['gc_colorgrad'] = cv.detail_GraphCutSeamFinder('COST_COLOR_GRAD')
SEAM_FIND_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
SEAM_FIND_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')
SEAM_FIND_CHOICES['voronoi'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
SEAM_FIND_CHOICES['no'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
ESTIMATOR_CHOICES = OrderedDict()
ESTIMATOR_CHOICES['homography'] = cv.detail_HomographyBasedEstimator
ESTIMATOR_CHOICES['affine'] = cv.detail_AffineBasedEstimator
WARP_CHOICES = (
'spherical',
'plane',
'affine',
'cylindrical',
'fisheye',
'stereographic',
'compressedPlaneA2B1',
'compressedPlaneA1.5B1',
'compressedPlanePortraitA2B1',
'compressedPlanePortraitA1.5B1',
'paniniA2B1',
'paniniA1.5B1',
'paniniPortraitA2B1',
'paniniPortraitA1.5B1',
'mercator',
'transverseMercator',
)
WAVE_CORRECT_CHOICES = ('horiz', 'no', 'vert',)
BLEND_CHOICES = ('multiband', 'feather', 'no',)
parser = argparse.ArgumentParser(
prog="stitching_detailed.py", description="Rotation model images stitcher"
)
# parser.add_argument(
# 'img_names', nargs='+',
# help="Files to stitch", type=str
# )
parser.add_argument("-i", "--images", type=str, required=True,
help="path to input directory of images to stitch")
parser.add_argument(
'--try_cuda',
action='store',
default=True,
help="Try to use CUDA. The default value is yes. All other default values are for CPU mode.",
type=bool, dest='try_cuda'
)
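# Note (added): with argparse, type=bool converts any non-empty string to True, so
# passing e.g. '--try_cuda no' would still enable CUDA; pass an empty string to disable it.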
parser.add_argument(
'--work_megapix', action='store', default=0.6,
help="Resolution for image registration step. The default is 0.6 Mpx",
type=float, dest='work_megapix'
)
parser.add_argument(
'--features', action='store', default=list(FEATURES_FIND_CHOICES.keys())[0],
help="Type of features used for images matching. The default is '%s'." % FEATURES_FIND_CHOICES.keys(),
choices=FEATURES_FIND_CHOICES.keys(),
type=str, dest='features'
)
parser.add_argument(
'--matcher', action='store', default='homography',
help="Matcher used for pairwise image matching.",
choices=('homography', 'affine'),
type=str, dest='matcher'
)
parser.add_argument(
'--estimator', action='store', default=list(ESTIMATOR_CHOICES.keys())[0],
help="Type of estimator used for transformation estimation.",
choices=ESTIMATOR_CHOICES.keys(),
type=str, dest='estimator'
)
parser.add_argument(
'--match_conf', action='store',
help="Confidence for feature matching step. The default is 0.3 for ORB and 0.65 for other feature types.",
type=float, dest='match_conf'
)
parser.add_argument(
'--conf_thresh', action='store', default=1.0,
help="Threshold for two images are from the same panorama confidence.The default is 1.0.",
type=float, dest='conf_thresh'
)
parser.add_argument(
'--ba', action='store', default=list(BA_COST_CHOICES.keys())[0],
help="Bundle adjustment cost function. The default is '%s'." % list(BA_COST_CHOICES.keys())[0],
choices=BA_COST_CHOICES.keys(),
type=str, dest='ba'
)
parser.add_argument(
'--ba_refine_mask', action='store', default='xxxxx',
help="Set refinement mask for bundle adjustment. It looks like 'x_xxx', "
"where 'x' means refine respective parameter and '_' means don't refine, "
"and has the following format:<fx><skew><ppx><aspect><ppy>. "
"The default mask is 'xxxxx'. "
"If bundle adjustment doesn't support estimation of selected parameter then "
"the respective flag is ignored.",
type=str, dest='ba_refine_mask'
)
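# For example (illustrative values only): a mask of 'x_xx_' would refine <fx>, <ppx> and
# <aspect> while keeping <skew> and <ppy> fixed.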
parser.add_argument(
'--wave_correct', action='store', default=WAVE_CORRECT_CHOICES[0],
help="Perform wave effect correction. The default is '%s'" % WAVE_CORRECT_CHOICES[0],
choices=WAVE_CORRECT_CHOICES,
type=str, dest='wave_correct'
)
parser.add_argument(
'--save_graph', action='store', default=None,
help="Save matches graph represented in DOT language to <file_name> file.",
type=str, dest='save_graph'
)
parser.add_argument(
'--warp', action='store', default=WARP_CHOICES[0],
help="Warp surface type. The default is '%s'." % WARP_CHOICES[0],
choices=WARP_CHOICES,
type=str, dest='warp'
)
parser.add_argument(
'--seam_megapix', action='store', default=0.1,
help="Resolution for seam estimation step. The default is 0.1 Mpx.",
type=float, dest='seam_megapix'
)
parser.add_argument(
'--seam', action='store', default=list(SEAM_FIND_CHOICES.keys())[0],
help="Seam estimation method. The default is '%s'." % list(SEAM_FIND_CHOICES.keys())[0],
choices=SEAM_FIND_CHOICES.keys(),
type=str, dest='seam'
)
parser.add_argument(
'--compose_megapix', action='store', default=-1,
help="Resolution for compositing step. Use -1 for original resolution. The default is -1",
type=float, dest='compose_megapix'
)
parser.add_argument(
'--expos_comp', action='store', default=list(EXPOS_COMP_CHOICES.keys())[0],
help="Exposure compensation method. The default is '%s'." % list(EXPOS_COMP_CHOICES.keys())[0],
choices=EXPOS_COMP_CHOICES.keys(),
type=str, dest='expos_comp'
)
parser.add_argument(
'--expos_comp_nr_feeds', action='store', default=1,
help="Number of exposure compensation feed.",
type=np.int32, dest='expos_comp_nr_feeds'
)
parser.add_argument(
'--expos_comp_nr_filtering', action='store', default=2,
help="Number of filtering iterations of the exposure compensation gains.",
type=float, dest='expos_comp_nr_filtering'
)
parser.add_argument(
'--expos_comp_block_size', action='store', default=32,
help="BLock size in pixels used by the exposure compensator. The default is 32.",
type=np.int32, dest='expos_comp_block_size'
)
parser.add_argument(
'--blend', action='store', default=BLEND_CHOICES[0],
help="Blending method. The default is '%s'." % BLEND_CHOICES[0],
choices=BLEND_CHOICES,
type=str, dest='blend'
)
parser.add_argument(
'--blend_strength', action='store', default=5,
help="Blending strength from [0,100] range. The default is 5",
type=np.int32, dest='blend_strength'
)
parser.add_argument(
'--output', action='store', default='result.jpg',
help="The default is 'result.jpg'",
type=str, dest='output'
)
parser.add_argument(
'--timelapse', action='store', default=None,
help="Output warped images separately as frames of a time lapse movie, "
"with 'fixed_' prepended to input file names.",
type=str, dest='timelapse'
)
parser.add_argument(
'--rangewidth', action='store', default=-1,
help="uses range_width to limit number of images to match with.",
type=int, dest='rangewidth'
)
__doc__ += '\n' + parser.format_help()
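# A minimal invocation sketch (paths and option values are hypothetical); the only
# required flag is -i/--images, everything else falls back to the defaults above:
#
#   python stitching_detailed.py -i ./my_photos --features orb --output pano.jpg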
def get_matcher(args):
try_cuda = args.try_cuda
matcher_type = args.matcher
if args.match_conf is None:
if args.features == 'orb':
match_conf = 0.3
else:
match_conf = 0.65
else:
match_conf = args.match_conf
range_width = args.rangewidth
if matcher_type == "affine":
matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
elif range_width == -1:
matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
else:
matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
return matcher
def get_compensator(args):
expos_comp_type = EXPOS_COMP_CHOICES[args.expos_comp]
expos_comp_nr_feeds = args.expos_comp_nr_feeds
expos_comp_block_size = args.expos_comp_block_size
# expos_comp_nr_filtering = args.expos_comp_nr_filtering
if expos_comp_type == cv.detail.ExposureCompensator_CHANNELS:
compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
# compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
elif expos_comp_type == cv.detail.ExposureCompensator_CHANNELS_BLOCKS:
compensator = cv.detail_BlocksChannelsCompensator(
expos_comp_block_size, expos_comp_block_size,
expos_comp_nr_feeds
)
# compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
else:
compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)
return compensator
def main():
args = parser.parse_args()
img_names = sorted(list(paths.list_images(args.images)))
print(args.images)
work_megapix = args.work_megapix
seam_megapix = args.seam_megapix
compose_megapix = args.compose_megapix
conf_thresh = args.conf_thresh
ba_refine_mask = args.ba_refine_mask
wave_correct = args.wave_correct
if wave_correct == 'no':
do_wave_correct = False
else:
do_wave_correct = True
if args.save_graph is None:
save_graph = False
else:
save_graph = True
warp_type = args.warp
blend_type = args.blend
blend_strength = args.blend_strength
result_name = args.output
if args.timelapse is not None:
timelapse = True
if args.timelapse == "as_is":
timelapse_type = cv.detail.Timelapser_AS_IS
elif args.timelapse == "crop":
timelapse_type = cv.detail.Timelapser_CROP
else:
print("Bad timelapse method")
exit()
else:
timelapse = False
finder = FEATURES_FIND_CHOICES[args.features]()
seam_work_aspect = 1
full_img_sizes = []
features = []
images = []
is_work_scale_set = False
is_seam_scale_set = False
is_compose_scale_set = False
for name in img_names:
full_img = cv.imread(cv.samples.findFile(name))
if full_img is None:
print("Cannot read image ", name)
exit()
full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
if work_megapix < 0:
img = full_img
work_scale = 1
is_work_scale_set = True
else:
if is_work_scale_set is False:
work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
is_work_scale_set = True
img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT)
if is_seam_scale_set is False:
seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
seam_work_aspect = seam_scale / work_scale
is_seam_scale_set = True
img_feat = cv.detail.computeImageFeatures2(finder, img)
features.append(img_feat)
img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT)
images.append(img)
matcher = get_matcher(args)
p = matcher.apply2(features)
matcher.collectGarbage()
if save_graph:
with open(args.save_graph, 'w') as fh:
fh.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
indices = cv.detail.leaveBiggestComponent(features, p, 0.3)
img_subset = []
img_names_subset = []
full_img_sizes_subset = []
for i in range(len(indices)):
img_names_subset.append(img_names[indices[i, 0]])
img_subset.append(images[indices[i, 0]])
full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])
images = img_subset
img_names = img_names_subset
full_img_sizes = full_img_sizes_subset
num_images = len(img_names)
if num_images < 2:
print("Need more images")
exit()
estimator = ESTIMATOR_CHOICES[args.estimator]()
b, cameras = estimator.apply(features, p, None)
if not b:
print("Homography estimation failed.")
exit()
for cam in cameras:
cam.R = cam.R.astype(np.float32)
adjuster = BA_COST_CHOICES[args.ba]()
adjuster.setConfThresh(1)
refine_mask = np.zeros((3, 3), np.uint8)
if ba_refine_mask[0] == 'x':
refine_mask[0, 0] = 1
if ba_refine_mask[1] == 'x':
refine_mask[0, 1] = 1
if ba_refine_mask[2] == 'x':
refine_mask[0, 2] = 1
if ba_refine_mask[3] == 'x':
refine_mask[1, 1] = 1
if ba_refine_mask[4] == 'x':
refine_mask[1, 2] = 1
adjuster.setRefinementMask(refine_mask)
b, cameras = adjuster.apply(features, p, cameras)
if not b:
print("Camera parameters adjusting failed.")
exit()
focals = []
for cam in cameras:
focals.append(cam.focal)
    focals.sort()  # sort in place; a bare sorted(focals) discards its result and leaves the list unsorted
if len(focals) % 2 == 1:
warped_image_scale = focals[len(focals) // 2]
else:
warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2
if do_wave_correct:
rmats = []
for cam in cameras:
rmats.append(np.copy(cam.R))
rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_HORIZ)
for idx, cam in enumerate(cameras):
cam.R = rmats[idx]
corners = []
masks_warped = []
images_warped = []
sizes = []
masks = []
for i in range(0, num_images):
um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
masks.append(um)
warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect) # warper could be nullptr?
for idx in range(0, num_images):
K = cameras[idx].K().astype(np.float32)
swa = seam_work_aspect
K[0, 0] *= swa
K[0, 2] *= swa
K[1, 1] *= swa
K[1, 2] *= swa
corner, image_wp = warper.warp(images[idx], K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
corners.append(corner)
sizes.append((image_wp.shape[1], image_wp.shape[0]))
images_warped.append(image_wp)
p, mask_wp = warper.warp(masks[idx], K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
masks_warped.append(mask_wp.get())
images_warped_f = []
for img in images_warped:
imgf = img.astype(np.float32)
images_warped_f.append(imgf)
compensator = get_compensator(args)
compensator.feed(corners=corners, images=images_warped, masks=masks_warped)
seam_finder = SEAM_FIND_CHOICES[args.seam]
seam_finder.find(images_warped_f, corners, masks_warped)
compose_scale = 1
corners = []
sizes = []
blender = None
timelapser = None
# https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
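    # Compositing loop, step by step: for every input image we (1) rescale it to the compose
    # resolution, (2) warp the image and its mask with the per-camera rotation, (3) apply
    # exposure compensation, (4) AND the warped mask with the dilated seam mask, and
    # (5) feed the result to the blender (or to the timelapser when --timelapse is set).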
for idx, name in enumerate(img_names):
full_img = cv.imread(name)
if not is_compose_scale_set:
if compose_megapix > 0:
compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
is_compose_scale_set = True
compose_work_aspect = compose_scale / work_scale
warped_image_scale *= compose_work_aspect
warper = cv.PyRotationWarper(warp_type, warped_image_scale)
for i in range(0, len(img_names)):
cameras[i].focal *= compose_work_aspect
cameras[i].ppx *= compose_work_aspect
cameras[i].ppy *= compose_work_aspect
sz = (full_img_sizes[i][0] * compose_scale, full_img_sizes[i][1] * compose_scale)
K = cameras[i].K().astype(np.float32)
roi = warper.warpRoi(sz, K, cameras[i].R)
corners.append(roi[0:2])
sizes.append(roi[2:4])
if abs(compose_scale - 1) > 1e-1:
img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,
interpolation=cv.INTER_LINEAR_EXACT)
else:
img = full_img
_img_size = (img.shape[1], img.shape[0])
K = cameras[idx].K().astype(np.float32)
corner, image_warped = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
p, mask_warped = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
compensator.apply(idx, corners[idx], image_warped, mask_warped)
image_warped_s = image_warped.astype(np.int16)
dilated_mask = cv.dilate(masks_warped[idx], None)
seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0, cv.INTER_LINEAR_EXACT)
mask_warped = cv.bitwise_and(seam_mask, mask_warped)
if blender is None and not timelapse:
blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
if blend_width < 1:
blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
elif blend_type == "multiband":
blender = cv.detail_MultiBandBlender()
                blender.setNumBands((np.log(blend_width) / np.log(2.) - 1.).astype(np.int32))  # np.int was removed in NumPy 1.24+
elif blend_type == "feather":
blender = cv.detail_FeatherBlender()
blender.setSharpness(1. / blend_width)
blender.prepare(dst_sz)
elif timelapser is None and timelapse:
timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
timelapser.initialize(corners, sizes)
if timelapse:
ma_tones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
timelapser.process(image_warped_s, ma_tones, corners[idx])
pos_s = img_names[idx].rfind("/")
if pos_s == -1:
fixed_file_name = "fixed_" + img_names[idx]
else:
fixed_file_name = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
cv.imwrite(fixed_file_name, timelapser.getDst())
else:
blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
if not timelapse:
result = None
result_mask = None
result, result_mask = blender.blend(result, result_mask)
cv.imwrite(result_name, result)
zoom_x = 600.0 / result.shape[1]
dst = cv.normalize(src=result, dst=None, alpha=255., norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
dst = cv.resize(dst, dsize=None, fx=zoom_x, fy=zoom_x)
cv.imshow(result_name, dst)
cv.waitKey()
print("Done")
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
[
"numpy.sqrt",
"cv2.normalize",
"cv2.detail.leaveBiggestComponent",
"cv2.samples.findFile",
"numpy.log",
"cv2.detail.BestOf2NearestMatcher_create",
"cv2.PyRotationWarper",
"cv2.imshow",
"cv2.detail.Blender_createDefault",
"cv2.detail.matchesGraphAsString",
"cv2.detail.Timelapser_createDefault",
"cv2.destroyAllWindows",
"imutils.paths.list_images",
"cv2.UMat",
"cv2.detail_MultiBandBlender",
"cv2.detail_DpSeamFinder",
"cv2.detail_GraphCutSeamFinder",
"argparse.ArgumentParser",
"cv2.detail.ExposureCompensator_createDefault",
"cv2.detail_ChannelsCompensator",
"cv2.waitKey",
"collections.OrderedDict",
"numpy.ones",
"cv2.detail.waveCorrect",
"cv2.detail_FeatherBlender",
"cv2.detail.computeImageFeatures2",
"cv2.resize",
"cv2.detail_AffineBestOf2NearestMatcher",
"cv2.imread",
"cv2.imwrite",
"numpy.copy",
"cv2.detail.BestOf2NearestRangeMatcher_create",
"cv2.bitwise_and",
"numpy.zeros",
"cv2.detail_BlocksChannelsCompensator",
"cv2.detail.resultRoi",
"cv2.dilate",
"cv2.detail.SeamFinder_createDefault"
] |
[((372, 385), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (383, 385), False, 'from collections import OrderedDict\n'), ((763, 776), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (774, 776), False, 'from collections import OrderedDict\n'), ((1031, 1044), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1042, 1044), False, 'from collections import OrderedDict\n'), ((1625, 1638), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1636, 1638), False, 'from collections import OrderedDict\n'), ((1671, 1713), 'cv2.detail_GraphCutSeamFinder', 'cv.detail_GraphCutSeamFinder', (['"""COST_COLOR"""'], {}), "('COST_COLOR')\n", (1699, 1713), True, 'import cv2 as cv\n'), ((1750, 1797), 'cv2.detail_GraphCutSeamFinder', 'cv.detail_GraphCutSeamFinder', (['"""COST_COLOR_GRAD"""'], {}), "('COST_COLOR_GRAD')\n", (1778, 1797), True, 'import cv2 as cv\n'), ((1830, 1861), 'cv2.detail_DpSeamFinder', 'cv.detail_DpSeamFinder', (['"""COLOR"""'], {}), "('COLOR')\n", (1852, 1861), True, 'import cv2 as cv\n'), ((1898, 1934), 'cv2.detail_DpSeamFinder', 'cv.detail_DpSeamFinder', (['"""COLOR_GRAD"""'], {}), "('COLOR_GRAD')\n", (1920, 1934), True, 'import cv2 as cv\n'), ((1966, 2035), 'cv2.detail.SeamFinder_createDefault', 'cv.detail.SeamFinder_createDefault', (['cv.detail.SeamFinder_VORONOI_SEAM'], {}), '(cv.detail.SeamFinder_VORONOI_SEAM)\n', (2000, 2035), True, 'import cv2 as cv\n'), ((2062, 2121), 'cv2.detail.SeamFinder_createDefault', 'cv.detail.SeamFinder_createDefault', (['cv.detail.SeamFinder_NO'], {}), '(cv.detail.SeamFinder_NO)\n', (2096, 2121), True, 'import cv2 as cv\n'), ((2143, 2156), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2154, 2156), False, 'from collections import OrderedDict\n'), ((2776, 2880), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""stitching_detailed.py"""', 'description': '"""Rotation model images stitcher"""'}), "(prog='stitching_detailed.py', description=\n 'Rotation model images stitcher')\n", (2799, 2880), False, 'import argparse\n'), ((12776, 12825), 'cv2.detail.leaveBiggestComponent', 'cv.detail.leaveBiggestComponent', (['features', 'p', '(0.3)'], {}), '(features, p, 0.3)\n', (12807, 12825), True, 'import cv2 as cv\n'), ((13653, 13679), 'numpy.zeros', 'np.zeros', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (13661, 13679), True, 'import numpy as np\n'), ((14967, 15036), 'cv2.PyRotationWarper', 'cv.PyRotationWarper', (['warp_type', '(warped_image_scale * seam_work_aspect)'], {}), '(warp_type, warped_image_scale * seam_work_aspect)\n', (14986, 15036), True, 'import cv2 as cv\n'), ((20219, 20241), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (20239, 20241), True, 'import cv2 as cv\n'), ((8881, 8947), 'cv2.detail_AffineBestOf2NearestMatcher', 'cv.detail_AffineBestOf2NearestMatcher', (['(False)', 'try_cuda', 'match_conf'], {}), '(False, try_cuda, match_conf)\n', (8918, 8947), True, 'import cv2 as cv\n'), ((9523, 9573), 'cv2.detail_ChannelsCompensator', 'cv.detail_ChannelsCompensator', (['expos_comp_nr_feeds'], {}), '(expos_comp_nr_feeds)\n', (9552, 9573), True, 'import cv2 as cv\n'), ((12296, 12340), 'cv2.detail.computeImageFeatures2', 'cv.detail.computeImageFeatures2', (['finder', 'img'], {}), '(finder, img)\n', (12327, 12340), True, 'import cv2 as cv\n'), ((12389, 12495), 'cv2.resize', 'cv.resize', ([], {'src': 'full_img', 'dsize': 'None', 'fx': 'seam_scale', 'fy': 'seam_scale', 'interpolation': 'cv.INTER_LINEAR_EXACT'}), '(src=full_img, dsize=None, fx=seam_scale, 
fy=seam_scale,\n interpolation=cv.INTER_LINEAR_EXACT)\n', (12398, 12495), True, 'import cv2 as cv\n'), ((14579, 14637), 'cv2.detail.waveCorrect', 'cv.detail.waveCorrect', (['rmats', 'cv.detail.WAVE_CORRECT_HORIZ'], {}), '(rmats, cv.detail.WAVE_CORRECT_HORIZ)\n', (14600, 14637), True, 'import cv2 as cv\n'), ((16267, 16282), 'cv2.imread', 'cv.imread', (['name'], {}), '(name)\n', (16276, 16282), True, 'import cv2 as cv\n'), ((17956, 17990), 'cv2.dilate', 'cv.dilate', (['masks_warped[idx]', 'None'], {}), '(masks_warped[idx], None)\n', (17965, 17990), True, 'import cv2 as cv\n'), ((18011, 18113), 'cv2.resize', 'cv.resize', (['dilated_mask', '(mask_warped.shape[1], mask_warped.shape[0])', '(0)', '(0)', 'cv.INTER_LINEAR_EXACT'], {}), '(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0,\n cv.INTER_LINEAR_EXACT)\n', (18020, 18113), True, 'import cv2 as cv\n'), ((18132, 18170), 'cv2.bitwise_and', 'cv.bitwise_and', (['seam_mask', 'mask_warped'], {}), '(seam_mask, mask_warped)\n', (18146, 18170), True, 'import cv2 as cv\n'), ((19841, 19872), 'cv2.imwrite', 'cv.imwrite', (['result_name', 'result'], {}), '(result_name, result)\n', (19851, 19872), True, 'import cv2 as cv\n'), ((19928, 20021), 'cv2.normalize', 'cv.normalize', ([], {'src': 'result', 'dst': 'None', 'alpha': '(255.0)', 'norm_type': 'cv.NORM_MINMAX', 'dtype': 'cv.CV_8U'}), '(src=result, dst=None, alpha=255.0, norm_type=cv.NORM_MINMAX,\n dtype=cv.CV_8U)\n', (19940, 20021), True, 'import cv2 as cv\n'), ((20031, 20079), 'cv2.resize', 'cv.resize', (['dst'], {'dsize': 'None', 'fx': 'zoom_x', 'fy': 'zoom_x'}), '(dst, dsize=None, fx=zoom_x, fy=zoom_x)\n', (20040, 20079), True, 'import cv2 as cv\n'), ((20088, 20115), 'cv2.imshow', 'cv.imshow', (['result_name', 'dst'], {}), '(result_name, dst)\n', (20097, 20115), True, 'import cv2 as cv\n'), ((20124, 20136), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (20134, 20136), True, 'import cv2 as cv\n'), ((8994, 9054), 'cv2.detail.BestOf2NearestMatcher_create', 'cv.detail.BestOf2NearestMatcher_create', (['try_cuda', 'match_conf'], {}), '(try_cuda, match_conf)\n', (9032, 9054), True, 'import cv2 as cv\n'), ((9083, 9161), 'cv2.detail.BestOf2NearestRangeMatcher_create', 'cv.detail.BestOf2NearestRangeMatcher_create', (['range_width', 'try_cuda', 'match_conf'], {}), '(range_width, try_cuda, match_conf)\n', (9126, 9161), True, 'import cv2 as cv\n'), ((9748, 9854), 'cv2.detail_BlocksChannelsCompensator', 'cv.detail_BlocksChannelsCompensator', (['expos_comp_block_size', 'expos_comp_block_size', 'expos_comp_nr_feeds'], {}), '(expos_comp_block_size,\n expos_comp_block_size, expos_comp_nr_feeds)\n', (9783, 9854), True, 'import cv2 as cv\n'), ((9994, 10054), 'cv2.detail.ExposureCompensator_createDefault', 'cv.detail.ExposureCompensator_createDefault', (['expos_comp_type'], {}), '(expos_comp_type)\n', (10037, 10054), True, 'import cv2 as cv\n'), ((10151, 10181), 'imutils.paths.list_images', 'paths.list_images', (['args.images'], {}), '(args.images)\n', (10168, 10181), False, 'from imutils import paths\n'), ((11402, 11427), 'cv2.samples.findFile', 'cv.samples.findFile', (['name'], {}), '(name)\n', (11421, 11427), True, 'import cv2 as cv\n'), ((11938, 12044), 'cv2.resize', 'cv.resize', ([], {'src': 'full_img', 'dsize': 'None', 'fx': 'work_scale', 'fy': 'work_scale', 'interpolation': 'cv.INTER_LINEAR_EXACT'}), '(src=full_img, dsize=None, fx=work_scale, fy=work_scale,\n interpolation=cv.INTER_LINEAR_EXACT)\n', (11947, 12044), True, 'import cv2 as cv\n'), ((16647, 16697), 'cv2.PyRotationWarper', 
'cv.PyRotationWarper', (['warp_type', 'warped_image_scale'], {}), '(warp_type, warped_image_scale)\n', (16666, 16697), True, 'import cv2 as cv\n'), ((17259, 17371), 'cv2.resize', 'cv.resize', ([], {'src': 'full_img', 'dsize': 'None', 'fx': 'compose_scale', 'fy': 'compose_scale', 'interpolation': 'cv.INTER_LINEAR_EXACT'}), '(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,\n interpolation=cv.INTER_LINEAR_EXACT)\n', (17268, 17371), True, 'import cv2 as cv\n'), ((17658, 17705), 'numpy.ones', 'np.ones', (['(img.shape[0], img.shape[1])', 'np.uint8'], {}), '((img.shape[0], img.shape[1]), np.uint8)\n', (17665, 17705), True, 'import numpy as np\n'), ((18239, 18292), 'cv2.detail.Blender_createDefault', 'cv.detail.Blender_createDefault', (['cv.detail.Blender_NO'], {}), '(cv.detail.Blender_NO)\n', (18270, 18292), True, 'import cv2 as cv\n'), ((18314, 18363), 'cv2.detail.resultRoi', 'cv.detail.resultRoi', ([], {'corners': 'corners', 'sizes': 'sizes'}), '(corners=corners, sizes=sizes)\n', (18333, 18363), True, 'import cv2 as cv\n'), ((19151, 19220), 'numpy.ones', 'np.ones', (['(image_warped_s.shape[0], image_warped_s.shape[1])', 'np.uint8'], {}), '((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)\n', (19158, 19220), True, 'import numpy as np\n'), ((12114, 12189), 'numpy.sqrt', 'np.sqrt', (['(seam_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))'], {}), '(seam_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))\n', (12121, 12189), True, 'import numpy as np\n'), ((12702, 12759), 'cv2.detail.matchesGraphAsString', 'cv.detail.matchesGraphAsString', (['img_names', 'p', 'conf_thresh'], {}), '(img_names, p, conf_thresh)\n', (12732, 12759), True, 'import cv2 as cv\n'), ((14547, 14561), 'numpy.copy', 'np.copy', (['cam.R'], {}), '(cam.R)\n', (14554, 14561), True, 'import numpy as np\n'), ((14867, 14926), 'numpy.ones', 'np.ones', (['(images[i].shape[0], images[i].shape[1])', 'np.uint8'], {}), '((images[i].shape[0], images[i].shape[1]), np.uint8)\n', (14874, 14926), True, 'import numpy as np\n'), ((18502, 18555), 'cv2.detail.Blender_createDefault', 'cv.detail.Blender_createDefault', (['cv.detail.Blender_NO'], {}), '(cv.detail.Blender_NO)\n', (18533, 18555), True, 'import cv2 as cv\n'), ((19005, 19055), 'cv2.detail.Timelapser_createDefault', 'cv.detail.Timelapser_createDefault', (['timelapse_type'], {}), '(timelapse_type)\n', (19039, 19055), True, 'import cv2 as cv\n'), ((19645, 19668), 'cv2.UMat', 'cv.UMat', (['image_warped_s'], {}), '(image_warped_s)\n', (19652, 19668), True, 'import cv2 as cv\n'), ((11808, 11883), 'numpy.sqrt', 'np.sqrt', (['(work_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))'], {}), '(work_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))\n', (11815, 11883), True, 'import numpy as np\n'), ((16397, 16475), 'numpy.sqrt', 'np.sqrt', (['(compose_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))'], {}), '(compose_megapix * 1000000.0 / (full_img.shape[0] * full_img.shape[1]))\n', (16404, 16475), True, 'import numpy as np\n'), ((18390, 18420), 'numpy.sqrt', 'np.sqrt', (['(dst_sz[2] * dst_sz[3])'], {}), '(dst_sz[2] * dst_sz[3])\n', (18397, 18420), True, 'import numpy as np\n'), ((18626, 18654), 'cv2.detail_MultiBandBlender', 'cv.detail_MultiBandBlender', ([], {}), '()\n', (18652, 18654), True, 'import cv2 as cv\n'), ((18815, 18841), 'cv2.detail_FeatherBlender', 'cv.detail_FeatherBlender', ([], {}), '()\n', (18839, 18841), True, 'import cv2 as cv\n'), ((18692, 18711), 'numpy.log', 'np.log', (['blend_width'], 
{}), '(blend_width)\n', (18698, 18711), True, 'import numpy as np\n'), ((18714, 18725), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (18720, 18725), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from sklearn.cluster import KMeans
def give_shape(n, arena, w_pos, r):
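    # NOTE: 'cap' is not defined in this file; it is assumed to be a module-level
    # cv2.VideoCapture opened elsewhere before give_shape() is called.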
ret, frame = cap.read()
cv2.imwrite("new_a.jpg", frame)
# frame = cv2.imread("new_a.jpg")
frame = frame[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
shape = frame.shape
print(shape)
y_pos = int(w_pos / n)
x_pos = w_pos % n
print(y_pos, x_pos)
nr = [(shape[0] / n) * x_pos, (shape[1] / n) * y_pos, (shape[0] / n) * (x_pos + 1), (shape[1] / n) * (y_pos + 1)]
print(nr)
frame = frame[int(nr[1]):int(nr[3]), int(nr[0]):int(nr[2])]
img_size = frame.shape
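    # Posterise the cropped grid cell with k-means (12 colour clusters) so that the
    # red/yellow ranges loaded below match the flattened colours more reliably.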
X = frame.reshape(img_size[0] * img_size[1], img_size[2])
km = KMeans(n_clusters=12)
km.fit(X)
X_compressed = km.cluster_centers_[km.labels_]
X_compressed = np.clip(X_compressed.astype('uint8'), 0, 255)
new_img = X_compressed.reshape(img_size[0], img_size[1], img_size[2])
red_range = np.load("Red_Range.npy")
yellow_range = np.load("Yellow_Range.npy")
maskBGR = cv2.inRange(new_img, red_range[0], red_range[1])
kernel = np.ones((n, n), np.uint8)
maskBGR = cv2.erode(maskBGR, kernel, iterations=1)
# cv2.imshow("kernel", maskBGR)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
contours, hierarchy = cv2.findContours(maskBGR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num = 0
for cnt in contours:
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 100:
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w * h
extent = float(area) / rect_area
# red circle is 1 red square is 2 yellow circle is 3 and yellow square is 4
if extent < 0.8: # circle
num = 1
elif extent >= 0.8: # square
num = 2
maskBGR = cv2.inRange(new_img, yellow_range[0], yellow_range[1])
kernel = np.ones((n, n), np.uint8)
maskBGR = cv2.erode(maskBGR, kernel, iterations=1)
# cv2.imshow("kernel", maskBGR)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
contours, hierarchy = cv2.findContours(maskBGR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 100:
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w * h
extent = float(area) / rect_area
# red circle is 1 red square is 2 yellow circle is 3 and yellow square is 4
if extent < 0.8: # circle
num = 3
elif extent >= 0.8: # square
num = 4
arena[y_pos][x_pos] = num
print(num)
return num
"""arena = [[3, 4, 1, 0, 3, 0, 1, 4, 3],
[4, 2, 2, 3, 1, 2, 1, 2, 4],
[3, 1, 1, 3, 3, 3, 3, 2, 1],
[0, 4, 4, 4, 1, 1, 2, 2, 0],
[3, 2, 0, 3, 0, 2, 0, 4, 1],
[0, 1, 4, 2, 4, 4, 3, 3, 0],
[3, 1, 2, 1, 1, 4, 1, 4, 1],
[4, 2, 2, 4, 2, 3, 2, 2, 4],
[3, 4, 1, 0, 0, 0, 1, 4, 3]]
r = np.load("roi_data.npy")
give_shape(9, arena, 3, r)"""
|
[
"sklearn.cluster.KMeans",
"cv2.imwrite",
"numpy.ones",
"cv2.inRange",
"cv2.erode",
"cv2.contourArea",
"cv2.moments",
"cv2.findContours",
"numpy.load",
"cv2.boundingRect"
] |
[((142, 173), 'cv2.imwrite', 'cv2.imwrite', (['"""new_a.jpg"""', 'frame'], {}), "('new_a.jpg', frame)\n", (153, 173), False, 'import cv2\n'), ((713, 734), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(12)'}), '(n_clusters=12)\n', (719, 734), False, 'from sklearn.cluster import KMeans\n'), ((960, 984), 'numpy.load', 'np.load', (['"""Red_Range.npy"""'], {}), "('Red_Range.npy')\n", (967, 984), True, 'import numpy as np\n'), ((1005, 1032), 'numpy.load', 'np.load', (['"""Yellow_Range.npy"""'], {}), "('Yellow_Range.npy')\n", (1012, 1032), True, 'import numpy as np\n'), ((1048, 1096), 'cv2.inRange', 'cv2.inRange', (['new_img', 'red_range[0]', 'red_range[1]'], {}), '(new_img, red_range[0], red_range[1])\n', (1059, 1096), False, 'import cv2\n'), ((1111, 1136), 'numpy.ones', 'np.ones', (['(n, n)', 'np.uint8'], {}), '((n, n), np.uint8)\n', (1118, 1136), True, 'import numpy as np\n'), ((1154, 1194), 'cv2.erode', 'cv2.erode', (['maskBGR', 'kernel'], {'iterations': '(1)'}), '(maskBGR, kernel, iterations=1)\n', (1163, 1194), False, 'import cv2\n'), ((1312, 1377), 'cv2.findContours', 'cv2.findContours', (['maskBGR', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(maskBGR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1328, 1377), False, 'import cv2\n'), ((1870, 1924), 'cv2.inRange', 'cv2.inRange', (['new_img', 'yellow_range[0]', 'yellow_range[1]'], {}), '(new_img, yellow_range[0], yellow_range[1])\n', (1881, 1924), False, 'import cv2\n'), ((1939, 1964), 'numpy.ones', 'np.ones', (['(n, n)', 'np.uint8'], {}), '((n, n), np.uint8)\n', (1946, 1964), True, 'import numpy as np\n'), ((1980, 2020), 'cv2.erode', 'cv2.erode', (['maskBGR', 'kernel'], {'iterations': '(1)'}), '(maskBGR, kernel, iterations=1)\n', (1989, 2020), False, 'import cv2\n'), ((2138, 2203), 'cv2.findContours', 'cv2.findContours', (['maskBGR', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(maskBGR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2154, 2203), False, 'import cv2\n'), ((1430, 1446), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (1441, 1446), False, 'import cv2\n'), ((1463, 1483), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1478, 1483), False, 'import cv2\n'), ((2243, 2259), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (2254, 2259), False, 'import cv2\n'), ((2276, 2296), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (2291, 2296), False, 'import cv2\n'), ((1534, 1555), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (1550, 1555), False, 'import cv2\n'), ((2347, 2368), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (2363, 2368), False, 'import cv2\n')]
|
import os
import math
import glob
import random
import importlib
from pathlib import Path
from collections import defaultdict
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
from optimizers import get_optimizer
from schedulers import get_scheduler
from utility.helper import count_parameters
SAMPLE_RATE = 16000
class Runner():
"""
    Handles the high-level concepts of an ML experiment,
    e.g. training loop, evaluation loop, upstream propagation, optimization, tensorboard logging, checkpoint saving
"""
def __init__(self, args, config):
self.args = args
self.config = config
self.logger = SummaryWriter(args.expdir)
self.init_ckpt = torch.load(self.args.past_exp, map_location='cpu') if self.args.past_exp else {}
self.upstream = self._get_upstream()
self.downstream = self._get_downstream()
# set up the downstream name used by Tensorboard
self.downstream_name = self.args.downstream
if hasattr(self.downstream, 'get_downstream_name'):
self.downstream_name = self.downstream.get_downstream_name()
def _get_upstream(self):
Upstream = getattr(importlib.import_module('hubconf'), self.args.upstream)
upstream = Upstream(
feature_selection = self.args.upstream_feature_selection,
model_config = self.args.upstream_model_config,
refresh = self.args.upstream_refresh,
ckpt = self.args.upstream_ckpt,
).to(self.args.device)
assert hasattr(upstream, 'forward')
assert hasattr(upstream, 'get_output_dim')
assert hasattr(upstream, 'get_downsample_rate')
print(f'[Runner] - Upstream model architecture: {upstream}')
print(f'[Runner] - Upstream output dimension: {upstream.get_output_dim()}')
downsample = upstream.get_downsample_rate()
print(f'[Runner] - Upstream downsample rate: {downsample} ({downsample / SAMPLE_RATE * 1000} ms/frame)')
init_upstream = self.init_ckpt.get('Upstream')
if init_upstream:
print('[Runner] - Loading upstream weights from the previous experiment')
upstream.load_state_dict(init_upstream)
return upstream
def _get_downstream(self):
module_path = f'downstream.{self.args.downstream}.expert'
Downstream = getattr(importlib.import_module(module_path), 'DownstreamExpert')
downstream = Downstream(
upstream_dim = self.upstream.get_output_dim(),
**self.config,
**vars(self.args)
).to(self.args.device)
print(f'[Runner] - Downstream model architecture: {downstream}')
print(f'[Runner] - Downstream has {count_parameters(downstream)} parameters')
assert hasattr(downstream, 'get_train_dataloader')
assert hasattr(downstream, 'get_dev_dataloader')
assert hasattr(downstream, 'get_test_dataloader')
assert hasattr(downstream, 'forward')
assert hasattr(downstream, 'log_records')
init_downstream = self.init_ckpt.get('Downstream')
if init_downstream:
print('[Runner] - Loading downstream weights from the previous experiment')
downstream.load_state_dict(init_downstream)
return downstream
def _get_optimizer(self, model_params):
optimizer = get_optimizer(
model_params,
self.config['runner']['total_steps'],
self.config['optimizer']
)
init_optimizer = self.init_ckpt.get('Optimizer')
if init_optimizer:
print('[Runner] - Loading optimizer weights from the previous experiment')
optimizer.load_state_dict(init_optimizer)
return optimizer
def _get_scheduler(self, optimizer):
scheduler = get_scheduler(
optimizer,
self.config['runner']['total_steps'],
self.config['scheduler']
)
init_scheduler = self.init_ckpt.get('Scheduler')
if init_scheduler:
print('[Runner] - Loading scheduler weights from the previous experiment')
scheduler.load_state_dict(init_scheduler)
return scheduler
def train(self):
# set model train/eval modes
self.downstream.train()
self.upstream.eval()
if self.args.upstream_trainable:
self.upstream.train()
# set optimizer
model_params = [self.downstream]
if self.args.upstream_trainable:
model_params.append(self.upstream)
optimizer = self._get_optimizer(model_params)
# set scheduler
scheduler = None
if self.config.get('scheduler'):
scheduler = self._get_scheduler(optimizer)
# set progress bar
pbar = tqdm(total=self.config['runner']['total_steps'], dynamic_ncols=True, desc='overall')
init_step = self.init_ckpt.get('Step')
if init_step:
pbar.n = init_step
# prepare data
dataloader = self.downstream.get_train_dataloader()
all_loss = []
backward_steps = 0
records = defaultdict(list)
prefix = f'{self.downstream_name}/train-'
while pbar.n < pbar.total:
for batch_id, (wavs, *others) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc='train')):
# try/except block for forward/backward
try:
if pbar.n >= pbar.total:
break
global_step = pbar.n + 1
wavs = [wav.to(self.args.device) for wav in wavs]
if self.args.upstream_trainable:
features = self.upstream(wavs)
else:
with torch.no_grad():
features = self.upstream(wavs)
loss = self.downstream(
features, *others,
records = records,
logger = self.logger,
prefix = prefix,
global_step = global_step,
log_step = self.config['runner']['log_step'],
batch_id = batch_id,
batch_num = len(dataloader),
)
gradient_accumulate_steps = self.config['runner'].get('gradient_accumulate_steps')
(loss / gradient_accumulate_steps).backward()
except RuntimeError as e:
if 'CUDA out of memory' in str(e):
print(f'[Runner] - CUDA out of memory at step {global_step}')
with torch.cuda.device(self.args.device):
torch.cuda.empty_cache()
optimizer.zero_grad()
continue
else:
raise
# record loss
all_loss.append(loss.item())
del loss
# whether to accumulate gradient
backward_steps += 1
if backward_steps % gradient_accumulate_steps > 0:
continue
# gradient clipping
paras = list(self.downstream.parameters())
if self.args.upstream_trainable:
paras += list(self.upstream.parameters())
grad_norm = torch.nn.utils.clip_grad_norm_(paras, self.config['runner']['gradient_clipping'])
# optimize
if math.isnan(grad_norm):
print(f'[Runner] - grad norm is NaN at step {global_step}')
else:
optimizer.step()
optimizer.zero_grad()
# adjust learning rate
if scheduler:
scheduler.step()
# logging
if global_step % self.config['runner']['log_step'] == 0:
# log loss
average_loss = torch.FloatTensor(all_loss).mean().item()
self.logger.add_scalar(f'{prefix}loss', average_loss, global_step=global_step)
all_loss = []
# log customized contents
self.downstream.log_records(
records = records,
logger = self.logger,
prefix = prefix,
global_step = global_step,
log_step = self.config['runner']['log_step'],
)
records = defaultdict(list)
# evaluation and save checkpoint
save_names = []
if global_step % self.config['runner']['eval_step'] == 0:
for split in self.config['runner']['eval_dataloaders']:
save_names += self.evaluate(split, global_step)
if global_step % self.config['runner']['save_step'] == 0:
def check_ckpt_num(directory):
max_keep = self.config['runner']['max_keep']
ckpt_pths = glob.glob(f'{directory}/states-*.ckpt')
if len(ckpt_pths) >= max_keep:
ckpt_pths = sorted(ckpt_pths, key=lambda pth: int(pth.split('-')[-1].split('.')[0]))
for ckpt_pth in ckpt_pths[:len(ckpt_pths) - max_keep + 1]:
os.remove(ckpt_pth)
check_ckpt_num(self.args.expdir)
save_names.append(f'states-{global_step}.ckpt')
if len(save_names) > 0:
all_states = {
'Downstream': self.downstream.state_dict(),
'Optimizer': optimizer.state_dict(),
'Step': global_step,
'Args': self.args,
'Config': self.config,
}
if scheduler:
all_states['Scheduler'] = scheduler.state_dict()
if self.args.upstream_trainable:
all_states['Upstream'] = self.upstream.state_dict()
save_paths = [os.path.join(self.args.expdir, name) for name in save_names]
tqdm.write(f'[Runner] - Save the checkpoint to:')
for i, path in enumerate(save_paths):
tqdm.write(f'{i + 1}. {path}')
torch.save(all_states, path)
pbar.update(1)
Path(f'{self.args.expdir}/train_finished').touch(exist_ok=True)
pbar.close()
def evaluate(self, split='test', global_step=0):
# fix seed to guarantee the same evaluation protocol across steps
random.seed(self.args.seed)
np.random.seed(self.args.seed)
torch.manual_seed(self.args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(self.args.seed)
with torch.cuda.device(self.args.device):
torch.cuda.empty_cache()
# record original train/eval states and set all models to eval
downstream_training = self.downstream.training
upstream_training = self.upstream.training
self.downstream.eval()
self.upstream.eval()
# prepare data
dataloader = eval(f'self.downstream.get_{split}_dataloader')()
# main evaluation block
all_loss = []
records = defaultdict(list)
prefix = f'{self.downstream_name}/{split}-'
for batch_id, (wavs, *others) in enumerate(tqdm(dataloader, dynamic_ncols=True, desc=split)):
wavs = [wav.to(self.args.device) for wav in wavs]
with torch.no_grad():
features = self.upstream(wavs)
loss = self.downstream(
features, *others,
records = records,
logger = self.logger,
prefix = prefix,
global_step = global_step,
log_step = self.config['runner']['log_step'],
batch_id = batch_id,
batch_num = len(dataloader),
)
all_loss.append(loss.item())
# log loss
average_loss = torch.FloatTensor(all_loss).mean().item()
self.logger.add_scalar(f'{prefix}loss', average_loss, global_step=global_step)
all_loss = []
# log customized contents
save_names = self.downstream.log_records(
records = records,
logger = self.logger,
prefix = prefix,
global_step = global_step,
log_step = self.config['runner']['log_step'],
)
records = defaultdict(list)
# prepare back to training
with torch.cuda.device(self.args.device):
torch.cuda.empty_cache()
if downstream_training:
self.downstream.train()
if upstream_training:
self.upstream.train()
return [] if type(save_names) is not list else save_names
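# A minimal driver sketch (hypothetical args/config objects; the real entry point that
# builds them from the command line lives elsewhere in the project):
#
#   args = argparse.Namespace(expdir='exp/demo', device='cuda', past_exp=None,
#                             upstream='wav2vec2', downstream='phone_linear', ...)
#   config = yaml.safe_load(open('config.yaml'))
#   runner = Runner(args, config)
#   runner.train()   # training loop with periodic evaluate() calls and checkpointing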
|
[
"torch.nn.utils.clip_grad_norm_",
"schedulers.get_scheduler",
"torch.cuda.is_available",
"os.remove",
"tensorboardX.SummaryWriter",
"torch.cuda.device",
"pathlib.Path",
"tqdm.tqdm.write",
"numpy.random.seed",
"glob.glob",
"importlib.import_module",
"torch.save",
"utility.helper.count_parameters",
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"optimizers.get_optimizer",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"collections.defaultdict",
"torch.no_grad",
"torch.FloatTensor",
"math.isnan"
] |
[((703, 729), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.expdir'], {}), '(args.expdir)\n', (716, 729), False, 'from tensorboardX import SummaryWriter\n'), ((3408, 3504), 'optimizers.get_optimizer', 'get_optimizer', (['model_params', "self.config['runner']['total_steps']", "self.config['optimizer']"], {}), "(model_params, self.config['runner']['total_steps'], self.\n config['optimizer'])\n", (3421, 3504), False, 'from optimizers import get_optimizer\n'), ((3861, 3954), 'schedulers.get_scheduler', 'get_scheduler', (['optimizer', "self.config['runner']['total_steps']", "self.config['scheduler']"], {}), "(optimizer, self.config['runner']['total_steps'], self.config[\n 'scheduler'])\n", (3874, 3954), False, 'from schedulers import get_scheduler\n'), ((4840, 4929), 'tqdm.tqdm', 'tqdm', ([], {'total': "self.config['runner']['total_steps']", 'dynamic_ncols': '(True)', 'desc': '"""overall"""'}), "(total=self.config['runner']['total_steps'], dynamic_ncols=True, desc=\n 'overall')\n", (4844, 4929), False, 'from tqdm import tqdm\n'), ((5177, 5194), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5188, 5194), False, 'from collections import defaultdict\n'), ((10890, 10917), 'random.seed', 'random.seed', (['self.args.seed'], {}), '(self.args.seed)\n', (10901, 10917), False, 'import random\n'), ((10926, 10956), 'numpy.random.seed', 'np.random.seed', (['self.args.seed'], {}), '(self.args.seed)\n', (10940, 10956), True, 'import numpy as np\n'), ((10965, 10998), 'torch.manual_seed', 'torch.manual_seed', (['self.args.seed'], {}), '(self.args.seed)\n', (10982, 10998), False, 'import torch\n'), ((11010, 11035), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11033, 11035), False, 'import torch\n'), ((11585, 11602), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (11596, 11602), False, 'from collections import defaultdict\n'), ((12872, 12889), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12883, 12889), False, 'from collections import defaultdict\n'), ((756, 806), 'torch.load', 'torch.load', (['self.args.past_exp'], {'map_location': '"""cpu"""'}), "(self.args.past_exp, map_location='cpu')\n", (766, 806), False, 'import torch\n'), ((1232, 1266), 'importlib.import_module', 'importlib.import_module', (['"""hubconf"""'], {}), "('hubconf')\n", (1255, 1266), False, 'import importlib\n'), ((2415, 2451), 'importlib.import_module', 'importlib.import_module', (['module_path'], {}), '(module_path)\n', (2438, 2451), False, 'import importlib\n'), ((11049, 11091), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['self.args.seed'], {}), '(self.args.seed)\n', (11075, 11091), False, 'import torch\n'), ((11105, 11140), 'torch.cuda.device', 'torch.cuda.device', (['self.args.device'], {}), '(self.args.device)\n', (11122, 11140), False, 'import torch\n'), ((11154, 11178), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11176, 11178), False, 'import torch\n'), ((11707, 11755), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'dynamic_ncols': '(True)', 'desc': 'split'}), '(dataloader, dynamic_ncols=True, desc=split)\n', (11711, 11755), False, 'from tqdm import tqdm\n'), ((12939, 12974), 'torch.cuda.device', 'torch.cuda.device', (['self.args.device'], {}), '(self.args.device)\n', (12956, 12974), False, 'import torch\n'), ((12988, 13012), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13010, 13012), False, 'import torch\n'), ((5336, 5386), 'tqdm.tqdm', 'tqdm', (['dataloader'], 
{'dynamic_ncols': '(True)', 'desc': '"""train"""'}), "(dataloader, dynamic_ncols=True, desc='train')\n", (5340, 5386), False, 'from tqdm import tqdm\n'), ((7496, 7582), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['paras', "self.config['runner']['gradient_clipping']"], {}), "(paras, self.config['runner'][\n 'gradient_clipping'])\n", (7526, 7582), False, 'import torch\n'), ((7625, 7646), 'math.isnan', 'math.isnan', (['grad_norm'], {}), '(grad_norm)\n', (7635, 7646), False, 'import math\n'), ((10667, 10709), 'pathlib.Path', 'Path', (['f"""{self.args.expdir}/train_finished"""'], {}), "(f'{self.args.expdir}/train_finished')\n", (10671, 10709), False, 'from pathlib import Path\n'), ((11838, 11853), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11851, 11853), False, 'import torch\n'), ((2770, 2798), 'utility.helper.count_parameters', 'count_parameters', (['downstream'], {}), '(downstream)\n', (2786, 2798), False, 'from utility.helper import count_parameters\n'), ((8672, 8689), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8683, 8689), False, 'from collections import defaultdict\n'), ((10410, 10459), 'tqdm.tqdm.write', 'tqdm.write', (['f"""[Runner] - Save the checkpoint to:"""'], {}), "(f'[Runner] - Save the checkpoint to:')\n", (10420, 10459), False, 'from tqdm import tqdm\n'), ((9226, 9265), 'glob.glob', 'glob.glob', (['f"""{directory}/states-*.ckpt"""'], {}), "(f'{directory}/states-*.ckpt')\n", (9235, 9265), False, 'import glob\n'), ((10329, 10365), 'os.path.join', 'os.path.join', (['self.args.expdir', 'name'], {}), '(self.args.expdir, name)\n', (10341, 10365), False, 'import os\n'), ((10542, 10572), 'tqdm.tqdm.write', 'tqdm.write', (['f"""{i + 1}. {path}"""'], {}), "(f'{i + 1}. {path}')\n", (10552, 10572), False, 'from tqdm import tqdm\n'), ((10597, 10625), 'torch.save', 'torch.save', (['all_states', 'path'], {}), '(all_states, path)\n', (10607, 10625), False, 'import torch\n'), ((12417, 12444), 'torch.FloatTensor', 'torch.FloatTensor', (['all_loss'], {}), '(all_loss)\n', (12434, 12444), False, 'import torch\n'), ((5820, 5835), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5833, 5835), False, 'import torch\n'), ((6737, 6772), 'torch.cuda.device', 'torch.cuda.device', (['self.args.device'], {}), '(self.args.device)\n', (6754, 6772), False, 'import torch\n'), ((6802, 6826), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6824, 6826), False, 'import torch\n'), ((9553, 9572), 'os.remove', 'os.remove', (['ckpt_pth'], {}), '(ckpt_pth)\n', (9562, 9572), False, 'import os\n'), ((8098, 8125), 'torch.FloatTensor', 'torch.FloatTensor', (['all_loss'], {}), '(all_loss)\n', (8115, 8125), False, 'import torch\n')]
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import ParameterTuple, Parameter
context.set_context(device_target='GPU')
class LstmNet(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(LstmNet, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = P.LSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
input_np = np.array([[[0.6755, -1.6607, 0.1367, -0.9209, -1.7088, 0.3953, 2.7120, 0.1103, 0.1504, -0.3611],
[0.4276, -0.7850, -0.3758, 0.8604, -0.1361, -1.3618, -0.6251, -0.8391, 0.8142, 0.4068]],
[[-0.6424, -0.6095, 0.6639, -0.7253, 2.1190, -0.2840, 0.3858, 0.1691, 0.6764, 1.2903],
[0.7918, 0.4147, -0.5089, -0.3582, -1.4279, -0.7975, -0.0390, -0.4718, 0.4322, -0.7995]],
[[-1.5612, 0.0120, -0.7289, -1.2479, -0.6197, -0.6099, 0.9543, 0.4362, -1.3141, 0.4273],
[-0.6656, -0.6626, -0.5883, -0.6922, 0.5512, 1.7031, -1.2812, -0.2004, -0.9224, 0.4106]],
[[-0.9667, -0.6296, -0.7310, 1.2503, -0.1650, 1.2050, -0.1704, -0.5215, 0.1595, 0.3904],
[0.1026, -0.6821, -0.4387, -1.1637, -0.5000, 0.0590, 0.5219, -0.6835, 2.4406, 0.7135]],
[[-0.4710, 0.6558, -0.3144, -1.2213, 0.1556, -0.3836, -0.1081, -0.1440, -1.1231, 0.6279],
[-0.8449, -0.2184, -0.1806, -0.0615, -0.5660, -0.3556, 1.6891, -1.0286, 1.3361,
-0.4313]]]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
self.h = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='h')
self.c = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='c')
wih = np.array([[3.4021e-01, -4.6622e-01, 4.5117e-01, 2.3627e-01, 3.7844e-01,
2.8770e-01, 4.1631e-01, -6.2628e-01, -4.8008e-01, -4.9148e-01],
[-6.4257e-02, -2.4807e-01, 1.3550e-02, 6.8946e-01, -1.2608e-02,
-7.1719e-02, -1.3566e-01, -4.9215e-01, 2.8509e-01, -6.3540e-01],
[-6.9863e-01, 5.9773e-01, -3.9062e-01, -7.6151e-02, 5.6803e-04,
-7.0420e-01, -6.1822e-01, 4.1854e-01, 4.0596e-01, 6.4867e-01],
[-3.0253e-01, -1.9464e-01, 7.0591e-01, 4.9368e-01, -5.9758e-01,
1.3251e-02, 3.5685e-01, -3.7640e-01, -4.4612e-01, 5.1794e-01],
[-3.2140e-01, 5.5578e-01, 6.3589e-01, -6.4249e-01, 5.7258e-01,
2.4256e-01, -2.7954e-01, 2.5202e-01, 2.9235e-01, -3.9979e-01],
[1.6547e-01, -7.9030e-02, -2.0045e-01, 6.2484e-01, -1.0727e-01,
-5.0010e-01, -2.9165e-01, -1.7620e-01, 1.5939e-01, -2.2744e-01],
[-4.0835e-01, 3.6751e-01, 4.7989e-01, 5.8886e-01, 5.3598e-01,
-2.9055e-01, -2.8129e-01, 6.0219e-01, 4.9193e-01, 3.3115e-01],
[-5.6894e-01, -5.0359e-01, 4.7491e-01, 5.8110e-01, -5.4921e-01,
-6.1343e-01, -5.8236e-02, -3.7682e-01, 4.8338e-01, -2.1551e-01]]).astype(np.float32).reshape(
[1, -1])
whh = np.array([[-0.4820, -0.2350],
[-0.1195, 0.0519],
[0.4511, -0.3961],
[-0.5962, 0.0906],
[0.2162, -0.1178],
[0.6237, 0.0711],
[0.1867, -0.1225],
[0.1831, 0.0850]]).astype(np.float32).reshape([1, -1])
bih = np.array([-0.2862, 0.0034, 0.2059, -0.6544, 0.3244, -0.2472, 0.0852, -0.3050]).astype(np.float32).reshape(
[1, -1])
bhh = np.array([-0.6575, 0.1562, -0.6434, 0.0212, -0.2493, -0.5626, 0.1530, -0.5235]).astype(
np.float32).reshape([1, -1])
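        # The LSTM op here takes a single flat weight tensor: input-hidden weights,
        # hidden-hidden weights, then the two bias vectors, concatenated and reshaped to
        # [-1, 1, 1] (the bidirectional net below also appends the reverse-direction blocks).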
w_np = np.concatenate((wih, whh, bih, bhh), axis=1).reshape([-1, 1, 1])
self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
@ms_function
def construct(self):
return self.lstm(self.x, self.h, self.c, self.w)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_lstm():
seq_len = 5
batch_size = 2
input_size = 10
hidden_size = 2
num_layers = 1
has_bias = True
bidirectional = False
dropout = 0.0
num_directions = 1
if bidirectional:
num_directions = 2
net = LstmNet(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
y, h, c, _, _ = net()
expect_y = np.array([[[-2.1429e-02, 1.1760e-01],
[3.1144e-01, 6.3090e-01]],
[[-5.0190e-04, -4.5812e-02],
[2.0324e-02, 2.0392e-01]],
[[-1.0370e-02, -6.0141e-02],
[6.0931e-02, -1.8913e-02]],
[[-1.6031e-01, -2.3428e-01],
[4.1886e-02, -2.2162e-01]],
[[-3.9243e-02, -3.2950e-02],
[-4.1257e-02, -4.5276e-01]]])
error = np.ones([num_layers, batch_size, hidden_size]) * 1.0e-4
diff = y.asnumpy() - expect_y
assert np.all(diff < error)
assert np.all(-diff < error)
expect_h = np.array([[[-0.0392, -0.0329],
[-0.0413, -0.4528]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-4
diff = h.asnumpy() - expect_h
assert np.all(diff < error)
assert np.all(-diff < error)
expect_c = np.array([[[-0.0984, -0.3665],
[-0.1010, -0.6792]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-4
diff = c.asnumpy() - expect_c
assert np.all(diff < error)
assert np.all(-diff < error)
class BiLstmNet(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(BiLstmNet, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = P.LSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
input_np = np.array([[[-1.7322, 1.6642, -1.1861, 0.2955, -0.7907, 0.2982, -1.3413, 1.0665, -0.0436, -0.1883],
[0.2195, 0.5917, -0.6739, 0.2388, -0.5364, -1.3309, -0.6018, -0.3081, -0.9648, -1.1627]],
[[-0.5094, -2.6025, -0.9302, -1.1937, 0.6501, -0.1903, -0.0661, 0.1080, 0.9829, -0.2280],
[1.3961, 0.2239, -0.1947, -0.3206, 0.5791, 0.3396, 0.1728, -1.2007, -1.0994, -1.3278]],
[[0.1870, -1.1090, -0.9705, 0.2207, 0.3743, 0.1158, -0.5443, -0.5559, 0.1538, -0.3975],
[-0.2347, -0.1245, -0.2335, 0.3164, 1.0997, -0.3928, -1.8517, 1.1136, -1.5051, -0.0071]],
[[1.2739, 2.5438, -0.4289, -0.7981, -1.3682, -2.2509, 0.2028, 1.3410, 2.9502, -1.1650],
[0.1254, 0.2726, 0.0251, 0.9323, 0.7315, 0.8231, -0.2123, -0.6885, 0.9893, -0.2047]],
[[0.1870, -0.9066, 0.7155, 0.5438, -0.9757, -0.5828, -0.3417, 1.5681, 1.0326, -0.0179],
[-0.7746, -1.0695, -0.5278, 2.5307, -0.1002, -1.5773, 0.7717, 1.0266, -0.0798,
1.2333]]]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
self.h = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='h')
self.c = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='c')
wih = np.array([[-0.2959, -0.1142, 0.3662, 0.5406, 0.1738, 0.2697, -0.6960, -0.0464, 0.3486, 0.1888],
[0.3043, 0.1505, -0.1207, -0.2456, 0.2735, 0.6673, -0.3352, -0.6153, -0.5731, -0.2726],
[-0.2657, -0.5570, 0.6785, -0.1861, -0.0652, 0.5757, 0.6442, -0.4068, -0.3260, 0.7054],
[0.6607, 0.6927, -0.1354, 0.2484, 0.2053, 0.5743, -0.0212, 0.3340, -0.5685, -0.5668],
[0.6701, -0.3013, -0.1202, -0.4200, -0.4280, -0.6329, -0.6074, -0.4997, -0.6215, -0.6259],
[0.0299, -0.6071, -0.4683, -0.3363, -0.0044, -0.0007, 0.2700, 0.0202, -0.2880, -0.6869],
[0.3025, -0.2461, -0.5128, 0.6327, -0.1438, -0.5100, 0.1924, 0.2023, 0.3129, 0.2271],
[0.3777, 0.0546, 0.4790, -0.1895, 0.3588, 0.4490, 0.6850, 0.6240, -0.2739, -0.4474]]).astype(
np.float32).reshape([1, -1])
whh = np.array([[0.6346, -0.6366],
[-0.0248, -0.6156],
[-0.3821, 0.6327],
[-0.6132, -0.5071],
[0.4029, 0.0906],
[-0.5671, 0.2556],
[0.0268, -0.4347],
[0.1152, -0.3124]]).astype(np.float32).reshape([1, -1])
bih = np.array([-0.3839, -0.5365, -0.6691, 0.1697, -0.1564, -0.0451, -0.5921, -0.5367]).astype(
np.float32).reshape([1, -1])
bhh = np.array([0.5952, -0.4905, 0.0423, -0.0293, -0.6638, 0.4348, -0.4291, -0.5541]).astype(
np.float32).reshape([1, -1])
wih_reverse = np.array([[-0.2938, 0.0048, 0.2704, -0.3387, -0.4529, -0.2586, 0.1352, -0.1208, -0.1423, -0.0220],
[-0.3701, 0.0201, -0.0255, 0.1340, -0.1938, -0.7056, -0.2303, 0.4814, 0.3636, -0.5018],
[-0.0284, -0.0108, -0.5788, 0.2389, 0.2604, 0.6774, -0.5525, 0.6265, -0.6126, 0.3197],
[-0.6906, 0.6991, -0.6138, 0.0044, 0.5714, 0.4176, 0.5451, -0.5114, -0.2286, 0.1105],
[0.3547, 0.6233, -0.4543, -0.6799, 0.1109, 0.5601, 0.0212, 0.6926, 0.0597, -0.4383],
[-0.1370, -0.5852, 0.0596, 0.5494, 0.5789, -0.0534, 0.1092, 0.3544, -0.1571, 0.4444],
[-0.5886, -0.4765, -0.3837, -0.6634, 0.0963, -0.1385, -0.0837, -0.1354, 0.0547,
-0.2870],
[0.2049, -0.7057, -0.1736, 0.4724, 0.1957, -0.3037, 0.4626, -0.6465, 0.4575,
0.4230]]).astype(np.float32).reshape([1, -1])
whh_reverse = np.array([[0.2339, -0.0307],
[-0.5850, 0.6328],
[0.5856, -0.5601],
[0.4875, -0.6929],
[0.0314, 0.2531],
[-0.2523, 0.3244],
[0.5199, 0.5146],
[0.3968, 0.4511]]).astype(np.float32).reshape([1, -1])
bih_reverse = np.array([-0.1760, 0.2828, 0.2450, -0.4016, -0.4664, 0.4031, -0.1945, -0.1509]).astype(
np.float32).reshape([1, -1])
bhh_reverse = np.array([0.6427, 0.4806, 0.6278, 0.1596, 0.0038, -0.3418, 0.0549, -0.3900]).astype(
np.float32).reshape([1, -1])
w_np = np.concatenate((wih, whh, wih_reverse, whh_reverse, bih, bhh, bih_reverse, bhh_reverse), axis=1).reshape(
[-1, 1, 1])
self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
@ms_function
def construct(self):
return self.lstm(self.x, self.h, self.c, self.w)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bilstm():
seq_len = 5
batch_size = 2
input_size = 10
hidden_size = 2
num_layers = 1
has_bias = True
bidirectional = True
dropout = 0.0
num_directions = 1
if bidirectional:
num_directions = 2
net = BiLstmNet(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
y, h, c, _, _ = net()
expect_y = np.array([[[-0.0826, 0.0209, 0.1715, -0.0072],
[0.1035, 0.0594, -0.0867, -0.1077]],
[[-0.1647, 0.0293, -0.2189, 0.3809],
[0.0466, 0.4461, 0.0784, 0.0905]],
[[-0.0182, 0.0512, 0.1758, -0.1147],
[0.0460, 0.1588, -0.0314, 0.0886]],
[[-0.0330, 0.0551, 0.2084, -0.1154],
[-0.1641, 0.1118, -0.0122, 0.4916]],
[[-0.2997, 0.0223, 0.1328, 0.3377],
[-0.6669, 0.0089, 0.1138, 0.7786]]])
error = np.ones([num_layers, batch_size, hidden_size * num_directions]) * 1.0e-4
diff = y.asnumpy() - expect_y
assert np.all(diff < error)
assert np.all(-diff < error)
expect_h = np.array([[[-0.2997, 0.0223],
[-0.6669, 0.0089]],
[[0.1715, -0.0072],
[-0.0867, -0.1077]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-4
diff = h.asnumpy() - expect_h
assert np.all(diff < error)
assert np.all(-diff < error)
expect_c = np.array([[[-0.6049, 0.0825],
[-0.9433, 0.1006]],
[[0.3037, -0.2036],
[-0.1633, -0.5663]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-3
diff = c.asnumpy() - expect_c
assert np.all(diff < error)
assert np.all(-diff < error)
class MultiLayerBiLstmNet(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(MultiLayerBiLstmNet, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = P.LSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
input_np = np.array([[[-0.1887, -0.4144, -0.0235, 0.7489, 0.7522, 0.5969, 0.3342, 1.2198, 0.6786, -0.9404],
[-0.8643, -1.6835, -2.4965, 2.8093, 0.1741, 0.2707, 0.7387, -0.0939, -1.7990, 0.4765]],
[[-0.5963, -1.2598, -0.7226, 1.1365, -1.7320, -0.7302, 0.1221, -0.2111, -1.6173, -0.0706],
[0.8964, 0.1737, -1.0077, -0.1389, 0.4889, 0.4391, 0.7911, 0.3614, -1.9533, -0.9936]],
[[0.3260, -1.3312, 0.0601, 1.0726, -1.6010, -1.8733, -1.5775, 1.1579, -0.8801, -0.5742],
[-2.2998, -0.6344, -0.5409, -0.9221, -0.6500, 0.1206, 1.5215, 0.7517, 1.3691, 2.0021]],
[[-0.1245, -0.3690, 2.1193, 1.3852, -0.1841, -0.8899, -0.3646, -0.8575, -0.3131, 0.2026],
[1.0218, -1.4331, 0.1744, 0.5442, -0.7808, 0.2527, 0.1566, 1.1484, -0.7766, -0.6747]],
[[-0.6752, 0.9906, -0.4973, 0.3471, -0.1202, -0.4213, 2.0213, 0.0441, 0.9016, 1.0365],
[1.2223, -1.3248, 0.1207, -0.8256, 0.1816, 0.7057, -0.3105, 0.5713, 0.2804,
-1.0685]]]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
self.h = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='h')
self.c = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='c')
wih_l0 = np.array([[0.3715, -0.0723, 0.6017, 0.5115, -0.5357, 0.3794, -0.3752, -0.6205, -0.0370, -0.2904],
[0.7055, -0.4156, -0.3650, -0.0964, 0.4141, -0.2584, -0.4765, -0.0045, 0.2943, -0.2648],
[0.1355, 0.1697, 0.1883, 0.3754, 0.3744, -0.6128, 0.2328, -0.1275, 0.6604, 0.6498],
[-0.0266, 0.5805, -0.5358, -0.0929, 0.0797, 0.3744, 0.3299, -0.3825, 0.5804, -0.0855],
[0.1141, 0.2587, -0.4370, 0.6430, -0.0017, 0.4865, 0.2814, 0.6213, -0.6415, 0.4574],
[-0.3958, -0.5827, -0.1056, 0.6987, -0.6591, -0.1326, 0.5237, 0.4667, -0.7001, -0.2326],
[0.3074, -0.3118, -0.4591, 0.2481, -0.2978, -0.1850, 0.4770, -0.0126, 0.3655, -0.4306],
[0.3033, -0.6264, -0.6551, 0.0069, -0.5238, -0.3950, 0.5681, -0.4931, -0.6258,
0.4079]]).astype(np.float32).reshape([1, -1])
whh_l0 = np.array([[-0.3870, 0.0238],
[-0.3758, 0.2490],
[0.5437, -0.4117],
[0.1181, -0.2043],
[-0.5335, 0.1188],
[-0.0822, 0.2154],
[0.5844, -0.3239],
[-0.6537, 0.0278]]).astype(np.float32).reshape([1, -1])
bih_l0 = np.array([0.5440, 0.5995, 0.0155, -0.6254, 0.5114, 0.3364, -0.1824, -0.6262]).astype(
np.float32).reshape([1, -1])
bhh_l0 = np.array([0.4139, -0.2513, -0.4023, 0.4222, 0.6387, -0.6147, 0.0677, 0.5355]).astype(
np.float32).reshape([1, -1])
wih_reverse_l0 = np.array([[6.5219e-01, 5.6162e-01, -1.8653e-01, 6.8789e-01, 1.3240e-01, 1.7699e-01, 1.2940e-01,
-1.8520e-01, -5.5439e-01, -3.4946e-01],
[3.7645e-01, 6.5475e-01, 3.5964e-01, 2.2433e-01, -1.7869e-01, -2.9047e-01,
1.7615e-01, -5.3353e-01, -7.4204e-02, -2.5270e-01],
[5.8095e-01, -4.6426e-04, 1.9262e-01, -5.1306e-01, -3.6811e-01, 4.4858e-01,
6.2580e-01, 9.5494e-02, -6.9505e-01, 4.9500e-01],
[-3.7810e-01, 1.5485e-01, -1.4735e-01, -1.5327e-01, -4.5702e-01, 3.0816e-01,
-3.4280e-01, 2.1604e-01, 1.4087e-01, -5.7707e-01],
[-3.8700e-01, -6.4653e-01, 6.0653e-01, -4.7297e-01, 6.8413e-02, -1.2681e-01,
6.8464e-02, 6.7011e-01, 3.9950e-01, -2.0577e-01],
[-1.8648e-01, -6.7198e-01, 3.8017e-01, -3.3147e-01, 5.3193e-01, -5.4952e-01,
2.1774e-01, -4.6271e-01, 3.2611e-01, 6.3554e-02],
[-4.5403e-01, -1.5910e-01, -7.5886e-02, 2.6313e-01, 6.8093e-01, -3.9960e-01,
5.5428e-01, 1.0429e-01, 5.1322e-01, 1.9406e-01],
[3.9698e-01, -5.2101e-01, 5.1372e-01, -3.9866e-01, 1.0115e-01, -4.1290e-02,
-3.0980e-01, 2.1607e-01, 4.8420e-01, -1.9267e-01]]).astype(np.float32).reshape(
[1, -1])
whh_reverse_l0 = np.array([[-0.3231, -0.3960],
[-0.1625, -0.3032],
[0.3892, -0.0666],
[0.0159, -0.4870],
[-0.4953, 0.2278],
[-0.5380, -0.5250],
[0.0371, -0.4534],
[-0.5452, 0.5012]]).astype(np.float32).reshape([1, -1])
bih_reverse_l0 = np.array([0.0469, -0.0107, 0.3783, -0.2657, -0.0089, 0.5032, -0.0757, -0.2022]).astype(
np.float32).reshape([1, -1])
bhh_reverse_l0 = np.array([-0.6584, 0.3977, 0.5597, -0.4784, 0.5360, -0.2532, 0.5362, -0.1063]).astype(
np.float32).reshape([1, -1])
wih_l1 = np.array([[0.0602, 0.6977, -0.3882, 0.3734],
[-0.6896, -0.6014, -0.2311, 0.6433],
[-0.6778, -0.5100, -0.1496, 0.5774],
[-0.5824, 0.4656, -0.2835, -0.5688],
[0.5623, 0.3599, 0.1731, 0.3124],
[0.1492, -0.6663, -0.1099, -0.5282],
[0.4696, -0.1795, -0.6712, -0.3903],
[0.4995, 0.0709, -0.1738, 0.2822]]).astype(np.float32).reshape([1, -1])
whh_l1 = np.array([[0.3770, 0.4139],
[0.5351, 0.6394],
[0.3901, -0.1072],
[0.1106, 0.1331],
[0.3970, 0.4693],
[0.2958, -0.3813],
[-0.3064, 0.5519],
[-0.2827, 0.5844]]).astype(np.float32).reshape([1, -1])
bih_l1 = np.array([0.5242, 0.5896, 0.3709, 0.6202, 0.5008, 0.2674, 0.4356, -0.3261]).astype(np.float32).reshape(
[1, -1])
bhh_l1 = np.array([-0.6648, 0.6680, 0.2510, -0.1245, -0.0524, 0.5439, -0.1650, 0.5303]).astype(
np.float32).reshape([1, -1])
wih_reverse_l1 = np.array([[0.6477, 0.4416, 0.3803, -0.4708],
[0.4497, 0.2833, -0.4739, -0.6361],
[-0.5573, -0.3867, -0.0349, -0.4128],
[-0.1545, 0.3720, 0.2354, -0.6090],
[0.5965, 0.6301, -0.4591, -0.0120],
[-0.1253, -0.1881, -0.4388, 0.4335],
[0.1944, -0.1230, -0.6170, 0.1043],
[-0.6700, 0.4343, 0.6474, 0.0113]]).astype(np.float32).reshape([1, -1])
whh_reverse_l1 = np.array([[0.6576, 0.5573],
[0.2318, 0.0187],
[-0.6365, 0.5744],
[-0.6494, -0.1820],
[0.6461, -0.3344],
[0.0906, -0.5405],
[-0.5999, 0.5571],
[-0.0488, 0.5345]]).astype(np.float32).reshape([1, -1])
bih_reverse_l1 = np.array([-0.6058, -0.2812, -0.4449, -0.0802, 0.4931, 0.4066, 0.5960, 0.1968]).astype(
np.float32).reshape([1, -1])
bhh_reverse_l1 = np.array([-0.2490, -0.3402, -0.5089, -0.3875, 0.4852, -0.0402, -0.0072, -0.1017]).astype(
np.float32).reshape([1, -1])
'''
weight
layer0
forward
wih
whh
reverse
wih
whh
layer1
forward
wih
whh
reverse
wih
whh
... ...
bias:
layer0
forward
bih
bhh
reverse
bih
bhh
layer1
forward
bih
bhh
reverse
bih
bhh
... ...
'''
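        # Size check (editor's sketch, not part of the original test): with
        # input_size=10, hidden_size=2, num_layers=2 and bidirectional=True,
        # layer 1 consumes hidden_size * num_directions = 4 input features, so
        # the concatenation below packs
        #   weights: 2*(8*10 + 8*2) + 2*(8*4 + 8*2) = 288 values
        #   biases:  8 vectors * 8 elements         = 64 values
        # for 352 values in total, reshaped to shape (352, 1, 1).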
w_np = np.concatenate(
(wih_l0, whh_l0, wih_reverse_l0, whh_reverse_l0, wih_l1, whh_l1, wih_reverse_l1, whh_reverse_l1,
bih_l0, bhh_l0, bih_reverse_l0, bhh_reverse_l0, bih_l1, bhh_l1, bih_reverse_l1, bhh_reverse_l1),
axis=1).reshape([-1, 1, 1])
self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
@ms_function
def construct(self):
return self.lstm(self.x, self.h, self.c, self.w)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_multi_layer_bilstm():
seq_len = 5
batch_size = 2
input_size = 10
hidden_size = 2
num_layers = 2
has_bias = True
bidirectional = True
dropout = 0.0
num_directions = 1
if bidirectional:
num_directions = 2
net = MultiLayerBiLstmNet(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional,
dropout)
y, h, c, _, _ = net()
expect_y = np.array([[[0.5186, 0.5419, 0.2710, 0.0384],
[0.6196, 0.5539, 0.3266, 0.0866]],
[[0.5244, 0.5276, 0.3042, 0.0510],
[0.5143, 0.4937, 0.2828, 0.0387]],
[[0.5124, 0.5079, 0.2951, 0.0548],
[0.4051, 0.4493, 0.2369, 0.0077]],
[[0.4532, 0.4749, 0.2557, 0.0611],
[0.4879, 0.4812, 0.3160, 0.0368]],
[[0.4535, 0.4806, 0.3880, 0.0462],
[0.4674, 0.4849, 0.3890, 0.1008]]])
error = np.ones([seq_len, batch_size, hidden_size * num_directions]) * 1.0e-4
diff = y.asnumpy() - expect_y
assert np.all(diff < error)
assert np.all(-diff < error)
expect_h = np.array([[[0.4730, 0.1638],
[0.1406, -0.0697]],
[[0.3887, -0.0518],
[-0.3988, -0.0071]],
[[0.4535, 0.4806],
[0.4674, 0.4849]],
[[0.2710, 0.0384],
[0.3266, 0.0866]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-4
diff = h.asnumpy() - expect_h
assert np.all(diff < error)
assert np.all(-diff < error)
expect_c = np.array([[[0.8713, 0.2694],
[0.2075, -0.2201]],
[[0.5084, -0.0964],
[-0.5155, -0.2452]],
[[1.1724, 1.0334],
[1.2003, 1.1058]],
[[0.5179, 0.0750],
[0.5309, 0.2012]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-3
diff = c.asnumpy() - expect_c
assert np.all(diff < error)
assert np.all(-diff < error)
class Grad(nn.Cell):
def __init__(self, network):
super(Grad, self).__init__()
self.network = network
self.weights = ParameterTuple(network.trainable_params())
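        # GradOperation is configured to return gradients for the parameter
        # tuple above (get_by_list=True) and to accept an externally supplied
        # output gradient / sensitivity as the call argument (sens_param=True).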
self.grad = C.GradOperation('grad',
get_by_list=True,
sens_param=True)
@ms_function
def construct(self, output_grad):
weights = self.weights
grads = self.grad(self.network, weights)(output_grad)
return grads
class Net(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(Net, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = P.LSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
input_np = np.array([[[-0.5907, 1.0557, 1.7283, 0.6706, -1.2550, -0.5298, -0.2290, -0.6735, 0.8555, 1.4836],
[-1.7070, -0.5347, -0.9105, -0.2598, 0.0588, 1.5496, 1.0757, 0.3760, -1.2020, -0.2868]],
[[0.0151, 0.2126, 0.8090, -0.5292, -2.5590, 0.4279, -0.3081, -1.4706, -0.0498, 1.2301],
[0.4165, -0.5391, -0.0996, 0.1928, -0.4909, -0.1255, 0.4444, -1.3687, 1.3096, 0.6553]],
[[-0.7802, -0.2083, -0.6388, 1.3757, 0.4293, 0.5363, 0.3202, -0.6687, -1.3864, -0.2953],
[1.0799, -0.7204, 0.1130, -0.5857, -0.4855, -1.1068, 1.0126, 0.8716, 1.5460, -0.7392]],
[[2.2645, -0.6586, -0.2227, 1.4290, -0.5006, -1.6576, -0.1793, 0.5319, 0.1360, 0.2707],
[-0.4071, 0.1575, 1.4199, -0.9156, 0.1855, 0.4947, 1.0460, -0.6365, 0.1191, -0.6374]],
[[0.2468, 1.0815, -0.4893, 0.0664, 0.6405, -2.2967, 0.7612, 0.8759, 0.5685, -1.0999],
[-0.7272, -1.7750, -0.1164, -0.7159, 0.0061, -0.7839, -1.8329, 0.3434, -0.5634,
0.5384]]]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
self.h = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='h')
self.c = Parameter(initializer(
Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='c')
wih_l0 = np.array([[0.2300, 0.6668, 0.4703, 0.0425, 0.0464, 0.6825, 0.2249, -0.4315, -0.2449, 0.2964],
[-0.2811, -0.3444, 0.2557, -0.5137, -0.5518, 0.1652, -0.6720, 0.1066, 0.3586, 0.6299],
[0.5728, -0.1784, 0.5661, 0.4012, 0.3856, -0.1899, 0.3102, 0.3717, -0.5651, 0.1952],
[0.1026, -0.0527, 0.1198, -0.3080, 0.2292, 0.5757, -0.3567, -0.2731, -0.0586, -0.2849],
[0.2194, -0.1622, 0.3219, -0.3008, -0.3713, -0.3034, -0.2385, 0.0412, -0.5205, 0.0280],
[-0.5499, -0.0733, -0.5236, -0.6753, -0.7045, -0.1839, -0.1037, -0.5026, -0.4055, -0.3416],
[0.1573, -0.1301, -0.2882, -0.3464, 0.6643, 0.1980, -0.6804, 0.5359, 0.5996, 0.0124],
[-0.6436, 0.0587, -0.6520, -0.0471, 0.1667, 0.6042, 0.5752, -0.6296, -0.2976,
-0.3757]]).astype(np.float32).reshape([1, -1])
whh_l0 = np.array([[0.3358, 0.2790],
[-0.5355, 0.0989],
[-0.1402, 0.5120],
[0.1335, 0.1653],
[0.3533, -0.3531],
[0.4166, -0.4420],
[-0.5454, -0.1720],
[0.0041, -0.0799]]).astype(np.float32).reshape([1, -1])
bih_l0 = np.array([0.5518, 0.1083, 0.4829, 0.0607, -0.1770, -0.6944, 0.3059, 0.5354]).astype(
np.float32).reshape([1, -1])
bhh_l0 = np.array([0.5025, -0.1261, -0.5405, 0.3220, -0.3441, 0.6488, -0.0284, -0.2334]).astype(
np.float32).reshape([1, -1])
wih_reverse_l0 = np.array(
[[-0.7048, -0.1768, 0.2288, -0.0760, -0.1319, 0.0820, -0.4132, 0.3644, 0.3919, 0.2449],
[0.0551, -0.0530, -0.5883, 0.0799, -0.5025, 0.1500, -0.4067, -0.3764, -0.3018, 0.2467],
[-0.2279, 0.3144, 0.5705, 0.4617, 0.1729, 0.6539, -0.2086, 0.5355, 0.4439, 0.0122],
[0.6967, -0.5245, 0.3527, 0.3386, 0.0429, -0.3803, -0.4328, -0.4767, 0.4481, -0.2405],
[0.6744, -0.2776, 0.0798, 0.1543, 0.6421, 0.6102, 0.3591, -0.4431, -0.6327, -0.0075],
[-0.4520, 0.4201, -0.2374, -0.1556, -0.4175, -0.6834, 0.3096, -0.1581, 0.0127, 0.6872],
[0.1788, -0.5442, -0.3675, -0.2887, -0.3004, 0.5813, 0.1618, 0.6875, -0.4678, 0.0071],
[-0.6453, -0.2528, 0.5675, -0.5154, -0.4129, -0.0214, 0.5539, 0.0343, 0.1712, 0.5644]]).astype(
np.float32).reshape([1, -1])
whh_reverse_l0 = np.array([[-0.6657, 0.6330],
[-0.2290, 0.6556],
[0.4808, -0.2712],
[0.0407, -0.2587],
[0.3837, 0.0382],
[0.2268, 0.1217],
[-0.6404, -0.3336],
[0.5461, -0.0764]]).astype(np.float32).reshape([1, -1])
bih_reverse_l0 = np.array([0.0314, 0.1009, 0.3664, -0.6732, -0.6944, 0.5098, -0.1251, 0.2644]).astype(
np.float32).reshape([1, -1])
bhh_reverse_l0 = np.array([-0.1961, -0.3836, 0.1191, -0.7022, -0.0961, 0.5493, -0.6979, 0.0017]).astype(
np.float32).reshape([1, -1])
wih_l1 = np.array([[1.2746e-01, -3.3346e-01, 1.5589e-01, -4.7986e-01],
[6.5835e-01, 3.8135e-01, -3.8409e-01, -3.6499e-01],
[-6.0374e-04, -1.2227e-01, -1.5955e-01, 4.2772e-01],
[-1.8281e-01, -5.0484e-01, 7.0204e-01, 6.5872e-01],
[3.7765e-01, -4.3494e-01, 3.1503e-01, -4.2504e-02],
[6.3506e-01, -4.3049e-02, -5.7413e-01, -2.5134e-01],
[8.7181e-02, -5.5216e-01, 5.5436e-01, -3.9599e-01],
[4.4611e-01, -4.2690e-01, 6.6142e-01, 6.3882e-01]]).astype(np.float32).reshape([1, -1])
whh_l1 = np.array([[-0.0049, -0.3267],
[0.0863, -0.6277],
[0.4815, -0.2236],
[0.5996, -0.3441],
[0.3959, -0.0249],
[0.3986, -0.0922],
[-0.5321, 0.0877],
[0.2811, -0.0483]]).astype(np.float32).reshape([1, -1])
bih_l1 = np.array([0.0032, -0.0893, 0.5706, 0.3712, 0.0590, 0.0044, 0.2417, 0.1291]).astype(np.float32).reshape(
[1, -1])
bhh_l1 = np.array([-0.0704, 0.3908, -0.1121, 0.6970, -0.6216, 0.6340, -0.2945, 0.5224]).astype(
np.float32).reshape([1, -1])
wih_reverse_l1 = np.array([[-0.2693, 0.3487, 0.0692, 0.0047],
[0.6187, 0.5649, 0.0680, 0.5110],
[-0.5262, -0.3307, -0.3892, 0.5382],
[-0.2925, 0.5185, -0.1385, 0.3431],
[-0.3252, 0.3809, -0.4680, 0.3379],
[0.4763, -0.5465, 0.0033, -0.5144],
[0.3826, -0.3879, -0.2439, 0.2571],
[-0.0422, -0.0359, -0.4197, -0.2209]]).astype(np.float32).reshape([1, -1])
whh_reverse_l1 = np.array([[-0.4691, 0.5944],
[-0.6885, 0.1708],
[0.6391, -0.3690],
[-0.5919, 0.1805],
[-0.6853, -0.6215],
[-0.4635, -0.6714],
[-0.2050, 0.0513],
[0.3411, -0.2833]]).astype(np.float32).reshape([1, -1])
bih_reverse_l1 = np.array([0.5764, -0.7010, -0.0831, -0.3779, -0.2743, 0.0480, -0.2707, -0.5583]).astype(
np.float32).reshape([1, -1])
bhh_reverse_l1 = np.array([0.3379, -0.2671, -0.2789, -0.6611, -0.5542, -0.0188, 0.1831, 0.3612]).astype(
np.float32).reshape([1, -1])
'''
weight
layer0
forward
wih
whh
reverse
wih
whh
layer1
forward
wih
whh
reverse
wih
whh
... ...
bias:
layer0
forward
bih
bhh
reverse
bih
bhh
layer1
forward
bih
bhh
reverse
bih
bhh
... ...
'''
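        # Same packing scheme as in MultiLayerBiLstmNet above: all layer/direction
        # weight matrices first, then the corresponding bias vectors, giving a
        # flattened tensor of shape (352, 1, 1) for this configuration.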
w_np = np.concatenate(
(wih_l0, whh_l0, wih_reverse_l0, whh_reverse_l0, wih_l1, whh_l1, wih_reverse_l1, whh_reverse_l1,
bih_l0, bhh_l0, bih_reverse_l0, bhh_reverse_l0, bih_l1, bhh_l1, bih_reverse_l1, bhh_reverse_l1),
axis=1).reshape([-1, 1, 1])
self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
@ms_function
def construct(self):
return self.lstm(self.x, self.h, self.c, self.w)[0]
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_grad():
seq_len = 5
batch_size = 2
input_size = 10
hidden_size = 2
num_layers = 2
has_bias = True
bidirectional = True
dropout = 0.0
num_directions = 1
if bidirectional:
num_directions = 2
net = Grad(Net(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout))
dy = np.array([[[-3.5471e-01, 7.0540e-01, -7.5945e-01, -1.2322e+00],
[2.7161e-01, 1.0865e+00, -2.1827e-03, 8.8031e-01]],
[[-4.2431e-01, 1.4955e+00, 4.6576e-01, -2.7230e+00],
[-4.0418e-01, -2.3282e-01, 9.1253e-01, -2.7379e-01]],
[[-1.3654e+00, 1.9251e+00, -1.6808e+00, -3.2642e-02],
[-4.6481e-01, 1.3138e+00, 1.2956e-02, 1.0198e+00]],
[[1.2914e+00, -2.3753e-01, 9.4763e-01, 1.7930e-02],
[5.3589e-01, -1.0981e-01, 1.5377e+00, 6.2709e-01]],
[[-1.6032e+00, -1.8818e-01, 7.0441e-01, -2.8765e+00],
[1.0065e-01, 9.2045e-01, 2.7426e-01, 2.6196e-01]]]).astype(np.float32)
dx, dh, dc, dw = net(Tensor(dy))
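    # The gradients are returned in the order of Net's trainable parameters,
    # i.e. with respect to the input x, the initial states h and c, and the
    # flattened weight w; only dx, dh and dc are compared below.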
expect_dx = np.array([[[0.01697153, -0.0096909, 0.01306139, 0.00863109, -0.00122794, -0.00746152, -0.00879683,
0.00643571, 0.0015958, 0.01480642],
[0.05794962, -0.02326604, 0.01862703, 0.02053947, 0.02607713, -0.01278067, 0.04250786,
-0.02686035, -0.07441005, 0.00806021]],
[[-0.026675, -0.01024149, -0.02492021, -0.00457492, -0.0085863, 0.02341479, 0.02188834,
-0.04139283, -0.01367766, -0.00305065],
[-0.00762213, -0.01914341, -0.03233681, -0.03580827, -0.02201782, -0.00153102, -0.00097455,
-0.02708411, -0.03711082, -0.02804472]],
[[-0.0040581, -0.00116989, 0.01652471, 0.02182668, -0.02547193, -0.04171437, 0.04185125,
0.01589275, -0.00517019, 0.06554792],
[-0.02294365, -0.00589715, -0.01425684, -0.01499153, -0.05327821, -0.03133425, 0.00755623,
-0.04192506, -0.02122675, -0.01214214]],
[[-0.00041491, 0.00240709, -0.00942589, 0.00719656, 0.01438523, 0.00931082, 0.00534746,
-0.0004002, 0.01299422, 0.00181135],
[-0.01704482, -0.00887032, -0.01746774, -0.03289891, -0.04259495, -0.01928082, -0.01570587,
-0.01242383, -0.01799918, -0.00610236]],
[[0.00207505, -0.0008109, 0.00114241, 0.00251349, -0.00065676, 0.00151333, -0.00077485,
-0.00034354, -0.00028289, -0.0006986],
[-0.00240827, -0.0001309, 0.01401818, -0.01272261, -0.02665948, -0.01095799, -0.007761,
-0.0087831, 0.01038029, 0.02021475]]]).astype(np.float32)
error = np.ones(dx.asnumpy().shape) * 1.0e-4
diff = dx.asnumpy() - expect_dx
assert np.all(diff < error)
assert np.all(-diff < error)
expect_dh = np.array([[[-0.00696833, 0.00212885],
[0.01416209, 0.0002706]],
[[0.00297393, -0.0021012],
[0.00458834, 0.00400078]],
[[0.08658642, -0.10590762],
[0.1516603, -0.10525411]],
[[0.11888178, -0.04759264],
[0.05898442, -0.08082277]]]).astype(np.float32)
error = np.ones(dh.asnumpy().shape) * 1.0e-4
diff = dh.asnumpy() - expect_dh
assert np.all(diff < error)
assert np.all(-diff < error)
expect_dc = np.array([[[0.00887521, -0.01391486],
[0.03858164, -0.04941981]],
[[0.00665188, 0.00184223],
[-0.00541833, 0.01410913]],
[[-0.2068854, 0.5585638],
[0.01735374, 0.3537254]],
[[0.20350647, -0.2792883],
[0.18456826, 0.02278761]]]).astype(np.float32)
error = np.ones(dc.asnumpy().shape) * 1.0e-4
diff = dc.asnumpy() - expect_dc
assert np.all(diff < error)
assert np.all(-diff < error)
class LstmNetWithDropout(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(LstmNetWithDropout, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = P.LSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
input_np = np.array([[[-2.48789445e-01, -2.18991071e-01, -8.41492534e-01, -5.73351622e-01, 8.20644796e-02,
4.14313585e-01, -1.30143976e+00, -4.43366140e-01, -1.21003680e-01, -2.11284861e-01],
[9.94045794e-01, 3.18840504e-01, 4.81898338e-01, -4.83986028e-02, -9.26419497e-02,
-2.57977694e-01, 1.82191110e+00, 5.95121741e-01, 6.30752742e-01, -6.01903737e-01]],
[[7.67166913e-01, 5.41202351e-02, -1.24094069e+00, 1.38814664e+00, 2.05845284e+00,
7.29744852e-01, -1.12405574e+00, 3.78702253e-01, 2.28524983e-01, 2.02445173e+00],
[-1.85264975e-01, -4.55119252e-01, 1.23624969e+00, 1.24347043e+00, -1.68316591e+00,
-3.55918944e-01, 3.07149738e-01, -3.44966322e-01, -1.08978853e-01, 1.80912763e-01]],
[[-6.47622466e-01, 1.31204927e+00, 6.47477210e-01, -7.93370783e-01, 3.08402872e-04,
-5.12097359e-01, -1.69133916e-01, 8.57838035e-01, -3.63963723e-01, 6.35978997e-01],
[-3.92911851e-01, 8.27334300e-02, -1.11347124e-01, 8.79961967e-01, 6.02812059e-02,
-3.76448452e-01, -1.48800862e+00, -9.48699772e-01, -1.24202335e+00, 1.65264118e+00]],
[[4.05404866e-01, 5.67396320e-02, -2.05705926e-01, -8.70196745e-02, -7.34854519e-01,
-1.07580565e-01, 1.33716142e+00, -1.18140256e+00, 2.66074872e+00, -3.26788813e-01],
[6.97183967e-01, -2.32625628e+00, 1.20393467e+00, -2.32532692e+00, 2.03347206e+00,
-7.58083522e-01, 1.35564697e+00, -2.32149422e-01, 9.85125721e-01, 1.00944638e+00]],
[[9.89606023e-01, -5.30669808e-01, -2.66087383e-01, 8.14819038e-01, 1.07067376e-01,
-1.76214290e+00, -5.04977465e-01, 1.94490123e+00, 5.10450959e-01, -2.29238123e-01],
[-1.32928836e+00, -1.18175328e-01, -5.17818272e-01, -1.45089477e-01, 7.13987231e-01,
-7.41293788e-01, -3.67817104e-01, 1.18039274e+00, -6.03745162e-01,
-5.83392143e-01]]]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
self.h = Parameter(initializer(
Tensor(np.array([[[-0.47240502, 1.6824378],
[-0.00978304, 0.8179632]]]).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='h')
self.c = Parameter(initializer(
Tensor(np.array([[[-0.85975164, -0.3198615],
[-0.9821871, 0.26311848]]]).astype(np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='c')
wih = np.array([[0.4473, -0.5509, -0.1585, -0.6215, 0.6228, 0.3462, 0.3015, -0.3714, 0.3119, -0.1151],
[-0.6923, 0.1373, 0.2214, 0.2280, 0.6960, -0.6368, 0.5725, -0.1359, 0.0742, -0.6777],
[-0.4432, 0.6162, -0.1066, -0.6138, -0.2529, -0.5638, -0.0603, 0.3039, 0.1068, -0.5300],
[0.4337, -0.1215, -0.5088, -0.0045, 0.2828, 0.1411, 0.0741, 0.6936, -0.4603, 0.6986],
[-0.2079, -0.5518, 0.5375, -0.2168, 0.3662, 0.0948, -0.0564, -0.1808, -0.6672, -0.2410],
[0.5142, 0.0790, -0.1123, -0.2351, 0.3982, -0.6351, 0.5906, 0.3917, -0.0850, -0.5397],
[-0.4795, -0.6576, 0.5693, 0.0047, -0.6626, 0.1013, -0.4015, -0.4040, -0.2817, 0.4430],
[0.0251, -0.3035, -0.6026, 0.2693, -0.2749, 0.1501, -0.5778, 0.5570, -0.7065, -0.6196]]).astype(
np.float32).reshape([1, -1])
whh = np.array([[-0.4344, -0.2529],
[0.0377, 0.7046],
[-0.0579, -0.5240],
[-0.4801, -0.1149],
[-0.4010, -0.5614],
[0.4721, 0.4366],
[-0.4282, 0.0816],
[0.1574, -0.3359]]).astype(np.float32).reshape([1, -1])
bih = np.array([0.2431, 0.5967, -0.2417, -0.4169, -0.5326, 0.5685, -0.2971, -0.4326]).astype(
np.float32).reshape([1, -1])
bhh = np.array([-0.1751, -0.2270, -0.3980, -0.4983, -0.3527, -0.2774, 0.6371, -0.3330]).astype(
np.float32).reshape([1, -1])
w_np = np.concatenate((wih, whh, bih, bhh), axis=1).reshape([-1, 1, 1])
self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
def construct(self):
return self.lstm(self.x, self.h, self.c, self.w)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_lstm_dropout():
seq_len = 5
batch_size = 2
input_size = 10
hidden_size = 2
num_layers = 1
has_bias = True
bidirectional = False
dropout = 1.0
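    # dropout is only applied between stacked LSTM layers, so with a single
    # layer the value 1.0 has no effect and the expected outputs below remain
    # deterministic.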
num_directions = 1
if bidirectional:
num_directions = 2
net = LstmNetWithDropout(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
y, h, c, _, _ = net()
expect_y = np.array([[[-0.45210335, -0.0844336],
[-0.14677924, 0.07140275]],
[[-0.18895914, -0.11084185],
[-0.26356253, -0.06367199]],
[[-0.33480304, 0.00812318],
[-0.0887147, -0.1564593]],
[[-0.33231455, 0.00743252],
[0.428218, 0.00723737]],
[[-0.20026046, 0.43491203],
[0.17739448, 0.5313992]]])
error = np.ones([num_layers, batch_size, hidden_size]) * 1.0e-4
diff = y.asnumpy() - expect_y
assert np.all(diff < error)
assert np.all(-diff < error)
|
[
"numpy.ones",
"mindspore.context.set_context",
"numpy.array",
"mindspore.common.tensor.Tensor",
"mindspore.ops.composite.GradOperation",
"numpy.concatenate",
"numpy.all",
"mindspore.ops.operations.LSTM"
] |
[((1095, 1135), 'mindspore.context.set_context', 'context.set_context', ([], {'device_target': '"""GPU"""'}), "(device_target='GPU')\n", (1114, 1135), True, 'import mindspore.context as context\n'), ((6168, 6424), 'numpy.array', 'np.array', (['[[[-0.021429, 0.1176], [0.31144, 0.6309]], [[-0.0005019, -0.045812], [\n 0.020324, 0.20392]], [[-0.01037, -0.060141], [0.060931, -0.018913]], [[\n -0.16031, -0.23428], [0.041886, -0.22162]], [[-0.039243, -0.03295], [-\n 0.041257, -0.45276]]]'], {}), '([[[-0.021429, 0.1176], [0.31144, 0.6309]], [[-0.0005019, -0.045812\n ], [0.020324, 0.20392]], [[-0.01037, -0.060141], [0.060931, -0.018913]],\n [[-0.16031, -0.23428], [0.041886, -0.22162]], [[-0.039243, -0.03295], [\n -0.041257, -0.45276]]])\n', (6176, 6424), True, 'import numpy as np\n'), ((6810, 6830), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (6816, 6830), True, 'import numpy as np\n'), ((6842, 6863), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (6848, 6863), True, 'import numpy as np\n'), ((6880, 6932), 'numpy.array', 'np.array', (['[[[-0.0392, -0.0329], [-0.0413, -0.4528]]]'], {}), '([[[-0.0392, -0.0329], [-0.0413, -0.4528]]])\n', (6888, 6932), True, 'import numpy as np\n'), ((7089, 7109), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (7095, 7109), True, 'import numpy as np\n'), ((7121, 7142), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (7127, 7142), True, 'import numpy as np\n'), ((7159, 7210), 'numpy.array', 'np.array', (['[[[-0.0984, -0.3665], [-0.101, -0.6792]]]'], {}), '([[[-0.0984, -0.3665], [-0.101, -0.6792]]])\n', (7167, 7210), True, 'import numpy as np\n'), ((7368, 7388), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (7374, 7388), True, 'import numpy as np\n'), ((7400, 7421), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (7406, 7421), True, 'import numpy as np\n'), ((13808, 14202), 'numpy.array', 'np.array', (['[[[-0.0826, 0.0209, 0.1715, -0.0072], [0.1035, 0.0594, -0.0867, -0.1077]],\n [[-0.1647, 0.0293, -0.2189, 0.3809], [0.0466, 0.4461, 0.0784, 0.0905]],\n [[-0.0182, 0.0512, 0.1758, -0.1147], [0.046, 0.1588, -0.0314, 0.0886]],\n [[-0.033, 0.0551, 0.2084, -0.1154], [-0.1641, 0.1118, -0.0122, 0.4916]],\n [[-0.2997, 0.0223, 0.1328, 0.3377], [-0.6669, 0.0089, 0.1138, 0.7786]]]'], {}), '([[[-0.0826, 0.0209, 0.1715, -0.0072], [0.1035, 0.0594, -0.0867, -\n 0.1077]], [[-0.1647, 0.0293, -0.2189, 0.3809], [0.0466, 0.4461, 0.0784,\n 0.0905]], [[-0.0182, 0.0512, 0.1758, -0.1147], [0.046, 0.1588, -0.0314,\n 0.0886]], [[-0.033, 0.0551, 0.2084, -0.1154], [-0.1641, 0.1118, -0.0122,\n 0.4916]], [[-0.2997, 0.0223, 0.1328, 0.3377], [-0.6669, 0.0089, 0.1138,\n 0.7786]]])\n', (13816, 14202), True, 'import numpy as np\n'), ((14549, 14569), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (14555, 14569), True, 'import numpy as np\n'), ((14581, 14602), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (14587, 14602), True, 'import numpy as np\n'), ((14619, 14715), 'numpy.array', 'np.array', (['[[[-0.2997, 0.0223], [-0.6669, 0.0089]], [[0.1715, -0.0072], [-0.0867, -\n 0.1077]]]'], {}), '([[[-0.2997, 0.0223], [-0.6669, 0.0089]], [[0.1715, -0.0072], [-\n 0.0867, -0.1077]]])\n', (14627, 14715), True, 'import numpy as np\n'), ((14919, 14939), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (14925, 14939), True, 'import numpy as np\n'), ((14951, 14972), 'numpy.all', 'np.all', (['(-diff < error)'], {}), 
'(-diff < error)\n', (14957, 14972), True, 'import numpy as np\n'), ((14989, 15085), 'numpy.array', 'np.array', (['[[[-0.6049, 0.0825], [-0.9433, 0.1006]], [[0.3037, -0.2036], [-0.1633, -\n 0.5663]]]'], {}), '([[[-0.6049, 0.0825], [-0.9433, 0.1006]], [[0.3037, -0.2036], [-\n 0.1633, -0.5663]]])\n', (14997, 15085), True, 'import numpy as np\n'), ((15290, 15310), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (15296, 15310), True, 'import numpy as np\n'), ((15322, 15343), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (15328, 15343), True, 'import numpy as np\n'), ((25993, 26367), 'numpy.array', 'np.array', (['[[[0.5186, 0.5419, 0.271, 0.0384], [0.6196, 0.5539, 0.3266, 0.0866]], [[\n 0.5244, 0.5276, 0.3042, 0.051], [0.5143, 0.4937, 0.2828, 0.0387]], [[\n 0.5124, 0.5079, 0.2951, 0.0548], [0.4051, 0.4493, 0.2369, 0.0077]], [[\n 0.4532, 0.4749, 0.2557, 0.0611], [0.4879, 0.4812, 0.316, 0.0368]], [[\n 0.4535, 0.4806, 0.388, 0.0462], [0.4674, 0.4849, 0.389, 0.1008]]]'], {}), '([[[0.5186, 0.5419, 0.271, 0.0384], [0.6196, 0.5539, 0.3266, 0.0866\n ]], [[0.5244, 0.5276, 0.3042, 0.051], [0.5143, 0.4937, 0.2828, 0.0387]],\n [[0.5124, 0.5079, 0.2951, 0.0548], [0.4051, 0.4493, 0.2369, 0.0077]], [\n [0.4532, 0.4749, 0.2557, 0.0611], [0.4879, 0.4812, 0.316, 0.0368]], [[\n 0.4535, 0.4806, 0.388, 0.0462], [0.4674, 0.4849, 0.389, 0.1008]]])\n', (26001, 26367), True, 'import numpy as np\n'), ((26716, 26736), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (26722, 26736), True, 'import numpy as np\n'), ((26748, 26769), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (26754, 26769), True, 'import numpy as np\n'), ((26786, 26960), 'numpy.array', 'np.array', (['[[[0.473, 0.1638], [0.1406, -0.0697]], [[0.3887, -0.0518], [-0.3988, -\n 0.0071]], [[0.4535, 0.4806], [0.4674, 0.4849]], [[0.271, 0.0384], [\n 0.3266, 0.0866]]]'], {}), '([[[0.473, 0.1638], [0.1406, -0.0697]], [[0.3887, -0.0518], [-\n 0.3988, -0.0071]], [[0.4535, 0.4806], [0.4674, 0.4849]], [[0.271, \n 0.0384], [0.3266, 0.0866]]])\n', (26794, 26960), True, 'import numpy as np\n'), ((27265, 27285), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (27271, 27285), True, 'import numpy as np\n'), ((27297, 27318), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (27303, 27318), True, 'import numpy as np\n'), ((27335, 27510), 'numpy.array', 'np.array', (['[[[0.8713, 0.2694], [0.2075, -0.2201]], [[0.5084, -0.0964], [-0.5155, -\n 0.2452]], [[1.1724, 1.0334], [1.2003, 1.1058]], [[0.5179, 0.075], [\n 0.5309, 0.2012]]]'], {}), '([[[0.8713, 0.2694], [0.2075, -0.2201]], [[0.5084, -0.0964], [-\n 0.5155, -0.2452]], [[1.1724, 1.0334], [1.2003, 1.1058]], [[0.5179, \n 0.075], [0.5309, 0.2012]]])\n', (27343, 27510), True, 'import numpy as np\n'), ((27815, 27835), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (27821, 27835), True, 'import numpy as np\n'), ((27847, 27868), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (27853, 27868), True, 'import numpy as np\n'), ((41021, 41041), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (41027, 41041), True, 'import numpy as np\n'), ((41053, 41074), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (41059, 41074), True, 'import numpy as np\n'), ((41627, 41647), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (41633, 41647), True, 'import numpy as np\n'), ((41659, 41680), 'numpy.all', 'np.all', 
(['(-diff < error)'], {}), '(-diff < error)\n', (41665, 41680), True, 'import numpy as np\n'), ((42231, 42251), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (42237, 42251), True, 'import numpy as np\n'), ((42263, 42284), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (42269, 42284), True, 'import numpy as np\n'), ((47996, 48302), 'numpy.array', 'np.array', (['[[[-0.45210335, -0.0844336], [-0.14677924, 0.07140275]], [[-0.18895914, -\n 0.11084185], [-0.26356253, -0.06367199]], [[-0.33480304, 0.00812318], [\n -0.0887147, -0.1564593]], [[-0.33231455, 0.00743252], [0.428218, \n 0.00723737]], [[-0.20026046, 0.43491203], [0.17739448, 0.5313992]]]'], {}), '([[[-0.45210335, -0.0844336], [-0.14677924, 0.07140275]], [[-\n 0.18895914, -0.11084185], [-0.26356253, -0.06367199]], [[-0.33480304, \n 0.00812318], [-0.0887147, -0.1564593]], [[-0.33231455, 0.00743252], [\n 0.428218, 0.00723737]], [[-0.20026046, 0.43491203], [0.17739448, \n 0.5313992]]])\n', (48004, 48302), True, 'import numpy as np\n'), ((48631, 48651), 'numpy.all', 'np.all', (['(diff < error)'], {}), '(diff < error)\n', (48637, 48651), True, 'import numpy as np\n'), ((48663, 48684), 'numpy.all', 'np.all', (['(-diff < error)'], {}), '(-diff < error)\n', (48669, 48684), True, 'import numpy as np\n'), ((1424, 1501), 'mindspore.ops.operations.LSTM', 'P.LSTM', (['input_size', 'hidden_size', 'num_layers', 'has_bias', 'bidirectional', 'dropout'], {}), '(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)\n', (1430, 1501), True, 'from mindspore.ops import operations as P\n'), ((6709, 6755), 'numpy.ones', 'np.ones', (['[num_layers, batch_size, hidden_size]'], {}), '([num_layers, batch_size, hidden_size])\n', (6716, 6755), True, 'import numpy as np\n'), ((6971, 7034), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (6978, 7034), True, 'import numpy as np\n'), ((7250, 7313), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (7257, 7313), True, 'import numpy as np\n'), ((7714, 7791), 'mindspore.ops.operations.LSTM', 'P.LSTM', (['input_size', 'hidden_size', 'num_layers', 'has_bias', 'bidirectional', 'dropout'], {}), '(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)\n', (7720, 7791), True, 'from mindspore.ops import operations as P\n'), ((14431, 14494), 'numpy.ones', 'np.ones', (['[num_layers, batch_size, hidden_size * num_directions]'], {}), '([num_layers, batch_size, hidden_size * num_directions])\n', (14438, 14494), True, 'import numpy as np\n'), ((14801, 14864), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (14808, 14864), True, 'import numpy as np\n'), ((15172, 15235), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (15179, 15235), True, 'import numpy as np\n'), ((15656, 15733), 'mindspore.ops.operations.LSTM', 'P.LSTM', (['input_size', 'hidden_size', 'num_layers', 'has_bias', 'bidirectional', 'dropout'], {}), '(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)\n', (15662, 15733), True, 'from mindspore.ops import operations as P\n'), ((26601, 26661), 'numpy.ones', 'np.ones', (['[seq_len, batch_size, hidden_size * 
num_directions]'], {}), '([seq_len, batch_size, hidden_size * num_directions])\n', (26608, 26661), True, 'import numpy as np\n'), ((27147, 27210), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (27154, 27210), True, 'import numpy as np\n'), ((27697, 27760), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (27704, 27760), True, 'import numpy as np\n'), ((28079, 28137), 'mindspore.ops.composite.GradOperation', 'C.GradOperation', (['"""grad"""'], {'get_by_list': '(True)', 'sens_param': '(True)'}), "('grad', get_by_list=True, sens_param=True)\n", (28094, 28137), True, 'from mindspore.ops import composite as C\n'), ((28660, 28737), 'mindspore.ops.operations.LSTM', 'P.LSTM', (['input_size', 'hidden_size', 'num_layers', 'has_bias', 'bidirectional', 'dropout'], {}), '(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)\n', (28666, 28737), True, 'from mindspore.ops import operations as P\n'), ((39060, 39070), 'mindspore.common.tensor.Tensor', 'Tensor', (['dy'], {}), '(dy)\n', (39066, 39070), False, 'from mindspore.common.tensor import Tensor\n'), ((42595, 42672), 'mindspore.ops.operations.LSTM', 'P.LSTM', (['input_size', 'hidden_size', 'num_layers', 'has_bias', 'bidirectional', 'dropout'], {}), '(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)\n', (42601, 42672), True, 'from mindspore.ops import operations as P\n'), ((48530, 48576), 'numpy.ones', 'np.ones', (['[num_layers, batch_size, hidden_size]'], {}), '([num_layers, batch_size, hidden_size])\n', (48537, 48576), True, 'import numpy as np\n'), ((38296, 38727), 'numpy.array', 'np.array', (['[[[-0.35471, 0.7054, -0.75945, -1.2322], [0.27161, 1.0865, -0.0021827, \n 0.88031]], [[-0.42431, 1.4955, 0.46576, -2.723], [-0.40418, -0.23282, \n 0.91253, -0.27379]], [[-1.3654, 1.9251, -1.6808, -0.032642], [-0.46481,\n 1.3138, 0.012956, 1.0198]], [[1.2914, -0.23753, 0.94763, 0.01793], [\n 0.53589, -0.10981, 1.5377, 0.62709]], [[-1.6032, -0.18818, 0.70441, -\n 2.8765], [0.10065, 0.92045, 0.27426, 0.26196]]]'], {}), '([[[-0.35471, 0.7054, -0.75945, -1.2322], [0.27161, 1.0865, -\n 0.0021827, 0.88031]], [[-0.42431, 1.4955, 0.46576, -2.723], [-0.40418, \n -0.23282, 0.91253, -0.27379]], [[-1.3654, 1.9251, -1.6808, -0.032642],\n [-0.46481, 1.3138, 0.012956, 1.0198]], [[1.2914, -0.23753, 0.94763, \n 0.01793], [0.53589, -0.10981, 1.5377, 0.62709]], [[-1.6032, -0.18818, \n 0.70441, -2.8765], [0.10065, 0.92045, 0.27426, 0.26196]]])\n', (38304, 38727), True, 'import numpy as np\n'), ((39088, 40474), 'numpy.array', 'np.array', (['[[[0.01697153, -0.0096909, 0.01306139, 0.00863109, -0.00122794, -0.00746152,\n -0.00879683, 0.00643571, 0.0015958, 0.01480642], [0.05794962, -\n 0.02326604, 0.01862703, 0.02053947, 0.02607713, -0.01278067, 0.04250786,\n -0.02686035, -0.07441005, 0.00806021]], [[-0.026675, -0.01024149, -\n 0.02492021, -0.00457492, -0.0085863, 0.02341479, 0.02188834, -\n 0.04139283, -0.01367766, -0.00305065], [-0.00762213, -0.01914341, -\n 0.03233681, -0.03580827, -0.02201782, -0.00153102, -0.00097455, -\n 0.02708411, -0.03711082, -0.02804472]], [[-0.0040581, -0.00116989, \n 0.01652471, 0.02182668, -0.02547193, -0.04171437, 0.04185125, \n 0.01589275, -0.00517019, 0.06554792], [-0.02294365, -0.00589715, -\n 0.01425684, -0.01499153, -0.05327821, -0.03133425, 0.00755623, -\n 0.04192506, -0.02122675, -0.01214214]], 
[[-0.00041491, 0.00240709, -\n 0.00942589, 0.00719656, 0.01438523, 0.00931082, 0.00534746, -0.0004002,\n 0.01299422, 0.00181135], [-0.01704482, -0.00887032, -0.01746774, -\n 0.03289891, -0.04259495, -0.01928082, -0.01570587, -0.01242383, -\n 0.01799918, -0.00610236]], [[0.00207505, -0.0008109, 0.00114241, \n 0.00251349, -0.00065676, 0.00151333, -0.00077485, -0.00034354, -\n 0.00028289, -0.0006986], [-0.00240827, -0.0001309, 0.01401818, -\n 0.01272261, -0.02665948, -0.01095799, -0.007761, -0.0087831, 0.01038029,\n 0.02021475]]]'], {}), '([[[0.01697153, -0.0096909, 0.01306139, 0.00863109, -0.00122794, -\n 0.00746152, -0.00879683, 0.00643571, 0.0015958, 0.01480642], [\n 0.05794962, -0.02326604, 0.01862703, 0.02053947, 0.02607713, -\n 0.01278067, 0.04250786, -0.02686035, -0.07441005, 0.00806021]], [[-\n 0.026675, -0.01024149, -0.02492021, -0.00457492, -0.0085863, 0.02341479,\n 0.02188834, -0.04139283, -0.01367766, -0.00305065], [-0.00762213, -\n 0.01914341, -0.03233681, -0.03580827, -0.02201782, -0.00153102, -\n 0.00097455, -0.02708411, -0.03711082, -0.02804472]], [[-0.0040581, -\n 0.00116989, 0.01652471, 0.02182668, -0.02547193, -0.04171437, \n 0.04185125, 0.01589275, -0.00517019, 0.06554792], [-0.02294365, -\n 0.00589715, -0.01425684, -0.01499153, -0.05327821, -0.03133425, \n 0.00755623, -0.04192506, -0.02122675, -0.01214214]], [[-0.00041491, \n 0.00240709, -0.00942589, 0.00719656, 0.01438523, 0.00931082, 0.00534746,\n -0.0004002, 0.01299422, 0.00181135], [-0.01704482, -0.00887032, -\n 0.01746774, -0.03289891, -0.04259495, -0.01928082, -0.01570587, -\n 0.01242383, -0.01799918, -0.00610236]], [[0.00207505, -0.0008109, \n 0.00114241, 0.00251349, -0.00065676, 0.00151333, -0.00077485, -\n 0.00034354, -0.00028289, -0.0006986], [-0.00240827, -0.0001309, \n 0.01401818, -0.01272261, -0.02665948, -0.01095799, -0.007761, -\n 0.0087831, 0.01038029, 0.02021475]]])\n', (39096, 40474), True, 'import numpy as np\n'), ((41092, 41336), 'numpy.array', 'np.array', (['[[[-0.00696833, 0.00212885], [0.01416209, 0.0002706]], [[0.00297393, -\n 0.0021012], [0.00458834, 0.00400078]], [[0.08658642, -0.10590762], [\n 0.1516603, -0.10525411]], [[0.11888178, -0.04759264], [0.05898442, -\n 0.08082277]]]'], {}), '([[[-0.00696833, 0.00212885], [0.01416209, 0.0002706]], [[\n 0.00297393, -0.0021012], [0.00458834, 0.00400078]], [[0.08658642, -\n 0.10590762], [0.1516603, -0.10525411]], [[0.11888178, -0.04759264], [\n 0.05898442, -0.08082277]]])\n', (41100, 41336), True, 'import numpy as np\n'), ((41698, 41940), 'numpy.array', 'np.array', (['[[[0.00887521, -0.01391486], [0.03858164, -0.04941981]], [[0.00665188, \n 0.00184223], [-0.00541833, 0.01410913]], [[-0.2068854, 0.5585638], [\n 0.01735374, 0.3537254]], [[0.20350647, -0.2792883], [0.18456826, \n 0.02278761]]]'], {}), '([[[0.00887521, -0.01391486], [0.03858164, -0.04941981]], [[\n 0.00665188, 0.00184223], [-0.00541833, 0.01410913]], [[-0.2068854, \n 0.5585638], [0.01735374, 0.3537254]], [[0.20350647, -0.2792883], [\n 0.18456826, 0.02278761]]])\n', (41706, 41940), True, 'import numpy as np\n'), ((1522, 2461), 'numpy.array', 'np.array', (['[[[0.6755, -1.6607, 0.1367, -0.9209, -1.7088, 0.3953, 2.712, 0.1103, 0.1504,\n -0.3611], [0.4276, -0.785, -0.3758, 0.8604, -0.1361, -1.3618, -0.6251, \n -0.8391, 0.8142, 0.4068]], [[-0.6424, -0.6095, 0.6639, -0.7253, 2.119, \n -0.284, 0.3858, 0.1691, 0.6764, 1.2903], [0.7918, 0.4147, -0.5089, -\n 0.3582, -1.4279, -0.7975, -0.039, -0.4718, 0.4322, -0.7995]], [[-1.5612,\n 0.012, -0.7289, -1.2479, -0.6197, -0.6099, 0.9543, 0.4362, -1.3141, \n 
0.4273], [-0.6656, -0.6626, -0.5883, -0.6922, 0.5512, 1.7031, -1.2812, \n -0.2004, -0.9224, 0.4106]], [[-0.9667, -0.6296, -0.731, 1.2503, -0.165,\n 1.205, -0.1704, -0.5215, 0.1595, 0.3904], [0.1026, -0.6821, -0.4387, -\n 1.1637, -0.5, 0.059, 0.5219, -0.6835, 2.4406, 0.7135]], [[-0.471, \n 0.6558, -0.3144, -1.2213, 0.1556, -0.3836, -0.1081, -0.144, -1.1231, \n 0.6279], [-0.8449, -0.2184, -0.1806, -0.0615, -0.566, -0.3556, 1.6891, \n -1.0286, 1.3361, -0.4313]]]'], {}), '([[[0.6755, -1.6607, 0.1367, -0.9209, -1.7088, 0.3953, 2.712, \n 0.1103, 0.1504, -0.3611], [0.4276, -0.785, -0.3758, 0.8604, -0.1361, -\n 1.3618, -0.6251, -0.8391, 0.8142, 0.4068]], [[-0.6424, -0.6095, 0.6639,\n -0.7253, 2.119, -0.284, 0.3858, 0.1691, 0.6764, 1.2903], [0.7918, \n 0.4147, -0.5089, -0.3582, -1.4279, -0.7975, -0.039, -0.4718, 0.4322, -\n 0.7995]], [[-1.5612, 0.012, -0.7289, -1.2479, -0.6197, -0.6099, 0.9543,\n 0.4362, -1.3141, 0.4273], [-0.6656, -0.6626, -0.5883, -0.6922, 0.5512, \n 1.7031, -1.2812, -0.2004, -0.9224, 0.4106]], [[-0.9667, -0.6296, -0.731,\n 1.2503, -0.165, 1.205, -0.1704, -0.5215, 0.1595, 0.3904], [0.1026, -\n 0.6821, -0.4387, -1.1637, -0.5, 0.059, 0.5219, -0.6835, 2.4406, 0.7135]\n ], [[-0.471, 0.6558, -0.3144, -1.2213, 0.1556, -0.3836, -0.1081, -0.144,\n -1.1231, 0.6279], [-0.8449, -0.2184, -0.1806, -0.0615, -0.566, -0.3556,\n 1.6891, -1.0286, 1.3361, -0.4313]]])\n', (1530, 2461), True, 'import numpy as np\n'), ((2783, 2799), 'mindspore.common.tensor.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (2789, 2799), False, 'from mindspore.common.tensor import Tensor\n'), ((5439, 5483), 'numpy.concatenate', 'np.concatenate', (['(wih, whh, bih, bhh)'], {'axis': '(1)'}), '((wih, whh, bih, bhh), axis=1)\n', (5453, 5483), True, 'import numpy as np\n'), ((5544, 5556), 'mindspore.common.tensor.Tensor', 'Tensor', (['w_np'], {}), '(w_np)\n', (5550, 5556), False, 'from mindspore.common.tensor import Tensor\n'), ((7812, 8760), 'numpy.array', 'np.array', (['[[[-1.7322, 1.6642, -1.1861, 0.2955, -0.7907, 0.2982, -1.3413, 1.0665, -\n 0.0436, -0.1883], [0.2195, 0.5917, -0.6739, 0.2388, -0.5364, -1.3309, -\n 0.6018, -0.3081, -0.9648, -1.1627]], [[-0.5094, -2.6025, -0.9302, -\n 1.1937, 0.6501, -0.1903, -0.0661, 0.108, 0.9829, -0.228], [1.3961, \n 0.2239, -0.1947, -0.3206, 0.5791, 0.3396, 0.1728, -1.2007, -1.0994, -\n 1.3278]], [[0.187, -1.109, -0.9705, 0.2207, 0.3743, 0.1158, -0.5443, -\n 0.5559, 0.1538, -0.3975], [-0.2347, -0.1245, -0.2335, 0.3164, 1.0997, -\n 0.3928, -1.8517, 1.1136, -1.5051, -0.0071]], [[1.2739, 2.5438, -0.4289,\n -0.7981, -1.3682, -2.2509, 0.2028, 1.341, 2.9502, -1.165], [0.1254, \n 0.2726, 0.0251, 0.9323, 0.7315, 0.8231, -0.2123, -0.6885, 0.9893, -\n 0.2047]], [[0.187, -0.9066, 0.7155, 0.5438, -0.9757, -0.5828, -0.3417, \n 1.5681, 1.0326, -0.0179], [-0.7746, -1.0695, -0.5278, 2.5307, -0.1002, \n -1.5773, 0.7717, 1.0266, -0.0798, 1.2333]]]'], {}), '([[[-1.7322, 1.6642, -1.1861, 0.2955, -0.7907, 0.2982, -1.3413, \n 1.0665, -0.0436, -0.1883], [0.2195, 0.5917, -0.6739, 0.2388, -0.5364, -\n 1.3309, -0.6018, -0.3081, -0.9648, -1.1627]], [[-0.5094, -2.6025, -\n 0.9302, -1.1937, 0.6501, -0.1903, -0.0661, 0.108, 0.9829, -0.228], [\n 1.3961, 0.2239, -0.1947, -0.3206, 0.5791, 0.3396, 0.1728, -1.2007, -\n 1.0994, -1.3278]], [[0.187, -1.109, -0.9705, 0.2207, 0.3743, 0.1158, -\n 0.5443, -0.5559, 0.1538, -0.3975], [-0.2347, -0.1245, -0.2335, 0.3164, \n 1.0997, -0.3928, -1.8517, 1.1136, -1.5051, -0.0071]], [[1.2739, 2.5438,\n -0.4289, -0.7981, -1.3682, -2.2509, 0.2028, 1.341, 2.9502, -1.165], [\n 0.1254, 
0.2726, 0.0251, 0.9323, 0.7315, 0.8231, -0.2123, -0.6885, \n 0.9893, -0.2047]], [[0.187, -0.9066, 0.7155, 0.5438, -0.9757, -0.5828, \n -0.3417, 1.5681, 1.0326, -0.0179], [-0.7746, -1.0695, -0.5278, 2.5307, \n -0.1002, -1.5773, 0.7717, 1.0266, -0.0798, 1.2333]]])\n', (7820, 8760), True, 'import numpy as np\n'), ((9069, 9085), 'mindspore.common.tensor.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (9075, 9085), False, 'from mindspore.common.tensor import Tensor\n'), ((13011, 13111), 'numpy.concatenate', 'np.concatenate', (['(wih, whh, wih_reverse, whh_reverse, bih, bhh, bih_reverse, bhh_reverse)'], {'axis': '(1)'}), '((wih, whh, wih_reverse, whh_reverse, bih, bhh, bih_reverse,\n bhh_reverse), axis=1)\n', (13025, 13111), True, 'import numpy as np\n'), ((13181, 13193), 'mindspore.common.tensor.Tensor', 'Tensor', (['w_np'], {}), '(w_np)\n', (13187, 13193), False, 'from mindspore.common.tensor import Tensor\n'), ((15754, 16696), 'numpy.array', 'np.array', (['[[[-0.1887, -0.4144, -0.0235, 0.7489, 0.7522, 0.5969, 0.3342, 1.2198, \n 0.6786, -0.9404], [-0.8643, -1.6835, -2.4965, 2.8093, 0.1741, 0.2707, \n 0.7387, -0.0939, -1.799, 0.4765]], [[-0.5963, -1.2598, -0.7226, 1.1365,\n -1.732, -0.7302, 0.1221, -0.2111, -1.6173, -0.0706], [0.8964, 0.1737, -\n 1.0077, -0.1389, 0.4889, 0.4391, 0.7911, 0.3614, -1.9533, -0.9936]], [[\n 0.326, -1.3312, 0.0601, 1.0726, -1.601, -1.8733, -1.5775, 1.1579, -\n 0.8801, -0.5742], [-2.2998, -0.6344, -0.5409, -0.9221, -0.65, 0.1206, \n 1.5215, 0.7517, 1.3691, 2.0021]], [[-0.1245, -0.369, 2.1193, 1.3852, -\n 0.1841, -0.8899, -0.3646, -0.8575, -0.3131, 0.2026], [1.0218, -1.4331, \n 0.1744, 0.5442, -0.7808, 0.2527, 0.1566, 1.1484, -0.7766, -0.6747]], [[\n -0.6752, 0.9906, -0.4973, 0.3471, -0.1202, -0.4213, 2.0213, 0.0441, \n 0.9016, 1.0365], [1.2223, -1.3248, 0.1207, -0.8256, 0.1816, 0.7057, -\n 0.3105, 0.5713, 0.2804, -1.0685]]]'], {}), '([[[-0.1887, -0.4144, -0.0235, 0.7489, 0.7522, 0.5969, 0.3342, \n 1.2198, 0.6786, -0.9404], [-0.8643, -1.6835, -2.4965, 2.8093, 0.1741, \n 0.2707, 0.7387, -0.0939, -1.799, 0.4765]], [[-0.5963, -1.2598, -0.7226,\n 1.1365, -1.732, -0.7302, 0.1221, -0.2111, -1.6173, -0.0706], [0.8964, \n 0.1737, -1.0077, -0.1389, 0.4889, 0.4391, 0.7911, 0.3614, -1.9533, -\n 0.9936]], [[0.326, -1.3312, 0.0601, 1.0726, -1.601, -1.8733, -1.5775, \n 1.1579, -0.8801, -0.5742], [-2.2998, -0.6344, -0.5409, -0.9221, -0.65, \n 0.1206, 1.5215, 0.7517, 1.3691, 2.0021]], [[-0.1245, -0.369, 2.1193, \n 1.3852, -0.1841, -0.8899, -0.3646, -0.8575, -0.3131, 0.2026], [1.0218, \n -1.4331, 0.1744, 0.5442, -0.7808, 0.2527, 0.1566, 1.1484, -0.7766, -\n 0.6747]], [[-0.6752, 0.9906, -0.4973, 0.3471, -0.1202, -0.4213, 2.0213,\n 0.0441, 0.9016, 1.0365], [1.2223, -1.3248, 0.1207, -0.8256, 0.1816, \n 0.7057, -0.3105, 0.5713, 0.2804, -1.0685]]])\n', (15762, 16696), True, 'import numpy as np\n'), ((17006, 17022), 'mindspore.common.tensor.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (17012, 17022), False, 'from mindspore.common.tensor import Tensor\n'), ((24999, 25223), 'numpy.concatenate', 'np.concatenate', (['(wih_l0, whh_l0, wih_reverse_l0, whh_reverse_l0, wih_l1, whh_l1,\n wih_reverse_l1, whh_reverse_l1, bih_l0, bhh_l0, bih_reverse_l0,\n bhh_reverse_l0, bih_l1, bhh_l1, bih_reverse_l1, bhh_reverse_l1)'], {'axis': '(1)'}), '((wih_l0, whh_l0, wih_reverse_l0, whh_reverse_l0, wih_l1,\n whh_l1, wih_reverse_l1, whh_reverse_l1, bih_l0, bhh_l0, bih_reverse_l0,\n bhh_reverse_l0, bih_l1, bhh_l1, bih_reverse_l1, bhh_reverse_l1), axis=1)\n', (25013, 25223), True, 'import numpy as 
np\n'), ((25314, 25326), 'mindspore.common.tensor.Tensor', 'Tensor', (['w_np'], {}), '(w_np)\n', (25320, 25326), False, 'from mindspore.common.tensor import Tensor\n'), ((28758, 29696), 'numpy.array', 'np.array', (['[[[-0.5907, 1.0557, 1.7283, 0.6706, -1.255, -0.5298, -0.229, -0.6735, \n 0.8555, 1.4836], [-1.707, -0.5347, -0.9105, -0.2598, 0.0588, 1.5496, \n 1.0757, 0.376, -1.202, -0.2868]], [[0.0151, 0.2126, 0.809, -0.5292, -\n 2.559, 0.4279, -0.3081, -1.4706, -0.0498, 1.2301], [0.4165, -0.5391, -\n 0.0996, 0.1928, -0.4909, -0.1255, 0.4444, -1.3687, 1.3096, 0.6553]], [[\n -0.7802, -0.2083, -0.6388, 1.3757, 0.4293, 0.5363, 0.3202, -0.6687, -\n 1.3864, -0.2953], [1.0799, -0.7204, 0.113, -0.5857, -0.4855, -1.1068, \n 1.0126, 0.8716, 1.546, -0.7392]], [[2.2645, -0.6586, -0.2227, 1.429, -\n 0.5006, -1.6576, -0.1793, 0.5319, 0.136, 0.2707], [-0.4071, 0.1575, \n 1.4199, -0.9156, 0.1855, 0.4947, 1.046, -0.6365, 0.1191, -0.6374]], [[\n 0.2468, 1.0815, -0.4893, 0.0664, 0.6405, -2.2967, 0.7612, 0.8759, \n 0.5685, -1.0999], [-0.7272, -1.775, -0.1164, -0.7159, 0.0061, -0.7839, \n -1.8329, 0.3434, -0.5634, 0.5384]]]'], {}), '([[[-0.5907, 1.0557, 1.7283, 0.6706, -1.255, -0.5298, -0.229, -\n 0.6735, 0.8555, 1.4836], [-1.707, -0.5347, -0.9105, -0.2598, 0.0588, \n 1.5496, 1.0757, 0.376, -1.202, -0.2868]], [[0.0151, 0.2126, 0.809, -\n 0.5292, -2.559, 0.4279, -0.3081, -1.4706, -0.0498, 1.2301], [0.4165, -\n 0.5391, -0.0996, 0.1928, -0.4909, -0.1255, 0.4444, -1.3687, 1.3096, \n 0.6553]], [[-0.7802, -0.2083, -0.6388, 1.3757, 0.4293, 0.5363, 0.3202, \n -0.6687, -1.3864, -0.2953], [1.0799, -0.7204, 0.113, -0.5857, -0.4855, \n -1.1068, 1.0126, 0.8716, 1.546, -0.7392]], [[2.2645, -0.6586, -0.2227, \n 1.429, -0.5006, -1.6576, -0.1793, 0.5319, 0.136, 0.2707], [-0.4071, \n 0.1575, 1.4199, -0.9156, 0.1855, 0.4947, 1.046, -0.6365, 0.1191, -\n 0.6374]], [[0.2468, 1.0815, -0.4893, 0.0664, 0.6405, -2.2967, 0.7612, \n 0.8759, 0.5685, -1.0999], [-0.7272, -1.775, -0.1164, -0.7159, 0.0061, -\n 0.7839, -1.8329, 0.3434, -0.5634, 0.5384]]])\n', (28766, 29696), True, 'import numpy as np\n'), ((30010, 30026), 'mindspore.common.tensor.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (30016, 30026), False, 'from mindspore.common.tensor import Tensor\n'), ((37384, 37608), 'numpy.concatenate', 'np.concatenate', (['(wih_l0, whh_l0, wih_reverse_l0, whh_reverse_l0, wih_l1, whh_l1,\n wih_reverse_l1, whh_reverse_l1, bih_l0, bhh_l0, bih_reverse_l0,\n bhh_reverse_l0, bih_l1, bhh_l1, bih_reverse_l1, bhh_reverse_l1)'], {'axis': '(1)'}), '((wih_l0, whh_l0, wih_reverse_l0, whh_reverse_l0, wih_l1,\n whh_l1, wih_reverse_l1, whh_reverse_l1, bih_l0, bhh_l0, bih_reverse_l0,\n bhh_reverse_l0, bih_l1, bhh_l1, bih_reverse_l1, bhh_reverse_l1), axis=1)\n', (37398, 37608), True, 'import numpy as np\n'), ((37699, 37711), 'mindspore.common.tensor.Tensor', 'Tensor', (['w_np'], {}), '(w_np)\n', (37705, 37711), False, 'from mindspore.common.tensor import Tensor\n'), ((42693, 44163), 'numpy.array', 'np.array', (['[[[-0.248789445, -0.218991071, -0.841492534, -0.573351622, 0.0820644796, \n 0.414313585, -1.30143976, -0.44336614, -0.12100368, -0.211284861], [\n 0.994045794, 0.318840504, 0.481898338, -0.0483986028, -0.0926419497, -\n 0.257977694, 1.8219111, 0.595121741, 0.630752742, -0.601903737]], [[\n 0.767166913, 0.0541202351, -1.24094069, 1.38814664, 2.05845284, \n 0.729744852, -1.12405574, 0.378702253, 0.228524983, 2.02445173], [-\n 0.185264975, -0.455119252, 1.23624969, 1.24347043, -1.68316591, -\n 0.355918944, 0.307149738, -0.344966322, -0.108978853, 
0.180912763]], [[\n -0.647622466, 1.31204927, 0.64747721, -0.793370783, 0.000308402872, -\n 0.512097359, -0.169133916, 0.857838035, -0.363963723, 0.635978997], [-\n 0.392911851, 0.08273343, -0.111347124, 0.879961967, 0.0602812059, -\n 0.376448452, -1.48800862, -0.948699772, -1.24202335, 1.65264118]], [[\n 0.405404866, 0.056739632, -0.205705926, -0.0870196745, -0.734854519, -\n 0.107580565, 1.33716142, -1.18140256, 2.66074872, -0.326788813], [\n 0.697183967, -2.32625628, 1.20393467, -2.32532692, 2.03347206, -\n 0.758083522, 1.35564697, -0.232149422, 0.985125721, 1.00944638]], [[\n 0.989606023, -0.530669808, -0.266087383, 0.814819038, 0.107067376, -\n 1.7621429, -0.504977465, 1.94490123, 0.510450959, -0.229238123], [-\n 1.32928836, -0.118175328, -0.517818272, -0.145089477, 0.713987231, -\n 0.741293788, -0.367817104, 1.18039274, -0.603745162, -0.583392143]]]'], {}), '([[[-0.248789445, -0.218991071, -0.841492534, -0.573351622, \n 0.0820644796, 0.414313585, -1.30143976, -0.44336614, -0.12100368, -\n 0.211284861], [0.994045794, 0.318840504, 0.481898338, -0.0483986028, -\n 0.0926419497, -0.257977694, 1.8219111, 0.595121741, 0.630752742, -\n 0.601903737]], [[0.767166913, 0.0541202351, -1.24094069, 1.38814664, \n 2.05845284, 0.729744852, -1.12405574, 0.378702253, 0.228524983, \n 2.02445173], [-0.185264975, -0.455119252, 1.23624969, 1.24347043, -\n 1.68316591, -0.355918944, 0.307149738, -0.344966322, -0.108978853, \n 0.180912763]], [[-0.647622466, 1.31204927, 0.64747721, -0.793370783, \n 0.000308402872, -0.512097359, -0.169133916, 0.857838035, -0.363963723, \n 0.635978997], [-0.392911851, 0.08273343, -0.111347124, 0.879961967, \n 0.0602812059, -0.376448452, -1.48800862, -0.948699772, -1.24202335, \n 1.65264118]], [[0.405404866, 0.056739632, -0.205705926, -0.0870196745, \n -0.734854519, -0.107580565, 1.33716142, -1.18140256, 2.66074872, -\n 0.326788813], [0.697183967, -2.32625628, 1.20393467, -2.32532692, \n 2.03347206, -0.758083522, 1.35564697, -0.232149422, 0.985125721, \n 1.00944638]], [[0.989606023, -0.530669808, -0.266087383, 0.814819038, \n 0.107067376, -1.7621429, -0.504977465, 1.94490123, 0.510450959, -\n 0.229238123], [-1.32928836, -0.118175328, -0.517818272, -0.145089477, \n 0.713987231, -0.741293788, -0.367817104, 1.18039274, -0.603745162, -\n 0.583392143]]])\n', (42701, 44163), True, 'import numpy as np\n'), ((45058, 45074), 'mindspore.common.tensor.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (45064, 45074), False, 'from mindspore.common.tensor import Tensor\n'), ((47265, 47309), 'numpy.concatenate', 'np.concatenate', (['(wih, whh, bih, bhh)'], {'axis': '(1)'}), '((wih, whh, bih, bhh), axis=1)\n', (47279, 47309), True, 'import numpy as np\n'), ((47370, 47382), 'mindspore.common.tensor.Tensor', 'Tensor', (['w_np'], {}), '(w_np)\n', (47376, 47382), False, 'from mindspore.common.tensor import Tensor\n'), ((3310, 4154), 'numpy.array', 'np.array', (['[[0.34021, -0.46622, 0.45117, 0.23627, 0.37844, 0.2877, 0.41631, -0.62628, \n -0.48008, -0.49148], [-0.064257, -0.24807, 0.01355, 0.68946, -0.012608,\n -0.071719, -0.13566, -0.49215, 0.28509, -0.6354], [-0.69863, 0.59773, -\n 0.39062, -0.076151, 0.00056803, -0.7042, -0.61822, 0.41854, 0.40596, \n 0.64867], [-0.30253, -0.19464, 0.70591, 0.49368, -0.59758, 0.013251, \n 0.35685, -0.3764, -0.44612, 0.51794], [-0.3214, 0.55578, 0.63589, -\n 0.64249, 0.57258, 0.24256, -0.27954, 0.25202, 0.29235, -0.39979], [\n 0.16547, -0.07903, -0.20045, 0.62484, -0.10727, -0.5001, -0.29165, -\n 0.1762, 0.15939, -0.22744], [-0.40835, 0.36751, 0.47989, 
0.58886, \n 0.53598, -0.29055, -0.28129, 0.60219, 0.49193, 0.33115], [-0.56894, -\n 0.50359, 0.47491, 0.5811, -0.54921, -0.61343, -0.058236, -0.37682, \n 0.48338, -0.21551]]'], {}), '([[0.34021, -0.46622, 0.45117, 0.23627, 0.37844, 0.2877, 0.41631, -\n 0.62628, -0.48008, -0.49148], [-0.064257, -0.24807, 0.01355, 0.68946, -\n 0.012608, -0.071719, -0.13566, -0.49215, 0.28509, -0.6354], [-0.69863, \n 0.59773, -0.39062, -0.076151, 0.00056803, -0.7042, -0.61822, 0.41854, \n 0.40596, 0.64867], [-0.30253, -0.19464, 0.70591, 0.49368, -0.59758, \n 0.013251, 0.35685, -0.3764, -0.44612, 0.51794], [-0.3214, 0.55578, \n 0.63589, -0.64249, 0.57258, 0.24256, -0.27954, 0.25202, 0.29235, -\n 0.39979], [0.16547, -0.07903, -0.20045, 0.62484, -0.10727, -0.5001, -\n 0.29165, -0.1762, 0.15939, -0.22744], [-0.40835, 0.36751, 0.47989, \n 0.58886, 0.53598, -0.29055, -0.28129, 0.60219, 0.49193, 0.33115], [-\n 0.56894, -0.50359, 0.47491, 0.5811, -0.54921, -0.61343, -0.058236, -\n 0.37682, 0.48338, -0.21551]])\n', (3318, 4154), True, 'import numpy as np\n'), ((4771, 4938), 'numpy.array', 'np.array', (['[[-0.482, -0.235], [-0.1195, 0.0519], [0.4511, -0.3961], [-0.5962, 0.0906],\n [0.2162, -0.1178], [0.6237, 0.0711], [0.1867, -0.1225], [0.1831, 0.085]]'], {}), '([[-0.482, -0.235], [-0.1195, 0.0519], [0.4511, -0.3961], [-0.5962,\n 0.0906], [0.2162, -0.1178], [0.6237, 0.0711], [0.1867, -0.1225], [\n 0.1831, 0.085]])\n', (4779, 4938), True, 'import numpy as np\n'), ((5152, 5229), 'numpy.array', 'np.array', (['[-0.2862, 0.0034, 0.2059, -0.6544, 0.3244, -0.2472, 0.0852, -0.305]'], {}), '([-0.2862, 0.0034, 0.2059, -0.6544, 0.3244, -0.2472, 0.0852, -0.305])\n', (5160, 5229), True, 'import numpy as np\n'), ((5294, 5372), 'numpy.array', 'np.array', (['[-0.6575, 0.1562, -0.6434, 0.0212, -0.2493, -0.5626, 0.153, -0.5235]'], {}), '([-0.6575, 0.1562, -0.6434, 0.0212, -0.2493, -0.5626, 0.153, -0.5235])\n', (5302, 5372), True, 'import numpy as np\n'), ((9596, 10333), 'numpy.array', 'np.array', (['[[-0.2959, -0.1142, 0.3662, 0.5406, 0.1738, 0.2697, -0.696, -0.0464, 0.3486,\n 0.1888], [0.3043, 0.1505, -0.1207, -0.2456, 0.2735, 0.6673, -0.3352, -\n 0.6153, -0.5731, -0.2726], [-0.2657, -0.557, 0.6785, -0.1861, -0.0652, \n 0.5757, 0.6442, -0.4068, -0.326, 0.7054], [0.6607, 0.6927, -0.1354, \n 0.2484, 0.2053, 0.5743, -0.0212, 0.334, -0.5685, -0.5668], [0.6701, -\n 0.3013, -0.1202, -0.42, -0.428, -0.6329, -0.6074, -0.4997, -0.6215, -\n 0.6259], [0.0299, -0.6071, -0.4683, -0.3363, -0.0044, -0.0007, 0.27, \n 0.0202, -0.288, -0.6869], [0.3025, -0.2461, -0.5128, 0.6327, -0.1438, -\n 0.51, 0.1924, 0.2023, 0.3129, 0.2271], [0.3777, 0.0546, 0.479, -0.1895,\n 0.3588, 0.449, 0.685, 0.624, -0.2739, -0.4474]]'], {}), '([[-0.2959, -0.1142, 0.3662, 0.5406, 0.1738, 0.2697, -0.696, -\n 0.0464, 0.3486, 0.1888], [0.3043, 0.1505, -0.1207, -0.2456, 0.2735, \n 0.6673, -0.3352, -0.6153, -0.5731, -0.2726], [-0.2657, -0.557, 0.6785, \n -0.1861, -0.0652, 0.5757, 0.6442, -0.4068, -0.326, 0.7054], [0.6607, \n 0.6927, -0.1354, 0.2484, 0.2053, 0.5743, -0.0212, 0.334, -0.5685, -\n 0.5668], [0.6701, -0.3013, -0.1202, -0.42, -0.428, -0.6329, -0.6074, -\n 0.4997, -0.6215, -0.6259], [0.0299, -0.6071, -0.4683, -0.3363, -0.0044,\n -0.0007, 0.27, 0.0202, -0.288, -0.6869], [0.3025, -0.2461, -0.5128, \n 0.6327, -0.1438, -0.51, 0.1924, 0.2023, 0.3129, 0.2271], [0.3777, \n 0.0546, 0.479, -0.1895, 0.3588, 0.449, 0.685, 0.624, -0.2739, -0.4474]])\n', (9604, 10333), True, 'import numpy as np\n'), ((10538, 10711), 'numpy.array', 'np.array', (['[[0.6346, -0.6366], [-0.0248, -0.6156], 
[-0.3821, 0.6327], [-0.6132, -\n 0.5071], [0.4029, 0.0906], [-0.5671, 0.2556], [0.0268, -0.4347], [\n 0.1152, -0.3124]]'], {}), '([[0.6346, -0.6366], [-0.0248, -0.6156], [-0.3821, 0.6327], [-\n 0.6132, -0.5071], [0.4029, 0.0906], [-0.5671, 0.2556], [0.0268, -0.4347\n ], [0.1152, -0.3124]])\n', (10546, 10711), True, 'import numpy as np\n'), ((10921, 11007), 'numpy.array', 'np.array', (['[-0.3839, -0.5365, -0.6691, 0.1697, -0.1564, -0.0451, -0.5921, -0.5367]'], {}), '([-0.3839, -0.5365, -0.6691, 0.1697, -0.1564, -0.0451, -0.5921, -\n 0.5367])\n', (10929, 11007), True, 'import numpy as np\n'), ((11066, 11145), 'numpy.array', 'np.array', (['[0.5952, -0.4905, 0.0423, -0.0293, -0.6638, 0.4348, -0.4291, -0.5541]'], {}), '([0.5952, -0.4905, 0.0423, -0.0293, -0.6638, 0.4348, -0.4291, -0.5541])\n', (11074, 11145), True, 'import numpy as np\n'), ((11218, 11965), 'numpy.array', 'np.array', (['[[-0.2938, 0.0048, 0.2704, -0.3387, -0.4529, -0.2586, 0.1352, -0.1208, -\n 0.1423, -0.022], [-0.3701, 0.0201, -0.0255, 0.134, -0.1938, -0.7056, -\n 0.2303, 0.4814, 0.3636, -0.5018], [-0.0284, -0.0108, -0.5788, 0.2389, \n 0.2604, 0.6774, -0.5525, 0.6265, -0.6126, 0.3197], [-0.6906, 0.6991, -\n 0.6138, 0.0044, 0.5714, 0.4176, 0.5451, -0.5114, -0.2286, 0.1105], [\n 0.3547, 0.6233, -0.4543, -0.6799, 0.1109, 0.5601, 0.0212, 0.6926, \n 0.0597, -0.4383], [-0.137, -0.5852, 0.0596, 0.5494, 0.5789, -0.0534, \n 0.1092, 0.3544, -0.1571, 0.4444], [-0.5886, -0.4765, -0.3837, -0.6634, \n 0.0963, -0.1385, -0.0837, -0.1354, 0.0547, -0.287], [0.2049, -0.7057, -\n 0.1736, 0.4724, 0.1957, -0.3037, 0.4626, -0.6465, 0.4575, 0.423]]'], {}), '([[-0.2938, 0.0048, 0.2704, -0.3387, -0.4529, -0.2586, 0.1352, -\n 0.1208, -0.1423, -0.022], [-0.3701, 0.0201, -0.0255, 0.134, -0.1938, -\n 0.7056, -0.2303, 0.4814, 0.3636, -0.5018], [-0.0284, -0.0108, -0.5788, \n 0.2389, 0.2604, 0.6774, -0.5525, 0.6265, -0.6126, 0.3197], [-0.6906, \n 0.6991, -0.6138, 0.0044, 0.5714, 0.4176, 0.5451, -0.5114, -0.2286, \n 0.1105], [0.3547, 0.6233, -0.4543, -0.6799, 0.1109, 0.5601, 0.0212, \n 0.6926, 0.0597, -0.4383], [-0.137, -0.5852, 0.0596, 0.5494, 0.5789, -\n 0.0534, 0.1092, 0.3544, -0.1571, 0.4444], [-0.5886, -0.4765, -0.3837, -\n 0.6634, 0.0963, -0.1385, -0.0837, -0.1354, 0.0547, -0.287], [0.2049, -\n 0.7057, -0.1736, 0.4724, 0.1957, -0.3037, 0.4626, -0.6465, 0.4575, 0.423]])\n', (11226, 11965), True, 'import numpy as np\n'), ((12275, 12443), 'numpy.array', 'np.array', (['[[0.2339, -0.0307], [-0.585, 0.6328], [0.5856, -0.5601], [0.4875, -0.6929],\n [0.0314, 0.2531], [-0.2523, 0.3244], [0.5199, 0.5146], [0.3968, 0.4511]]'], {}), '([[0.2339, -0.0307], [-0.585, 0.6328], [0.5856, -0.5601], [0.4875, \n -0.6929], [0.0314, 0.2531], [-0.2523, 0.3244], [0.5199, 0.5146], [\n 0.3968, 0.4511]])\n', (12283, 12443), True, 'import numpy as np\n'), ((12718, 12795), 'numpy.array', 'np.array', (['[-0.176, 0.2828, 0.245, -0.4016, -0.4664, 0.4031, -0.1945, -0.1509]'], {}), '([-0.176, 0.2828, 0.245, -0.4016, -0.4664, 0.4031, -0.1945, -0.1509])\n', (12726, 12795), True, 'import numpy as np\n'), ((12869, 12943), 'numpy.array', 'np.array', (['[0.6427, 0.4806, 0.6278, 0.1596, 0.0038, -0.3418, 0.0549, -0.39]'], {}), '([0.6427, 0.4806, 0.6278, 0.1596, 0.0038, -0.3418, 0.0549, -0.39])\n', (12877, 12943), True, 'import numpy as np\n'), ((17536, 18287), 'numpy.array', 'np.array', (['[[0.3715, -0.0723, 0.6017, 0.5115, -0.5357, 0.3794, -0.3752, -0.6205, -\n 0.037, -0.2904], [0.7055, -0.4156, -0.365, -0.0964, 0.4141, -0.2584, -\n 0.4765, -0.0045, 0.2943, -0.2648], [0.1355, 0.1697, 0.1883, 
0.3754, \n 0.3744, -0.6128, 0.2328, -0.1275, 0.6604, 0.6498], [-0.0266, 0.5805, -\n 0.5358, -0.0929, 0.0797, 0.3744, 0.3299, -0.3825, 0.5804, -0.0855], [\n 0.1141, 0.2587, -0.437, 0.643, -0.0017, 0.4865, 0.2814, 0.6213, -0.6415,\n 0.4574], [-0.3958, -0.5827, -0.1056, 0.6987, -0.6591, -0.1326, 0.5237, \n 0.4667, -0.7001, -0.2326], [0.3074, -0.3118, -0.4591, 0.2481, -0.2978, \n -0.185, 0.477, -0.0126, 0.3655, -0.4306], [0.3033, -0.6264, -0.6551, \n 0.0069, -0.5238, -0.395, 0.5681, -0.4931, -0.6258, 0.4079]]'], {}), '([[0.3715, -0.0723, 0.6017, 0.5115, -0.5357, 0.3794, -0.3752, -\n 0.6205, -0.037, -0.2904], [0.7055, -0.4156, -0.365, -0.0964, 0.4141, -\n 0.2584, -0.4765, -0.0045, 0.2943, -0.2648], [0.1355, 0.1697, 0.1883, \n 0.3754, 0.3744, -0.6128, 0.2328, -0.1275, 0.6604, 0.6498], [-0.0266, \n 0.5805, -0.5358, -0.0929, 0.0797, 0.3744, 0.3299, -0.3825, 0.5804, -\n 0.0855], [0.1141, 0.2587, -0.437, 0.643, -0.0017, 0.4865, 0.2814, \n 0.6213, -0.6415, 0.4574], [-0.3958, -0.5827, -0.1056, 0.6987, -0.6591, \n -0.1326, 0.5237, 0.4667, -0.7001, -0.2326], [0.3074, -0.3118, -0.4591, \n 0.2481, -0.2978, -0.185, 0.477, -0.0126, 0.3655, -0.4306], [0.3033, -\n 0.6264, -0.6551, 0.0069, -0.5238, -0.395, 0.5681, -0.4931, -0.6258, \n 0.4079]])\n', (17544, 18287), True, 'import numpy as np\n'), ((18516, 18686), 'numpy.array', 'np.array', (['[[-0.387, 0.0238], [-0.3758, 0.249], [0.5437, -0.4117], [0.1181, -0.2043],\n [-0.5335, 0.1188], [-0.0822, 0.2154], [0.5844, -0.3239], [-0.6537, 0.0278]]'], {}), '([[-0.387, 0.0238], [-0.3758, 0.249], [0.5437, -0.4117], [0.1181, -\n 0.2043], [-0.5335, 0.1188], [-0.0822, 0.2154], [0.5844, -0.3239], [-\n 0.6537, 0.0278]])\n', (18524, 18686), True, 'import numpy as np\n'), ((18922, 18998), 'numpy.array', 'np.array', (['[0.544, 0.5995, 0.0155, -0.6254, 0.5114, 0.3364, -0.1824, -0.6262]'], {}), '([0.544, 0.5995, 0.0155, -0.6254, 0.5114, 0.3364, -0.1824, -0.6262])\n', (18930, 18998), True, 'import numpy as np\n'), ((19066, 19143), 'numpy.array', 'np.array', (['[0.4139, -0.2513, -0.4023, 0.4222, 0.6387, -0.6147, 0.0677, 0.5355]'], {}), '([0.4139, -0.2513, -0.4023, 0.4222, 0.6387, -0.6147, 0.0677, 0.5355])\n', (19074, 19143), True, 'import numpy as np\n'), ((19219, 20051), 'numpy.array', 'np.array', (['[[0.65219, 0.56162, -0.18653, 0.68789, 0.1324, 0.17699, 0.1294, -0.1852, -\n 0.55439, -0.34946], [0.37645, 0.65475, 0.35964, 0.22433, -0.17869, -\n 0.29047, 0.17615, -0.53353, -0.074204, -0.2527], [0.58095, -0.00046426,\n 0.19262, -0.51306, -0.36811, 0.44858, 0.6258, 0.095494, -0.69505, 0.495\n ], [-0.3781, 0.15485, -0.14735, -0.15327, -0.45702, 0.30816, -0.3428, \n 0.21604, 0.14087, -0.57707], [-0.387, -0.64653, 0.60653, -0.47297, \n 0.068413, -0.12681, 0.068464, 0.67011, 0.3995, -0.20577], [-0.18648, -\n 0.67198, 0.38017, -0.33147, 0.53193, -0.54952, 0.21774, -0.46271, \n 0.32611, 0.063554], [-0.45403, -0.1591, -0.075886, 0.26313, 0.68093, -\n 0.3996, 0.55428, 0.10429, 0.51322, 0.19406], [0.39698, -0.52101, \n 0.51372, -0.39866, 0.10115, -0.04129, -0.3098, 0.21607, 0.4842, -0.19267]]'], {}), '([[0.65219, 0.56162, -0.18653, 0.68789, 0.1324, 0.17699, 0.1294, -\n 0.1852, -0.55439, -0.34946], [0.37645, 0.65475, 0.35964, 0.22433, -\n 0.17869, -0.29047, 0.17615, -0.53353, -0.074204, -0.2527], [0.58095, -\n 0.00046426, 0.19262, -0.51306, -0.36811, 0.44858, 0.6258, 0.095494, -\n 0.69505, 0.495], [-0.3781, 0.15485, -0.14735, -0.15327, -0.45702, \n 0.30816, -0.3428, 0.21604, 0.14087, -0.57707], [-0.387, -0.64653, \n 0.60653, -0.47297, 0.068413, -0.12681, 0.068464, 0.67011, 0.3995, -\n 
0.20577], [-0.18648, -0.67198, 0.38017, -0.33147, 0.53193, -0.54952, \n 0.21774, -0.46271, 0.32611, 0.063554], [-0.45403, -0.1591, -0.075886, \n 0.26313, 0.68093, -0.3996, 0.55428, 0.10429, 0.51322, 0.19406], [\n 0.39698, -0.52101, 0.51372, -0.39866, 0.10115, -0.04129, -0.3098, \n 0.21607, 0.4842, -0.19267]])\n', (19227, 20051), True, 'import numpy as np\n'), ((20852, 21022), 'numpy.array', 'np.array', (['[[-0.3231, -0.396], [-0.1625, -0.3032], [0.3892, -0.0666], [0.0159, -0.487],\n [-0.4953, 0.2278], [-0.538, -0.525], [0.0371, -0.4534], [-0.5452, 0.5012]]'], {}), '([[-0.3231, -0.396], [-0.1625, -0.3032], [0.3892, -0.0666], [0.0159,\n -0.487], [-0.4953, 0.2278], [-0.538, -0.525], [0.0371, -0.4534], [-\n 0.5452, 0.5012]])\n', (20860, 21022), True, 'import numpy as np\n'), ((21325, 21404), 'numpy.array', 'np.array', (['[0.0469, -0.0107, 0.3783, -0.2657, -0.0089, 0.5032, -0.0757, -0.2022]'], {}), '([0.0469, -0.0107, 0.3783, -0.2657, -0.0089, 0.5032, -0.0757, -0.2022])\n', (21333, 21404), True, 'import numpy as np\n'), ((21479, 21556), 'numpy.array', 'np.array', (['[-0.6584, 0.3977, 0.5597, -0.4784, 0.536, -0.2532, 0.5362, -0.1063]'], {}), '([-0.6584, 0.3977, 0.5597, -0.4784, 0.536, -0.2532, 0.5362, -0.1063])\n', (21487, 21556), True, 'import numpy as np\n'), ((21625, 21941), 'numpy.array', 'np.array', (['[[0.0602, 0.6977, -0.3882, 0.3734], [-0.6896, -0.6014, -0.2311, 0.6433], [-\n 0.6778, -0.51, -0.1496, 0.5774], [-0.5824, 0.4656, -0.2835, -0.5688], [\n 0.5623, 0.3599, 0.1731, 0.3124], [0.1492, -0.6663, -0.1099, -0.5282], [\n 0.4696, -0.1795, -0.6712, -0.3903], [0.4995, 0.0709, -0.1738, 0.2822]]'], {}), '([[0.0602, 0.6977, -0.3882, 0.3734], [-0.6896, -0.6014, -0.2311, \n 0.6433], [-0.6778, -0.51, -0.1496, 0.5774], [-0.5824, 0.4656, -0.2835, \n -0.5688], [0.5623, 0.3599, 0.1731, 0.3124], [0.1492, -0.6663, -0.1099, \n -0.5282], [0.4696, -0.1795, -0.6712, -0.3903], [0.4995, 0.0709, -0.1738,\n 0.2822]])\n', (21633, 21941), True, 'import numpy as np\n'), ((22168, 22334), 'numpy.array', 'np.array', (['[[0.377, 0.4139], [0.5351, 0.6394], [0.3901, -0.1072], [0.1106, 0.1331], [\n 0.397, 0.4693], [0.2958, -0.3813], [-0.3064, 0.5519], [-0.2827, 0.5844]]'], {}), '([[0.377, 0.4139], [0.5351, 0.6394], [0.3901, -0.1072], [0.1106, \n 0.1331], [0.397, 0.4693], [0.2958, -0.3813], [-0.3064, 0.5519], [-\n 0.2827, 0.5844]])\n', (22176, 22334), True, 'import numpy as np\n'), ((22570, 22645), 'numpy.array', 'np.array', (['[0.5242, 0.5896, 0.3709, 0.6202, 0.5008, 0.2674, 0.4356, -0.3261]'], {}), '([0.5242, 0.5896, 0.3709, 0.6202, 0.5008, 0.2674, 0.4356, -0.3261])\n', (22578, 22645), True, 'import numpy as np\n'), ((22712, 22787), 'numpy.array', 'np.array', (['[-0.6648, 0.668, 0.251, -0.1245, -0.0524, 0.5439, -0.165, 0.5303]'], {}), '([-0.6648, 0.668, 0.251, -0.1245, -0.0524, 0.5439, -0.165, 0.5303])\n', (22720, 22787), True, 'import numpy as np\n'), ((22866, 23176), 'numpy.array', 'np.array', (['[[0.6477, 0.4416, 0.3803, -0.4708], [0.4497, 0.2833, -0.4739, -0.6361], [-\n 0.5573, -0.3867, -0.0349, -0.4128], [-0.1545, 0.372, 0.2354, -0.609], [\n 0.5965, 0.6301, -0.4591, -0.012], [-0.1253, -0.1881, -0.4388, 0.4335],\n [0.1944, -0.123, -0.617, 0.1043], [-0.67, 0.4343, 0.6474, 0.0113]]'], {}), '([[0.6477, 0.4416, 0.3803, -0.4708], [0.4497, 0.2833, -0.4739, -\n 0.6361], [-0.5573, -0.3867, -0.0349, -0.4128], [-0.1545, 0.372, 0.2354,\n -0.609], [0.5965, 0.6301, -0.4591, -0.012], [-0.1253, -0.1881, -0.4388,\n 0.4335], [0.1944, -0.123, -0.617, 0.1043], [-0.67, 0.4343, 0.6474, 0.0113]]\n )\n', (22874, 23176), True, 'import 
numpy as np\n'), ((23473, 23643), 'numpy.array', 'np.array', (['[[0.6576, 0.5573], [0.2318, 0.0187], [-0.6365, 0.5744], [-0.6494, -0.182],\n [0.6461, -0.3344], [0.0906, -0.5405], [-0.5999, 0.5571], [-0.0488, 0.5345]]'], {}), '([[0.6576, 0.5573], [0.2318, 0.0187], [-0.6365, 0.5744], [-0.6494, \n -0.182], [0.6461, -0.3344], [0.0906, -0.5405], [-0.5999, 0.5571], [-\n 0.0488, 0.5345]])\n', (23481, 23643), True, 'import numpy as np\n'), ((23942, 24019), 'numpy.array', 'np.array', (['[-0.6058, -0.2812, -0.4449, -0.0802, 0.4931, 0.4066, 0.596, 0.1968]'], {}), '([-0.6058, -0.2812, -0.4449, -0.0802, 0.4931, 0.4066, 0.596, 0.1968])\n', (23950, 24019), True, 'import numpy as np\n'), ((24095, 24180), 'numpy.array', 'np.array', (['[-0.249, -0.3402, -0.5089, -0.3875, 0.4852, -0.0402, -0.0072, -0.1017]'], {}), '([-0.249, -0.3402, -0.5089, -0.3875, 0.4852, -0.0402, -0.0072, -0.1017]\n )\n', (24103, 24180), True, 'import numpy as np\n'), ((30540, 31285), 'numpy.array', 'np.array', (['[[0.23, 0.6668, 0.4703, 0.0425, 0.0464, 0.6825, 0.2249, -0.4315, -0.2449, \n 0.2964], [-0.2811, -0.3444, 0.2557, -0.5137, -0.5518, 0.1652, -0.672, \n 0.1066, 0.3586, 0.6299], [0.5728, -0.1784, 0.5661, 0.4012, 0.3856, -\n 0.1899, 0.3102, 0.3717, -0.5651, 0.1952], [0.1026, -0.0527, 0.1198, -\n 0.308, 0.2292, 0.5757, -0.3567, -0.2731, -0.0586, -0.2849], [0.2194, -\n 0.1622, 0.3219, -0.3008, -0.3713, -0.3034, -0.2385, 0.0412, -0.5205, \n 0.028], [-0.5499, -0.0733, -0.5236, -0.6753, -0.7045, -0.1839, -0.1037,\n -0.5026, -0.4055, -0.3416], [0.1573, -0.1301, -0.2882, -0.3464, 0.6643,\n 0.198, -0.6804, 0.5359, 0.5996, 0.0124], [-0.6436, 0.0587, -0.652, -\n 0.0471, 0.1667, 0.6042, 0.5752, -0.6296, -0.2976, -0.3757]]'], {}), '([[0.23, 0.6668, 0.4703, 0.0425, 0.0464, 0.6825, 0.2249, -0.4315, -\n 0.2449, 0.2964], [-0.2811, -0.3444, 0.2557, -0.5137, -0.5518, 0.1652, -\n 0.672, 0.1066, 0.3586, 0.6299], [0.5728, -0.1784, 0.5661, 0.4012, \n 0.3856, -0.1899, 0.3102, 0.3717, -0.5651, 0.1952], [0.1026, -0.0527, \n 0.1198, -0.308, 0.2292, 0.5757, -0.3567, -0.2731, -0.0586, -0.2849], [\n 0.2194, -0.1622, 0.3219, -0.3008, -0.3713, -0.3034, -0.2385, 0.0412, -\n 0.5205, 0.028], [-0.5499, -0.0733, -0.5236, -0.6753, -0.7045, -0.1839, \n -0.1037, -0.5026, -0.4055, -0.3416], [0.1573, -0.1301, -0.2882, -0.3464,\n 0.6643, 0.198, -0.6804, 0.5359, 0.5996, 0.0124], [-0.6436, 0.0587, -\n 0.652, -0.0471, 0.1667, 0.6042, 0.5752, -0.6296, -0.2976, -0.3757]])\n', (30548, 31285), True, 'import numpy as np\n'), ((31520, 31687), 'numpy.array', 'np.array', (['[[0.3358, 0.279], [-0.5355, 0.0989], [-0.1402, 0.512], [0.1335, 0.1653], [\n 0.3533, -0.3531], [0.4166, -0.442], [-0.5454, -0.172], [0.0041, -0.0799]]'], {}), '([[0.3358, 0.279], [-0.5355, 0.0989], [-0.1402, 0.512], [0.1335, \n 0.1653], [0.3533, -0.3531], [0.4166, -0.442], [-0.5454, -0.172], [\n 0.0041, -0.0799]])\n', (31528, 31687), True, 'import numpy as np\n'), ((31925, 32000), 'numpy.array', 'np.array', (['[0.5518, 0.1083, 0.4829, 0.0607, -0.177, -0.6944, 0.3059, 0.5354]'], {}), '([0.5518, 0.1083, 0.4829, 0.0607, -0.177, -0.6944, 0.3059, 0.5354])\n', (31933, 32000), True, 'import numpy as np\n'), ((32068, 32146), 'numpy.array', 'np.array', (['[0.5025, -0.1261, -0.5405, 0.322, -0.3441, 0.6488, -0.0284, -0.2334]'], {}), '([0.5025, -0.1261, -0.5405, 0.322, -0.3441, 0.6488, -0.0284, -0.2334])\n', (32076, 32146), True, 'import numpy as np\n'), ((32223, 32971), 'numpy.array', 'np.array', (['[[-0.7048, -0.1768, 0.2288, -0.076, -0.1319, 0.082, -0.4132, 0.3644, 0.3919,\n 0.2449], [0.0551, -0.053, -0.5883, 0.0799, 
-0.5025, 0.15, -0.4067, -\n 0.3764, -0.3018, 0.2467], [-0.2279, 0.3144, 0.5705, 0.4617, 0.1729, \n 0.6539, -0.2086, 0.5355, 0.4439, 0.0122], [0.6967, -0.5245, 0.3527, \n 0.3386, 0.0429, -0.3803, -0.4328, -0.4767, 0.4481, -0.2405], [0.6744, -\n 0.2776, 0.0798, 0.1543, 0.6421, 0.6102, 0.3591, -0.4431, -0.6327, -\n 0.0075], [-0.452, 0.4201, -0.2374, -0.1556, -0.4175, -0.6834, 0.3096, -\n 0.1581, 0.0127, 0.6872], [0.1788, -0.5442, -0.3675, -0.2887, -0.3004, \n 0.5813, 0.1618, 0.6875, -0.4678, 0.0071], [-0.6453, -0.2528, 0.5675, -\n 0.5154, -0.4129, -0.0214, 0.5539, 0.0343, 0.1712, 0.5644]]'], {}), '([[-0.7048, -0.1768, 0.2288, -0.076, -0.1319, 0.082, -0.4132, \n 0.3644, 0.3919, 0.2449], [0.0551, -0.053, -0.5883, 0.0799, -0.5025, \n 0.15, -0.4067, -0.3764, -0.3018, 0.2467], [-0.2279, 0.3144, 0.5705, \n 0.4617, 0.1729, 0.6539, -0.2086, 0.5355, 0.4439, 0.0122], [0.6967, -\n 0.5245, 0.3527, 0.3386, 0.0429, -0.3803, -0.4328, -0.4767, 0.4481, -\n 0.2405], [0.6744, -0.2776, 0.0798, 0.1543, 0.6421, 0.6102, 0.3591, -\n 0.4431, -0.6327, -0.0075], [-0.452, 0.4201, -0.2374, -0.1556, -0.4175, \n -0.6834, 0.3096, -0.1581, 0.0127, 0.6872], [0.1788, -0.5442, -0.3675, -\n 0.2887, -0.3004, 0.5813, 0.1618, 0.6875, -0.4678, 0.0071], [-0.6453, -\n 0.2528, 0.5675, -0.5154, -0.4129, -0.0214, 0.5539, 0.0343, 0.1712, 0.5644]]\n )\n', (32231, 32971), True, 'import numpy as np\n'), ((33107, 33276), 'numpy.array', 'np.array', (['[[-0.6657, 0.633], [-0.229, 0.6556], [0.4808, -0.2712], [0.0407, -0.2587],\n [0.3837, 0.0382], [0.2268, 0.1217], [-0.6404, -0.3336], [0.5461, -0.0764]]'], {}), '([[-0.6657, 0.633], [-0.229, 0.6556], [0.4808, -0.2712], [0.0407, -\n 0.2587], [0.3837, 0.0382], [0.2268, 0.1217], [-0.6404, -0.3336], [\n 0.5461, -0.0764]])\n', (33115, 33276), True, 'import numpy as np\n'), ((33576, 33653), 'numpy.array', 'np.array', (['[0.0314, 0.1009, 0.3664, -0.6732, -0.6944, 0.5098, -0.1251, 0.2644]'], {}), '([0.0314, 0.1009, 0.3664, -0.6732, -0.6944, 0.5098, -0.1251, 0.2644])\n', (33584, 33653), True, 'import numpy as np\n'), ((33728, 33807), 'numpy.array', 'np.array', (['[-0.1961, -0.3836, 0.1191, -0.7022, -0.0961, 0.5493, -0.6979, 0.0017]'], {}), '([-0.1961, -0.3836, 0.1191, -0.7022, -0.0961, 0.5493, -0.6979, 0.0017])\n', (33736, 33807), True, 'import numpy as np\n'), ((33875, 34231), 'numpy.array', 'np.array', (['[[0.12746, -0.33346, 0.15589, -0.47986], [0.65835, 0.38135, -0.38409, -\n 0.36499], [-0.00060374, -0.12227, -0.15955, 0.42772], [-0.18281, -\n 0.50484, 0.70204, 0.65872], [0.37765, -0.43494, 0.31503, -0.042504], [\n 0.63506, -0.043049, -0.57413, -0.25134], [0.087181, -0.55216, 0.55436, \n -0.39599], [0.44611, -0.4269, 0.66142, 0.63882]]'], {}), '([[0.12746, -0.33346, 0.15589, -0.47986], [0.65835, 0.38135, -\n 0.38409, -0.36499], [-0.00060374, -0.12227, -0.15955, 0.42772], [-\n 0.18281, -0.50484, 0.70204, 0.65872], [0.37765, -0.43494, 0.31503, -\n 0.042504], [0.63506, -0.043049, -0.57413, -0.25134], [0.087181, -\n 0.55216, 0.55436, -0.39599], [0.44611, -0.4269, 0.66142, 0.63882]])\n', (33883, 34231), True, 'import numpy as np\n'), ((34546, 34718), 'numpy.array', 'np.array', (['[[-0.0049, -0.3267], [0.0863, -0.6277], [0.4815, -0.2236], [0.5996, -0.3441\n ], [0.3959, -0.0249], [0.3986, -0.0922], [-0.5321, 0.0877], [0.2811, -\n 0.0483]]'], {}), '([[-0.0049, -0.3267], [0.0863, -0.6277], [0.4815, -0.2236], [0.5996,\n -0.3441], [0.3959, -0.0249], [0.3986, -0.0922], [-0.5321, 0.0877], [\n 0.2811, -0.0483]])\n', (34554, 34718), True, 'import numpy as np\n'), ((34953, 35027), 'numpy.array', 'np.array', 
(['[0.0032, -0.0893, 0.5706, 0.3712, 0.059, 0.0044, 0.2417, 0.1291]'], {}), '([0.0032, -0.0893, 0.5706, 0.3712, 0.059, 0.0044, 0.2417, 0.1291])\n', (34961, 35027), True, 'import numpy as np\n'), ((35095, 35171), 'numpy.array', 'np.array', (['[-0.0704, 0.3908, -0.1121, 0.697, -0.6216, 0.634, -0.2945, 0.5224]'], {}), '([-0.0704, 0.3908, -0.1121, 0.697, -0.6216, 0.634, -0.2945, 0.5224])\n', (35103, 35171), True, 'import numpy as np\n'), ((35249, 35556), 'numpy.array', 'np.array', (['[[-0.2693, 0.3487, 0.0692, 0.0047], [0.6187, 0.5649, 0.068, 0.511], [-\n 0.5262, -0.3307, -0.3892, 0.5382], [-0.2925, 0.5185, -0.1385, 0.3431],\n [-0.3252, 0.3809, -0.468, 0.3379], [0.4763, -0.5465, 0.0033, -0.5144],\n [0.3826, -0.3879, -0.2439, 0.2571], [-0.0422, -0.0359, -0.4197, -0.2209]]'], {}), '([[-0.2693, 0.3487, 0.0692, 0.0047], [0.6187, 0.5649, 0.068, 0.511],\n [-0.5262, -0.3307, -0.3892, 0.5382], [-0.2925, 0.5185, -0.1385, 0.3431],\n [-0.3252, 0.3809, -0.468, 0.3379], [0.4763, -0.5465, 0.0033, -0.5144],\n [0.3826, -0.3879, -0.2439, 0.2571], [-0.0422, -0.0359, -0.4197, -0.2209]])\n', (35257, 35556), True, 'import numpy as np\n'), ((35855, 36026), 'numpy.array', 'np.array', (['[[-0.4691, 0.5944], [-0.6885, 0.1708], [0.6391, -0.369], [-0.5919, 0.1805],\n [-0.6853, -0.6215], [-0.4635, -0.6714], [-0.205, 0.0513], [0.3411, -0.2833]\n ]'], {}), '([[-0.4691, 0.5944], [-0.6885, 0.1708], [0.6391, -0.369], [-0.5919,\n 0.1805], [-0.6853, -0.6215], [-0.4635, -0.6714], [-0.205, 0.0513], [\n 0.3411, -0.2833]])\n', (35863, 36026), True, 'import numpy as np\n'), ((36327, 36405), 'numpy.array', 'np.array', (['[0.5764, -0.701, -0.0831, -0.3779, -0.2743, 0.048, -0.2707, -0.5583]'], {}), '([0.5764, -0.701, -0.0831, -0.3779, -0.2743, 0.048, -0.2707, -0.5583])\n', (36335, 36405), True, 'import numpy as np\n'), ((36482, 36561), 'numpy.array', 'np.array', (['[0.3379, -0.2671, -0.2789, -0.6611, -0.5542, -0.0188, 0.1831, 0.3612]'], {}), '([0.3379, -0.2671, -0.2789, -0.6611, -0.5542, -0.0188, 0.1831, 0.3612])\n', (36490, 36561), True, 'import numpy as np\n'), ((45648, 46398), 'numpy.array', 'np.array', (['[[0.4473, -0.5509, -0.1585, -0.6215, 0.6228, 0.3462, 0.3015, -0.3714, \n 0.3119, -0.1151], [-0.6923, 0.1373, 0.2214, 0.228, 0.696, -0.6368, \n 0.5725, -0.1359, 0.0742, -0.6777], [-0.4432, 0.6162, -0.1066, -0.6138, \n -0.2529, -0.5638, -0.0603, 0.3039, 0.1068, -0.53], [0.4337, -0.1215, -\n 0.5088, -0.0045, 0.2828, 0.1411, 0.0741, 0.6936, -0.4603, 0.6986], [-\n 0.2079, -0.5518, 0.5375, -0.2168, 0.3662, 0.0948, -0.0564, -0.1808, -\n 0.6672, -0.241], [0.5142, 0.079, -0.1123, -0.2351, 0.3982, -0.6351, \n 0.5906, 0.3917, -0.085, -0.5397], [-0.4795, -0.6576, 0.5693, 0.0047, -\n 0.6626, 0.1013, -0.4015, -0.404, -0.2817, 0.443], [0.0251, -0.3035, -\n 0.6026, 0.2693, -0.2749, 0.1501, -0.5778, 0.557, -0.7065, -0.6196]]'], {}), '([[0.4473, -0.5509, -0.1585, -0.6215, 0.6228, 0.3462, 0.3015, -\n 0.3714, 0.3119, -0.1151], [-0.6923, 0.1373, 0.2214, 0.228, 0.696, -\n 0.6368, 0.5725, -0.1359, 0.0742, -0.6777], [-0.4432, 0.6162, -0.1066, -\n 0.6138, -0.2529, -0.5638, -0.0603, 0.3039, 0.1068, -0.53], [0.4337, -\n 0.1215, -0.5088, -0.0045, 0.2828, 0.1411, 0.0741, 0.6936, -0.4603, \n 0.6986], [-0.2079, -0.5518, 0.5375, -0.2168, 0.3662, 0.0948, -0.0564, -\n 0.1808, -0.6672, -0.241], [0.5142, 0.079, -0.1123, -0.2351, 0.3982, -\n 0.6351, 0.5906, 0.3917, -0.085, -0.5397], [-0.4795, -0.6576, 0.5693, \n 0.0047, -0.6626, 0.1013, -0.4015, -0.404, -0.2817, 0.443], [0.0251, -\n 0.3035, -0.6026, 0.2693, -0.2749, 0.1501, -0.5778, 0.557, -0.7065, -\n 0.6196]])\n', 
(45656, 46398), True, 'import numpy as np\n'), ((46591, 46762), 'numpy.array', 'np.array', (['[[-0.4344, -0.2529], [0.0377, 0.7046], [-0.0579, -0.524], [-0.4801, -0.1149\n ], [-0.401, -0.5614], [0.4721, 0.4366], [-0.4282, 0.0816], [0.1574, -\n 0.3359]]'], {}), '([[-0.4344, -0.2529], [0.0377, 0.7046], [-0.0579, -0.524], [-0.4801,\n -0.1149], [-0.401, -0.5614], [0.4721, 0.4366], [-0.4282, 0.0816], [\n 0.1574, -0.3359]])\n', (46599, 46762), True, 'import numpy as np\n'), ((46975, 47054), 'numpy.array', 'np.array', (['[0.2431, 0.5967, -0.2417, -0.4169, -0.5326, 0.5685, -0.2971, -0.4326]'], {}), '([0.2431, 0.5967, -0.2417, -0.4169, -0.5326, 0.5685, -0.2971, -0.4326])\n', (46983, 47054), True, 'import numpy as np\n'), ((47118, 47196), 'numpy.array', 'np.array', (['[-0.1751, -0.227, -0.398, -0.4983, -0.3527, -0.2774, 0.6371, -0.333]'], {}), '([-0.1751, -0.227, -0.398, -0.4983, -0.3527, -0.2774, 0.6371, -0.333])\n', (47126, 47196), True, 'import numpy as np\n'), ((2907, 2970), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (2914, 2970), True, 'import numpy as np\n'), ((3131, 3194), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (3138, 3194), True, 'import numpy as np\n'), ((9193, 9256), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (9200, 9256), True, 'import numpy as np\n'), ((9417, 9480), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (9424, 9480), True, 'import numpy as np\n'), ((17130, 17193), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (17137, 17193), True, 'import numpy as np\n'), ((17354, 17417), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (17361, 17417), True, 'import numpy as np\n'), ((30134, 30197), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (30141, 30197), True, 'import numpy as np\n'), ((30358, 30421), 'numpy.ones', 'np.ones', (['(num_layers * num_directions, batch_size, hidden_size)'], {}), '((num_layers * num_directions, batch_size, hidden_size))\n', (30365, 30421), True, 'import numpy as np\n'), ((45182, 45246), 'numpy.array', 'np.array', (['[[[-0.47240502, 1.6824378], [-0.00978304, 0.8179632]]]'], {}), '([[[-0.47240502, 1.6824378], [-0.00978304, 0.8179632]]])\n', (45190, 45246), True, 'import numpy as np\n'), ((45437, 45502), 'numpy.array', 'np.array', (['[[[-0.85975164, -0.3198615], [-0.9821871, 0.26311848]]]'], {}), '([[[-0.85975164, -0.3198615], [-0.9821871, 0.26311848]]])\n', (45445, 45502), True, 'import numpy as np\n')]
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from math import ceil
from constants import ENV_NAMES
import seaborn # sets some style parameters automatically
COLORS = [(57, 106, 177), (218, 124, 48)]
def switch_to_outer_plot(fig):
ax0 = fig.add_subplot(111, frame_on=False)
ax0.set_xticks([])
ax0.set_yticks([])
return ax0
def ema(data_in, smoothing=0):
data_out = np.zeros_like(data_in)
curr = np.nan
for i in range(len(data_in)):
x = data_in[i]
if np.isnan(curr):
curr = x
else:
curr = (1 - smoothing) * x + smoothing * curr
data_out[i] = curr
return data_out
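# Worked example (illustrative, not part of the original source): with smoothing=0.5
# the input [0., 1., 1.] smooths to [0., 0.5, 0.75]: once the first non-NaN value
# seeds `curr`, each output is (1 - smoothing) * x + smoothing * curr.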
def plot_data_mean_std(ax, data_y, color_idx=0, data_x=None, x_scale=1, smoothing=0, first_valid=0, label=None):
color = COLORS[color_idx]
hexcolor = '#%02x%02x%02x' % color
data_y = data_y[:,first_valid:]
nx, num_datapoint = np.shape(data_y)
if smoothing > 0:
for i in range(nx):
data_y[i,...] = ema(data_y[i,...], smoothing)
if data_x is None:
data_x = (np.array(range(num_datapoint)) + first_valid) * x_scale
data_mean = np.mean(data_y, axis=0)
data_std = np.std(data_y, axis=0, ddof=1)
ax.plot(data_x, data_mean, color=hexcolor, label=label, linestyle='solid', alpha=1, rasterized=True)
ax.fill_between(data_x, data_mean - data_std, data_mean + data_std, color=hexcolor, alpha=.25, linewidth=0.0, rasterized=True)
def read_csv(filename, key_name):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
key_index = -1
values = []
for line_num, row in enumerate(csv_reader):
row = [x.lower() for x in row]
if line_num == 0:
idxs = [i for i, val in enumerate(row) if val == key_name]
key_index = idxs[0]
else:
values.append(row[key_index])
return np.array(values, dtype=np.float32)
def plot_values(ax, all_values, title=None, max_x=0, label=None, **kwargs):
if max_x > 0:
all_values = all_values[...,:max_x]
if ax is not None:
plot_data_mean_std(ax, all_values, label=label, **kwargs)
ax.set_title(title)
return all_values
def plot_experiment(run_directory_prefix, titles=None, suffixes=[''], normalization_ranges=None, key_name='eprewmean', **kwargs):
run_folders = [f'{run_directory_prefix}{x}' for x in range(3)]
num_envs = len(ENV_NAMES)
will_normalize_and_reduce = normalization_ranges is not None
if will_normalize_and_reduce:
num_visible_plots = 1
f, axarr = plt.subplots()
else:
num_visible_plots = num_envs
dimx = dimy = ceil(np.sqrt(num_visible_plots))
f, axarr = plt.subplots(dimx, dimy, sharex=True)
for suffix_idx, suffix in enumerate(suffixes):
all_values = []
game_weights = [1] * num_envs
for env_idx in range(num_envs):
env_name = ENV_NAMES[env_idx]
label = suffix if env_idx == 0 else None # only label the first graph to avoid legend duplicates
print(f'loading results from {env_name}...')
if num_visible_plots == 1:
ax = axarr
else:
dimy = len(axarr[0])
ax = axarr[env_idx // dimy][env_idx % dimy]
csv_files = [f"results/{resid}/progress-{env_name}{'-' if len(suffix) > 0 else ''}{suffix}.csv" for resid in run_folders]
curr_ax = None if will_normalize_and_reduce else ax
raw_data = np.array([read_csv(file, key_name) for file in csv_files])
values = plot_values(curr_ax, raw_data, title=env_name, color_idx=suffix_idx, label=label, **kwargs)
if will_normalize_and_reduce:
game_range = normalization_ranges[env_name]
game_min = game_range[0]
game_max = game_range[1]
game_delta = game_max - game_min
sub_values = game_weights[env_idx] * (np.array(values) - game_min) / (game_delta)
all_values.append(sub_values)
if will_normalize_and_reduce:
normalized_data = np.sum(all_values, axis=0)
normalized_data = normalized_data / np.sum(game_weights)
title = 'Mean Normalized Score'
plot_values(ax, normalized_data, title=None, color_idx=suffix_idx, label=suffix, **kwargs)
if len(suffixes) > 1:
if num_visible_plots == 1:
ax.legend(loc='lower right')
else:
f.legend(loc='lower right', bbox_to_anchor=(.5, 0, .5, 1))
return f, axarr
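# Hypothetical usage sketch (the run prefix, suffixes and options below are invented
# for illustration; the real driver script is not part of this module):
#   fig, axes = plot_experiment('myrun', suffixes=['run1', 'run2'],
#                                smoothing=0.9, key_name='eprewmean')
#   fig.savefig('procgen_results.png')
# The function expects CSV logs at results/<run_directory_prefix><i>/progress-<env>[-<suffix>].csv
# for i in 0..2, containing a column whose header matches key_name.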
|
[
"numpy.mean",
"numpy.sqrt",
"numpy.array",
"numpy.sum",
"numpy.isnan",
"csv.reader",
"numpy.std",
"numpy.shape",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((424, 446), 'numpy.zeros_like', 'np.zeros_like', (['data_in'], {}), '(data_in)\n', (437, 446), True, 'import numpy as np\n'), ((936, 952), 'numpy.shape', 'np.shape', (['data_y'], {}), '(data_y)\n', (944, 952), True, 'import numpy as np\n'), ((1177, 1200), 'numpy.mean', 'np.mean', (['data_y'], {'axis': '(0)'}), '(data_y, axis=0)\n', (1184, 1200), True, 'import numpy as np\n'), ((1216, 1246), 'numpy.std', 'np.std', (['data_y'], {'axis': '(0)', 'ddof': '(1)'}), '(data_y, axis=0, ddof=1)\n', (1222, 1246), True, 'import numpy as np\n'), ((1970, 2004), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float32'}), '(values, dtype=np.float32)\n', (1978, 2004), True, 'import numpy as np\n'), ((534, 548), 'numpy.isnan', 'np.isnan', (['curr'], {}), '(curr)\n', (542, 548), True, 'import numpy as np\n'), ((1577, 1612), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1587, 1612), False, 'import csv\n'), ((2663, 2677), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2675, 2677), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2836), 'matplotlib.pyplot.subplots', 'plt.subplots', (['dimx', 'dimy'], {'sharex': '(True)'}), '(dimx, dimy, sharex=True)\n', (2811, 2836), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2778), 'numpy.sqrt', 'np.sqrt', (['num_visible_plots'], {}), '(num_visible_plots)\n', (2759, 2778), True, 'import numpy as np\n'), ((4224, 4250), 'numpy.sum', 'np.sum', (['all_values'], {'axis': '(0)'}), '(all_values, axis=0)\n', (4230, 4250), True, 'import numpy as np\n'), ((4299, 4319), 'numpy.sum', 'np.sum', (['game_weights'], {}), '(game_weights)\n', (4305, 4319), True, 'import numpy as np\n'), ((4065, 4081), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (4073, 4081), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
import numpy
if __name__ == "__main__":
cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
    # 960x1280 canvas holding a 2x2 mosaic of colour-transformed camera views
    paste = cv.CreateMat(960, 1280, cv.CV_8UC3)
    # numpy views onto the four 640x480 quadrants of the canvas
    topleft = numpy.asarray(cv.GetSubRect(paste, (0, 0, 640, 480)))
    topright = numpy.asarray(cv.GetSubRect(paste, (640, 0, 640, 480)))
    bottomleft = numpy.asarray(cv.GetSubRect(paste, (0, 480, 640, 480)))
    bottomright = numpy.asarray(cv.GetSubRect(paste, (640, 480, 640, 480)))
while True:
img = cv.GetMat(cv.QueryFrame(capture))
n = (numpy.asarray(img)).astype(numpy.uint8)
red = n[:,:,0]
grn = n[:,:,1]
blu = n[:,:,2]
topleft[:,:,0] = 255 - grn
topleft[:,:,1] = red
topleft[:,:,2] = blu
topright[:,:,0] = blu
topright[:,:,1] = 255 - red
topright[:,:,2] = grn
bottomright[:,:,0] = red
bottomright[:,:,1] = grn
bottomright[:,:,2] = 255 - blu
fgrn = grn.astype(numpy.float32)
fred = red.astype(numpy.float32)
bottomleft[:,:,0] = blu
bottomleft[:,:,1] = (abs(fgrn - fred)).astype(numpy.uint8)
bottomleft[:,:,2] = red
cv.ShowImage("camera", paste)
        if cv.WaitKey(6) == 27:  # quit on ESC
break
cv.DestroyAllWindows()
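    # Rough modern-API sketch (assumption: OpenCV >= 3, where the legacy cv2.cv module
    # no longer exists); the same loop would read frames with cv2.VideoCapture instead:
    #   cap = cv2.VideoCapture(0)
    #   ok, frame = cap.read()            # frame is a BGR numpy array, typically (480, 640, 3)
    #   cv2.imshow("camera", frame)
    #   if cv2.waitKey(6) & 0xFF == 27:   # ESC quits
    #       cap.release()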
|
[
"cv2.cv.CaptureFromCAM",
"cv2.cv.GetSubRect",
"cv2.cv.NamedWindow",
"numpy.asarray",
"cv2.cv.CreateMat",
"cv2.cv.DestroyAllWindows",
"cv2.cv.ShowImage",
"cv2.cv.WaitKey",
"cv2.cv.QueryFrame"
] |
[((109, 136), 'cv2.cv.NamedWindow', 'cv.NamedWindow', (['"""camera"""', '(1)'], {}), "('camera', 1)\n", (123, 136), True, 'import cv2.cv as cv\n'), ((152, 172), 'cv2.cv.CaptureFromCAM', 'cv.CaptureFromCAM', (['(0)'], {}), '(0)\n', (169, 172), True, 'import cv2.cv as cv\n'), ((186, 221), 'cv2.cv.CreateMat', 'cv.CreateMat', (['(960)', '(1280)', 'cv.CV_8UC3'], {}), '(960, 1280, cv.CV_8UC3)\n', (198, 221), True, 'import cv2.cv as cv\n'), ((1303, 1325), 'cv2.cv.DestroyAllWindows', 'cv.DestroyAllWindows', ([], {}), '()\n', (1323, 1325), True, 'import cv2.cv as cv\n'), ((250, 288), 'cv2.cv.GetSubRect', 'cv.GetSubRect', (['paste', '(0, 0, 640, 480)'], {}), '(paste, (0, 0, 640, 480))\n', (263, 288), True, 'import cv2.cv as cv\n'), ((319, 359), 'cv2.cv.GetSubRect', 'cv.GetSubRect', (['paste', '(640, 0, 640, 480)'], {}), '(paste, (640, 0, 640, 480))\n', (332, 359), True, 'import cv2.cv as cv\n'), ((392, 432), 'cv2.cv.GetSubRect', 'cv.GetSubRect', (['paste', '(0, 480, 640, 480)'], {}), '(paste, (0, 480, 640, 480))\n', (405, 432), True, 'import cv2.cv as cv\n'), ((466, 508), 'cv2.cv.GetSubRect', 'cv.GetSubRect', (['paste', '(640, 480, 640, 480)'], {}), '(paste, (640, 480, 640, 480))\n', (479, 508), True, 'import cv2.cv as cv\n'), ((1219, 1248), 'cv2.cv.ShowImage', 'cv.ShowImage', (['"""camera"""', 'paste'], {}), "('camera', paste)\n", (1231, 1248), True, 'import cv2.cv as cv\n'), ((551, 573), 'cv2.cv.QueryFrame', 'cv.QueryFrame', (['capture'], {}), '(capture)\n', (564, 573), True, 'import cv2.cv as cv\n'), ((1260, 1273), 'cv2.cv.WaitKey', 'cv.WaitKey', (['(6)'], {}), '(6)\n', (1270, 1273), True, 'import cv2.cv as cv\n'), ((589, 607), 'numpy.asarray', 'numpy.asarray', (['img'], {}), '(img)\n', (602, 607), False, 'import numpy\n')]
|
import time, math, copy
import numpy as np
import pandas as pd
import machineLearning
import pickle
INFINITY = float("inf")
class GameAI(object):
def __init__(self, game):
super().__init__()
self.game = game
self.move = (-1,-1)
self.timeLimit = 3 # 3 seconds is the time limit for search
self.debug = False # True for debugging
self.fileObject = open("decisionTree", 'rb')
self.tree = pickle.load(self.fileObject)
# AI perform move (there must be an available move due to the pre-move check)
def performMove(self, index):
# Iterative Deepening MiniMax Search with Alpha-Beta Pruning
tmpBoard = [row[:] for row in self.game.board] # we don't want to make changes to the game board
if index == 0:
self.move = self.miniMax(tmpBoard)
print("minimax")
print(self.move)
else:
self.move = self.negaScout(tmpBoard)
print("negascout")
#testing decision tree
#self.move = self.oriminiMax(tmpBoard)
#print("oriMinimax")
print(self.move)
if self.move is None:
#print("here")
return
else:
# perform move (there must be an available move)
self.game.performMove(self.move[0], self.move[1])
def getSortedNode(self, board, player):
sortedNodes = []
successorBoards = self.findSuccessorBoards(board, player)
for successorBoard in successorBoards:
sortedNodes.append((successorBoard, self.utilityOf(successorBoard, player)))
sortedNodes = sorted(sortedNodes, key=lambda node: node[1], reverse=True)
sortedNodes = [node[0] for node in sortedNodes]
return sortedNodes
""" Iterative Deepening MiniMax Search Algorithm within Time Limit
From depth = 3, if still within the time limit, continue search to get more insight.
Return the optimal move within limited resources.
"""
def miniMax(self, board):
print("here")
startTime = time.time()
timeElapsed = 0
depth = 3
optimalMove = (-1, -1)
optimalBoard = board
stopDigging = False
while not stopDigging and timeElapsed < self.timeLimit:
stopDigging, optimalBoard = self.IDMiniMax(board, 0, depth, 2, -INFINITY, INFINITY)
endTime = time.time()
timeElapsed += endTime - startTime
startTime = endTime
depth += 1
print("[Console MSG] Time used by AI: " + str(timeElapsed))
if optimalBoard == board:
return None
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] != optimalBoard[row][col]:
optimalMove = (row, col)
print(np.asarray(optimalBoard).reshape(8, 8))
return optimalMove
""" Iterative Deepening MiniMax Search with Alpha-Beta Pruning
board - state at current node
player - player at current node (AI - white - maximizer; Player - black - minimizer)
currentLevel - level at current node
maxLevel - used to judge whether go deeper or not
Return the optimal board (state) found in the current level for the current node.
"""
def IDMiniMax(self, board, currentLevel, maxLevel, player, alpha, beta):
if self.debug:
print("Level: " + str(currentLevel) + " maxLevel: " + str(maxLevel))
stopDigging = False
if (not self.game.moveCanBeMade(board, player) or currentLevel == maxLevel):
return (stopDigging, board)
successorBoards = self.findSuccessorBoards(board, player)
if len(successorBoards) == 0:
stopDigging = True
return stopDigging, board
bestBoard = None
if player == 2:
maxValue = -INFINITY
for successor in successorBoards:
stopDigging, lookaheadBoard = self.IDMiniMax(successor, currentLevel+1, maxLevel, 1, alpha, beta)
utility = self.utilityOf(lookaheadBoard, player)
if utility > maxValue:
maxValue = utility
bestBoard = successor
alpha = max(alpha, utility)
if utility >= beta:
#print("alphaBeta is pruning", successor)
return stopDigging, successor # prune
else:
minValue = INFINITY
for successor in successorBoards:
stopDigging, lookaheadBoard = self.IDMiniMax(successor, currentLevel+1, maxLevel, 2, alpha, beta)
utility = self.utilityOf(lookaheadBoard, player)
if utility < minValue:
minValue = utility
bestBoard = successor
beta = min(beta, utility)
if utility <= alpha:
#print("alphaBeta is pruning", successor)
return stopDigging, successor # prune
return stopDigging, bestBoard
def negaScout(self, board):
startTime = time.time()
timeElapsed = 0
depth = 3
optimalMove = (-1, -1)
optimalBoard = board
stopDigging = False
while not stopDigging and timeElapsed < self.timeLimit:
# (stopDigging, optimalBoard, alpha) = self.negaScoutHelper(board, 2, depth, -INFINITY, INFINITY, 1)
maxScore = -INFINITY
for successor in self.getSortedNode(board, 1):
point = self.negaScoutHelper2(successor, 1, depth, -INFINITY, INFINITY, 1)
if point > maxScore:
maxScore = point
optimalBoard = successor
endTime = time.time()
timeElapsed += endTime - startTime
startTime = endTime
depth += 1
print("[Console MSG] Time used by AI: " + str(timeElapsed))
if optimalBoard == board:
print("here")
return None
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] != optimalBoard[row][col]:
optimalMove = (row, col)
print(np.asarray(optimalBoard).reshape(8, 8))
print(optimalMove)
return optimalMove
def negaScoutHelper2(self, board, player, depth, alpha, beta, color):
if not self.game.moveCanBeMade(board, player) or depth == 0:
return self.utilityOf(board, player) * color
successorBoards = self.getSortedNode(board, player)
first = True
for successor in successorBoards:
if not first:
score = -self.negaScoutHelper2(successor, player, depth - 1, -alpha - 1, -alpha, -color)
if alpha < score < beta:
score = -self.negaScoutHelper2(successor, player, depth - 1, -beta, -score, -color)
else:
first = False
score = -self.negaScoutHelper2(successor, player, depth - 1, -beta, -alpha, -color)
alpha = max(alpha, score)
if alpha >= beta:
#print("negascout is pruning", successor)
break
return alpha
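    # NegaScout note (added for clarity): the first child is searched with the full
    # (-beta, -alpha) window; later children get a null window (-alpha - 1, -alpha) and
    # are only re-searched with (-beta, -score) when the probe lands strictly between
    # alpha and beta, which is what the branches above implement.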
# return a list of successor boards
def findSuccessorBoards(self, board, player):
successorBoards = []
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] == 0:
numAvailableMoves = self.game.placePiece(board, row, col, player, PLAYMODE=False)
if numAvailableMoves > 0:
successorBoard = copy.deepcopy([row[:] for row in board])
successorBoard[row][col] = player
successorBoards.append(successorBoard)
return successorBoards
# evaluation function (heuristics for non-final node) in this state (board)
def utilityOf(self, board, player):
board_mobility = self.mobility(board, player)
board_frontier = self.frontierSquares(board, player)
board_corners = self.corners(board, player)
xsquares, csquares = self.x_c_squares(board, player)
board_parity = self.parity(board)
board_state = self.gameState(board)
df = pd.Series([board_mobility, board_frontier, board_corners, xsquares, csquares, board_parity, board_state],
index=["numMoves", "frontier", "corners", "Xsquares", "CSquares", "parity", "state"])
return machineLearning.predict(df, self.tree)
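    # Illustrative feature row handed to the decision tree (values are made up):
    #   numMoves=3, frontier=-2, corners=1, Xsquares=0, CSquares=-1, parity=1, state='middle'
    # i.e. a pandas Series indexed by the seven feature names built above.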
# mobility, number of moves a player can make minus number of moves its opponent can make
def mobility(self, board, player):
blackMovesFound = self.findSuccessorBoards(board, 1)
whiteMovesFound = self.findSuccessorBoards(board, 2)
if player == 1:
return len(blackMovesFound) - len(whiteMovesFound)
elif player == 2:
return len(whiteMovesFound) - len(blackMovesFound)
else:
return 0
# number of frontier that player occupies
def frontierSquares(self, board, player):
if player == 1:
opp = 2
if player == 2:
opp = 1
        # coordinates of the player's own pieces and of the opponent's pieces
        coords_x, coords_y = np.where(np.array(board) == player)
        opp_coords_x, opp_coords_y = np.where(np.array(board) == opp)
        frontier = []
        frontier_opp = []
        # empty squares adjacent to the player's pieces
        sur_player = []
        for i in range(len(coords_x)):
            for row in [-1, 0, 1]:
                for col in [-1, 0, 1]:
                    x = coords_x[i] + row
                    y = coords_y[i] + col
                    if 0 <= x < 8 and 0 <= y < 8:
                        sur_player.append([x, y])
        if len(sur_player) > 0:
            sur_player = np.unique(np.asarray(sur_player), axis=0)
            for i in range(len(sur_player)):
                if board[sur_player[i][0]][sur_player[i][1]] == 0:
                    frontier.append(sur_player[i])
        # empty squares adjacent to the opponent's pieces
        sur_opp = []
        for i in range(len(opp_coords_x)):
            for row in [-1, 0, 1]:
                for col in [-1, 0, 1]:
                    x = opp_coords_x[i] + row
                    y = opp_coords_y[i] + col
                    if 0 <= x < 8 and 0 <= y < 8:
                        sur_opp.append([x, y])
        if len(sur_opp) > 0:
            sur_opp = np.unique(np.asarray(sur_opp), axis=0)
            for i in range(len(sur_opp)):
                if board[sur_opp[i][0]][sur_opp[i][1]] == 0:
                    frontier_opp.append(sur_opp[i])
        return len(frontier) - len(frontier_opp)
#number of corners the player occupies
def corners(self, board, player):
corners = np.array([[0, 0], [0, 7], [7, 0], [7, 7]])
if player == 1:
opp = 2
if player == 2:
opp = 1
black_corner = 0
white_corner = 0
for corner in corners:
if board[corner[0]][corner[1]] == 0:
continue
elif board[corner[0]][corner[1]] == 1:
black_corner += 1
else:
white_corner += 1
if player == 1:
return black_corner - white_corner
elif player == 2:
return white_corner - black_corner
else:
return 0 # bit different from how the data is created, does not matter, because player 0 gets subsetted
#number of x_c squares player occupies
def x_c_squares(self, board, player):
corners = np.array([[0, 0], [0, 7], [7, 0], [7, 7]])
x_squares = np.array([[1, 1], [1, 6], [6, 1], [6, 6]])
c_squares1 = np.array([[0, 1], [1, 7], [6, 0], [7, 6]])
c_squares2 = np.array([[1, 0], [0, 6], [7, 1], [6, 7]])
if player == 1:
opp = 2
if player == 2:
opp = 1
player_x_squares = 0
opp_x_squares = 0
player_c_squares = 0
opp_c_squares = 0
for i in range(len(x_squares)):
if board[corners[i][0]][corners[i][1]] == 0:
if board[x_squares[i][0]][x_squares[i][1]] == player:
player_x_squares += 1
if board[c_squares1[i][0]][c_squares1[i][1]] == player:
player_c_squares += 1
if board[c_squares2[i][0]][c_squares2[i][1]] == player:
player_c_squares += 1
if board[x_squares[i][0]][x_squares[i][1]] == opp:
opp_x_squares += 1
if board[c_squares1[i][0]][c_squares1[i][1]] == opp:
opp_c_squares += 1
if board[c_squares2[i][0]][c_squares2[i][1]] == opp:
opp_c_squares += 1
else:
continue
XSquares = player_x_squares - opp_x_squares
CSquares = player_c_squares - opp_c_squares
return XSquares, CSquares
def parity(self, board):
progress = 0
for row in range(8):
for col in range(8):
if board[row][col] != 0:
progress += 1
if progress % 2 == 0:
parity = 0
else:
parity = 1
return parity
#which game state the player is on
def gameState(self, board):
progress = 0
for row in range(8):
for col in range(8):
if board[row][col] != 0:
progress += 1
if progress % 61 <= 20:
return "beginning"
elif progress % 61 <= 40:
return "middle"
else:
return "end"
    # The code below is the original implementation from ai.py, kept to test how
    # well the decision-tree evaluation performs against it.
def oriminiMax(self, board):
startTime = time.time()
timeElapsed = 0
depth = 2
optimalMove = (-1, -1)
optimalBoard = board
stopDigging = False
while not stopDigging and timeElapsed < self.timeLimit:
(stopDigging, optimalBoard) = self.IDMiniMax(board, 0, depth, 1, -INFINITY, INFINITY)
endTime = time.time()
timeElapsed += endTime - startTime
startTime = endTime
depth += 1
print("[Console MSG] Time used by AI: " + str(timeElapsed))
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] != optimalBoard[row][col]:
optimalMove = (row, col)
return optimalMove
""" Iterative Deepening MiniMax Search with Alpha-Beta Pruning
board - state at current node
player - player at current node (AI - white - maximizer; Player - black - minimizer)
currentLevel - level at current node
maxLevel - used to judge whether go deeper or not
Return the optimal board (state) found in the current level for the current node.
"""
def oriIDMiniMax(self, board, currentLevel, maxLevel, player, alpha, beta):
if self.debug:
print("Level: " + str(currentLevel) + " maxLevel: " + str(maxLevel))
stopDigging = False
if (not self.game.moveCanBeMade(board, player) or currentLevel == maxLevel):
return (stopDigging, board)
successorBoards = self.findSuccessorBoards(board, player)
if len(successorBoards) == 0:
stopDigging = True
return (stopDigging, board)
bestBoard = None
if player == 2:
maxValue = -INFINITY
for idx in range(0, len(successorBoards)):
stopDigging, lookaheadBoard = self.oriIDMiniMax(successorBoards[idx], currentLevel + 1, maxLevel, 1, alpha,
beta)
utility = self.oriUtilityOf(lookaheadBoard)
if utility > maxValue:
maxValue = utility
bestBoard = successorBoards[idx]
alpha = max(alpha, utility)
if utility >= beta:
return (stopDigging, successorBoards[idx]) # prune
else:
minValue = INFINITY
for idx in range(0, len(successorBoards)):
stopDigging, lookaheadBoard = self.oriIDMiniMax(successorBoards[idx], currentLevel + 1, maxLevel, 2, alpha,
beta)
utility = self.oriUtilityOf(lookaheadBoard)
if utility < minValue:
minValue = utility
bestBoard = successorBoards[idx]
beta = min(beta, utility)
if utility <= alpha:
return (stopDigging, successorBoards[idx]) # prune
return (stopDigging, bestBoard)
def oriUtilityOf(self, board):
return self.oriPieceDifference(board) + self.oriCornerCaptions(board) + self.oriCornerCloseness(board) + self.oriMobility(board) + self.oriStability(board)
# piece difference when evaluating
def oriPieceDifference(self, board):
allTiles = [item for sublist in board for item in sublist]
whiteTiles = sum(1 for tile in allTiles if tile == 2)
blackTiles = sum(1 for tile in allTiles if tile == 1)
if whiteTiles > blackTiles:
return (whiteTiles / (blackTiles + whiteTiles)) * 100
else:
return - (blackTiles / (blackTiles + whiteTiles)) * 100
# how many corners are owned by each player
def oriCornerCaptions(self, board):
numCorners = [0, 0]
        # empty corners count for neither player
        if board[0][0] == 1:
            numCorners[0] += 1
        elif board[0][0] == 2:
            numCorners[1] += 1
        if board[0][7] == 1:
            numCorners[0] += 1
        elif board[0][7] == 2:
            numCorners[1] += 1
        if board[7][0] == 1:
            numCorners[0] += 1
        elif board[7][0] == 2:
            numCorners[1] += 1
        if board[7][7] == 1:
            numCorners[0] += 1
        elif board[7][7] == 2:
            numCorners[1] += 1
return 50 * (numCorners[1] - numCorners[0])
# how many corner-closeness pieces are owned by each player
def oriCornerCloseness(self, board):
numCorners = [0, 0]
for row in range(1, 7):
if board[row][0] == 1:
numCorners[0] += 1
elif board[row][0] == 2:
numCorners[1] += 1
if board[row][7] == 1:
numCorners[0] += 1
elif board[row][7] == 2:
numCorners[1] += 1
        for col in range(1, 7):
            if board[0][col] == 1:
                numCorners[0] += 1
            elif board[0][col] == 2:
                numCorners[1] += 1
            if board[7][col] == 1:
                numCorners[0] += 1
            elif board[7][col] == 2:
                numCorners[1] += 1
return 4 * (numCorners[1] - numCorners[0])
# relative mobility of a player to another (how many steps can a player move)
def oriMobility(self, board):
blackMobility = self.game.moveCanBeMade(board, 1)
whiteMobility = self.game.moveCanBeMade(board, 2)
if blackMobility + whiteMobility == 0:
return 0
else:
return 100 * whiteMobility / (whiteMobility + blackMobility)
# for a piece: stable - 1; semi-stable: 0; instable - -1
def oriStability(self, board):
stability = [0, 0]
blackStability, whiteStability = stability[0], stability[1]
for row in range(1, 7):
for col in range(1, 7):
instabilityScale = 0
current = board[row][col]
if current == 0:
continue
if board[row+1][col+1] == 0:
instabilityScale += 1
if board[row-1][col-1] == 0:
instabilityScale += 1
if board[row+1][col] == 0:
instabilityScale += 1
if board[row-1][col] == 0:
instabilityScale += 1
if board[row+1][col-1] == 0:
instabilityScale += 1
if board[row-1][col+1] == 0:
instabilityScale += 1
if board[row][col+1] == 0:
instabilityScale += 1
if board[row][col-1] == 0:
instabilityScale += 1
if instabilityScale >= 7:
                    stability[current - 1] -= 1
                elif instabilityScale <= 3:
                    stability[current - 1] += 1
whiteStability, blackStability = stability[1], stability[0]
if whiteStability + blackStability == 0:
return 0
else:
return 100 * whiteStability / (whiteStability + blackStability)
|
[
"pandas.Series",
"pickle.load",
"machineLearning.predict",
"numpy.asarray",
"numpy.append",
"numpy.array",
"copy.deepcopy",
"time.time"
] |
[((405, 433), 'pickle.load', 'pickle.load', (['self.fileObject'], {}), '(self.fileObject)\n', (416, 433), False, 'import pickle\n'), ((1813, 1824), 'time.time', 'time.time', ([], {}), '()\n', (1822, 1824), False, 'import time, math, copy\n'), ((4286, 4297), 'time.time', 'time.time', ([], {}), '()\n', (4295, 4297), False, 'import time, math, copy\n'), ((6879, 7078), 'pandas.Series', 'pd.Series', (['[board_mobility, board_frontier, board_corners, xsquares, csquares,\n board_parity, board_state]'], {'index': "['numMoves', 'frontier', 'corners', 'Xsquares', 'CSquares', 'parity', 'state']"}), "([board_mobility, board_frontier, board_corners, xsquares,\n csquares, board_parity, board_state], index=['numMoves', 'frontier',\n 'corners', 'Xsquares', 'CSquares', 'parity', 'state'])\n", (6888, 7078), True, 'import pandas as pd\n'), ((7089, 7127), 'machineLearning.predict', 'machineLearning.predict', (['df', 'self.tree'], {}), '(df, self.tree)\n', (7112, 7127), False, 'import machineLearning\n'), ((8966, 9008), 'numpy.array', 'np.array', (['[[0, 0], [0, 7], [7, 0], [7, 7]]'], {}), '([[0, 0], [0, 7], [7, 0], [7, 7]])\n', (8974, 9008), True, 'import numpy as np\n'), ((9601, 9643), 'numpy.array', 'np.array', (['[[0, 0], [0, 7], [7, 0], [7, 7]]'], {}), '([[0, 0], [0, 7], [7, 0], [7, 7]])\n', (9609, 9643), True, 'import numpy as np\n'), ((9658, 9700), 'numpy.array', 'np.array', (['[[1, 1], [1, 6], [6, 1], [6, 6]]'], {}), '([[1, 1], [1, 6], [6, 1], [6, 6]])\n', (9666, 9700), True, 'import numpy as np\n'), ((9716, 9758), 'numpy.array', 'np.array', (['[[0, 1], [1, 7], [6, 0], [7, 6]]'], {}), '([[0, 1], [1, 7], [6, 0], [7, 6]])\n', (9724, 9758), True, 'import numpy as np\n'), ((9774, 9816), 'numpy.array', 'np.array', (['[[1, 0], [0, 6], [7, 1], [6, 7]]'], {}), '([[1, 0], [0, 6], [7, 1], [6, 7]])\n', (9782, 9816), True, 'import numpy as np\n'), ((11339, 11350), 'time.time', 'time.time', ([], {}), '()\n', (11348, 11350), False, 'import time, math, copy\n'), ((2083, 2094), 'time.time', 'time.time', ([], {}), '()\n', (2092, 2094), False, 'import time, math, copy\n'), ((4803, 4814), 'time.time', 'time.time', ([], {}), '()\n', (4812, 4814), False, 'import time, math, copy\n'), ((11611, 11622), 'time.time', 'time.time', ([], {}), '()\n', (11620, 11622), False, 'import time, math, copy\n'), ((7710, 7725), 'numpy.array', 'np.array', (['board'], {}), '(board)\n', (7718, 7725), True, 'import numpy as np\n'), ((7824, 7839), 'numpy.array', 'np.array', (['board'], {}), '(board)\n', (7832, 7839), True, 'import numpy as np\n'), ((2419, 2443), 'numpy.asarray', 'np.asarray', (['optimalBoard'], {}), '(optimalBoard)\n', (2429, 2443), True, 'import numpy as np\n'), ((5156, 5180), 'numpy.asarray', 'np.asarray', (['optimalBoard'], {}), '(optimalBoard)\n', (5166, 5180), True, 'import numpy as np\n'), ((8177, 8199), 'numpy.asarray', 'np.asarray', (['sur_player'], {}), '(sur_player)\n', (8187, 8199), True, 'import numpy as np\n'), ((8678, 8697), 'numpy.asarray', 'np.asarray', (['sur_opp'], {}), '(sur_opp)\n', (8688, 8697), True, 'import numpy as np\n'), ((6328, 6368), 'copy.deepcopy', 'copy.deepcopy', (['[row[:] for row in board]'], {}), '([row[:] for row in board])\n', (6341, 6368), False, 'import time, math, copy\n'), ((8308, 8342), 'numpy.append', 'np.append', (['frontier', 'sur_player[i]'], {}), '(frontier, sur_player[i])\n', (8317, 8342), True, 'import numpy as np\n'), ((8797, 8832), 'numpy.append', 'np.append', (['frontier_opp', 'sur_opp[i]'], {}), '(frontier_opp, sur_opp[i])\n', (8806, 8832), True, 'import numpy as np\n'), 
((8105, 8121), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (8113, 8121), True, 'import numpy as np\n'), ((8612, 8628), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (8620, 8628), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
class NeuralBandit:
def __init__(self, nPicos, ABSval, CREval, initExploration, epsilon_0, batch_size=1):
nActivePicosVal = np.arange(0, (nPicos+1))
self.controlSpace = np.array(np.meshgrid(nActivePicosVal, ABSval, CREval)).T.reshape(-1, 3)
self.nControls = len(self.controlSpace[:, 0])
# self.nControls = 10
# Network Parameters
n_hidden_1 = 20 # 1st layer number of features
n_hidden_2 = 20 # 2nd layer number of features
n_input = 2 # data input
n_output = 1 # function output
learning_rate = 0.001
self.batch_size = batch_size
# self.batch_count = np.zeros((self.nControls))
# self.batch_buffer = np.zeros((self.nControls, self.batch_size))
self.count = np.zeros((self.nControls))
self.current_cost = np.zeros((self.nControls))
self.initExploration = initExploration
self.epsilon_0 = epsilon_0
self.neuralArms = list()
self.armCost = list()
self.armOptimizer = list()
self.x = tf.placeholder("float", [None, n_input])
self.y = tf.placeholder("float", [None, n_output])
def multilayer_perceptron(x, weights, biases):
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
for i in range(self.nControls):
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='h1_'+str(i)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='h2_'+str(i)),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]), name='hout_'+str(i))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1_'+str(i)),
'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2_'+str(i)),
'out': tf.Variable(tf.random_normal([n_output]), name='bout_'+str(i))
}
pred = multilayer_perceptron(self.x, weights, biases)
cost = tf.reduce_sum(tf.pow(pred - self.y, 2)) / self.batch_size
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
self.neuralArms.append(pred)
self.armCost.append(cost)
self.armOptimizer.append(optimizer)
if np.mod(i, 20) == 0:
print('NeuralBandit: Created NN number ' + str(i) + ' of '+str(self.nControls))
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
self.algConf = {'epsilon_0': epsilon_0,
'initExploration': initExploration,
'batch_size': batch_size,
'n_hidden_1': n_hidden_1,
'n_hidden_2': n_hidden_2,
'learning_rate': learning_rate,
'Common_layer': 'no'}
def getControl(self, inputData):
x = inputData['state']
indexes = np.where(self.count < self.initExploration)[0]
if len(indexes) > 0:
array_index = np.random.randint(0, len(indexes))
selectedIndex = indexes[array_index]
else:
epsilon_desc = self.epsilon_0 / np.sum(self.count)
if np.random.rand() < epsilon_desc:
selectedIndex = np.random.randint(0, self.nControls, 1)[0]
else:
estimatedReward = np.zeros(self.nControls)
for i in range(self.nControls):
estimatedReward[i] = self.sess.run([self.neuralArms[i]], feed_dict={self.x: np.expand_dims(x, axis=0)})[0][0][0]
# print(estimatedReward)
selectedIndex = np.argmin(estimatedReward)
return self.controlSpace[selectedIndex, :], selectedIndex
def updateAlg(self, inputData):
index = inputData['index']
x = inputData['state']
reward = inputData['utilityFunctionVal']
self.count[index] += 1
# self.batch_buffer[index, self.batch_count[index]] = reward
# self.batch_count[index] += 1
#
# if self.batch_count[index] == self.batch_size:
# _, self.current_cost[index] = self.sess.run([self.armOptimizer[index], self.armCost[index]], feed_dict={self.x: np.expand_dims(x, axis=0), self.y: np.expand_dims(reward, axis=0)})
_, self.current_cost[index] = self.sess.run([self.armOptimizer[index], self.armCost[index]], feed_dict={self.x: np.expand_dims(x, axis=0), self.y: np.expand_dims(reward, axis=0)})
return self.current_cost
def getConf(self):
return self.algConf
def closeAlg(self):
self.sess.close()
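# --- Hedged usage sketch (editor addition, not part of the original class) ---
# Illustrates one epsilon-greedy select/update cycle of NeuralBandit above. Every
# argument value (pico count, ABS/CRE grids, the 2-dim state, the reward) is made
# up, and the sketch assumes the TF1-style graph API this class already relies on.
def _example_neural_bandit_step():
    bandit = NeuralBandit(nPicos=1, ABSval=[0.0, 0.5], CREval=[0, 6],
                          initExploration=1, epsilon_0=1.0)
    state = np.zeros(2)                                  # n_input is hard-coded to 2 above
    control, index = bandit.getControl({'state': state})  # exploration phase: random arm
    cost = bandit.updateAlg({'index': index, 'state': state,
                             'utilityFunctionVal': np.array([0.0])})
    bandit.closeAlg()
    return control, cost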
|
[
"tensorflow.random_normal",
"numpy.random.rand",
"tensorflow.nn.relu",
"tensorflow.pow",
"numpy.where",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"tensorflow.matmul",
"numpy.expand_dims",
"numpy.argmin",
"numpy.meshgrid",
"tensorflow.train.AdamOptimizer",
"numpy.mod",
"numpy.arange"
] |
[((182, 206), 'numpy.arange', 'np.arange', (['(0)', '(nPicos + 1)'], {}), '(0, nPicos + 1)\n', (191, 206), True, 'import numpy as np\n'), ((825, 849), 'numpy.zeros', 'np.zeros', (['self.nControls'], {}), '(self.nControls)\n', (833, 849), True, 'import numpy as np\n'), ((880, 904), 'numpy.zeros', 'np.zeros', (['self.nControls'], {}), '(self.nControls)\n', (888, 904), True, 'import numpy as np\n'), ((1106, 1146), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_input]'], {}), "('float', [None, n_input])\n", (1120, 1146), True, 'import tensorflow as tf\n'), ((1164, 1205), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_output]'], {}), "('float', [None, n_output])\n", (1178, 1205), True, 'import tensorflow as tf\n'), ((2774, 2807), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2805, 2807), True, 'import tensorflow as tf\n'), ((2828, 2840), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2838, 2840), True, 'import tensorflow as tf\n'), ((1356, 1375), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer_1'], {}), '(layer_1)\n', (1366, 1375), True, 'import tensorflow as tf\n'), ((1476, 1495), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer_2'], {}), '(layer_2)\n', (1486, 1495), True, 'import tensorflow as tf\n'), ((3293, 3336), 'numpy.where', 'np.where', (['(self.count < self.initExploration)'], {}), '(self.count < self.initExploration)\n', (3301, 3336), True, 'import numpy as np\n'), ((1291, 1318), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['h1']"], {}), "(x, weights['h1'])\n", (1300, 1318), True, 'import tensorflow as tf\n'), ((1405, 1438), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['h2']"], {}), "(layer_1, weights['h2'])\n", (1414, 1438), True, 'import tensorflow as tf\n'), ((1520, 1554), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['out']"], {}), "(layer_2, weights['out'])\n", (1529, 1554), True, 'import tensorflow as tf\n'), ((2642, 2655), 'numpy.mod', 'np.mod', (['i', '(20)'], {}), '(i, 20)\n', (2648, 2655), True, 'import numpy as np\n'), ((3538, 3556), 'numpy.sum', 'np.sum', (['self.count'], {}), '(self.count)\n', (3544, 3556), True, 'import numpy as np\n'), ((3572, 3588), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3586, 3588), True, 'import numpy as np\n'), ((3732, 3756), 'numpy.zeros', 'np.zeros', (['self.nControls'], {}), '(self.nControls)\n', (3740, 3756), True, 'import numpy as np\n'), ((4012, 4038), 'numpy.argmin', 'np.argmin', (['estimatedReward'], {}), '(estimatedReward)\n', (4021, 4038), True, 'import numpy as np\n'), ((1699, 1738), 'tensorflow.random_normal', 'tf.random_normal', (['[n_input, n_hidden_1]'], {}), '([n_input, n_hidden_1])\n', (1715, 1738), True, 'import tensorflow as tf\n'), ((1794, 1836), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_1, n_hidden_2]'], {}), '([n_hidden_1, n_hidden_2])\n', (1810, 1836), True, 'import tensorflow as tf\n'), ((1893, 1933), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_2, n_output]'], {}), '([n_hidden_2, n_output])\n', (1909, 1933), True, 'import tensorflow as tf\n'), ((2027, 2057), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_1]'], {}), '([n_hidden_1])\n', (2043, 2057), True, 'import tensorflow as tf\n'), ((2113, 2143), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_2]'], {}), '([n_hidden_2])\n', (2129, 2143), True, 'import tensorflow as tf\n'), ((2200, 2228), 'tensorflow.random_normal', 'tf.random_normal', (['[n_output]'], {}), 
'([n_output])\n', (2216, 2228), True, 'import tensorflow as tf\n'), ((2364, 2388), 'tensorflow.pow', 'tf.pow', (['(pred - self.y)', '(2)'], {}), '(pred - self.y, 2)\n', (2370, 2388), True, 'import tensorflow as tf\n'), ((2432, 2483), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2454, 2483), True, 'import tensorflow as tf\n'), ((3637, 3676), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.nControls', '(1)'], {}), '(0, self.nControls, 1)\n', (3654, 3676), True, 'import numpy as np\n'), ((4780, 4805), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4794, 4805), True, 'import numpy as np\n'), ((4815, 4845), 'numpy.expand_dims', 'np.expand_dims', (['reward'], {'axis': '(0)'}), '(reward, axis=0)\n', (4829, 4845), True, 'import numpy as np\n'), ((244, 288), 'numpy.meshgrid', 'np.meshgrid', (['nActivePicosVal', 'ABSval', 'CREval'], {}), '(nActivePicosVal, ABSval, CREval)\n', (255, 288), True, 'import numpy as np\n'), ((3902, 3927), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3916, 3927), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Ensemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
if self.config.vote:
votes = out.argmax(-1)
            oh_votes = F.one_hot(votes, num_classes=out.size(-1))
vote_count = oh_votes.sum(0).float()
most_confident = out.max(0)[0].max(-1)[1]
# Break ties
            vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1
out = vote_count
else:
out = out.mean(0)
return out
def _all_validation_loop(self, device, dataloader,task):
""" weight loss and accuracy using sample specific weights """
self.get_prediction_net(task)
ds_len = len(dataloader.dataset)
acc = 0
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y) in enumerate(dataloader):
x, y= x.to(device),y.to(device)
out = []
for model in self.models:
out += [model(x)]
out = torch.stack(out).argmax(-1)
acc += (out == y.view(1,-1)).int().max(0)[0].float().sum().item()
return acc / ds_len
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n+=raw_x.size()[0]
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
best=self._all_validation_loop(device,validation_loader,task)
print('among best ', best)
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
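# --- Hedged sketch (editor addition, not part of the original class) ---
# Replays the majority-vote rule used in Ensemble.forward on made-up logits from
# 3 models over a batch of 2 samples and 4 classes; torch.randn values are arbitrary.
def _example_majority_vote():
    out = torch.randn(3, 2, 4)                            # (n_models, batch, n_classes)
    votes = out.argmax(-1)                                # per-model predicted class
    oh_votes = F.one_hot(votes, num_classes=out.size(-1))
    vote_count = oh_votes.sum(0).float()                  # (batch, n_classes)
    most_confident = out.max(0)[0].max(-1)[1]             # class favoured by the largest logit
    vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1   # tie-break
    return vote_count.argmax(-1)                          # predicted class per sample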
|
[
"numpy.prod",
"torch.stack",
"torch.nn.functional.cross_entropy",
"copy.deepcopy",
"torch.no_grad"
] |
[((963, 980), 'torch.stack', 'torch.stack', (['outs'], {}), '(outs)\n', (974, 980), False, 'import torch\n'), ((2910, 2930), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2923, 2930), False, 'import copy\n'), ((1661, 1676), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1674, 1676), False, 'import torch\n'), ((4032, 4061), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predicted', 'y'], {}), '(predicted, y)\n', (4047, 4061), True, 'import torch.nn.functional as F\n'), ((2398, 2414), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (2405, 2414), True, 'import numpy as np\n'), ((1955, 1971), 'torch.stack', 'torch.stack', (['out'], {}), '(out)\n', (1966, 1971), False, 'import torch\n'), ((6514, 6530), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (6521, 6530), True, 'import numpy as np\n')]
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Get pretrained model file: wget https://zenodo.org/record/2535873/files/resnet50_v1.pb
import time
from argparse import ArgumentParser
import numpy as np
import tensorflow as tf
from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference
from tensorflow.python.framework import dtypes
import ngraph_bridge
INPUTS = 'input_tensor'
OUTPUTS = 'softmax_tensor'
RESNET_IMAGE_SIZE = 224
class RN50Graph:
"""Evaluate image classifier with optimized TensorFlow graph"""
def __init__(self):
arg_parser = ArgumentParser(description='Parse arguments')
arg_parser.add_argument(
"--batch-size", dest="batch_size", type=int, default=8)
arg_parser.add_argument(
"--num-images", dest='num_images', type=int, default=512)
arg_parser.add_argument(
"--num-inter-threads",
dest='num_inter_threads',
type=int,
default=0)
arg_parser.add_argument(
"--num-intra-threads",
dest='num_intra_threads',
type=int,
default=0)
arg_parser.add_argument(
"--input-graph",
dest='input_graph',
type=str,
default="resnet50_v1.pb")
arg_parser.add_argument(
"--warmup-iters", dest='warmup_iters', type=int, default=8)
self.args = arg_parser.parse_args()
def run(self):
"""run benchmark with optimized graph"""
print("Run inference with dummy data")
config = tf.compat.v1.ConfigProto()
config.intra_op_parallelism_threads = self.args.num_intra_threads
config.inter_op_parallelism_threads = self.args.num_inter_threads
config.use_per_session_threads = True
data_graph = tf.Graph()
with data_graph.as_default():
input_shape = [
self.args.batch_size, RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, 3
]
images = tf.random.uniform(
input_shape,
0.0,
255.0,
dtype=tf.float32,
seed=42,
name='synthetic_images')
infer_graph = tf.Graph()
with infer_graph.as_default():
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(self.args.input_graph, 'rb') as input_file:
input_graph_content = input_file.read()
graph_def.ParseFromString(input_graph_content)
print(
"Optimizing graph %s for inference..." % self.args.input_graph)
output_graph = optimize_for_inference(
graph_def, [INPUTS], [OUTPUTS], dtypes.float32.as_datatype_enum,
False)
tf.import_graph_def(output_graph, name='')
input_tensor = infer_graph.get_tensor_by_name('input_tensor:0')
output_tensor = infer_graph.get_tensor_by_name('softmax_tensor:0')
# Run without nGraph first
print("Run inference (without nGraph)")
ngraph_bridge.disable()
data_sess = tf.compat.v1.Session(graph=data_graph, config=config)
infer_sess = tf.compat.v1.Session(graph=infer_graph, config=config)
iteration = 0
num_processed_images = 0
num_remaining_images = self.args.num_images
tf_time = 0.0
tf_labels = np.array([], dtype=np.int32)
while num_remaining_images >= self.args.batch_size:
np_images = data_sess.run(images)
if iteration > self.args.warmup_iters:
num_processed_images += self.args.batch_size
num_remaining_images -= self.args.batch_size
tf_start_time = time.time()
predictions = infer_sess.run(output_tensor,
{input_tensor: np_images})
tf_elapsed_time = time.time() - tf_start_time
if iteration > self.args.warmup_iters:
tf_time += tf_elapsed_time
tf_labels = np.append(tf_labels, np.argmax(
predictions, axis=-1))
iteration += 1
print("Total execution time (TF): ", tf_time)
# Run with nGraph now
print("Run inference (with nGraph)")
ngraph_bridge.enable()
data_sess = tf.compat.v1.Session(graph=data_graph, config=config)
infer_sess = tf.compat.v1.Session(graph=infer_graph, config=config)
iteration = 0
num_processed_images = 0
num_remaining_images = self.args.num_images
ngtf_time = 0.0
ngtf_labels = np.array([], dtype=np.int32)
while num_remaining_images >= self.args.batch_size:
np_images = data_sess.run(images)
if iteration > self.args.warmup_iters:
num_processed_images += self.args.batch_size
num_remaining_images -= self.args.batch_size
ngtf_start_time = time.time()
predictions = infer_sess.run(output_tensor,
{input_tensor: np_images})
ngtf_elapsed_time = time.time() - ngtf_start_time
if iteration > self.args.warmup_iters:
ngtf_time += ngtf_elapsed_time
ngtf_labels = np.append(ngtf_labels,
np.argmax(predictions, axis=-1))
iteration += 1
print("Total execution time (NGTF): ", ngtf_time)
print("Processed %d images. Batch size = %d" % (num_processed_images,
self.args.batch_size))
print("Avg throughput (TF): %0.4f img/s" %
(num_processed_images / tf_time))
print("Avg throughput (NGTF): %0.4f img/s" %
(num_processed_images / ngtf_time))
assert ((tf_labels == ngtf_labels).all())
if __name__ == "__main__":
graph = RN50Graph()
graph.run()
|
[
"tensorflow.compat.v1.ConfigProto",
"tensorflow.Graph",
"ngraph_bridge.enable",
"tensorflow.random.uniform",
"ngraph_bridge.disable",
"tensorflow.compat.v1.GraphDef",
"argparse.ArgumentParser",
"tensorflow.io.gfile.GFile",
"numpy.argmax",
"numpy.array",
"tensorflow.import_graph_def",
"time.time",
"tensorflow.compat.v1.Session",
"tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference"
] |
[((1166, 1211), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Parse arguments"""'}), "(description='Parse arguments')\n", (1180, 1211), False, 'from argparse import ArgumentParser\n'), ((2156, 2182), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (2180, 2182), True, 'import tensorflow as tf\n'), ((2399, 2409), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2407, 2409), True, 'import tensorflow as tf\n'), ((2804, 2814), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2812, 2814), True, 'import tensorflow as tf\n'), ((3649, 3672), 'ngraph_bridge.disable', 'ngraph_bridge.disable', ([], {}), '()\n', (3670, 3672), False, 'import ngraph_bridge\n'), ((3693, 3746), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'data_graph', 'config': 'config'}), '(graph=data_graph, config=config)\n', (3713, 3746), True, 'import tensorflow as tf\n'), ((3768, 3822), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'infer_graph', 'config': 'config'}), '(graph=infer_graph, config=config)\n', (3788, 3822), True, 'import tensorflow as tf\n'), ((3973, 4001), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (3981, 4001), True, 'import numpy as np\n'), ((4868, 4890), 'ngraph_bridge.enable', 'ngraph_bridge.enable', ([], {}), '()\n', (4888, 4890), False, 'import ngraph_bridge\n'), ((4912, 4965), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'data_graph', 'config': 'config'}), '(graph=data_graph, config=config)\n', (4932, 4965), True, 'import tensorflow as tf\n'), ((4987, 5041), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'infer_graph', 'config': 'config'}), '(graph=infer_graph, config=config)\n', (5007, 5041), True, 'import tensorflow as tf\n'), ((5196, 5224), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (5204, 5224), True, 'import numpy as np\n'), ((2589, 2688), 'tensorflow.random.uniform', 'tf.random.uniform', (['input_shape', '(0.0)', '(255.0)'], {'dtype': 'tf.float32', 'seed': '(42)', 'name': '"""synthetic_images"""'}), "(input_shape, 0.0, 255.0, dtype=tf.float32, seed=42, name=\n 'synthetic_images')\n", (2606, 2688), True, 'import tensorflow as tf\n'), ((2878, 2901), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (2899, 2901), True, 'import tensorflow as tf\n'), ((3226, 3325), 'tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference', 'optimize_for_inference', (['graph_def', '[INPUTS]', '[OUTPUTS]', 'dtypes.float32.as_datatype_enum', '(False)'], {}), '(graph_def, [INPUTS], [OUTPUTS], dtypes.float32.\n as_datatype_enum, False)\n', (3248, 3325), False, 'from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference\n'), ((3366, 3408), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['output_graph'], {'name': '""""""'}), "(output_graph, name='')\n", (3385, 3408), True, 'import tensorflow as tf\n'), ((4310, 4321), 'time.time', 'time.time', ([], {}), '()\n', (4319, 4321), False, 'import time\n'), ((5535, 5546), 'time.time', 'time.time', ([], {}), '()\n', (5544, 5546), False, 'import time\n'), ((2919, 2965), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['self.args.input_graph', '"""rb"""'], {}), "(self.args.input_graph, 'rb')\n", (2936, 2965), True, 'import tensorflow as tf\n'), ((4476, 4487), 'time.time', 'time.time', ([], {}), '()\n', (4485, 4487), False, 'import time\n'), ((5703, 5714), 'time.time', 
'time.time', ([], {}), '()\n', (5712, 5714), False, 'import time\n'), ((4648, 4679), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (4657, 4679), True, 'import numpy as np\n'), ((5925, 5956), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (5934, 5956), True, 'import numpy as np\n')]
|
# The MIT License (MIT)
#
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import numpy as np
import h5py as h5
from mpi4py import MPI
#merge function helpers
def merge_all_token(token, comm):
#first, allreduce the counts
n = token[0]
nres = comm.allreduce(token[0])
weight = float(n)/float(nres)
dmeanres = comm.allreduce(weight*token[1], op = MPI.SUM)
dsqmeanres = comm.allreduce(weight*token[2], op = MPI.SUM)
#these guys require a custom reduction because there is no elemwise mean
#so lets just gather them
#min
token_all = comm.allgather(token[3])
dminres = token_all[0]
for tk in token_all[1:]:
dminres = np.minimum(dminres, tk)
#max
token_all = comm.allgather(token[4])
dmaxres = token_all[0]
for tk in token_all[1:]:
dmaxres = np.maximum(dmaxres, tk)
return (nres, dmeanres, dsqmeanres, dminres, dmaxres)
def merge_token(token1, token2):
#extract data
#first
n1 = token1[0]
dmean1 = token1[1]
dsqmean1 = token1[2]
dmin1 = token1[3]
dmax1 = token1[4]
#second
n2 = token2[0]
dmean2 = token2[1]
dsqmean2 = token2[2]
dmin2 = token2[3]
dmax2 = token2[4]
#create new token
nres = n1 + n2
dmeanres = float(n1)/float(nres)*dmean1 + float(n2)/float(nres)*dmean2
dsqmeanres = float(n1)/float(nres)*dsqmean1 + float(n2)/float(nres)*dsqmean2
dminres = np.minimum(dmin1, dmin2)
dmaxres = np.maximum(dmax1, dmax2)
return (nres, dmeanres, dsqmeanres, dminres, dmaxres)
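# --- Hedged sketch (editor addition, not part of the original script) ---
# Shows how two single-sample tokens combine into sample-weighted statistics;
# the single-channel values below are made up for illustration.
def _example_merge_token():
    t1 = (1, np.array([0.0]), np.array([0.0]), np.array([0.0]), np.array([0.0]))
    t2 = (1, np.array([2.0]), np.array([4.0]), np.array([2.0]), np.array([2.0]))
    # expected result: n=2, mean=[1.0], sqmean=[2.0], min=[0.0], max=[2.0]
    return merge_token(t1, t2)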
#create data token
def create_token(filename, data_format="nchw", rank = 0):
try:
with h5.File(filename, "r") as f:
arr = f["climate/data"][...]
except:
raise IOError("Cannot open file {} on rank {}".format(filename, rank))
#prep axis for ops
axis = (1,2) if data_format == "nchw" else (0,1)
#how many samples do we have: just 1 here
n = 1
#compute stats
mean = np.mean(arr, axis=axis)
meansq = np.mean(np.square(arr), axis=axis)
minimum = np.amin(arr, axis=axis)
maximum = np.amax(arr, axis=axis)
#result
result = (n, mean, meansq, minimum, maximum)
return result
#global parameters
overwrite = False
data_format = "nhwc"
data_path_prefix = "/data"
#MPI
comm = MPI.COMM_WORLD.Dup()
comm_rank = comm.rank
comm_size = comm.size
#root path
root = os.path.join( data_path_prefix, "train" )
#get files
allfiles = [ os.path.join(root, x) for x in os.listdir(root) \
if x.endswith('.h5') and x.startswith('data-') ]
#split list
numfiles = len(allfiles)
chunksize = int(np.ceil(numfiles / comm_size))
start = chunksize * comm_rank
end = min([start + chunksize, numfiles])
files = allfiles[start:end]
#get first token and then merge recursively
token = create_token(files[0], data_format)
for filename in files[1:]:
token = merge_token(create_token(filename, data_format, comm_rank), token)
#communicate results
token = merge_all_token(token, comm)
#write file on rank 0
if comm_rank == 0:
#save the stuff
with h5.File(os.path.join(data_path_prefix, "stats.h5"), "w") as f:
f["climate/count"]=token[0]
f["climate/mean"]=token[1]
f["climate/sqmean"]=token[2]
f["climate/minval"]=token[3]
f["climate/maxval"]=token[4]
|
[
"numpy.mean",
"numpy.ceil",
"os.listdir",
"numpy.minimum",
"numpy.amin",
"os.path.join",
"numpy.square",
"h5py.File",
"mpi4py.MPI.COMM_WORLD.Dup",
"numpy.maximum",
"numpy.amax"
] |
[((3399, 3419), 'mpi4py.MPI.COMM_WORLD.Dup', 'MPI.COMM_WORLD.Dup', ([], {}), '()\n', (3417, 3419), False, 'from mpi4py import MPI\n'), ((3484, 3523), 'os.path.join', 'os.path.join', (['data_path_prefix', '"""train"""'], {}), "(data_path_prefix, 'train')\n", (3496, 3523), False, 'import os\n'), ((2500, 2524), 'numpy.minimum', 'np.minimum', (['dmin1', 'dmin2'], {}), '(dmin1, dmin2)\n', (2510, 2524), True, 'import numpy as np\n'), ((2539, 2563), 'numpy.maximum', 'np.maximum', (['dmax1', 'dmax2'], {}), '(dmax1, dmax2)\n', (2549, 2563), True, 'import numpy as np\n'), ((3058, 3081), 'numpy.mean', 'np.mean', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (3065, 3081), True, 'import numpy as np\n'), ((3144, 3167), 'numpy.amin', 'np.amin', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (3151, 3167), True, 'import numpy as np\n'), ((3182, 3205), 'numpy.amax', 'np.amax', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (3189, 3205), True, 'import numpy as np\n'), ((3551, 3572), 'os.path.join', 'os.path.join', (['root', 'x'], {}), '(root, x)\n', (3563, 3572), False, 'import os\n'), ((3719, 3748), 'numpy.ceil', 'np.ceil', (['(numfiles / comm_size)'], {}), '(numfiles / comm_size)\n', (3726, 3748), True, 'import numpy as np\n'), ((1759, 1782), 'numpy.minimum', 'np.minimum', (['dminres', 'tk'], {}), '(dminres, tk)\n', (1769, 1782), True, 'import numpy as np\n'), ((1907, 1930), 'numpy.maximum', 'np.maximum', (['dmaxres', 'tk'], {}), '(dmaxres, tk)\n', (1917, 1930), True, 'import numpy as np\n'), ((3103, 3117), 'numpy.square', 'np.square', (['arr'], {}), '(arr)\n', (3112, 3117), True, 'import numpy as np\n'), ((3583, 3599), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (3593, 3599), False, 'import os\n'), ((2725, 2747), 'h5py.File', 'h5.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2732, 2747), True, 'import h5py as h5\n'), ((4183, 4225), 'os.path.join', 'os.path.join', (['data_path_prefix', '"""stats.h5"""'], {}), "(data_path_prefix, 'stats.h5')\n", (4195, 4225), False, 'import os\n')]
|
"""
Transform video
===============
In this example, we use ``torchio.Resample((2, 2, 1))`` to divide the spatial
size of the clip (height and width) by two and
``RandomAffine(degrees=(0, 0, 20))`` to rotate a maximum of 20 degrees around
the time axis.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import torch
import torchio as tio
from PIL import Image
def read_clip(path, undersample=4):
"""Read a GIF a return an array of shape (C, W, H, T)."""
gif = Image.open(path)
frames = []
for i in range(gif.n_frames):
gif.seek(i)
frames.append(np.array(gif.convert('RGB')))
frames = frames[::undersample]
array = np.stack(frames).transpose(3, 1, 2, 0)
delay = gif.info['duration']
return array, delay
def plot_gif(image):
def _update_frame(num):
frame = get_frame(image, num)
im.set_data(frame)
return
def get_frame(image, i):
return image.data[..., i].permute(1, 2, 0).byte()
plt.rcParams['animation.embed_limit'] = 25
fig, ax = plt.subplots()
im = ax.imshow(get_frame(image, 0))
return animation.FuncAnimation(
fig,
_update_frame,
repeat_delay=image['delay'],
frames=image.shape[-1],
)
# Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/ # noqa: E501
array, delay = read_clip('nBTu3oi.gif')
plt.imshow(array[..., 0].transpose(1, 2, 0))
plt.plot()
image = tio.ScalarImage(tensor=array, delay=delay)
original_animation = plot_gif(image)
transform = tio.Compose((
tio.Resample((2, 2, 1)),
tio.RandomAffine(degrees=(0, 0, 20)),
))
torch.manual_seed(0)
transformed = transform(image)
transformed_animation = plot_gif(transformed)
|
[
"torch.manual_seed",
"PIL.Image.open",
"torchio.RandomAffine",
"torchio.ScalarImage",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.plot",
"numpy.stack",
"torchio.Resample",
"matplotlib.pyplot.subplots"
] |
[((1520, 1530), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (1528, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1581), 'torchio.ScalarImage', 'tio.ScalarImage', ([], {'tensor': 'array', 'delay': 'delay'}), '(tensor=array, delay=delay)\n', (1554, 1581), True, 'import torchio as tio\n'), ((1721, 1741), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1738, 1741), False, 'import torch\n'), ((520, 536), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (530, 536), False, 'from PIL import Image\n'), ((1084, 1098), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1096, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1150, 1250), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', '_update_frame'], {'repeat_delay': "image['delay']", 'frames': 'image.shape[-1]'}), "(fig, _update_frame, repeat_delay=image['delay'],\n frames=image.shape[-1])\n", (1173, 1250), True, 'import matplotlib.animation as animation\n'), ((1650, 1673), 'torchio.Resample', 'tio.Resample', (['(2, 2, 1)'], {}), '((2, 2, 1))\n', (1662, 1673), True, 'import torchio as tio\n'), ((1679, 1715), 'torchio.RandomAffine', 'tio.RandomAffine', ([], {'degrees': '(0, 0, 20)'}), '(degrees=(0, 0, 20))\n', (1695, 1715), True, 'import torchio as tio\n'), ((706, 722), 'numpy.stack', 'np.stack', (['frames'], {}), '(frames)\n', (714, 722), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
from typing import Tuple
import numpy as np
import PathReducer.calculate_rmsd as rmsd
import pandas as pd
import math
import glob
import os
import sys
import ntpath
import MDAnalysis as mda
import PathReducer.plotting_functions as plotting_functions
from periodictable import *
from sklearn import *
from sympy import solve, Symbol
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def read_traj_file(*args, **kwargs) -> Tuple[str, np.ndarray, np.ndarray]:
"""
    Reads in a trajectory using MDAnalysis' Universe class; documentation and information on parameters can be found here: (https://www.mdanalysis.org/docs/documentation_pages/core/universe.html#MDAnalysis.core.universe.Universe). A topology file is always required; however, there are multiple ways of setting up a universe for a trajectory. Examples include:
u = Universe(topology, trajectory) # read system from file(s)
u = Universe(pdbfile) # read atoms and coordinates from PDB or GRO
u = Universe(topology, [traj1, traj2, ...]) # read from a list of trajectories
u = Universe(topology, traj1, traj2, ...) # read from multiple trajectories
    The trajectory being read in should already be pruned of explicit solvent, backbone residues, and anything else that you don't want PCA to capture. The function then returns a numpy array of all of the atom types of the system and a numpy array of the Cartesian coordinates of each atom for every frame.
:param topology: str (.pdb, .top, .gro etc)
:param coordinates: str (.dcd, .nc, .xyz etc)
:return extensionless_system_name
atom_list
cartesians
"""
u = mda.Universe(*args, **kwargs)
system_name = path_leaf(u.filename)
extensionless_system_name = os.path.splitext(system_name)[0]
n_frames = len(u.trajectory)
n_atoms = len(u.atoms)
cartesians = np.ndarray((n_frames, n_atoms, 3))
try:
atom_list = u.atoms.elements
except AttributeError:
atom_list = u.atoms.types
for frame_index, ts in enumerate(u.trajectory):
cartesians[frame_index] = ts.positions
return extensionless_system_name, atom_list, cartesians
def read_xyz_file(path):
""" Reads in an xyz file from path as a DataFrame. This DataFrame is then turned into a 3D array such that the
dimensions are (number of points) X (number of atoms) X 3 (Cartesian coordinates). The system name (based on the
filename), list of atoms in the system, and Cartesian coordinates are output.
:param path: path to xyz file to be read
:return extensionless_system_name: str
atom_list: numpy array
cartesians: numpy array
"""
system_name = path_leaf(path)
print("File being read is: %s" % system_name)
extensionless_system_name = os.path.splitext(system_name)[0]
data = pd.read_csv(path, header=None, delim_whitespace=True, names=['atom', 'X', 'Y', 'Z'])
n_atoms = int(data.loc[0][0])
n_lines_per_frame = int(n_atoms + 2)
data_array = np.array(data)
data_reshape = np.reshape(data_array, (int(data_array.shape[0]/n_lines_per_frame), n_lines_per_frame,
data_array.shape[1]))
    cartesians = data_reshape[:, 2::, 1::].astype(float)
atom_list = data_reshape[0, 2::, 0]
return extensionless_system_name, atom_list, cartesians
def remove_atoms_by_type(atom_types_to_remove, atom_list, cartesians):
"""
Removes specific atoms if they are not wanted for PCA
:param atom_list: list of atoms in the structure
:param cartesians: cartesian coordinates of each frame
:return: cartesian coordinates of each frame with specific atom types removed
"""
matches_indexes = [i for i, x in enumerate(atom_list) if x in atom_types_to_remove]
cartesians_sans_atoms = np.delete(cartesians, list(matches_indexes), axis=1)
atom_list_sans_atoms = np.delete(atom_list, list(matches_indexes), axis=0)
return atom_list_sans_atoms, cartesians_sans_atoms
def calculate_velocities(cartesians, timestep=1):
"""
    Calculate velocities at each time step given Cartesian coordinates. Velocities at the first and last points are
    computed with forward and backward differences, respectively.
:param cartesians: Cartesian coordinates along trajectory
:param timestep: time step between frames in units of fs, default=1
:return: velocities
"""
velocities = []
for i in range(0, len(cartesians)):
if i == 0:
velocity = (cartesians[i + 1] - cartesians[i]) / timestep
elif i == len(cartesians) - 1:
velocity = (cartesians[i] - cartesians[i - 1]) / timestep
else:
            velocity = (cartesians[i + 1] - cartesians[i - 1]) / (2 * timestep)
velocities.append(velocity)
return velocities
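# --- Hedged sketch (editor addition, not part of PathReducer) ---
# For uniform motion every finite-difference velocity equals the true velocity;
# the one-atom, three-frame trajectory below is made up for illustration.
def _example_velocities_uniform_motion():
    traj = np.array([[[0.0, 0.0, 0.0]], [[1.0, 0.0, 0.0]], [[2.0, 0.0, 0.0]]])
    velocities = calculate_velocities(traj, timestep=1)
    return np.allclose(velocities, [[[1.0, 0.0, 0.0]]] * 3)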
def calculate_momenta(velocities, atoms):
"""
    :param velocities: velocities at each frame along the trajectory, list or array
    :param atoms: list of atoms in the structure
    :return: momenta (mass-weighted velocities) at each frame
"""
velocities = np.array(velocities)
atoms = np.array(atoms)
atom_masses = np.array([formula(atom).mass for atom in atoms])
momenta = velocities * atom_masses[np.newaxis, :, np.newaxis]
return momenta
def set_atom_one_to_origin(coordinates):
coordinates_shifted = coordinates - coordinates[:, np.newaxis, 0]
return coordinates_shifted
def mass_weighting(atoms, cartesians):
cartesians = np.array(cartesians)
atoms = np.array(atoms)
atom_masses = [formula(atom).mass for atom in atoms]
weighting = np.sqrt(atom_masses)
mass_weighted_cartesians = cartesians * weighting[np.newaxis, :, np.newaxis]
return mass_weighted_cartesians
def remove_mass_weighting(atoms, coordinates):
coordinates = np.array(coordinates)
atoms = np.array(atoms)
atom_masses = [formula(atom).mass for atom in atoms]
weighting = np.sqrt(atom_masses)
unmass_weighted_coords = coordinates / weighting[np.newaxis, :, np.newaxis]
return unmass_weighted_coords
def generate_distance_matrices(coordinates):
""" Generates distance matrices for each structure.
"""
coordinates = np.array(coordinates)
d2 = np.sum((coordinates[:, :, None] - coordinates[:, None, :]) ** 2, axis=3)
return d2
def generate_dihedral_matrices(coordinates):
return coordinates
def generate_and_reshape_ds_big_structures(coordinates):
""" Generates matrix of pairwise distances, which includes pairwise distances for each structure.
:param coordinates:
"""
coordinates = np.array(coordinates)
atoms = int(coordinates.shape[1])
d_re = np.zeros((coordinates.shape[0], int(atoms * (atoms - 1) / 2)))
for i in range(coordinates.shape[0]):
d2 = np.square(metrics.pairwise.euclidean_distances(coordinates[i]))
x = d2[0].shape[0]
dint_re = d2[np.triu_indices(x, k=1)]
d_re[i] = dint_re
return d_re
def reshape_ds(d):
""" Takes only the upper triangle of the distance matrices and reshapes them into 1D arrays.
"""
d_re = []
x = d[0][0].shape[0]
for dint in d:
dint_re = dint[np.triu_indices(x, k=1)]
d_re.append(dint_re)
d_re = np.asarray(d_re)
return d_re
def vector_to_matrix(v):
""" Converts a representation from 1D vector to 2D square matrix. Slightly altered from rmsd package to disregard
zeroes along diagonal of matrix.
:param v: 1D input representation.
:type v: numpy array
:return: Square matrix representation.
:rtype: numpy array
"""
if not (np.sqrt(8 * v.shape[0] + 1) == int(np.sqrt(8 * v.shape[0] + 1))):
print("ERROR: Can not make a square matrix.")
exit(1)
n = v.shape[0]
w = ((-1 + int(np.sqrt(8 * n + 1))) // 2) + 1
m = np.zeros((w, w))
index = 0
for i in range(w):
for j in range(w):
if i > j - 1:
continue
m[i, j] = v[index]
m[j, i] = m[i, j]
index += 1
return m
def distance_matrix_to_coords(v):
""" Converts a (2D square) distance matrix representation of a structure to Cartesian coordinates (first 3 columns
correspond to 3D xyz coordinates) via a Gram matrix.
:param v: 1D vector, numpy array
:return: 3D Cartesian coordinates, numpy array
"""
d = vector_to_matrix(v)
d_one = np.reshape(d[:, 0], (d.shape[0], 1))
m = (-0.5) * (d - np.matmul(np.ones((d.shape[0], 1)), np.transpose(d_one)) - np.matmul(d_one,
np.ones((1, d.shape[0]))))
values, vectors = np.linalg.eig(m)
idx = values.argsort()[::-1]
values = values[idx]
vectors = vectors[:, idx]
assert np.allclose(np.dot(m, vectors), values * vectors)
coords = np.dot(vectors, np.diag(np.sqrt(values)))
# Only taking first three columns as Cartesian (xyz) coordinates
coords = np.asarray(coords[:, 0:3])
return coords
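# --- Hedged round-trip sketch (editor addition, not part of PathReducer) ---
# Rebuilding Cartesian coordinates from a flattened distance matrix should preserve
# all pairwise distances (the geometry itself is only recovered up to rotation,
# translation, and reflection); the 4-atom structure below is made up.
def _example_distance_matrix_roundtrip():
    coords = np.array([[[0.0, 0.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.5, 0.0],
                        [0.3, 0.2, 2.0]]])
    d2 = generate_distance_matrices(coords)            # squared distances, shape (1, 4, 4)
    v = reshape_ds(d2)[0]                              # upper triangle as a 1D vector
    rebuilt = distance_matrix_to_coords(v)             # Cartesians from the Gram matrix
    d2_rebuilt = generate_distance_matrices(rebuilt[np.newaxis, ...])
    return np.allclose(d2, d2_rebuilt)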
def pca_dr(matrix):
"""
Does PCA on input matrix with specified number of dimensions. Outputs information used to later generate xyz files
in the reduced dimensional space and also for the function that filters out distances between key atoms and their
neighbors.
:param matrix: array
:return: matrix_pca: input data (in matrix) projected onto covariance matrix eigenvectors (PCs)
matrix_pca_fit: fit used to transform new data into reduced dimensional space
pca.components_: eigenvectors of covariance matrix
pca.mean_: mean of the original dataset (in matrix)
pca.explained_variance_: amount of variance described by each PC
"""
matrix = pd.DataFrame(matrix)
pca = decomposition.PCA()
matrix_pca_fit = pca.fit(matrix)
matrix_pca = pca.transform(matrix)
return matrix_pca, matrix_pca_fit, pca.components_, pca.mean_, pca.explained_variance_
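# --- Hedged usage sketch (editor addition, not part of PathReducer) ---
# Runs pca_dr on the pairwise distances of a tiny random "trajectory"
# (10 frames, 5 atoms); the random data is for illustration only.
def _example_pca_on_distances():
    frames = np.random.rand(10, 5, 3)
    d_re = generate_and_reshape_ds_big_structures(frames)   # shape (10, 10): 5*4/2 distances per frame
    scores, fit, components, mean, variances = pca_dr(d_re)
    return scores.shape, variances.shape                    # ((10, 10), (10,))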
# TODO: Add function that is able to do LDA on data rather than PCA
def lda_dr(matrix, data_labels):
"""
Does LDA (Linear Discriminant Analysis) on input matrix with specified number of dimensions. Outputs information
used to later generate xyz files in the reduced dimensional space and also for the function that filters out
distances between key atoms and their neighbors.
:param matrix: array
:param data_labels: array, separates data into classes to differentiate
:return: matrix_lda: input data (in matrix) projected onto covariance matrix eigenvectors (PCs)
matrix_lda_fit: fit used to transform new data into reduced dimensional space
lda.coef_: weight vectors
lda.means_: means of each class of the original dataset
lda.explained_variance_ratio_: amount of variance described by each PC
"""
matrix = pd.DataFrame(matrix)
lda = discriminant_analysis.LinearDiscriminantAnalysis()
matrix_lda_fit = lda.fit(matrix, data_labels)
matrix_lda = lda.transform(matrix)
return matrix_lda, matrix_lda_fit, lda.coef_, lda.means_, lda.explained_variance_ratio_
def calc_mean_distance_vector(d2_matrix):
return np.mean(d2_matrix, axis=0)
def filter_important_distances(upper_tri_d2_matrices, num_dists=75000):
num_points = upper_tri_d2_matrices.shape[0]
vec_length = upper_tri_d2_matrices.shape[1]
num_atoms = calc_num_atoms(vec_length)
variances = []
atom_indexes = {}
for k in range(vec_length):
variances.append(np.var(upper_tri_d2_matrices[:, k]))
atom1, atom2 = calc_ij(k, num_atoms)
atom_indexes[k] = atom1, atom2
important_distances_matrix = np.zeros((num_points, num_dists))
top_vars_indexes = top_values_indexes(variances, num_dists)
i = 0
selected_dist_atom_indexes = {}
for index in top_vars_indexes:
important_distances_matrix[:, i] = upper_tri_d2_matrices[:, index]
selected_dist_atom_indexes[i] = atom_indexes[index], index
i += 1
# print(selected_dist_atom_indexes)
return important_distances_matrix, selected_dist_atom_indexes
def calc_num_atoms(vec_length):
"""
Calculates number of atoms in a system based on the length of the vector generated by flattening the upper triangle
of its interatomic distance matrix.
:param vec_length: length of interatomic distance matrix vector
:return: num_atoms: int, number of atoms in the system
"""
n = Symbol('n', positive=True)
answers = solve(n * (n - 1) / 2 - vec_length, n)
num_atoms = int(answers[0])
return num_atoms
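# --- Hedged sketch (editor addition, not part of PathReducer) ---
# A 10-atom system has 10*9/2 = 45 unique pairwise distances, so a length-45
# distance vector should map back to 10 atoms.
def _example_calc_num_atoms():
    return calc_num_atoms(45) == 10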
def set_unimportant_distance_weights_to_zero(components, selected_dist_atom_indexes, num_atoms):
num_dists = int((num_atoms * (num_atoms - 1)) / 2)
num_points = components.shape[0]
components_all_distances = np.zeros((num_points, num_dists))
distance_vector_indexes = list(pd.DataFrame(list(selected_dist_atom_indexes.values()))[1])
for i in range(len(distance_vector_indexes)):
components_all_distances[:, distance_vector_indexes[i]] = components[:, i]
return components_all_distances
def generate_PC_matrices_selected_distances(n_dim, matrix_reduced, components, mean, selected_dist_atom_indexes,
num_atoms):
num_points = matrix_reduced.shape[0]
num_dists = int((num_atoms * (num_atoms - 1)) / 2)
PCs_separate = []
for i in range(0, n_dim):
PCi = np.zeros((num_points, num_dists))
PCi_selected = np.dot(matrix_reduced[:, i, None], components[None, i, :]) + mean
for j in range(len(selected_dist_atom_indexes)):
distance_location = selected_dist_atom_indexes[j][1]
PCi[:, distance_location] = PCi_selected[:, j]
PCs_separate.append(PCi)
PCs_combined = np.zeros((num_points, num_dists))
PCs_combined_selected = np.dot(matrix_reduced, components) + mean
for j in range(len(selected_dist_atom_indexes)):
distance_location = selected_dist_atom_indexes[j][1]
PCs_combined[:, distance_location] = PCs_combined_selected[:, j]
PCs_separate = np.array(PCs_separate)
PCs_combined = np.array(PCs_combined)
return PCs_separate, PCs_combined
def inverse_transform_of_pcs(n_dim, matrix_reduced, components, mean):
"""
Calculates the inverse transform of the PCs to see what the PCs correspond to in terms of geometric changes.
Different than inverse_transform_of_pcs_as_normal_modes function because this function shows only structures that
are spanned by the input data itself (i.e., uses the PC scores of each structure).
:param n_dim: int, number of principal components specified to define reduced dimensional space
:param matrix_reduced: array, PCs in reduced dimensional space
:param components: array, eigenvectors of the covariance matrix of the input data
:param mean: mean structure of the input data
:return: PCs_separate, PCs_combined as arrays
"""
PCs_separate = []
for i in range(0, n_dim):
PCi = np.dot(matrix_reduced[:, i, None], components[None, i, :]) + mean
PCs_separate.append(PCi)
PCs_combined = np.dot(matrix_reduced[:, 0:n_dim], components[0:n_dim, :]) + mean
PCs_separate = np.array(PCs_separate)
PCs_combined = np.array(PCs_combined)
return PCs_separate, PCs_combined
def inverse_transform_of_pcs_as_normal_modes(n_dim, matrix_reduced, components, mean, alpha=0.40):
"""
Adds incremental amounts of each eigenvector to the mean structure to show the effect of individual eigenvectors
on molecular structure. Different than the inverse_transform_of_pcs function as this function does NOT take into
account the space spanned by the original input data, but rather distorts the geometry of the mean structure in a
linear fashion (i.e., how visualization of a normal mode appears in GaussView).
:param n_dim: int, number of principal components specified to define reduced dimensional space
:param components: array, eigenvectors of the covariance matrix of the input data
:param mean: mean structure of the input data
:param alpha: the multiple of each eigenvector to add to the mean structure
:return: PCs_separate, PCs_combined as arrays
"""
PCs_separate = []
for i in range(0, n_dim):
PCi = np.dot(alpha * (np.arange(-20, 21))[:, None], components[None, i, :]) + mean
PCs_separate.append(PCi)
multiplier = np.zeros((len(np.arange(-20, 21)), n_dim))
for i in range(n_dim):
multiplier[:, i] = np.arange(-20, 21)
PCs_combined = np.dot(alpha * multiplier, components[0:n_dim, :]) + mean
PCs_separate = np.array(PCs_separate)
PCs_combined = np.array(PCs_combined)
return PCs_separate, PCs_combined
def calc_ij(k, n):
"""
Calculate indexes i and j of a square symmetric matrix given upper triangle vector index k and matrix side length n.
:param k: vector index
:param n: side length of resultant matrix M
:return: i, j as ints
"""
i = n - 2 - math.floor((np.sqrt(-8 * k + 4 * n * (n - 1) - 7) / 2) - 0.5)
j = k + i + 1 - (n * (n - 1) / 2) + ((n - i) * ((n - i) - 1) / 2)
return int(i), int(j)
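# --- Hedged sketch (editor addition, not part of PathReducer) ---
# For a 4-atom system the 6 upper-triangle vector indexes map back to the
# zero-based atom pairs (0,1), (0,2), (0,3), (1,2), (1,3), (2,3), in that order.
def _example_calc_ij():
    return [calc_ij(k, 4) for k in range(6)] == [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]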
def top_values_indexes(a, n):
"""
Determine indexes of n top values of matrix a
:param a: matrix
:param n: integer, number of top values desired
:return: sorted list of indexes of n top values of a
"""
return np.argsort(a)[::-1][:n]
def kabsch(coordinates):
"""Kabsch algorithm to get orientation of axes that minimizes RMSD. All structures will be aligned to the first
structure in the trajectory.
:param coordinates: coordinates along trajectory to be aligned, list or array
"""
coordinates = np.array(coordinates)
coordinates[0] -= rmsd.centroid(coordinates[0])
coords_kabsch = []
for i in range(len(coordinates)):
coordinates[i] -= rmsd.centroid(coordinates[i])
coords_kabschi = rmsd.kabsch_rotate(coordinates[i], coordinates[0])
coords_kabsch.append(coords_kabschi)
return np.array(coords_kabsch)
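# --- Hedged sketch (editor addition, not part of PathReducer) ---
# A frame rotated 90 degrees about z should land back on the first frame after
# Kabsch alignment; the 4-atom geometry and the rotation are made up.
def _example_kabsch_alignment():
    frame = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])
    rot_z = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    aligned = kabsch(np.array([frame, frame.dot(rot_z.T)]))
    return np.allclose(aligned[0], aligned[1])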
def align_to_original_traj(coords, original_traj_coords):
"""Kabsch algorithm to get orientation of axes that minimizes RMSD (to avoid rotations in visualization). All
structures will be aligned to the first structure in the original trajectory.
:param coords: coordinates along trajectory to be aligned, list or array
:param original_traj_coords: coordinates along original trajectory
"""
coords = np.array(coords)
coords_aligned = []
original_traj_coords[0] -= rmsd.centroid(original_traj_coords[0])
for i in range(len(coords)):
coords[i] -= rmsd.centroid(coords[i])
coords_i = rmsd.kabsch_rotate(coords[i], original_traj_coords[0])
coords_aligned.append(coords_i)
return np.array(coords_aligned)
def chirality_test(coords, stereo_atoms):
""" Determines chirality of structure so it is consistent throughout the generated reduced dimensional
IRC/trajectory.
:param coords: xyz coordinates along IRC or trajectory
:param stereo_atoms: list of 4 atom numbers that represent groups around a chiral center
:type coords: numpy array
:type stereo_atoms: list
"""
a1 = stereo_atoms[0]
a2 = stereo_atoms[1]
a3 = stereo_atoms[2]
a4 = stereo_atoms[3]
signs = []
for i in range(len(coords)):
m = np.ones((4, 4))
m[0, 0:3] = coords[i][a1 - 1]
m[1, 0:3] = coords[i][a2 - 1]
m[2, 0:3] = coords[i][a3 - 1]
m[3, 0:3] = coords[i][a4 - 1]
if np.linalg.det(m) < 0:
signs.append(-1)
elif np.linalg.det(m) > 0:
signs.append(1)
elif np.linalg.det(m) == 0:
signs.append(0)
negs = np.where(np.array(signs) < 0)
poss = np.where(np.array(signs) > 0)
zeros = np.where(np.array(signs) == 0)
return negs, poss, zeros, signs
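# --- Hedged sketch (editor addition, not part of PathReducer) ---
# Mirroring a structure through the xy-plane flips the determinant sign that
# chirality_test relies on; the 4-atom geometry below is made up.
def _example_chirality_sign_flip():
    coords = np.array([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
    mirrored = coords * np.array([1.0, 1.0, -1.0])
    negs, poss, zeros, signs = chirality_test(np.concatenate([coords, mirrored]), [1, 2, 3, 4])
    return signs[0] == -signs[1]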
def chirality_changes(reconstructed_coordinates, stereo_atoms, original_structure_signs):
""" Determines chirality of structure along original trajectory and reconstructed reduced dimensional trajectory
and switches inconsistencies along reduced dimensional IRC/trajectory.
:param reconstructed_coordinates: coordinates of trajectory in the reduced dimensional space
:param stereo_atoms: list of 4 indexes of atoms surrounding stereogenic center
:param original_structure_signs: signs (positive or negative) that represent chirality at given point along original
trajectory, numpy array
:return: correct_chirality_coordinates: coordinates with the chirality of each structure consistent with the
original coordinates (based on original_structure_signs), array
"""
pos, neg, zero, signs_reconstructed = chirality_test(reconstructed_coordinates, stereo_atoms)
correct_chirality_coordinates = reconstructed_coordinates
for i in range(len(original_structure_signs)):
if original_structure_signs[i] == 0:
# If molecule begins planar but reconstruction of PCs are not, keep chirality consistent along reconstructed
# trajectory
if i > 0 and signs_reconstructed[i] != signs_reconstructed[0]:
correct_chirality_coordinates[i] = -correct_chirality_coordinates[i]
elif signs_reconstructed[i] != original_structure_signs[i]:
correct_chirality_coordinates[i] = -correct_chirality_coordinates[i]
return correct_chirality_coordinates
def chirality_changes_normal_modes(reconstructed_coordinates, stereo_atoms, original_structure_signs):
""" Determines chirality of structure along original trajectory and reconstructed reduced dimensional trajectory
and switches inconsistencies along reduced dimensional IRC/trajectory.
:param reconstructed_coordinates: coordinates of trajectory in the reduced dimensional space
:param stereo_atoms: list of 4 indexes of atoms surrounding stereogenic center
:param original_structure_signs: signs (positive or negative) that represent chirality at given point along original
trajectory, numpy array
:return: correct_chirality_coordinates: coordinates with the chirality of each structure consistent with the
original coordinates (based on original_structure_signs), array
"""
pos, neg, zero, signs_reconstructed = chirality_test(reconstructed_coordinates, stereo_atoms)
correct_chirality_coordinates = reconstructed_coordinates
for i in range(len(reconstructed_coordinates)):
if original_structure_signs[0] == 0:
if i > 0 and signs_reconstructed[i] != signs_reconstructed[0]:
correct_chirality_coordinates[i] = -correct_chirality_coordinates[i]
elif signs_reconstructed[i] != original_structure_signs[0]:
correct_chirality_coordinates[i] = -correct_chirality_coordinates[i]
return correct_chirality_coordinates
def make_pc_xyz_files(output_directory, title, atoms, coordinates):
""" Save principal coordinates as xyz files PC[n].xyz to output directory.
:param output_directory: output directory to store xyz files, str
:param atoms: atoms in input trajectory, list
:param title: name of the input system, str
:param coordinates: xyz coordinates of structures along PCi, list or numpy array
:return: None
"""
for k in range(np.array(coordinates).shape[0]):
if np.array(coordinates).shape[0] == 1:
f = open(os.path.join(output_directory, '%s_all_PCs.xyz' % title), 'w')
else:
f = open(os.path.join(output_directory, '%s_PC%s.xyz' % (title, k + 1)), 'w')
for i in range(len(coordinates[k])):
a = coordinates[k][i]
a = a.tolist()
b = []
for j in range(len(a)):
a[j] = ['%.5f' % x for x in a[j]]
a[j].insert(0, atoms[j])
b.append(a[j])
f.write('%d' % len(atoms) + '\n')
f.write('%s point %i' % (title, i + 1) + '\n')
f.write('%s' % str(np.asarray(b)).replace("[", "").replace("]", "").replace("'", "") + '\n')
f.close()
def print_prop_of_var_to_txt(values, system_name, directory):
"""
Print list of proportions of variance explained by each principal component to a text file.
:param values: array or list, proportions of variance in descending order
:param system_name: name of the system, used for the text file name
:param directory: output directory to put the output text file
:return: None
"""
normalized_values = values / np.sum(values)
df = pd.DataFrame({'Principal Component': pd.Series([i + 1 for i in range(len(values))]),
'Singular Value': values,
'Prop. of Variance': normalized_values,
'Cumul. Prop. of Var.': np.cumsum(normalized_values)})
pd.set_option('display.expand_frame_repr', False)
print(df.head())
df.to_csv(os.path.join(directory, system_name + '_prop_of_var.txt'), sep='\t', index=None)
def print_distance_weights_to_files(directory, n_dim, system_name, pca_components, num_atoms,
selected_atom_indexes=None):
for n in range(n_dim):
if selected_atom_indexes:
distance_vector_indexes = list(pd.DataFrame(list(selected_atom_indexes.values()))[1])
else:
distance_vector_indexes = range(len(pca_components[n]))
d = []
for k, l in zip(distance_vector_indexes, range(len(pca_components[n]))):
i, j = calc_ij(k, num_atoms)
coeff = pca_components[n][l]
d.append({'atom 1': i, 'atom 2': j, 'Coefficient of Distance': coeff})
d_df = pd.DataFrame(d)
sorted_d = d_df.reindex(d_df['Coefficient of Distance'].abs().sort_values(ascending=False).index)
output_path = os.path.join(directory, system_name + '_PC%s_components.txt' % (n + 1))
sorted_d.to_csv(output_path, sep='\t', index=None)
def print_distance_weights_to_files_select_atom_indexes(atom_indexes, n_dim, pca_components, system_name, directory):
for n in range(n_dim):
d = []
for k in range(len(pca_components[n])):
coeff = pca_components[n][k]
d.append({'atom 1': atom_indexes[k][0], 'atom 2': atom_indexes[k][1], 'Coefficient of Distance': coeff})
d_df = pd.DataFrame(d)
sorted_d = d_df.reindex(d_df['Coefficient of Distance'].abs().sort_values(ascending=False).index)
sorted_d.to_csv(os.path.join(directory, system_name + '_PC%s_components.txt' % (n + 1)), sep='\t', index=None)
def print_distance_weights_to_files_weighted(directory, n_dim, system_name, pca_components, pca_values, num_atoms,
display=False):
for n in range(n_dim):
d = []
for k in range(len(pca_components[n])):
i, j = calc_ij(k, num_atoms)
coeff = (pca_values[n] / sum(pca_values)) * pca_components[n][k]
d.append({'atom 1': i, 'atom 2': j, 'Coefficient of Distance': coeff})
d_df = pd.DataFrame(d)
sorted_d = d_df.reindex(d_df['Coefficient of Distance'].abs().sort_values(ascending=False).index)
sorted_d.to_csv(os.path.join(directory, system_name + '_PC%s_components_weighted.txt' % (n + 1)), sep='\t',
index=None)
if display:
print("PC%s" % (n + 1))
print(sorted_d)
def transform_new_data(new_xyz_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
original_traj_coords, input_type, stereo_atoms=[1, 2, 3, 4], mw=False, remove_atom_types=None,
selected_atom_indexes=None):
if input_type == "Cartesians":
new_system_name, components_df = transform_new_data_cartesians(new_xyz_file_path, output_directory, n_dim,
pca_fit, pca_components, pca_mean,
original_traj_coords, mw=mw,
remove_atom_types=remove_atom_types)
elif input_type == "Distances":
if selected_atom_indexes:
new_system_name, components_df = transform_new_data_only_top_distances(new_xyz_file_path, output_directory,
n_dim,
pca_fit, pca_components, pca_mean,
selected_atom_indexes=selected_atom_indexes,
stereo_atoms=stereo_atoms, mw=mw,
remove_atom_types=remove_atom_types)
else:
new_system_name, components_df = transform_new_data_distances(new_xyz_file_path, output_directory, n_dim,
pca_fit, pca_components, pca_mean,
stereo_atoms=stereo_atoms, mw=mw,
remove_atom_types=remove_atom_types)
else:
raise ValueError("Please specify input_type=\"Cartesians\" or \"Distances\"")
return new_system_name, components_df
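# Example usage of the dispatcher above (a minimal sketch; the file path and fitted objects are hypothetical):
#   new_name, new_components = transform_new_data("new_traj.xyz", "output_dir", 3, pca_fit, components,
#                                                 mean, aligned_coords, input_type="Distances",
#                                                 stereo_atoms=[1, 2, 3, 4], mw=True)
# new_components holds the projection of every frame of the new trajectory onto the previously fitted PCs.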
def transform_new_data_cartesians(new_trajectory_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
original_traj_coords, mw=False, remove_atom_types=None, topology=None):
"""
Takes as input a new trajectory (xyz file) for a given system for which dimensionality reduction has already been
conducted, transforms this new data into the reduced dimensional space, and generates xyz files for the new
trajectory represented by the principal components.
:param new_trajectory_file_path: new input to dimensionality reduction (xyz file location), str
:param output_directory: output directory, str
:param n_dim: number of dimensions of the reduced dimensional space, int
:param pca_fit: fit from PCA on training data
:param pca_components: components from PCA on training data, array
:param pca_mean: mean of input data to PCA (mean structure as coords or distances), array
:param original_traj_coords: coordinates of the trajectory that the reduced dimensional space was trained on
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:param remove_atom_types: optional list of atom types to strip from the trajectory before projection
:return: new_system_name, components_df (projection of each new frame onto the trained PCs)
"""
print("\nTransforming %s into reduced dimensional representation..." % new_trajectory_file_path)
new_system_name, atoms, coordinates = read_traj_file(new_trajectory_file_path)
if remove_atom_types is not None:
atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("\nResults for %s input will be stored in %s" % (new_trajectory_file_path, output_directory))
# Determining names of output directories/files
file_name_end = "_Cartesians"
# Align structures using Kabsch algorithm so rotations don't affect PCs
aligned_original_traj_coords = kabsch(original_traj_coords)
coords_for_analysis = align_to_original_traj(coordinates, aligned_original_traj_coords)
if mw is True:
file_name_end = file_name_end + "_MW"
mass_weighted_coords = mass_weighting(atoms, coords_for_analysis)
coords_for_analysis = mass_weighted_coords
else:
file_name_end = file_name_end + "_noMW"
coords_for_analysis = np.reshape(coords_for_analysis, (coords_for_analysis.shape[0],
coords_for_analysis.shape[1] *
coords_for_analysis.shape[2]))
components = pca_fit.transform(coords_for_analysis)
components_df = pd.DataFrame(components)
PCs_separate = []
for i in range(0, n_dim):
PCi = np.dot(components[:, i, None], pca_components[None, i, :]) + pca_mean
PCs_separate.append(PCi)
PCs_combined = np.dot(components, pca_components) + pca_mean
PCs_separate = np.array(PCs_separate)
PCs_combined = np.array(PCs_combined)
# Reshape n x 3N x 1 arrays into n x N x 3 arrays
PCs_separate = np.reshape(PCs_separate, (PCs_separate.shape[0], PCs_separate.shape[1],
int(PCs_separate.shape[2] / 3), 3))
PCs_combined = np.reshape(PCs_combined, (1, PCs_combined.shape[0], int(PCs_combined.shape[1] / 3), 3))
if mw is True:
# Remove mass-weighting of coordinates
no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms, PCs_separate[i])
for i in range(n_dim)]
no_mass_weighting_PCs_combined = remove_mass_weighting(atoms, PCs_combined)
else:
no_mass_weighting_PCs_separate = PCs_separate
no_mass_weighting_PCs_combined = PCs_combined
aligned_PCs_separate = no_mass_weighting_PCs_separate
aligned_PCs_combined = no_mass_weighting_PCs_combined
make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_separate)
make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_combined)
return new_system_name, components_df
def transform_new_data_distances(new_trajectory_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
stereo_atoms=[1, 2, 3, 4], mw=False, remove_atom_types=None):
"""
Takes as input a new trajectory (xyz file) for a given system for which dimensionality reduction (on interatomic
distances) has already been conducted, transforms this new data into the reduced dimensional space, and generates
xyz files for the new trajectory represented by the principal components.
:param new_trajectory_file_path: new input to dimensionality reduction (xyz file location), str
:param output_directory: output directory, str
:param n_dim: number of dimensions of the reduced dimensional space, int
:param pca_fit: fit from PCA on training data
:param pca_components: components from PCA on training data, array
:param pca_mean: mean of input data to PCA (mean structure as coords or distances), array
:param stereo_atoms: indexes of 4 atoms surrounding stereogenic center, list of ints
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:param remove_atom_types: optional list of atom types to strip from the trajectory before projection
:return: new_system_name, components_df (projection of each new frame onto the trained PCs)
"""
print("\nTransforming %s into reduced dimensional representation..." % new_trajectory_file_path)
new_system_name, atoms, coordinates = read_traj_file(new_trajectory_file_path)
if remove_atom_types is not None:
atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("\nResults for %s input will be stored in %s" % (new_trajectory_file_path, output_directory))
# Determining names of output directories/files
file_name_end = "_Distances"
if mw is True:
file_name_end = file_name_end + "_MW"
coordinates_shifted = set_atom_one_to_origin(coordinates)
mass_weighted_coords = mass_weighting(atoms, coordinates_shifted)
coords_for_analysis = mass_weighted_coords
else:
file_name_end = file_name_end + "_noMW"
coords_for_analysis = coordinates
negatives, positives, zeroes, all_signs = chirality_test(coordinates, stereo_atoms)
d2 = generate_distance_matrices(coords_for_analysis)
coords_for_analysis = reshape_ds(d2)
components = pca_fit.transform(coords_for_analysis)
components_df = pd.DataFrame(components)
PCs_separate = []
for i in range(0, n_dim):
PCi = np.dot(components[:, i, None], pca_components[None, i, :]) + pca_mean
PCs_separate.append(PCi)
PCs_combined = np.dot(components, pca_components) + pca_mean
PCs_separate = np.array(PCs_separate)
PCs_combined = np.array(PCs_combined)
# Turning distance matrix representations of structures back into Cartesian coordinates
PCs_separate = [[distance_matrix_to_coords(PCs_separate[i][k])
for k in range(PCs_separate.shape[1])] for i in range(PCs_separate.shape[0])]
PCs_combined = [distance_matrix_to_coords(PCs_combined[i])
for i in range(np.array(PCs_combined).shape[0])]
PCs_separate = np.real(PCs_separate)
PCs_combined = np.real(PCs_combined)
if mw is True:
# Remove mass-weighting of coordinates
no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms, PCs_separate[i])
for i in range(n_dim)]
no_mass_weighting_PCs_combined = remove_mass_weighting(atoms, PCs_combined)
else:
no_mass_weighting_PCs_separate = PCs_separate
no_mass_weighting_PCs_combined = PCs_combined
# Reorient coordinates so they are in a consistent orientation
aligned_PCs_separate = [kabsch(chirality_changes(no_mass_weighting_PCs_separate[i], stereo_atoms,
all_signs)) for i in range(n_dim)]
aligned_PCs_combined = kabsch(chirality_changes(no_mass_weighting_PCs_combined, stereo_atoms, all_signs))
aligned_PCs_combined = np.reshape(aligned_PCs_combined, (1, aligned_PCs_combined.shape[0],
aligned_PCs_combined.shape[1],
aligned_PCs_combined.shape[2]))
make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_separate)
make_pc_xyz_files(output_directory, new_system_name + file_name_end, atoms, aligned_PCs_combined)
return new_system_name, components_df
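# Example usage (a minimal sketch; the file path and fitted objects are hypothetical):
#   name, df = transform_new_data_distances("new_traj.xyz", "output_dir", 3, pca_fit, components, mean,
#                                           stereo_atoms=[1, 2, 3, 4], mw=True)
# The new frames are projected in distance space; the inverse transform gives distance matrices, and
# distance_matrix_to_coords() recovers Cartesian geometries (up to mirror image), which is why the
# chirality check and Kabsch alignment are applied before the xyz files are written.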
def transform_new_data_only_top_distances(new_xyz_file_path, output_directory, n_dim, pca_fit, pca_components, pca_mean,
selected_atom_indexes, stereo_atoms=[1, 2, 3, 4], mw=False,
remove_atom_types=None):
"""
Takes as input a new trajectory (xyz file) for a given system for which dimensionality reduction has already been
conducted on a selected subset of interatomic distances, and transforms this new data into the reduced dimensional
space using only those distances. Returns the projected components; no xyz reconstruction is performed here.
:param new_xyz_file_path: new input to dimensionality reduction (xyz file location), str
:param output_directory: output directory, str
:param n_dim: number of dimensions of the reduced dimensional space, int
:param pca_fit: fit from PCA on training data
:param pca_components: components from PCA on training data, array
:param pca_mean: mean of input data to PCA (mean structure as coords or distances), array
:param selected_atom_indexes: the atom-pair/distance indexes selected during training, dict
:param stereo_atoms: indexes of 4 atoms surrounding stereogenic center, list of ints
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:param remove_atom_types: optional list of atom types to strip from the trajectory before projection
:return: new_system_name, components_df (projection of each new frame onto the trained PCs)
"""
print("\nTransforming %s into reduced dimensional representation..." % new_xyz_file_path)
new_system_name, atoms, coordinates = read_xyz_file(new_xyz_file_path)
if remove_atom_types is not None:
atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("\nResults for %s input will be stored in %s" % (new_xyz_file_path, output_directory))
if mw is True:
coordinates_shifted = set_atom_one_to_origin(coordinates)
mass_weighted_coords = mass_weighting(atoms, coordinates_shifted)
coords_for_analysis = mass_weighted_coords
else:
coords_for_analysis = coordinates
d2_vector_matrix_all = generate_and_reshape_ds_big_structures(coords_for_analysis)
num_dists = len(list(selected_atom_indexes.keys()))
print("Projecting the new data onto the %s distances selected during training..." % num_dists)
num_points = d2_vector_matrix_all.shape[0]
important_distances_matrix = np.zeros((num_points, num_dists))
distance_vector_indexes = list(pd.DataFrame(list(selected_atom_indexes.values()))[1])
for i in range(len(distance_vector_indexes)):
important_distances_matrix[:, i] = d2_vector_matrix_all[:, distance_vector_indexes[i]]
components = pca_fit.transform(important_distances_matrix)
components_df = pd.DataFrame(components)
return new_system_name, components_df
def pathreducer(trajectory_file_path, n_dim, stereo_atoms=[1, 2, 3, 4], input_type="Cartesians", mw=False, reconstruct=True,
normal_modes=False, remove_atom_types=None, num_dists=None, topology=None):
"""
Workhorse function for doing dimensionality reduction on xyz files. Dimensionality reduction can be done on the
structures represented as Cartesian coordinates (easy/faster) or the structures represented as distance matrices
(slower, but potentially more useful for certain systems that vary non-linearly with respect to Cartesian space,
e.g., torsions).
:param trajectory_file_path: xyz file or directory filled with xyz files that will be used to generate the reduced
dimensional space, str
:param n_dim: number of dimensions to reduce system to using PCA, int
:param stereo_atoms: list of 4 atom indexes surrounding stereogenic center, ints
:param input_type: input type to PCA, either "Cartesians" or "Distances", str
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:param reconstruct: if True, transform the PCs back into full-dimensional xyz trajectories, bool
:param normal_modes: if True, use the normal-mode style inverse transform when reconstructing, bool
:param remove_atom_types: optional list of atom types to strip from the trajectories before PCA
:param num_dists: number of interatomic distances to keep for large systems ("Distances" input only), int or None
:param topology: optional topology file passed to the trajectory reader
:return: name, directory, pca, pca_fit, components, mean, values, lengths, aligned coordinates (lengths is omitted
for a single-file "Cartesians" input; the selected atom-pair indexes are also returned when num_dists is set with
"Distances" input)
"""
# Make sure even large matrices are printed out in their entirety (for the generation of xyz files)
np.set_printoptions(threshold=sys.maxsize)
# Check if input is directory (containing input files) or a single input file itself
assert os.path.isfile(trajectory_file_path) or os.path.isdir(trajectory_file_path), "No such file or directory."
if os.path.isfile(trajectory_file_path) is True:
if input_type == "Cartesians":
system_name, output_directory, pca, pca_fit, components, mean, values, aligned_coords = \
pathreducer_cartesians_one_file(trajectory_file_path, n_dim, mw=mw, reconstruct=reconstruct,
normal_modes=normal_modes, remove_atom_types=remove_atom_types,
topology=topology)
return system_name, output_directory, pca, pca_fit, components, mean, \
values, aligned_coords
elif input_type == "Distances":
if num_dists:
system_name, output_directory, pca, pca_fit, components, mean, values, aligned_coords, selected_dist_atom_indexes = \
pathreducer_distances_one_file(trajectory_file_path, n_dim, stereo_atoms=stereo_atoms, mw=mw,
reconstruct=reconstruct, normal_modes=normal_modes,
remove_atom_types=remove_atom_types, num_dists=num_dists, topology=topology)
lengths = aligned_coords.shape[0]
return system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords, \
selected_dist_atom_indexes
else:
system_name, output_directory, pca, pca_fit, components, mean, values, aligned_coords = \
pathreducer_distances_one_file(trajectory_file_path, n_dim, stereo_atoms=stereo_atoms, mw=mw,
reconstruct=reconstruct, normal_modes=normal_modes,
remove_atom_types=remove_atom_types, num_dists=num_dists, topology=topology)
lengths = aligned_coords.shape[0]
return system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords
elif os.path.isdir(trajectory_file_path) is True:
if input_type == "Cartesians":
system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords = \
pathreducer_cartesians_directory_of_files(trajectory_file_path, n_dim, mw=mw, reconstruct=reconstruct,
normal_modes=normal_modes,
remove_atom_types=remove_atom_types, topology=topology)
return system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords
elif input_type == "Distances":
if num_dists:
system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords, \
selected_dist_atom_indexes = pathreducer_distances_directory_of_files(trajectory_file_path, n_dim,
stereo_atoms=stereo_atoms, mw=mw,
reconstruct=reconstruct,
normal_modes=normal_modes,
num_dists=num_dists,
remove_atom_types=remove_atom_types,
topology=topology)
return system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords, \
selected_dist_atom_indexes
else:
system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords = \
pathreducer_distances_directory_of_files(trajectory_file_path, n_dim,
stereo_atoms=stereo_atoms, mw=mw,
reconstruct=reconstruct,
normal_modes=normal_modes,
num_dists=num_dists,
remove_atom_types=remove_atom_types,
topology=topology)
return system_name, output_directory, pca, pca_fit, components, mean, values, lengths, aligned_coords
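# Example usage (a minimal sketch; the trajectory path is hypothetical):
#   name, out_dir, pca, pca_fit, components, mean, values, lengths, coords = \
#       pathreducer("trajectories/", n_dim=3, input_type="Cartesians", mw=True)
# For a single file with input_type="Cartesians" the lengths entry is omitted from the returned tuple;
# with input_type="Distances" and num_dists set, the selected atom-pair indexes are returned as well.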
def pathreducer_cartesians_one_file(trajectory_file_path, n_dim, mw=False, reconstruct=True, normal_modes=False,
remove_atom_types=None, topology=None):
"""
Workhorse function for doing dimensionality reduction on xyz files. Dimensionality reduction can be done on the
structures represented as Cartesian coordinates (easy/faster) or the structures represented as distance matrices
(slower, but potentially more useful for certain systems that vary in non-linear ways, e.g., torsions).
:param trajectory_file_path: xyz file or directory filled with xyz files that will be used to generate the reduced
dimensional space, str
:param n_dim: number of dimensions to reduce system to using PCA, int
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:return: name, directory, pca, pca_fit, components, mean, values, aligned coordinates
"""
# Make sure even large matrices are printed out in their entirety (for the generation of xyz files)
np.set_printoptions(threshold=sys.maxsize)
# Check if input is directory (containing input files) or a single input file itself
assert os.path.isfile(trajectory_file_path) or os.path.isdir(trajectory_file_path), "No such file or directory."
# Determining names of output directories/files
file_name_end = "_Cartesians"
if mw is True:
file_name_end = file_name_end + "_MW"
elif mw is False:
file_name_end = file_name_end + "_noMW"
print("\nInput is one file.")
system_name, atoms, coordinates = _read_single_traj_file(topology, trajectory_file_path)
if remove_atom_types is not None:
atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
# Creating a directory for output (if directory doesn't already exist)
output_directory = system_name + file_name_end + "_output"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("Results for %s input will be stored in %s" % (trajectory_file_path, output_directory))
aligned_coords = kabsch(coordinates)
print("\n(1C) Done aligning structures using Kabsch algorithm")
if mw is True:
mass_weighted_coordinates = mass_weighting(atoms, aligned_coords)
print("\n(MW) Done mass-weighting coordinates!")
matrix_for_pca = np.reshape(mass_weighted_coordinates, (mass_weighted_coordinates.shape[0],
mass_weighted_coordinates.shape[1] *
mass_weighted_coordinates.shape[2]))
else:
matrix_for_pca = np.reshape(aligned_coords, (aligned_coords.shape[0], aligned_coords.shape[1] *
aligned_coords.shape[2]))
# PCA
cartesians_pca, cartesians_pca_fit, cartesians_components, cartesians_mean, cartesians_values = \
pca_dr(matrix_for_pca)
print("\n(2) Done with PCA of Cartesian coordinates!")
if reconstruct:
if normal_modes:
function = inverse_transform_of_pcs_as_normal_modes
file_name_end += "_normal_modes"
else:
function = inverse_transform_of_pcs
PCs_separate, PCs_combined = function(n_dim, cartesians_pca, cartesians_components,
cartesians_mean)
print("\n(3) Done transforming reduced dimensional representation of input into full dimensional space!")
# Reshape n x 3N x 1 arrays into n x N x 3 arrays
PCs_separate = np.reshape(PCs_separate, (PCs_separate.shape[0], PCs_separate.shape[1],
int(PCs_separate.shape[2] / 3), 3))
PCs_combined = np.reshape(PCs_combined, (1, PCs_combined.shape[0], int(PCs_combined.shape[1] / 3), 3))
if mw is True:
# Remove mass-weighting of coordinates, individual Xs
no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms, PCs_separate[i]) for i in range(n_dim)]
# Remove mass-weighting of coordinates, all Xs combined into one array/reduced dimensional trajectory
no_mass_weighting_PCs_combined = remove_mass_weighting(atoms, PCs_combined)
print("\n(UMW) Done removing mass-weighting!")
else:
no_mass_weighting_PCs_separate = [PCs_separate[i] for i in range(n_dim)]
no_mass_weighting_PCs_combined = PCs_combined
# Make xyz files from final coordinate arrays
make_pc_xyz_files(output_directory, system_name + file_name_end, atoms, no_mass_weighting_PCs_separate)
make_pc_xyz_files(output_directory, system_name + file_name_end, atoms, no_mass_weighting_PCs_combined)
print("\n(4) Done with making output xyz files!")
return system_name, output_directory, cartesians_pca, cartesians_pca_fit, cartesians_components, cartesians_mean, \
cartesians_values, aligned_coords
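# Example usage (a minimal sketch; the file name is hypothetical):
#   name, out_dir, pca, pca_fit, components, mean, values, aligned = \
#       pathreducer_cartesians_one_file("traj.xyz", n_dim=3, mw=True)
# Structures are Kabsch-aligned, optionally mass-weighted, flattened to (n_frames, 3N) and passed to
# pca_dr(); with reconstruct=True each PC is also written back out as an xyz trajectory.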
def pathreducer_cartesians_directory_of_files(trajectory_directory_path, n_dim, mw=False, reconstruct=True,
normal_modes=False, remove_atom_types=None, topology=None):
"""
Workhorse function for doing dimensionality reduction on xyz files. Dimensionality reduction can be done on the
structures represented as Cartesian coordinates (easy/faster) or the structures represented as distance matrices
(slower, but potentially more useful for certain systems that vary in non-linear ways, e.g., torsions).
:param trajectory_directory_path: xyz file or directory filled with xyz files that will be used to generate the
reduced dimensional space, str
:param n_dim: number of dimensions to reduce system to using PCA, int
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:return: name, directory, pca, pca_fit, components, mean, values, file lengths, concatenated coordinates
"""
# Make sure even large matrices are printed out in their entirety (for the generation of xyz files)
np.set_printoptions(threshold=sys.maxsize)
# Check if input is directory (containing input files) or a single input file itself
assert os.path.isfile(trajectory_directory_path) or os.path.isdir(trajectory_directory_path), "No such file or " \
"directory."
# Determining names of output directories/files
file_name_end = "_Cartesians"
if mw is True:
file_name_end = file_name_end + "_MW"
elif mw is False:
file_name_end = file_name_end + "_noMW"
print("\nInput is a directory of files.")
path = os.path.dirname(trajectory_directory_path)
system_name = os.path.basename(path)
print("\nDoing dimensionality reduction on files in %s" % system_name)
trajectory_files = sorted(glob.glob(os.path.join(trajectory_directory_path, '*.xyz')))
names = []
atoms = []
file_lengths = []
i = 0
for trajectory_file in trajectory_files:
i = i + 1
if topology is not None:
name, atoms_one_file, coordinates = read_traj_file(topology, trajectory_file)
else: # Assume topology is in the trajectory file, e.g. XYZ or PDB
name, atoms_one_file, coordinates = read_traj_file(trajectory_file)
if remove_atom_types is not None:
atoms_one_file, coordinates = remove_atoms_by_type(remove_atom_types, atoms_one_file, coordinates)
names.append(name)
atoms.append(atoms_one_file)
file_lengths.append(coordinates.shape[0])
if i == 1:
coords_for_analysis = coordinates
else:
coords_for_analysis = np.concatenate((coords_for_analysis, coordinates), axis=0)
# Creating a directory for output (if directory doesn't already exist)
output_directory = system_name + file_name_end + "_output"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("Results for %s input will be stored in %s" % (trajectory_directory_path, output_directory))
aligned_coords = kabsch(coords_for_analysis)
print("\n(1C) Done aligning structures using Kabsch algorithm")
if mw is True:
mass_weighted_coordinates = mass_weighting(atoms_one_file, aligned_coords)
print("\n(MW) Done mass-weighting coordinates!")
matrix_for_pca = np.reshape(mass_weighted_coordinates, (mass_weighted_coordinates.shape[0],
mass_weighted_coordinates.shape[1] *
mass_weighted_coordinates.shape[2]))
else:
matrix_for_pca = np.reshape(aligned_coords, (aligned_coords.shape[0],
aligned_coords.shape[1] * aligned_coords.shape[2]))
# PCA
cartesians_pca, cartesians_pca_fit, cartesians_components, cartesians_mean, cartesians_values = \
pca_dr(matrix_for_pca)
print("\n(2) Done with PCA of Cartesian coordinates!")
if reconstruct:
if normal_modes:
function = inverse_transform_of_pcs_as_normal_modes
file_name_end += "_normal_modes"
else:
function = inverse_transform_of_pcs
PCs_separate, PCs_combined = function(n_dim, cartesians_pca, cartesians_components,
cartesians_mean)
print("\n(3) Done transforming reduced dimensional representation of input into full dimensional space!")
# Reshape n x 3N x 1 arrays into n x N x 3 arrays
PCs_separate = np.reshape(PCs_separate, (PCs_separate.shape[0], PCs_separate.shape[1],
int(PCs_separate.shape[2] / 3), 3))
PCs_combined = np.reshape(PCs_combined, (1, PCs_combined.shape[0], int(PCs_combined.shape[1] / 3), 3))
if mw is True:
# Remove mass-weighting of coordinates, individual Xs
no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms_one_file, PCs_separate[i]) for i in
range(n_dim)]
# Remove mass-weighting of coordinates, all Xs combined into one array/reduced dimensional trajectory
no_mass_weighting_PCs_combined = remove_mass_weighting(atoms_one_file, PCs_combined)
print("\n(UMW) Done removing mass-weighting!")
else:
no_mass_weighting_PCs_separate = [PCs_separate[i] for i in range(n_dim)]
no_mass_weighting_PCs_combined = PCs_combined
# Make xyz files from final coordinate arrays
for x in range(len(file_lengths)):
filename = names[x]
if x == 0:
start_index = 0
end_index = file_lengths[x]
one_file_PCs_separate = np.array(no_mass_weighting_PCs_separate)[:, start_index:end_index, :, :]
one_file_PCs_combined = np.array(no_mass_weighting_PCs_combined)[:, start_index:end_index, :, :]
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_separate)
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_combined)
else:
start_index = sum(file_lengths[:x])
end_index = sum(file_lengths[:(x + 1)])
one_file_PCs_separate = np.array(no_mass_weighting_PCs_separate)[:, start_index:end_index, :, :]
one_file_PCs_combined = np.array(no_mass_weighting_PCs_combined)[:, start_index:end_index, :, :]
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_separate)
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_combined)
print("\nDone generating output!")
return system_name, output_directory, cartesians_pca, cartesians_pca_fit, cartesians_components, cartesians_mean, \
cartesians_values, file_lengths, coords_for_analysis
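# Example usage (a minimal sketch; the directory name is hypothetical):
#   name, out_dir, pca, pca_fit, components, mean, values, lengths, coords = \
#       pathreducer_cartesians_directory_of_files("trajectories/", n_dim=2, mw=False)
# All *.xyz files in the directory are concatenated before PCA; lengths records how many frames came
# from each file so the reconstructed PC trajectories can be split back into per-file xyz outputs.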
def pathreducer_distances_one_file(trajectory_file_path, n_dim, stereo_atoms=[1, 2, 3, 4], mw=False,
print_distance_coefficients=True, reconstruct=True, normal_modes=False,
num_dists=None, remove_atom_types=None, topology=None):
"""
Workhorse function for doing dimensionality reduction on xyz files. Dimensionality reduction can be done on the
structures represented as Cartesian coordinates (easy/faster) or the structures represented as distance matrices
(slower, but potentially more useful for certain systems that vary in non-linear ways, e.g., torsions).
:param trajectory_file_path: xyz file or directory filled with xyz files that will be used to generate the reduced
dimensional space, str
:param n_dim: number of dimensions to reduce system to using PCA, int
:param stereo_atoms: list of 4 atom indexes surrounding stereogenic center, ints
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:return: name, directory, pca, pca_fit, components, mean, values, aligned coordinates (plus the selected
atom-pair indexes when num_dists is set)
"""
# Make sure even large matrices are printed out in their entirety (for the generation of xyz files)
np.set_printoptions(threshold=sys.maxsize)
# Check if input is directory (containing input files) or a single input file itself
assert os.path.isfile(trajectory_file_path) or os.path.isdir(trajectory_file_path), "No such file or directory."
# Determining names of output directories/files
file_name_end = "_Distances"
if mw is True:
file_name_end = file_name_end + "_MW"
elif mw is False:
file_name_end = file_name_end + "_noMW"
print("\nInput is one file.")
name, atoms, coordinates = _read_single_traj_file(topology, trajectory_file_path)
if remove_atom_types is not None:
atoms, coordinates = remove_atoms_by_type(remove_atom_types, atoms, coordinates)
# Creating a directory for output (if directory doesn't already exist)
output_directory = name + file_name_end + "_output"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("Results for %s input will be stored in %s" % (trajectory_file_path, output_directory))
aligned_coordinates = kabsch(coordinates)
negatives, positives, zeroes, all_signs = chirality_test(aligned_coordinates, stereo_atoms)
if mw is True:
coordinates_shifted = set_atom_one_to_origin(coordinates)
mass_weighted_coordinates = mass_weighting(atoms, coordinates_shifted)
coords_for_pca = mass_weighted_coordinates
print("\n(MW) Done mass-weighting coordinates!")
else:
coords_for_pca = aligned_coordinates
if coords_for_pca.shape[1] > 1000:
num_dists = 75000
print("Big matrix. Using the top %s distances for PCA..." % num_dists)
d2_vector_matrix_all = generate_and_reshape_ds_big_structures(coords_for_pca)
d2_vector_matrix, selected_dist_atom_indexes = filter_important_distances(d2_vector_matrix_all,
num_dists=num_dists)
else:
d2_full_matrices = generate_distance_matrices(coords_for_pca)
d2_vector_matrix = reshape_ds(d2_full_matrices)
print("\n(1D) Generation of distance matrices and reshaping upper triangles into vectors done!")
# PCA on distance matrix
d_pca, d_pca_fit, d_components, d_mean, d_values = pca_dr(d2_vector_matrix)
print("\n(2) Done with PCA of structures as distance matrices!")
if print_distance_coefficients:
if coords_for_pca.shape[1] > 1000:
print_distance_weights_to_files(output_directory, n_dim, name + file_name_end, d_components, len(atoms),
selected_atom_indexes=selected_dist_atom_indexes)
else:
print_distance_weights_to_files(output_directory, n_dim, name + file_name_end, d_components, len(atoms))
if reconstruct:
if normal_modes:
function = inverse_transform_of_pcs_as_normal_modes
file_name_end += "_normal_modes"
else:
function = inverse_transform_of_pcs
if coords_for_pca.shape[1] > 1000:
d_components = set_unimportant_distance_weights_to_zero(d_components, selected_dist_atom_indexes,
len(atoms))
d_mean = calc_mean_distance_vector(d2_vector_matrix_all)
PCs_separate_d, PCs_combined_d = function(n_dim, d_pca, d_components, d_mean)
print("\n(3) Done transforming reduced dimensional representation of input into full dimensional space!")
# Turning distance matrix representations of structures back into Cartesian coordinates
PCs_separate = [[distance_matrix_to_coords(PCs_separate_d[i][k])
for k in range(PCs_separate_d.shape[1])] for i in range(PCs_separate_d.shape[0])]
# Turning distance matrix representations of structures back into Cartesian coordinates (all chosen Xs combined
# into one xyz file)
PCs_combined = [distance_matrix_to_coords(PCs_combined_d[i])
for i in range(np.array(PCs_combined_d).shape[0])]
PCs_separate = np.real(PCs_separate)
PCs_combined = np.real(PCs_combined)
print("\n(4D)-(6D) Done with converting distance matrices back to Cartesian coordinates!")
if mw is True:
# Remove mass-weighting of coordinates, individual PCs
no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms, PCs_separate[i])
for i in range(n_dim)]
no_mass_weighting_PCs_combined = remove_mass_weighting(atoms, PCs_combined)
print("\n(UMW) Done removing mass-weighting!")
else:
no_mass_weighting_PCs_separate = PCs_separate
no_mass_weighting_PCs_combined = PCs_combined
if normal_modes:
chirality_consistent_PCs_separate = [
kabsch(chirality_changes_normal_modes(no_mass_weighting_PCs_separate[i], stereo_atoms,
all_signs)) for i in range(n_dim)]
# Reorient coordinates so they are in a consistent coordinate system/chirality, all Xs combined into one array
chirality_consistent_PCs_combined = kabsch(
chirality_changes_normal_modes(no_mass_weighting_PCs_combined, stereo_atoms,
all_signs))
else:
chirality_consistent_PCs_separate = [
kabsch(chirality_changes(no_mass_weighting_PCs_separate[i], stereo_atoms,
all_signs)) for i in range(n_dim)]
# Reorient coordinates so they are in a consistent coordinate system/chirality, all Xs combined into one array
chirality_consistent_PCs_combined = kabsch(chirality_changes(no_mass_weighting_PCs_combined, stereo_atoms,
all_signs))
chirality_consistent_PCs_combined = np.reshape(chirality_consistent_PCs_combined,
(1,
chirality_consistent_PCs_combined.shape[0],
chirality_consistent_PCs_combined.shape[1],
chirality_consistent_PCs_combined.shape[2]))
# Align new Cartesian coordinates to ALIGNED original trajectory
aligned_PCs_separate = [align_to_original_traj(chirality_consistent_PCs_separate[i], aligned_coordinates)
for i in range(len(chirality_consistent_PCs_separate))]
aligned_PCs_combined = [align_to_original_traj(chirality_consistent_PCs_combined[i], aligned_coordinates)
for i in range(len(chirality_consistent_PCs_combined))]
print("\n(7D) Done checking chirality of resultant structures!")
print("\n(8D) Done aligning!")
# Make final structures into xyz files
make_pc_xyz_files(output_directory, name + file_name_end, atoms, aligned_PCs_separate)
make_pc_xyz_files(output_directory, name + file_name_end, atoms, aligned_PCs_combined)
print("\nDone generating output!")
if num_dists:
return name, output_directory, d_pca, d_pca_fit, d_components, d_mean, d_values, aligned_coordinates, \
selected_dist_atom_indexes
else:
return name, output_directory, d_pca, d_pca_fit, d_components, d_mean, d_values, aligned_coordinates
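# Example usage (a minimal sketch; the file name and atom indexes are hypothetical):
#   name, out_dir, d_pca, d_pca_fit, d_components, d_mean, d_values, aligned = \
#       pathreducer_distances_one_file("traj.xyz", n_dim=3, stereo_atoms=[1, 2, 3, 4], mw=True)
# For systems with more than 1000 atoms only the top 75000 distances (chosen by
# filter_important_distances) are used, and the selected atom-pair indexes are returned as an extra value.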
def _read_single_traj_file(topology, trajectory_file_path):
if topology is not None:
name, atoms, coordinates = read_traj_file(topology, trajectory_file_path)
else:
name, atoms, coordinates = read_traj_file(trajectory_file_path)
return name, atoms, coordinates
def pathreducer_distances_directory_of_files(trajectory_file_path, n_dim, stereo_atoms=[1, 2, 3, 4], mw=False,
print_distance_coefficients=True, reconstruct=True, normal_modes=False,
num_dists=None, remove_atom_types=None, topology=None):
"""
Workhorse function for doing dimensionality reduction on xyz files. Dimensionality reduction can be done on the
structures represented as Cartesian coordinates (easy/faster) or the structures represented as distance matrices
(slower, but potentially more useful for certain systems that vary in non-linear ways, e.g., torsions).
:param trajectory_file_path: xyz file or directory filled with xyz files that will be used to generate the
reduced dimensional space, str
:param n_dim: number of dimensions to reduce system to using PCA, int
:param stereo_atoms: list of 4 atom indexes surrounding stereogenic center, ints
:param mw: whether coordinates should be mass weighted prior to PCA, bool
:return: name, directory, pca, pca_fit, components, mean, values, file lengths, aligned coordinates (plus the
selected atom-pair indexes when num_dists is set)
"""
# Check if input is directory (containing input files) or a single input file itself
assert os.path.isfile(trajectory_file_path) or os.path.isdir(trajectory_file_path), "No such file or " \
"directory."
print("\nInput is a directory of files.")
# Make sure even large matrices are printed out in their entirety (for the generation of xyz files)
np.set_printoptions(threshold=sys.maxsize)
# Determining names of output directories/files
file_name_end = "_Distances"
if mw is True:
file_name_end = file_name_end + "_MW"
elif mw is False:
file_name_end = file_name_end + "_noMW"
path = os.path.dirname(trajectory_file_path)
system_name = os.path.basename(path)
print("\nDoing dimensionality reduction on files in %s" % system_name)
trajectory_files = sorted(glob.glob(os.path.join(trajectory_file_path, '*.xyz')))
names = []
atoms = []
file_lengths = []
i = 0
for trajectory_file in trajectory_files:
i = i + 1
name, atoms_one_file, coordinates = read_traj_file(trajectory_file)
if remove_atom_types is not None:
atoms_one_file, coordinates = remove_atoms_by_type(remove_atom_types, atoms_one_file, coordinates)
names.append(name)
atoms.append(atoms_one_file)
file_lengths.append(coordinates.shape[0])
if i == 1:
coords_for_analysis = coordinates
else:
coords_for_analysis = np.concatenate((coords_for_analysis, coordinates), axis=0)
# Creating a directory for output (if directory doesn't already exist)
output_directory = system_name + file_name_end + "_output"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("Results for %s input will be stored in %s" % (system_name, output_directory))
aligned_coordinates = kabsch(coords_for_analysis)
negatives, positives, zeroes, all_signs = chirality_test(coords_for_analysis, stereo_atoms)
if mw is True:
coordinates_shifted = set_atom_one_to_origin(coords_for_analysis)
mass_weighted_coordinates = mass_weighting(atoms_one_file, coordinates_shifted)
print("\n(MW) Done mass-weighting coordinates!")
coords_for_pca = mass_weighted_coordinates
else:
coords_for_pca = coords_for_analysis
if coords_for_pca.shape[1] > 1000:
num_dists = 75000
print("Big matrix. Using the top %s distances for PCA..." % num_dists)
d2_vector_matrix_all = generate_and_reshape_ds_big_structures(coords_for_pca)
d2_mean = calc_mean_distance_vector(d2_vector_matrix_all)
d2_vector_matrix, selected_dist_atom_indexes = filter_important_distances(d2_vector_matrix_all,
num_dists=num_dists)
# TODO: Make reconstruction possible by setting weights on all "non-important" distances to zero
reconstruct = False
else:
d2_full_matrices = generate_distance_matrices(coords_for_pca)
d2_vector_matrix = reshape_ds(d2_full_matrices)
print("\n(1D) Generation of distance matrices and reshaping upper triangles into vectors done!")
# PCA on distance matrix
d_pca, d_pca_fit, d_components, d_mean, d_values = pca_dr(d2_vector_matrix)
print("\n(2) Done with PCA of structures as interatomic distance matrices!")
if print_distance_coefficients:
print_distance_weights_to_files(output_directory, n_dim, system_name + file_name_end, d_components,
len(atoms_one_file))
if reconstruct:
if normal_modes:
function = inverse_transform_of_pcs_as_normal_modes
file_name_end += "_normal_modes"
else:
function = inverse_transform_of_pcs
PCs_separate_d, PCs_combined_d = function(n_dim, d_pca, d_components, d_mean)
print("\n(3) Done transforming reduced dimensional representation of input into full dimensional space!")
# Turning distance matrix representations of structures back into Cartesian coordinates
PCs_separate = [[distance_matrix_to_coords(PCs_separate_d[i][k])
for k in range(PCs_separate_d.shape[1])] for i in range(PCs_separate_d.shape[0])]
# Turning distance matrix representations of structures back into Cartesian coordinates (all chosen PCs combined
# into one xyz file)
PCs_combined = [distance_matrix_to_coords(PCs_combined_d[i]) for i in range(np.array(PCs_combined_d).shape[0])]
PCs_separate = np.real(PCs_separate)
PCs_combined = np.real(PCs_combined)
print("\n(4D)-(6D) Done with converting distance matrices back to Cartesian coordinates!")
if mw is True:
# Remove mass-weighting of coordinates, individual PCs
no_mass_weighting_PCs_separate = [remove_mass_weighting(atoms_one_file, PCs_separate[i])
for i in range(n_dim)]
no_mass_weighting_PCs_combined = remove_mass_weighting(atoms_one_file, PCs_combined)
print("\n(UMW) Done removing mass-weighting!")
else:
no_mass_weighting_PCs_separate = PCs_separate
no_mass_weighting_PCs_combined = PCs_combined
if normal_modes:
chirality_consistent_PCs_separate = [
kabsch(chirality_changes_normal_modes(no_mass_weighting_PCs_separate[i], stereo_atoms,
all_signs)) for i in range(n_dim)]
chirality_consistent_PCs_combined = kabsch(
chirality_changes_normal_modes(no_mass_weighting_PCs_combined, stereo_atoms,
all_signs))
else:
chirality_consistent_PCs_separate = [chirality_changes(no_mass_weighting_PCs_separate[i], stereo_atoms,
all_signs)
for i in range(n_dim)]
chirality_consistent_PCs_combined = kabsch(chirality_changes(no_mass_weighting_PCs_combined, stereo_atoms,
all_signs))
chirality_consistent_PCs_combined = np.reshape(chirality_consistent_PCs_combined,
(1,
chirality_consistent_PCs_combined.shape[0],
chirality_consistent_PCs_combined.shape[1],
chirality_consistent_PCs_combined.shape[2]))
# Align new Cartesian coordinates to ALIGNED original trajectory
aligned_PCs_separate = [align_to_original_traj(chirality_consistent_PCs_separate[i], aligned_coordinates)
for i in range(len(chirality_consistent_PCs_separate))]
aligned_PCs_combined = [align_to_original_traj(chirality_consistent_PCs_combined[i], aligned_coordinates)
for i in range(len(chirality_consistent_PCs_combined))]
print("\n(7D) Done checking chirality of resultant structures!")
print("\n(8D) Done aligning!")
for x in range(len(trajectory_files)):
filename = names[x]
if x == 0:
start_index = 0
end_index = file_lengths[x]
one_file_PCs_separate = np.array(aligned_PCs_separate)[:, start_index:end_index, :, :]
one_file_PCs_combined = np.array(aligned_PCs_combined)[:, start_index:end_index, :, :]
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_separate)
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_combined)
else:
start_index = sum(file_lengths[:x])
end_index = sum(file_lengths[:(x + 1)])
one_file_PCs_separate = np.array(aligned_PCs_separate)[:, start_index:end_index, :, :]
one_file_PCs_combined = np.array(aligned_PCs_combined)[:, start_index:end_index, :, :]
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_separate)
make_pc_xyz_files(output_directory, filename + file_name_end, atoms_one_file, one_file_PCs_combined)
print("\nDone generating output!")
if num_dists:
return system_name, output_directory, d_pca, d_pca_fit, d_components, d_mean, d_values, file_lengths, \
aligned_coordinates, selected_dist_atom_indexes
else:
return system_name, output_directory, d_pca, d_pca_fit, d_components, d_mean, d_values, file_lengths, \
aligned_coordinates
def pathreducer_interactive():
while True:
input_path = input("\nInput a path to an xyz file or directory of xyz files.\n")
if os.path.isfile(input_path):
print("Input is an individual file.")
system_name = os.path.basename(input_path)
break
elif os.path.isdir(input_path):
print("Input is a directory of files.")
path = os.path.dirname(input_path)
system_name = os.path.basename(path)
break
else:
print("No such file or directory.")
continue
print("\nDoing dimensionality reduction on files in %s" % system_name)
while True:
mass_weight = input("\nWould you like to mass-weight the Cartesian coordinates of your structures prior to "
"dimensionality reduction? (True or False)\n")
if mass_weight in ("True", "true", "T", "t"):
mw = True
break
elif mass_weight in ("False", "false", "F", "f"):
mw = False
break
else:
print("Please type True or False.")
continue
while True:
structure_type = input("\nHow would you like to represent your structures? (Cartesians or Distances)\n")
if structure_type in ("Cartesians", "cartesians", "C", "c"):
input_type = "Cartesians"
break
elif structure_type in ("Distances", "distances", "D", "d"):
input_type = "Distances"
while True:
stereo_atoms = input(
"\nOptional: Enter four atom numbers (separated by commas) to define the chirality of your "
"molecule. Hit Return to skip.\n")
if stereo_atoms == "":
stereo_atoms = '1, 2, 3, 4'
break
elif len(stereo_atoms.split(',')) == 4:
break
elif len(stereo_atoms.split(',')) != 4:
print("Please enter four atom numbers separated by commas, or hit Return to skip.")
continue
stereo_atoms = [int(s) for s in stereo_atoms.split(',')]
break
else:
print("Please type Cartesians or Distances.")
continue
while True:
try:
n_dim = int(input("\nHow many principal components would you like to print out? "
"(If you're not sure, use 3)\n"))
except ValueError:
print("Sorry, number of principal components must be an integer value.")
continue
if n_dim <= 0:
print("Sorry, number of principal components must be greater than zero.")
continue
else:
break
if os.path.isfile(input_path) and input_type == "Cartesians":
system_name, output_directory, pca, pca_fit, components, mean, singular_values, aligned_coords = \
pathreducer_cartesians_one_file(input_path, n_dim, mw=mw)
elif os.path.isdir(input_path) and input_type == "Cartesians":
system_name, output_directory, pca, pca_fit, components, mean, singular_values, traj_lengths, aligned_coords = \
pathreducer_cartesians_directory_of_files(input_path, n_dim, mw=mw)
elif os.path.isfile(input_path) and input_type == "Distances":
system_name, output_directory, pca, pca_fit, components, mean, singular_values, aligned_coords = \
pathreducer_distances_one_file(input_path, n_dim, stereo_atoms=stereo_atoms, mw=mw)
elif os.path.isdir(input_path) and input_type == "Distances":
system_name, output_directory, pca, pca_fit, components, mean, singular_values, traj_lengths, aligned_coords = \
pathreducer_distances_directory_of_files(input_path, n_dim, stereo_atoms=stereo_atoms, mw=mw)
else:
print("Something went wrong.")
pcs_df = pd.DataFrame(pca)
if os.path.isdir(input_path):
lengths = traj_lengths
else:
lengths = None
plot_variance = input("\nWould you like a plot of the variance captured by each PC? (True or False)\n")
if plot_variance == "True":
plotting_functions.plot_prop_of_var(singular_values, system_name, output_directory)
print_prop_of_var_to_txt(singular_values, system_name, output_directory)
points_to_circle = None
plot_pcs = input("\nWould you like a plot of the top two and top three PCs? (True or False)\n")
if plot_pcs == "True":
points_to_circle = input("\nIf you have points to circle, enter them now, separated by commas.\n")
if points_to_circle != "":
points_to_circle = [int(s) for s in points_to_circle.split(',')]
else:
points_to_circle = None
plotting_functions.colored_line_and_scatter_plot(pcs_df[0], pcs_df[1], pcs_df[2], same_axis=False,
output_directory=output_directory, lengths=lengths,
points_to_circle=points_to_circle,
imgname=(system_name + "_scatterline"))
new_data_to_project = input("\nDo you have new data you would like to project into this reduced dimensional space? "
"(True or False)\n")
while new_data_to_project == "True":
new_input = input("\nWhat is the path to the file of interest? (Can only take one file at a time)\n")
if input_type == "Cartesians":
new_system_name, new_data_df = transform_new_data_cartesians(new_input, output_directory, n_dim, pca_fit,
components, mean, aligned_coords, mw=mw)
elif input_type == "Distances":
new_system_name, new_data_df = transform_new_data_distances(new_input, output_directory, n_dim, pca_fit,
components, mean, stereo_atoms=stereo_atoms,
mw=mw)
plot_new_data = input("\nWould you like to plot the new data? (True or False)\n")
if plot_new_data == "True":
plotting_functions.colored_line_and_scatter_plot(pcs_df[0], pcs_df[1], pcs_df[2], same_axis=False,
output_directory=output_directory, lengths=lengths,
new_data=new_data_df, points_to_circle=points_to_circle,
imgname=(new_system_name + "_scatterline"))
new_data_to_project = input("\nDo you have new data you would like to project into this reduced dimensional space? "
"(True or False)\n")
def generate_deformation_vector(start_structure, end_structure):
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
nmd_coords = np.reshape(start_structure,
(1, np.array(start_structure).shape[0] * np.array(start_structure).shape[1]))
deformation_vector = end_structure - start_structure
deformation_vector = np.reshape(deformation_vector,
(1, np.array(deformation_vector).shape[0] * np.array(deformation_vector).shape[1]))
print("NMD Coordinates:", nmd_coords)
print("Deformation vector:", deformation_vector)
return deformation_vector
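# Example usage (a minimal sketch; the two 3-atom geometries are hypothetical):
#   start = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
#   end = np.array([[0.0, 0.0, 0.1], [1.1, 0.0, 0.0], [0.0, 0.9, 0.0]])
#   vec = generate_deformation_vector(start, end)
# vec is the flattened (1 x 3N) displacement end - start, suitable for use alongside the printed NMD
# coordinates when building an NMD file for visualization.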
'import os\n'), ((41185, 41214), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (41196, 41214), False, 'import os\n'), ((43627, 43663), 'os.path.isfile', 'os.path.isfile', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (43641, 43663), False, 'import os\n'), ((43667, 43702), 'os.path.isdir', 'os.path.isdir', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (43680, 43702), False, 'import os\n'), ((43743, 43779), 'os.path.isfile', 'os.path.isfile', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (43757, 43779), False, 'import os\n'), ((49646, 49682), 'os.path.isfile', 'os.path.isfile', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (49660, 49682), False, 'import os\n'), ((49686, 49721), 'os.path.isdir', 'os.path.isdir', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (49699, 49721), False, 'import os\n'), ((50399, 50431), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (50413, 50431), False, 'import os\n'), ((50442, 50471), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (50453, 50471), False, 'import os\n'), ((50867, 51020), 'numpy.reshape', 'np.reshape', (['mass_weighted_coordinates', '(mass_weighted_coordinates.shape[0], mass_weighted_coordinates.shape[1] *\n mass_weighted_coordinates.shape[2])'], {}), '(mass_weighted_coordinates, (mass_weighted_coordinates.shape[0], \n mass_weighted_coordinates.shape[1] * mass_weighted_coordinates.shape[2]))\n', (50877, 51020), True, 'import numpy as np\n'), ((51183, 51292), 'numpy.reshape', 'np.reshape', (['aligned_coords', '(aligned_coords.shape[0], aligned_coords.shape[1] * aligned_coords.shape[2])'], {}), '(aligned_coords, (aligned_coords.shape[0], aligned_coords.shape[1\n ] * aligned_coords.shape[2]))\n', (51193, 51292), True, 'import numpy as np\n'), ((54708, 54749), 'os.path.isfile', 'os.path.isfile', (['trajectory_directory_path'], {}), '(trajectory_directory_path)\n', (54722, 54749), False, 'import os\n'), ((54753, 54793), 'os.path.isdir', 'os.path.isdir', (['trajectory_directory_path'], {}), '(trajectory_directory_path)\n', (54766, 54793), False, 'import os\n'), ((56501, 56533), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (56515, 56533), False, 'import os\n'), ((56544, 56573), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (56555, 56573), False, 'import os\n'), ((56991, 57144), 'numpy.reshape', 'np.reshape', (['mass_weighted_coordinates', '(mass_weighted_coordinates.shape[0], mass_weighted_coordinates.shape[1] *\n mass_weighted_coordinates.shape[2])'], {}), '(mass_weighted_coordinates, (mass_weighted_coordinates.shape[0], \n mass_weighted_coordinates.shape[1] * mass_weighted_coordinates.shape[2]))\n', (57001, 57144), True, 'import numpy as np\n'), ((57309, 57418), 'numpy.reshape', 'np.reshape', (['aligned_coords', '(aligned_coords.shape[0], aligned_coords.shape[1] * aligned_coords.shape[2])'], {}), '(aligned_coords, (aligned_coords.shape[0], aligned_coords.shape[1\n ] * aligned_coords.shape[2]))\n', (57319, 57418), True, 'import numpy as np\n'), ((62074, 62110), 'os.path.isfile', 'os.path.isfile', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (62088, 62110), False, 'import os\n'), ((62114, 62149), 'os.path.isdir', 'os.path.isdir', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (62127, 62149), False, 'import os\n'), ((62809, 62841), 'os.path.exists', 'os.path.exists', 
(['output_directory'], {}), '(output_directory)\n', (62823, 62841), False, 'import os\n'), ((62852, 62881), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (62863, 62881), False, 'import os\n'), ((66111, 66132), 'numpy.real', 'np.real', (['PCs_separate'], {}), '(PCs_separate)\n', (66118, 66132), True, 'import numpy as np\n'), ((66157, 66178), 'numpy.real', 'np.real', (['PCs_combined'], {}), '(PCs_combined)\n', (66164, 66178), True, 'import numpy as np\n'), ((68043, 68237), 'numpy.reshape', 'np.reshape', (['chirality_consistent_PCs_combined', '(1, chirality_consistent_PCs_combined.shape[0],\n chirality_consistent_PCs_combined.shape[1],\n chirality_consistent_PCs_combined.shape[2])'], {}), '(chirality_consistent_PCs_combined, (1,\n chirality_consistent_PCs_combined.shape[0],\n chirality_consistent_PCs_combined.shape[1],\n chirality_consistent_PCs_combined.shape[2]))\n', (68053, 68237), True, 'import numpy as np\n'), ((71124, 71160), 'os.path.isfile', 'os.path.isfile', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (71138, 71160), False, 'import os\n'), ((71164, 71199), 'os.path.isdir', 'os.path.isdir', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (71177, 71199), False, 'import os\n'), ((72839, 72871), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (72853, 72871), False, 'import os\n'), ((72882, 72911), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (72893, 72911), False, 'import os\n'), ((75820, 75841), 'numpy.real', 'np.real', (['PCs_separate'], {}), '(PCs_separate)\n', (75827, 75841), True, 'import numpy as np\n'), ((75866, 75887), 'numpy.real', 'np.real', (['PCs_combined'], {}), '(PCs_combined)\n', (75873, 75887), True, 'import numpy as np\n'), ((77570, 77764), 'numpy.reshape', 'np.reshape', (['chirality_consistent_PCs_combined', '(1, chirality_consistent_PCs_combined.shape[0],\n chirality_consistent_PCs_combined.shape[1],\n chirality_consistent_PCs_combined.shape[2])'], {}), '(chirality_consistent_PCs_combined, (1,\n chirality_consistent_PCs_combined.shape[0],\n chirality_consistent_PCs_combined.shape[1],\n chirality_consistent_PCs_combined.shape[2]))\n', (77580, 77764), True, 'import numpy as np\n'), ((80339, 80365), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (80353, 80365), False, 'import os\n'), ((83070, 83096), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (83084, 83096), False, 'import os\n'), ((84488, 84575), 'PathReducer.plotting_functions.plot_prop_of_var', 'plotting_functions.plot_prop_of_var', (['singular_values', 'system_name', 'output_directory'], {}), '(singular_values, system_name,\n output_directory)\n', (84523, 84575), True, 'import PathReducer.plotting_functions as plotting_functions\n'), ((85068, 85304), 'PathReducer.plotting_functions.colored_line_and_scatter_plot', 'plotting_functions.colored_line_and_scatter_plot', (['pcs_df[0]', 'pcs_df[1]', 'pcs_df[2]'], {'same_axis': '(False)', 'output_directory': 'output_directory', 'lengths': 'lengths', 'points_to_circle': 'points_to_circle', 'imgname': "(system_name + '_scatterline')"}), "(pcs_df[0], pcs_df[1],\n pcs_df[2], same_axis=False, output_directory=output_directory, lengths=\n lengths, points_to_circle=points_to_circle, imgname=system_name +\n '_scatterline')\n", (85116, 85304), True, 'import PathReducer.plotting_functions as plotting_functions\n'), ((7156, 7179), 'numpy.triu_indices', 'np.triu_indices', (['x'], {'k': 
'(1)'}), '(x, k=1)\n', (7171, 7179), True, 'import numpy as np\n'), ((7445, 7468), 'numpy.triu_indices', 'np.triu_indices', (['x'], {'k': '(1)'}), '(x, k=1)\n', (7460, 7468), True, 'import numpy as np\n'), ((7897, 7924), 'numpy.sqrt', 'np.sqrt', (['(8 * v.shape[0] + 1)'], {}), '(8 * v.shape[0] + 1)\n', (7904, 7924), True, 'import numpy as np\n'), ((9219, 9234), 'numpy.sqrt', 'np.sqrt', (['values'], {}), '(values)\n', (9226, 9234), True, 'import numpy as np\n'), ((11936, 11971), 'numpy.var', 'np.var', (['upper_tri_d2_matrices[:, k]'], {}), '(upper_tri_d2_matrices[:, k])\n', (11942, 11971), True, 'import numpy as np\n'), ((13985, 14043), 'numpy.dot', 'np.dot', (['matrix_reduced[:, i, None]', 'components[None, i, :]'], {}), '(matrix_reduced[:, i, None], components[None, i, :])\n', (13991, 14043), True, 'import numpy as np\n'), ((15563, 15621), 'numpy.dot', 'np.dot', (['matrix_reduced[:, i, None]', 'components[None, i, :]'], {}), '(matrix_reduced[:, i, None], components[None, i, :])\n', (15569, 15621), True, 'import numpy as np\n'), ((18036, 18049), 'numpy.argsort', 'np.argsort', (['a'], {}), '(a)\n', (18046, 18049), True, 'import numpy as np\n'), ((20242, 20258), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (20255, 20258), True, 'import numpy as np\n'), ((20448, 20463), 'numpy.array', 'np.array', (['signs'], {}), '(signs)\n', (20456, 20463), True, 'import numpy as np\n'), ((20490, 20505), 'numpy.array', 'np.array', (['signs'], {}), '(signs)\n', (20498, 20505), True, 'import numpy as np\n'), ((20533, 20548), 'numpy.array', 'np.array', (['signs'], {}), '(signs)\n', (20541, 20548), True, 'import numpy as np\n'), ((25619, 25647), 'numpy.cumsum', 'np.cumsum', (['normalized_values'], {}), '(normalized_values)\n', (25628, 25647), True, 'import numpy as np\n'), ((27348, 27419), 'os.path.join', 'os.path.join', (['directory', "(system_name + '_PC%s_components.txt' % (n + 1))"], {}), "(directory, system_name + '_PC%s_components.txt' % (n + 1))\n", (27360, 27419), False, 'import os\n'), ((28090, 28175), 'os.path.join', 'os.path.join', (['directory', "(system_name + '_PC%s_components_weighted.txt' % (n + 1))"], {}), "(directory, system_name + '_PC%s_components_weighted.txt' % (n + 1)\n )\n", (28102, 28175), False, 'import os\n'), ((33277, 33335), 'numpy.dot', 'np.dot', (['components[:, i, None]', 'pca_components[None, i, :]'], {}), '(components[:, i, None], pca_components[None, i, :])\n', (33283, 33335), True, 'import numpy as np\n'), ((37344, 37402), 'numpy.dot', 'np.dot', (['components[:, i, None]', 'pca_components[None, i, :]'], {}), '(components[:, i, None], pca_components[None, i, :])\n', (37350, 37402), True, 'import numpy as np\n'), ((45783, 45818), 'os.path.isdir', 'os.path.isdir', (['trajectory_file_path'], {}), '(trajectory_file_path)\n', (45796, 45818), False, 'import os\n'), ((55426, 55474), 'os.path.join', 'os.path.join', (['trajectory_directory_path', '"""*.xyz"""'], {}), "(trajectory_directory_path, '*.xyz')\n", (55438, 55474), False, 'import os\n'), ((56288, 56346), 'numpy.concatenate', 'np.concatenate', (['(coords_for_analysis, coordinates)'], {'axis': '(0)'}), '((coords_for_analysis, coordinates), axis=0)\n', (56302, 56346), True, 'import numpy as np\n'), ((71975, 72018), 'os.path.join', 'os.path.join', (['trajectory_file_path', '"""*.xyz"""'], {}), "(trajectory_file_path, '*.xyz')\n", (71987, 72018), False, 'import os\n'), ((72626, 72684), 'numpy.concatenate', 'np.concatenate', (['(coords_for_analysis, coordinates)'], {'axis': '(0)'}), '((coords_for_analysis, coordinates), 
axis=0)\n', (72640, 72684), True, 'import numpy as np\n'), ((80445, 80473), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (80461, 80473), False, 'import os\n'), ((80509, 80534), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (80522, 80534), False, 'import os\n'), ((83318, 83343), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (83331, 83343), False, 'import os\n'), ((86573, 86835), 'PathReducer.plotting_functions.colored_line_and_scatter_plot', 'plotting_functions.colored_line_and_scatter_plot', (['pcs_df[0]', 'pcs_df[1]', 'pcs_df[2]'], {'same_axis': '(False)', 'output_directory': 'output_directory', 'lengths': 'lengths', 'new_data': 'new_data_df', 'points_to_circle': 'points_to_circle', 'imgname': "(new_system_name + '_scatterline')"}), "(pcs_df[0], pcs_df[1],\n pcs_df[2], same_axis=False, output_directory=output_directory, lengths=\n lengths, new_data=new_data_df, points_to_circle=points_to_circle,\n imgname=new_system_name + '_scatterline')\n", (86621, 86835), True, 'import PathReducer.plotting_functions as plotting_functions\n'), ((7932, 7959), 'numpy.sqrt', 'np.sqrt', (['(8 * v.shape[0] + 1)'], {}), '(8 * v.shape[0] + 1)\n', (7939, 7959), True, 'import numpy as np\n'), ((8953, 8977), 'numpy.ones', 'np.ones', (['(1, d.shape[0])'], {}), '((1, d.shape[0]))\n', (8960, 8977), True, 'import numpy as np\n'), ((17029, 17047), 'numpy.arange', 'np.arange', (['(-20)', '(21)'], {}), '(-20, 21)\n', (17038, 17047), True, 'import numpy as np\n'), ((20308, 20324), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (20321, 20324), True, 'import numpy as np\n'), ((24088, 24109), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (24096, 24109), True, 'import numpy as np\n'), ((24192, 24248), 'os.path.join', 'os.path.join', (['output_directory', "('%s_all_PCs.xyz' % title)"], {}), "(output_directory, '%s_all_PCs.xyz' % title)\n", (24204, 24248), False, 'import os\n'), ((24292, 24354), 'os.path.join', 'os.path.join', (['output_directory', "('%s_PC%s.xyz' % (title, k + 1))"], {}), "(output_directory, '%s_PC%s.xyz' % (title, k + 1))\n", (24304, 24354), False, 'import os\n'), ((80609, 80636), 'os.path.dirname', 'os.path.dirname', (['input_path'], {}), '(input_path)\n', (80624, 80636), False, 'import os\n'), ((80664, 80686), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (80680, 80686), False, 'import os\n'), ((83589, 83615), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (83603, 83615), False, 'import os\n'), ((8077, 8095), 'numpy.sqrt', 'np.sqrt', (['(8 * n + 1)'], {}), '(8 * n + 1)\n', (8084, 8095), True, 'import numpy as np\n'), ((8795, 8819), 'numpy.ones', 'np.ones', (['(d.shape[0], 1)'], {}), '((d.shape[0], 1))\n', (8802, 8819), True, 'import numpy as np\n'), ((8821, 8840), 'numpy.transpose', 'np.transpose', (['d_one'], {}), '(d_one)\n', (8833, 8840), True, 'import numpy as np\n'), ((17639, 17676), 'numpy.sqrt', 'np.sqrt', (['(-8 * k + 4 * n * (n - 1) - 7)'], {}), '(-8 * k + 4 * n * (n - 1) - 7)\n', (17646, 17676), True, 'import numpy as np\n'), ((20373, 20389), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (20386, 20389), True, 'import numpy as np\n'), ((24133, 24154), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (24141, 24154), True, 'import numpy as np\n'), ((59508, 59548), 'numpy.array', 'np.array', (['no_mass_weighting_PCs_separate'], {}), '(no_mass_weighting_PCs_separate)\n', (59516, 59548), True, 
'import numpy as np\n'), ((59622, 59662), 'numpy.array', 'np.array', (['no_mass_weighting_PCs_combined'], {}), '(no_mass_weighting_PCs_combined)\n', (59630, 59662), True, 'import numpy as np\n'), ((60101, 60141), 'numpy.array', 'np.array', (['no_mass_weighting_PCs_separate'], {}), '(no_mass_weighting_PCs_separate)\n', (60109, 60141), True, 'import numpy as np\n'), ((60215, 60255), 'numpy.array', 'np.array', (['no_mass_weighting_PCs_combined'], {}), '(no_mass_weighting_PCs_combined)\n', (60223, 60255), True, 'import numpy as np\n'), ((78806, 78836), 'numpy.array', 'np.array', (['aligned_PCs_separate'], {}), '(aligned_PCs_separate)\n', (78814, 78836), True, 'import numpy as np\n'), ((78910, 78940), 'numpy.array', 'np.array', (['aligned_PCs_combined'], {}), '(aligned_PCs_combined)\n', (78918, 78940), True, 'import numpy as np\n'), ((79379, 79409), 'numpy.array', 'np.array', (['aligned_PCs_separate'], {}), '(aligned_PCs_separate)\n', (79387, 79409), True, 'import numpy as np\n'), ((79483, 79513), 'numpy.array', 'np.array', (['aligned_PCs_combined'], {}), '(aligned_PCs_combined)\n', (79491, 79513), True, 'import numpy as np\n'), ((83862, 83887), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (83875, 83887), False, 'import os\n'), ((16900, 16918), 'numpy.arange', 'np.arange', (['(-20)', '(21)'], {}), '(-20, 21)\n', (16909, 16918), True, 'import numpy as np\n'), ((37967, 37989), 'numpy.array', 'np.array', (['PCs_combined'], {}), '(PCs_combined)\n', (37975, 37989), True, 'import numpy as np\n'), ((87258, 87283), 'numpy.array', 'np.array', (['start_structure'], {}), '(start_structure)\n', (87266, 87283), True, 'import numpy as np\n'), ((87295, 87320), 'numpy.array', 'np.array', (['start_structure'], {}), '(start_structure)\n', (87303, 87320), True, 'import numpy as np\n'), ((87490, 87518), 'numpy.array', 'np.array', (['deformation_vector'], {}), '(deformation_vector)\n', (87498, 87518), True, 'import numpy as np\n'), ((87530, 87558), 'numpy.array', 'np.array', (['deformation_vector'], {}), '(deformation_vector)\n', (87538, 87558), True, 'import numpy as np\n'), ((66049, 66073), 'numpy.array', 'np.array', (['PCs_combined_d'], {}), '(PCs_combined_d)\n', (66057, 66073), True, 'import numpy as np\n'), ((75758, 75782), 'numpy.array', 'np.array', (['PCs_combined_d'], {}), '(PCs_combined_d)\n', (75766, 75782), True, 'import numpy as np\n'), ((24797, 24810), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (24807, 24810), True, 'import numpy as np\n')]
|
#System Dependencies
import base64
#Dash dependencies
import dash
import dash_table
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import cv2
#AZURE BLOB STORAGE
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
from yolov3 import Create_Yolov3
from utils import detect_image, Load_Yolo_model
from app import app
from apps import dashboard, index
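#Instantiate the custom YOLOv3 detector for the license-plate classes and restore its trained checkpoint weights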
yolo = Create_Yolov3(input_size= 416, CLASSES="./model_data/license_plate_names.txt")
yolo.load_weights("./checkpoints/yolov3_custom") # use keras weights
connect_str = "DefaultEndpointsProtocol=https;AccountName=epmnprgdevilabdiag;AccountKey=<KEY>;EndpointSuffix=core.windows.net"
#IMAGE UPLOAD
@app.callback(Output("hidden_div_for_redirect_callback", "children"),
[Input('upload-image', 'contents')],
[State('upload-image', 'filename'),
State('upload-image', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
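    """Upload the submitted .jpg images to the Azure Blob Storage container (clearing any
    previously stored blobs first) and redirect the user to the dashboard page."""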
if list_of_contents is not None:
        #Connect to BLOB STORAGE
blob_service_client = BlobServiceClient.from_connection_string(connect_str)
container_client = blob_service_client.get_container_client("pruebasds4")
        #DELETE ALL IMAGES IN THE BLOB STORAGE CONTAINER, IF ANY
my_blobs = container_client.list_blobs()
        #BATCH DELETE LIMIT: AZURE ACCEPTS AT MOST 256 BLOBS PER delete_blobs CALL, SO REMOVE THEM IN CHUNKS
blobs_list = [b.name for b in my_blobs]
blobs_length = len(blobs_list)
if blobs_length >= 256:
start=0
end=256
while end <= blobs_length:
container_client.delete_blobs(*blobs_list[start:end])
start = start + 256
end = end + 256
if start < blobs_length and end > blobs_length:
container_client.delete_blobs(*blobs_list[start:blobs_length])
else:
container_client.delete_blobs(*blobs_list)
        #UPLOAD ALL THE IMAGES TO THE CONTAINER
for image , name, _ in zip(list_of_contents, list_of_names, list_of_dates):
if 'jpg' in name:
#decoding image
_ , content_string = image.split(',')
#decoded = base64.b64decode(content_string)
container_client.upload_blob(name=name, data=content_string)
return dcc.Location(pathname="/dashboard", id="url")
#BUTTON TO RETURN TO THE HOME PAGE
@app.callback(Output('return-to-index', 'children'),
[Input('btn-nclicks-1', 'n_clicks')])
def displayClick(btn1):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'btn-nclicks-1' in changed_id:
return dcc.Location(pathname="/", id="url")
#Generates an HTML thumbnail component for an image processed by the model
def generate_thumbnail(image):
return html.Img(
src=image,
style = {
'height': '50%',
'width': '50%',
'float': 'center',
'position': 'relative',
'padding-top': '10px',
'padding-right': '10px'
}
)
def generate_table(dataframe):
return dash_table.DataTable(
id='table-uploading',
data= dataframe.to_dict('records'),
columns=[{"name": i, "id": i} for i in dataframe.columns],
sort_action='native',
style_as_list_view=True,
style_table= {'margin-left':'15px'},
style_header={'backgroundColor': '#222629', 'color':'#61892F', 'font-family': 'Enriqueta', 'line-height': '1.25', 'margin': '0 0 10px', 'font-size': '15px', 'font-weight': 'bold', 'textAlign': 'left'},
style_cell={
'backgroundColor': '#1A1A1D',
'color': '#474B4F', 'font-family': 'Enriqueta', 'line-height': '1.25', 'margin': '0 0 10px', 'font-size': '13px', 'font-weight': 'bold', 'textAlign': 'center'},
style_data_conditional=[
{
'if': {
'filter_query': '{taken} = 1',
'column_id': 'taken',
},
'backgroundColor': 'dodgerblue',
'color': 'white'
},
{
'if': {
'filter_query': '{taken} = 1', # comparing columns to each other
'column_id': 'order_id'
},
'backgroundColor': '#3D9970'
}]
)
def change_classname(x):
#Change class_names
if x == 0:
return 'RA'
elif x== 1:
return 'HU'
else:
return 'DE'
#CALLBACK FOR THE MODEL INPUTS AND OUTPUTS
@app.callback([Output("output-images", "children"), Output("output-table", "children"), Output("plot1", "figure")],
[Input('hidden-div-dashboard', 'content')])
def update_dashboard(generate_dashboard):
    #Download all the images from blob storage and process them with the deep learning model
images_div = []
blob_service_client = BlobServiceClient.from_connection_string(connect_str)
container_client = blob_service_client.get_container_client("pruebasds4")
my_blobs = container_client.list_blobs()
#Column name list
column_names = ['id', 'x1', 'y1', 'x2' , 'y2', 'Score', 'Class']
#row list
rows = []
names = []
images = []
for blob in my_blobs:
image_downloaded = container_client.download_blob(blob.name).readall()
decoded_data = base64.b64decode( image_downloaded )
np_data = np.fromstring(decoded_data,np.uint8)
img = cv2.imdecode(np_data,cv2.IMREAD_UNCHANGED)
        #FEED THE IMAGE INTO THE MODEL
prediction = detect_image(yolo, img, "./IMAGES/HU8_detect.jpg", input_size= 416, show=True, CLASSES="./model_data/license_plate_names.txt", rectangle_colors=(255,0,0))
if prediction[1][0]:
for box_stats in prediction[1]:
box_stats['id'] = blob.name
rows.append(box_stats)
        #ENCODE THE ANNOTATED IMAGE TO BASE64
retval, buffer_img = cv2.imencode('.jpg', prediction[0])
data = base64.b64encode(buffer_img)
images_div.append('data:image/png;base64,{}'.format(data.decode()))
    #Build the detections dataframe from the collected rows
df = pd.DataFrame(rows, columns = column_names)
df['Class'] = df['Class'].apply(lambda x: change_classname(x))
children = [generate_thumbnail(data) for data in images_div]
df2 = df.groupby(['Class']).size().reset_index(name='counts')
#GENERATE PLOTS
colors = ['#61892F', '#6B6E70', '#86C232']
ploty1 = go.Figure(data = [go.Pie(
labels = df2['Class'], values = df2['counts'], hole=.5
)])
ploty1.update_layout(height= 350, width = 470, paper_bgcolor = 'rgba(0,0,0,0)', plot_bgcolor = 'rgba(0,0,0,0)',
font=dict(family = "Enriqueta, Times New Roman", size=16 , color='rgb(97,137,47)'), title="Faillure Distribution", title_x = 0.5)
ploty1.update_traces(marker = dict(colors=colors))
table = generate_table(df)
return children, table, ploty1
#GENERATE LABELED IMAGES FROM THE MODEL
|
[
"utils.detect_image",
"pandas.DataFrame",
"cv2.imencode",
"yolov3.Create_Yolov3",
"dash.dependencies.Output",
"dash_core_components.Location",
"base64.b64decode",
"base64.b64encode",
"dash.dependencies.Input",
"plotly.graph_objs.Pie",
"cv2.imdecode",
"dash_html_components.Img",
"azure.storage.blob.BlobServiceClient.from_connection_string",
"dash.dependencies.State",
"numpy.fromstring"
] |
[((568, 645), 'yolov3.Create_Yolov3', 'Create_Yolov3', ([], {'input_size': '(416)', 'CLASSES': '"""./model_data/license_plate_names.txt"""'}), "(input_size=416, CLASSES='./model_data/license_plate_names.txt')\n", (581, 645), False, 'from yolov3 import Create_Yolov3\n'), ((873, 927), 'dash.dependencies.Output', 'Output', (['"""hidden_div_for_redirect_callback"""', '"""children"""'], {}), "('hidden_div_for_redirect_callback', 'children')\n", (879, 927), False, 'from dash.dependencies import Input, Output, State\n'), ((2662, 2699), 'dash.dependencies.Output', 'Output', (['"""return-to-index"""', '"""children"""'], {}), "('return-to-index', 'children')\n", (2668, 2699), False, 'from dash.dependencies import Input, Output, State\n'), ((3056, 3215), 'dash_html_components.Img', 'html.Img', ([], {'src': 'image', 'style': "{'height': '50%', 'width': '50%', 'float': 'center', 'position': 'relative',\n 'padding-top': '10px', 'padding-right': '10px'}"}), "(src=image, style={'height': '50%', 'width': '50%', 'float':\n 'center', 'position': 'relative', 'padding-top': '10px',\n 'padding-right': '10px'})\n", (3064, 3215), True, 'import dash_html_components as html\n'), ((5242, 5295), 'azure.storage.blob.BlobServiceClient.from_connection_string', 'BlobServiceClient.from_connection_string', (['connect_str'], {}), '(connect_str)\n', (5282, 5295), False, 'from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\n'), ((6511, 6551), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'column_names'}), '(rows, columns=column_names)\n', (6523, 6551), True, 'import pandas as pd\n'), ((1252, 1305), 'azure.storage.blob.BlobServiceClient.from_connection_string', 'BlobServiceClient.from_connection_string', (['connect_str'], {}), '(connect_str)\n', (1292, 1305), False, 'from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\n'), ((2541, 2586), 'dash_core_components.Location', 'dcc.Location', ([], {'pathname': '"""/dashboard"""', 'id': '"""url"""'}), "(pathname='/dashboard', id='url')\n", (2553, 2586), True, 'import dash_core_components as dcc\n'), ((944, 977), 'dash.dependencies.Input', 'Input', (['"""upload-image"""', '"""contents"""'], {}), "('upload-image', 'contents')\n", (949, 977), False, 'from dash.dependencies import Input, Output, State\n'), ((995, 1028), 'dash.dependencies.State', 'State', (['"""upload-image"""', '"""filename"""'], {}), "('upload-image', 'filename')\n", (1000, 1028), False, 'from dash.dependencies import Input, Output, State\n'), ((1045, 1083), 'dash.dependencies.State', 'State', (['"""upload-image"""', '"""last_modified"""'], {}), "('upload-image', 'last_modified')\n", (1050, 1083), False, 'from dash.dependencies import Input, Output, State\n'), ((2906, 2942), 'dash_core_components.Location', 'dcc.Location', ([], {'pathname': '"""/"""', 'id': '"""url"""'}), "(pathname='/', id='url')\n", (2918, 2942), True, 'import dash_core_components as dcc\n'), ((2716, 2750), 'dash.dependencies.Input', 'Input', (['"""btn-nclicks-1"""', '"""n_clicks"""'], {}), "('btn-nclicks-1', 'n_clicks')\n", (2721, 2750), False, 'from dash.dependencies import Input, Output, State\n'), ((5699, 5733), 'base64.b64decode', 'base64.b64decode', (['image_downloaded'], {}), '(image_downloaded)\n', (5715, 5733), False, 'import base64\n'), ((5754, 5791), 'numpy.fromstring', 'np.fromstring', (['decoded_data', 'np.uint8'], {}), '(decoded_data, np.uint8)\n', (5767, 5791), True, 'import numpy as np\n'), ((5805, 5848), 'cv2.imdecode', 'cv2.imdecode', (['np_data', 
'cv2.IMREAD_UNCHANGED'], {}), '(np_data, cv2.IMREAD_UNCHANGED)\n', (5817, 5848), False, 'import cv2\n'), ((5903, 6068), 'utils.detect_image', 'detect_image', (['yolo', 'img', '"""./IMAGES/HU8_detect.jpg"""'], {'input_size': '(416)', 'show': '(True)', 'CLASSES': '"""./model_data/license_plate_names.txt"""', 'rectangle_colors': '(255, 0, 0)'}), "(yolo, img, './IMAGES/HU8_detect.jpg', input_size=416, show=\n True, CLASSES='./model_data/license_plate_names.txt', rectangle_colors=\n (255, 0, 0))\n", (5915, 6068), False, 'from utils import detect_image, Load_Yolo_model\n'), ((4909, 4944), 'dash.dependencies.Output', 'Output', (['"""output-images"""', '"""children"""'], {}), "('output-images', 'children')\n", (4915, 4944), False, 'from dash.dependencies import Input, Output, State\n'), ((4946, 4980), 'dash.dependencies.Output', 'Output', (['"""output-table"""', '"""children"""'], {}), "('output-table', 'children')\n", (4952, 4980), False, 'from dash.dependencies import Input, Output, State\n'), ((4982, 5007), 'dash.dependencies.Output', 'Output', (['"""plot1"""', '"""figure"""'], {}), "('plot1', 'figure')\n", (4988, 5007), False, 'from dash.dependencies import Input, Output, State\n'), ((5025, 5065), 'dash.dependencies.Input', 'Input', (['"""hidden-div-dashboard"""', '"""content"""'], {}), "('hidden-div-dashboard', 'content')\n", (5030, 5065), False, 'from dash.dependencies import Input, Output, State\n'), ((6304, 6339), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'prediction[0]'], {}), "('.jpg', prediction[0])\n", (6316, 6339), False, 'import cv2\n'), ((6359, 6387), 'base64.b64encode', 'base64.b64encode', (['buffer_img'], {}), '(buffer_img)\n', (6375, 6387), False, 'import base64\n'), ((6859, 6918), 'plotly.graph_objs.Pie', 'go.Pie', ([], {'labels': "df2['Class']", 'values': "df2['counts']", 'hole': '(0.5)'}), "(labels=df2['Class'], values=df2['counts'], hole=0.5)\n", (6865, 6918), True, 'import plotly.graph_objs as go\n')]
|
import backbone.support.configurations_variables as confv
import backbone.support.data_loading as dl
import backbone.support.data_analysis as da
import backbone.support.data_cleaning as dc
import backbone.support.configuration_classes as confc
import backbone.support.saving_loading as sl
import backbone.support.plots_and_charts as pc
import backbone.support.build_features as bf
import numpy as np
import backbone.support.models as mdl
from sklearn.utils.class_weight import compute_class_weight
from tensorflow.keras.callbacks import TensorBoard
import time
import backbone.support.directory_file_checking as dfc
import os
from tensorflow.python.keras.callbacks import CSVLogger
import tensorflow as tf
print("\t===========================================================================================\n"
"\t\tMain program started for MAIN-DATABASE:{database}, GENDER-ISOLATION:{gender}\n"
"\t\t\t\u2234 Dataset Name: {name}\n"
"\t==========================================================================================="
.format(database=confv.database_cremad, gender=confv.gender_female, name=confv.dataset_cremad_female))
'''
# DATA LOADING SECTION
print("\n--------------------Started loading original data from the main database: {name}--------------------".format(name=confv.database_cremad))
data_info_cremad_df = dl.load_original_data(database=confv.database_cremad)
print("No. of sample audio files in {database} database: {length}\n".format(database=confv.database_cremad, length=len(data_info_cremad_df)))
print("Dataframe head of {database} database:".format(database=confv.database_cremad))
print(data_info_cremad_df.head())
print("\nDataframe tail of {database} database:".format(database=confv.database_cremad))
print(data_info_cremad_df.tail())
print("--------------------Finished loading original data from the main database: {name}--------------------".format(name=confv.database_cremad))
# RANDOM BASE AUDIO WAVE ANALYSIS SECTION
print("\n\n--------------------Started random base audio wave analysis for the main database: {name}--------------------".format(name=confv.database_cremad))
da.base_audio_wave_analysis(data_info_cremad_df.audio_fname[500], database=confv.database_cremad, status=confv.original)
print("--------------------Finished random base audio wave analysis for the main database: {name}--------------------".format(name=confv.database_cremad))
# DATAFRAME ADJUSTMENTS SECTION
print("\n\n--------------------Started dataframe adjustment for the main database: {name}--------------------".format(name=confv.database_cremad))
data_info_cremad_df_m, data_info_cremad_df_f = dc.data_adjustments(data_info_cremad_df)
print("--------------------Finished dataframe adjustment for the main database: {name}--------------------".format(name=confv.database_cremad))
# DATAFRAME SAVING
print("\n\n--------------------Started dataframe saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
cremad_f_df_obj = confc.DataFrame(database=confv.database_cremad, gender=confv.gender_female, df=data_info_cremad_df_f)
sl.save_dataframe(cremad_f_df_obj)
print("--------------------Finished dataframe saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
'''
# LOAD REQUIRED PICKLE
print("\n\n--------------------Started dataframe loading for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
cremad_f_df_obj = confc.DataFrame(database=confv.database_cremad, gender=confv.gender_female)
cremad_f_df_obj = sl.load_dataframe(cremad_f_df_obj)
data_info_cremad_df_f = cremad_f_df_obj.df
print(cremad_f_df_obj.database)
print(cremad_f_df_obj.gender)
print(len(data_info_cremad_df_f))
print(data_info_cremad_df_f.head())
print(data_info_cremad_df_f.tail())
print(cremad_f_df_obj.dataset)
print(cremad_f_df_obj.save_path)
print("--------------------Finished dataframe loading for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
'''
# ORIGINAL DATA DISTRIBUTION ANALYSIS SECTION
print("\n\n--------------------Started original data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
pc.emotion_distribution_bar_plot(df=data_info_cremad_df_f, title="{database} - {gender} Isolation - No. of Files".format(database=confv.database_cremad, gender=confv.gender_female))
pc.emotion_distribution_pie_plot(df=data_info_cremad_df_f, database=confv.database_cremad, status=confv.original, gender=confv.gender_female, title="{database} - {gender} Isolation - Class/Data/Time Distribution".format(database=confv.database_cremad, gender=confv.gender_female))
print("--------------------Finished original data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
# ORIGINAL DATA VISUAL ANALYSIS (signal, fft, fbank, mfcc) SECTION
print("\n\n--------------------Started original data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
da.visual_analysis(df=data_info_cremad_df_f, database=confv.database_cremad, status=confv.original, gender=confv.gender_female, envelope=False, resample=False)
da.visual_analysis(df=data_info_cremad_df_f, database=confv.database_cremad, status=confv.original, gender=confv.gender_female, envelope=True, resample=True)
print("--------------------Finished original data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
# DATA CLEANING - DOWN SAMPLING AND NOISE FLOOR DETECTION
print("\n\n--------------------Started data cleaning for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
dc.data_cleaning(df=data_info_cremad_df_f, database=confv.database_cremad)
print("--------------------Finished data cleaning for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
'''
# DATA MINIMUM AUDIO LENGTH COMPLIANCE CHECK
print("\n\n--------------------Started data minimum audio compliance check for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
data_info_cremad_df_f = dc.check_and_adjust_df_for_minimum_audio_length_after_cleaning(df=data_info_cremad_df_f, database=confv.database_cremad, gender=confv.gender_female)
print("--------------------Finished data minimum audio compliance check for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
'''
# CLEANED DATA DISTRIBUTION ANALYSIS SECTION
print("\n\n--------------------Started cleaned data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
pc.emotion_distribution_bar_plot(df=data_info_cremad_df_f, title="{database} - {gender} Isolation - No. of Files".format(database=confv.database_cremad, gender=confv.gender_female))
pc.emotion_distribution_pie_plot(df=data_info_cremad_df_f, database=confv.database_cremad, status=confv.clean, gender=confv.gender_female, title="{database} - {gender} Isolation - Class/Data/Time Distribution".format(database=confv.database_cremad, gender=confv.gender_female))
print("--------------------Finished cleaned data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
# CLEANED DATA VISUAL ANALYSIS (signal, fft, fbank, mfcc) SECTION
print("\n\n--------------------Started cleaned data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
da.visual_analysis(df=data_info_cremad_df_f, database=confv.database_cremad, status=confv.clean, gender=confv.gender_female, envelope=False, resample=False)
# This is same as,
# da.visual_analysis(df=data_info_cremad_df_f, database=confv.database_cremad, status=confv.original, gender=confv.gender_female, envelope=True, resample=True)
# Since these cleaned data are already equipped with envelope and resampling, setting them to False or True does not matter.
# (envelope and resample does not matter when its clean)
print("--------------------Finished cleaned data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
'''
# Building Features
print("\n\n--------------------Started building features for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
classes = list(np.unique(data_info_cremad_df_f.stress_emotion))
mconf_cremad_f = confc.ModelConfig(database=confv.database_cremad, gender=confv.gender_female, mode=confv.ml_mode_convolutional, classes=classes)
print(mconf_cremad_f.database)
print(mconf_cremad_f.gender)
print(mconf_cremad_f.mode)
print(mconf_cremad_f.nfilt)
print(mconf_cremad_f.nfeat)
print(mconf_cremad_f.nfft)
print(mconf_cremad_f.step)
print(mconf_cremad_f.classes)
print(mconf_cremad_f.features_save_name)
print(mconf_cremad_f.model_config_save_name)
print(mconf_cremad_f.training_log_name)
print(mconf_cremad_f.model_save_name)
print(mconf_cremad_f.model_h5_save_name)
print(mconf_cremad_f.model_tflite_save_name)
print(mconf_cremad_f.feature_path)
print(mconf_cremad_f.model_config_path)
print(mconf_cremad_f.training_log_path)
print(mconf_cremad_f.model_path)
print(mconf_cremad_f.model_h5_path)
print(mconf_cremad_f.model_tflite_path)
rfpconf_cremad_f = confc.RandFeatParams(df=data_info_cremad_df_f, database=confv.database_cremad, gender=confv.gender_female)
X, y = bf.build_random_features(modelconfig=mconf_cremad_f, randfeatparams=rfpconf_cremad_f)
print("--------------------Finished building features for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
# MODEL & TRAINING
print("\n\n--------------------Started model training for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
input_shape = (X.shape[1], X.shape[2], 1)
model = mdl.get_cremad_female_model(input_shape)
y_flat = np.argmax(y, axis=1)
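# Weight the loss inversely to class frequency so both stress classes contribute equally during training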
class_weight = compute_class_weight('balanced', classes=np.unique(y_flat), y=y_flat)
class_weight = {i : class_weight[i] for i in range(2)}
NAME = "{database}-{gender}-{modeltype}-{spec}-{time}".format(database=confv.database_cremad, gender=confv.gender_female, modeltype=confv.ml_mode_convolutional, spec="1st", time=int(time.time()))
mdl_logs_pth = os.path.join(confv.base_store, confv.log_dir)
tensorboard = TensorBoard(log_dir=mdl_logs_pth + '\\{}'.format(NAME))
dfc.check_dir_inside_saved_features_and_modelconfigs_and_models(parent=confv.saved_training_metrics_logs, database=confv.database_cremad, gender=confv.gender_female)
csv_logger = CSVLogger(mconf_cremad_f.training_log_path)
model.fit(X, y, epochs=40, batch_size=128, shuffle=True, class_weight=class_weight, validation_split=0.2, callbacks=[tensorboard, csv_logger])
print("--------------------Finished model training for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
# MODEL SAVING
print("\n\n--------------------Started model saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
dfc.check_dir_inside_saved_features_and_modelconfigs_and_models(parent=confv.saved_models, database=confv.database_cremad, gender=confv.gender_female)
model.save(mconf_cremad_f.model_path)
model.save(mconf_cremad_f.model_h5_path)
# Convert the model & save in tflite
converter = tf.lite.TFLiteConverter.from_saved_model(mconf_cremad_f.model_path)
tflite_model = converter.convert()
with open(mconf_cremad_f.model_tflite_path, 'wb') as outfile:
outfile.write(tflite_model)
print("--------------------Finished model saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_female, name=confv.dataset_cremad_female))
|
[
"backbone.support.data_cleaning.check_and_adjust_df_for_minimum_audio_length_after_cleaning",
"backbone.support.models.get_cremad_female_model",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"numpy.unique",
"backbone.support.configuration_classes.RandFeatParams",
"os.path.join",
"numpy.argmax",
"time.time",
"backbone.support.directory_file_checking.check_dir_inside_saved_features_and_modelconfigs_and_models",
"tensorflow.python.keras.callbacks.CSVLogger",
"backbone.support.configuration_classes.DataFrame",
"backbone.support.configuration_classes.ModelConfig",
"backbone.support.build_features.build_random_features",
"backbone.support.saving_loading.load_dataframe"
] |
[((3645, 3720), 'backbone.support.configuration_classes.DataFrame', 'confc.DataFrame', ([], {'database': 'confv.database_cremad', 'gender': 'confv.gender_female'}), '(database=confv.database_cremad, gender=confv.gender_female)\n', (3660, 3720), True, 'import backbone.support.configuration_classes as confc\n'), ((3739, 3773), 'backbone.support.saving_loading.load_dataframe', 'sl.load_dataframe', (['cremad_f_df_obj'], {}), '(cremad_f_df_obj)\n', (3756, 3773), True, 'import backbone.support.saving_loading as sl\n'), ((6809, 6967), 'backbone.support.data_cleaning.check_and_adjust_df_for_minimum_audio_length_after_cleaning', 'dc.check_and_adjust_df_for_minimum_audio_length_after_cleaning', ([], {'df': 'data_info_cremad_df_f', 'database': 'confv.database_cremad', 'gender': 'confv.gender_female'}), '(df=\n data_info_cremad_df_f, database=confv.database_cremad, gender=confv.\n gender_female)\n', (6871, 6967), True, 'import backbone.support.data_cleaning as dc\n'), ((9419, 9552), 'backbone.support.configuration_classes.ModelConfig', 'confc.ModelConfig', ([], {'database': 'confv.database_cremad', 'gender': 'confv.gender_female', 'mode': 'confv.ml_mode_convolutional', 'classes': 'classes'}), '(database=confv.database_cremad, gender=confv.\n gender_female, mode=confv.ml_mode_convolutional, classes=classes)\n', (9436, 9552), True, 'import backbone.support.configuration_classes as confc\n'), ((10268, 10379), 'backbone.support.configuration_classes.RandFeatParams', 'confc.RandFeatParams', ([], {'df': 'data_info_cremad_df_f', 'database': 'confv.database_cremad', 'gender': 'confv.gender_female'}), '(df=data_info_cremad_df_f, database=confv.\n database_cremad, gender=confv.gender_female)\n', (10288, 10379), True, 'import backbone.support.configuration_classes as confc\n'), ((10382, 10472), 'backbone.support.build_features.build_random_features', 'bf.build_random_features', ([], {'modelconfig': 'mconf_cremad_f', 'randfeatparams': 'rfpconf_cremad_f'}), '(modelconfig=mconf_cremad_f, randfeatparams=\n rfpconf_cremad_f)\n', (10406, 10472), True, 'import backbone.support.build_features as bf\n'), ((10931, 10971), 'backbone.support.models.get_cremad_female_model', 'mdl.get_cremad_female_model', (['input_shape'], {}), '(input_shape)\n', (10958, 10971), True, 'import backbone.support.models as mdl\n'), ((10982, 11002), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (10991, 11002), True, 'import numpy as np\n'), ((11345, 11390), 'os.path.join', 'os.path.join', (['confv.base_store', 'confv.log_dir'], {}), '(confv.base_store, confv.log_dir)\n', (11357, 11390), False, 'import os\n'), ((11462, 11636), 'backbone.support.directory_file_checking.check_dir_inside_saved_features_and_modelconfigs_and_models', 'dfc.check_dir_inside_saved_features_and_modelconfigs_and_models', ([], {'parent': 'confv.saved_training_metrics_logs', 'database': 'confv.database_cremad', 'gender': 'confv.gender_female'}), '(parent=\n confv.saved_training_metrics_logs, database=confv.database_cremad,\n gender=confv.gender_female)\n', (11525, 11636), True, 'import backbone.support.directory_file_checking as dfc\n'), ((11641, 11684), 'tensorflow.python.keras.callbacks.CSVLogger', 'CSVLogger', (['mconf_cremad_f.training_log_path'], {}), '(mconf_cremad_f.training_log_path)\n', (11650, 11684), False, 'from tensorflow.python.keras.callbacks import CSVLogger\n'), ((12232, 12392), 'backbone.support.directory_file_checking.check_dir_inside_saved_features_and_modelconfigs_and_models', 
'dfc.check_dir_inside_saved_features_and_modelconfigs_and_models', ([], {'parent': 'confv.saved_models', 'database': 'confv.database_cremad', 'gender': 'confv.gender_female'}), '(parent=\n confv.saved_models, database=confv.database_cremad, gender=confv.\n gender_female)\n', (12295, 12392), True, 'import backbone.support.directory_file_checking as dfc\n'), ((12512, 12579), 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['mconf_cremad_f.model_path'], {}), '(mconf_cremad_f.model_path)\n', (12552, 12579), True, 'import tensorflow as tf\n'), ((9353, 9400), 'numpy.unique', 'np.unique', (['data_info_cremad_df_f.stress_emotion'], {}), '(data_info_cremad_df_f.stress_emotion)\n', (9362, 9400), True, 'import numpy as np\n'), ((11051, 11068), 'numpy.unique', 'np.unique', (['y_flat'], {}), '(y_flat)\n', (11060, 11068), True, 'import numpy as np\n'), ((11316, 11327), 'time.time', 'time.time', ([], {}), '()\n', (11325, 11327), False, 'import time\n')]
|
"""Forward and back projector for PET data reconstruction"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
#------------------------------------------------------------------------------
import numpy as np
import sys
import os
import logging
import petprj
from niftypet.nipet.img import mmrimg
from niftypet.nipet import mmraux
#=========================================================================
# forward projector
#-------------------------------------------------------------------------
def frwd_prj(im, scanner_params, isub=np.array([-1], dtype=np.int32), dev_out=False, attenuation=False):
''' Calculate forward projection (a set of sinograms) for the provided input image.
Arguments:
im -- input image (can be emission or mu-map image).
scanner_params -- dictionary of all scanner parameters, containing scanner constants,
transaxial and axial look up tables (LUT).
isub -- array of transaxial indices of all sinograms (angles x bins) used for subsets.
when the first element is negative, all transaxial bins are used (as in pure EM-ML).
dev_out -- if True, output sinogram is in the device form, i.e., with two dimensions
(# bins/angles, # sinograms) instead of default three (# sinograms, # bins, # angles).
attenuation -- controls whether emission or LOR attenuation probability sinogram
is calculated; the default is False, meaning emission sinogram; for attenuation
calculations (attenuation=True), the exponential of the negative of the integrated
mu-values along LOR path is taken at the end.
'''
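    # Illustrative usage (not from the original source; em_image and mu_map are placeholder arrays):
    #   em_sino  = frwd_prj(em_image, scanner_params)                  # emission forward projection
    #   att_sino = frwd_prj(mu_map, scanner_params, attenuation=True)  # LOR attenuation factors from a mu-map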
log = logging.getLogger(__name__)
# Get particular scanner parameters: Constants, transaxial and axial LUTs
Cnt = scanner_params['Cnt']
txLUT = scanner_params['txLUT']
axLUT = scanner_params['axLUT']
#>choose between attenuation forward projection (mu-map is the input)
#>or the default for emission image forward projection
if attenuation:
att = 1
else:
att = 0
if Cnt['SPN']==1:
# number of rings calculated for the given ring range (optionally we can use only part of the axial FOV)
NRNG_c = Cnt['RNG_END'] - Cnt['RNG_STRT']
# number of sinos in span-1
nsinos = NRNG_c**2
# correct for the max. ring difference in the full axial extent (don't use ring range (1,63) as for this case no correction)
if NRNG_c==64:
nsinos -= 12
elif Cnt['SPN']==11: nsinos=Cnt['NSN11']
elif Cnt['SPN']==0: nsinos=Cnt['NSEG0']
if im.shape[0]==Cnt['SO_IMZ'] and im.shape[1]==Cnt['SO_IMY'] and im.shape[2]==Cnt['SO_IMX']:
ims = mmrimg.convert2dev(im, Cnt)
elif im.shape[0]==Cnt['SZ_IMX'] and im.shape[1]==Cnt['SZ_IMY'] and im.shape[2]==Cnt['SZ_IMZ']:
ims = im
elif im.shape[0]==Cnt['rSO_IMZ'] and im.shape[1]==Cnt['SO_IMY'] and im.shape[2]==Cnt['SO_IMX']:
ims = mmrimg.convert2dev(im, Cnt)
elif im.shape[0]==Cnt['SZ_IMX'] and im.shape[1]==Cnt['SZ_IMY'] and im.shape[2]==Cnt['rSZ_IMZ']:
ims = im
else:
        log.error('wrong image size; it has to be one of these: (z,y,x) = (127,344,344) or (y,x,z) = (320,320,128)')
        raise ValueError('unsupported input image dimensions')
log.debug('number of sinos:%d' % nsinos)
#predefine the sinogram. if subsets are used then only preallocate those bins which will be used.
if isub[0]<0:
sinog = np.zeros((txLUT['Naw'], nsinos), dtype=np.float32)
else:
sinog = np.zeros((len(isub), nsinos), dtype=np.float32)
# --------------------
petprj.fprj(sinog, ims, txLUT, axLUT, isub, Cnt, att)
# --------------------
# get the sinogram bins in a proper sinogram
sino = np.zeros((txLUT['Naw'], nsinos), dtype=np.float32)
if isub[0]>=0: sino[isub,:] = sinog
else: sino = sinog
# put the gaps back to form displayable sinogram
if not dev_out:
sino = mmraux.putgaps(sino, txLUT, Cnt)
return sino
#=========================================================================
# back projector
#-------------------------------------------------------------------------
def back_prj(sino, scanner_params, isub=np.array([-1], dtype=np.int32)):
    ''' Calculate back projection (into image space) for the provided input sinogram.
Arguments:
sino -- input emission sinogram to be back projected to the image space.
scanner_params -- dictionary of all scanner parameters, containing scanner constants,
transaxial and axial look up tables (LUT).
isub -- array of transaxial indices of all sinograms (angles x bins) used for subsets;
when the first element is negative, all transaxial bins are used (as in pure EM-ML).
'''
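    # Illustrative usage (not from the original source; em_sino is a placeholder sinogram array):
    #   im = back_prj(em_sino, scanner_params)   # back-project a full emission sinogram into image space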
# Get particular scanner parameters: Constants, transaxial and axial LUTs
Cnt = scanner_params['Cnt']
txLUT = scanner_params['txLUT']
axLUT = scanner_params['axLUT']
if Cnt['SPN']==1:
# number of rings calculated for the given ring range (optionally we can use only part of the axial FOV)
NRNG_c = Cnt['RNG_END'] - Cnt['RNG_STRT']
# number of sinos in span-1
nsinos = NRNG_c**2
# correct for the max. ring difference in the full axial extent (don't use ring range (1,63) as for this case no correction)
if NRNG_c==64:
nsinos -= 12
elif Cnt['SPN']==11: nsinos=Cnt['NSN11']
elif Cnt['SPN']==0: nsinos=Cnt['NSEG0']
#> check first the Siemens default sinogram;
#> for this default shape only full sinograms are expected--no subsets.
if len(sino.shape)==3:
if sino.shape[0]!=nsinos or sino.shape[1]!=Cnt['NSANGLES'] or sino.shape[2]!=Cnt['NSBINS']:
raise ValueError('Unexpected sinogram array dimensions/shape for Siemens defaults.')
sinog = mmraux.remgaps(sino, txLUT, Cnt)
elif len(sino.shape)==2:
if isub[0]<0 and sino.shape[0]!=txLUT["Naw"]:
raise ValueError('Unexpected number of transaxial elements in the full sinogram.')
elif isub[0]>=0 and sino.shape[0]!=len(isub):
raise ValueError('Unexpected number of transaxial elements in the subset sinogram.')
#> check if the number of sinograms is correct
if sino.shape[1]!=nsinos:
raise ValueError('Inconsistent number of sinograms in the array.')
#> when found the dimensions/shape are fine:
sinog = sino
else:
raise ValueError('Unexpected shape of the input sinogram.')
#predefine the output image depending on the number of rings used
if Cnt['SPN']==1 and 'rSZ_IMZ' in Cnt:
nvz = Cnt['rSZ_IMZ']
else:
nvz = Cnt['SZ_IMZ']
bimg = np.zeros((Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz), dtype=np.float32)
#> run back-projection
petprj.bprj(bimg, sinog, txLUT, axLUT, isub, Cnt)
#> change from GPU optimised image dimensions to the standard Siemens shape
bimg = mmrimg.convert2e7(bimg, Cnt)
return bimg
#-------------------------------------------------------------------------
|
[
"logging.getLogger",
"niftypet.nipet.mmraux.remgaps",
"petprj.bprj",
"niftypet.nipet.img.mmrimg.convert2dev",
"niftypet.nipet.img.mmrimg.convert2e7",
"niftypet.nipet.mmraux.putgaps",
"numpy.array",
"numpy.zeros",
"petprj.fprj"
] |
[((555, 585), 'numpy.array', 'np.array', (['[-1]'], {'dtype': 'np.int32'}), '([-1], dtype=np.int32)\n', (563, 585), True, 'import numpy as np\n'), ((1675, 1702), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1692, 1702), False, 'import logging\n'), ((3590, 3643), 'petprj.fprj', 'petprj.fprj', (['sinog', 'ims', 'txLUT', 'axLUT', 'isub', 'Cnt', 'att'], {}), '(sinog, ims, txLUT, axLUT, isub, Cnt, att)\n', (3601, 3643), False, 'import petprj\n'), ((3731, 3781), 'numpy.zeros', 'np.zeros', (["(txLUT['Naw'], nsinos)"], {'dtype': 'np.float32'}), "((txLUT['Naw'], nsinos), dtype=np.float32)\n", (3739, 3781), True, 'import numpy as np\n'), ((4196, 4226), 'numpy.array', 'np.array', (['[-1]'], {'dtype': 'np.int32'}), '([-1], dtype=np.int32)\n', (4204, 4226), True, 'import numpy as np\n'), ((6694, 6757), 'numpy.zeros', 'np.zeros', (["(Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz)"], {'dtype': 'np.float32'}), "((Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz), dtype=np.float32)\n", (6702, 6757), True, 'import numpy as np\n'), ((6790, 6839), 'petprj.bprj', 'petprj.bprj', (['bimg', 'sinog', 'txLUT', 'axLUT', 'isub', 'Cnt'], {}), '(bimg, sinog, txLUT, axLUT, isub, Cnt)\n', (6801, 6839), False, 'import petprj\n'), ((6932, 6960), 'niftypet.nipet.img.mmrimg.convert2e7', 'mmrimg.convert2e7', (['bimg', 'Cnt'], {}), '(bimg, Cnt)\n', (6949, 6960), False, 'from niftypet.nipet.img import mmrimg\n'), ((2718, 2745), 'niftypet.nipet.img.mmrimg.convert2dev', 'mmrimg.convert2dev', (['im', 'Cnt'], {}), '(im, Cnt)\n', (2736, 2745), False, 'from niftypet.nipet.img import mmrimg\n'), ((3433, 3483), 'numpy.zeros', 'np.zeros', (["(txLUT['Naw'], nsinos)"], {'dtype': 'np.float32'}), "((txLUT['Naw'], nsinos), dtype=np.float32)\n", (3441, 3483), True, 'import numpy as np\n'), ((3938, 3970), 'niftypet.nipet.mmraux.putgaps', 'mmraux.putgaps', (['sino', 'txLUT', 'Cnt'], {}), '(sino, txLUT, Cnt)\n', (3952, 3970), False, 'from niftypet.nipet import mmraux\n'), ((5819, 5851), 'niftypet.nipet.mmraux.remgaps', 'mmraux.remgaps', (['sino', 'txLUT', 'Cnt'], {}), '(sino, txLUT, Cnt)\n', (5833, 5851), False, 'from niftypet.nipet import mmraux\n'), ((2976, 3003), 'niftypet.nipet.img.mmrimg.convert2dev', 'mmrimg.convert2dev', (['im', 'Cnt'], {}), '(im, Cnt)\n', (2994, 3003), False, 'from niftypet.nipet.img import mmrimg\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import gradcheck
import random
import numpy as np
import math
import scipy.io as sio
import matplotlib.pyplot as plt
from sphere_cuda import SPHERE_CUDA
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
if torch.cuda.is_available():
# device_name = "cuda"
# torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# torch.cuda.manual_seed(0)
device_name = "cuda"
torch.cuda.manual_seed(0)
# torch.backends.cudnn.enabled = False #greatly slow down the speed
torch.backends.cudnn.deterministic = True
print("Let's use", torch.cuda.device_count(), "GPU(s)!")
else:
device_name = "cpu"
print("CUDA is not available")
device = torch.device(device_name)
batch = 2
channel = 4
rows = 48
cols = 48
h = 69
w = 60
theta_res = 3
rho_res = 1
num_points=128
sphere_size=512
npz_name = f"vote_{rows:d}_{cols:d}_{h:d}_{w:d}_{num_points:d}_{sphere_size:d}.npz"
npzfile = np.load(npz_name, allow_pickle=True)
print('npzfile', npzfile.files)
vote_mapping_sphere = npzfile['vote_mapping_sphere']
sphere_size= npzfile['sphere_size']
angles= npzfile['angles']
xyz= npzfile['xyz']
print('vote_mapping_sphere',vote_mapping_sphere.shape)
# for grad test, use double instead of float
vote_mapping_sphere = torch.from_numpy(vote_mapping_sphere).double().contiguous()
print('vote_mapping_sphere memory MB', vote_mapping_sphere.size(), vote_mapping_sphere.element_size() * vote_mapping_sphere.nelement() / (1024 * 1024))
vote_mapping_dict={}
vote_mapping_dict["vote_mapping"] = vote_mapping_sphere.to(device)
vote_mapping_dict["sphere_size"] = sphere_size
vote_mapping_dict["ht_size"] = [h, w]
Sphere_cuda = SPHERE_CUDA(vote_mapping_dict)
Sphere_cuda = Sphere_cuda.to(device)
print('grad check***********')
input = torch.randn(batch, channel, h, w, requires_grad=True).double().to(device)
res = gradcheck(Sphere_cuda, input, raise_exception=True)
# res=gradcheck(myconv, input, eps=1e-3, atol=1e-3, rtol=1e-2, raise_exception=True)
print('grad check', res)
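# Added note: gradcheck compares the analytical Jacobian of the custom CUDA op against a
# finite-difference estimate; it is only reliable in float64, hence the .double() casts above.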
|
[
"torch.manual_seed",
"torch.cuda.device_count",
"random.seed",
"torch.from_numpy",
"torch.cuda.is_available",
"sphere_cuda.SPHERE_CUDA",
"numpy.random.seed",
"torch.cuda.manual_seed",
"numpy.load",
"torch.autograd.gradcheck",
"torch.randn",
"torch.device"
] |
[((241, 255), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (252, 255), False, 'import random\n'), ((256, 273), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (270, 273), True, 'import numpy as np\n'), ((274, 294), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (291, 294), False, 'import torch\n'), ((300, 325), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (323, 325), False, 'import torch\n'), ((788, 813), 'torch.device', 'torch.device', (['device_name'], {}), '(device_name)\n', (800, 813), False, 'import torch\n'), ((1026, 1062), 'numpy.load', 'np.load', (['npz_name'], {'allow_pickle': '(True)'}), '(npz_name, allow_pickle=True)\n', (1033, 1062), True, 'import numpy as np\n'), ((1754, 1784), 'sphere_cuda.SPHERE_CUDA', 'SPHERE_CUDA', (['vote_mapping_dict'], {}), '(vote_mapping_dict)\n', (1765, 1784), False, 'from sphere_cuda import SPHERE_CUDA\n'), ((1944, 1995), 'torch.autograd.gradcheck', 'gradcheck', (['Sphere_cuda', 'input'], {'raise_exception': '(True)'}), '(Sphere_cuda, input, raise_exception=True)\n', (1953, 1995), False, 'from torch.autograd import gradcheck\n'), ((508, 533), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (530, 533), False, 'import torch\n'), ((676, 701), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (699, 701), False, 'import torch\n'), ((1353, 1390), 'torch.from_numpy', 'torch.from_numpy', (['vote_mapping_sphere'], {}), '(vote_mapping_sphere)\n', (1369, 1390), False, 'import torch\n'), ((1864, 1917), 'torch.randn', 'torch.randn', (['batch', 'channel', 'h', 'w'], {'requires_grad': '(True)'}), '(batch, channel, h, w, requires_grad=True)\n', (1875, 1917), False, 'import torch\n')]
|
import numpy as np
u = np.array([3,2,1])
v = np.array([1,2,3])
z = u + v
z = u - v
z = u * v
z = u / v
x = np.arange(0,9)
print(x)
print(x.shape)
print(x.itemsize)
y = x.reshape((3,3))
print(y)
print(y.shape)
print(y.itemsize)
x = np.array([1,1,1])
soma = sum(x)
print(soma)
# Using inner for the dot (inner) product
u = np.array([3,2,1])
v = np.array([1,2,3])
z = np.inner(v,u)
# returns z = 10
# Using cross for the cross product
i = [1,0,0]
j = [0,1,0]
k = np.cross(i,j)
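# k = [0, 0, 1] for the unit vectors above (x cross y = z)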
# Transpose
A = np.array([[1,2,3],[4,5,6],[7,8,9]])
T = A.T
#print(T)
# Transforming into a one dimensional array
A_flat = A.flatten()
#print(A_flat)
#print(A.ndim)
#print(A.shape)
import numpy.matlib
# Creating a zero matrix
A = np.matlib.zeros((3,3))
# Creating an identity matrix
I = np.matlib.identity(3)
# Creating matrices with random values
B = np.matlib.rand((3,3))
# Creating a matrix with random values drawn from the normal distribution
N = np.matlib.randn((3,3))
A = np.array([[1,1,1], [2,2,2], [3,3,3]])
print(A)
x = np.array([1,2,3,4,5,6,7,8,9])
B = x.reshape((3,3))
print(B)
|
[
"numpy.matlib.randn",
"numpy.cross",
"numpy.matlib.rand",
"numpy.matlib.identity",
"numpy.inner",
"numpy.array",
"numpy.arange",
"numpy.matlib.zeros"
] |
[((26, 45), 'numpy.array', 'np.array', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (34, 45), True, 'import numpy as np\n'), ((48, 67), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (56, 67), True, 'import numpy as np\n'), ((113, 128), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (122, 128), True, 'import numpy as np\n'), ((239, 258), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (247, 258), True, 'import numpy as np\n'), ((327, 346), 'numpy.array', 'np.array', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (335, 346), True, 'import numpy as np\n'), ((349, 368), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (357, 368), True, 'import numpy as np\n'), ((372, 386), 'numpy.inner', 'np.inner', (['v', 'u'], {}), '(v, u)\n', (380, 386), True, 'import numpy as np\n'), ((472, 486), 'numpy.cross', 'np.cross', (['i', 'j'], {}), '(i, j)\n', (480, 486), True, 'import numpy as np\n'), ((505, 548), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (513, 548), True, 'import numpy as np\n'), ((730, 753), 'numpy.matlib.zeros', 'np.matlib.zeros', (['(3, 3)'], {}), '((3, 3))\n', (745, 753), True, 'import numpy as np\n'), ((791, 812), 'numpy.matlib.identity', 'np.matlib.identity', (['(3)'], {}), '(3)\n', (809, 812), True, 'import numpy as np\n'), ((850, 872), 'numpy.matlib.rand', 'np.matlib.rand', (['(3, 3)'], {}), '((3, 3))\n', (864, 872), True, 'import numpy as np\n'), ((958, 981), 'numpy.matlib.randn', 'np.matlib.randn', (['(3, 3)'], {}), '((3, 3))\n', (973, 981), True, 'import numpy as np\n'), ((987, 1030), 'numpy.array', 'np.array', (['[[1, 1, 1], [2, 2, 2], [3, 3, 3]]'], {}), '([[1, 1, 1], [2, 2, 2], [3, 3, 3]])\n', (995, 1030), True, 'import numpy as np\n'), ((1039, 1076), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (1047, 1076), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# This will (hopefully) be the code to extract symmetry operations
# from Hall symbols
import numpy as np
lattice_symbols = {
'P': [[0, 0, 0]],
'A': [[0, 0, 0], [0, 1./2, 1./2]],
'B': [[0, 0, 0], [1./2, 0, 1./2]],
'C': [[0, 0, 0], [1./2, 1./2, 0]],
'I': [[0, 0, 0], [1./2, 1./2, 1./2]],
'R': [[0, 0, 0], [2./3, 1./3, 1./3], [1./3, 2./3, 2./3]],
'H': [[0, 0, 0], [2./3, 1./3, 0], [1./3, 2./3, 0]],
'F': [[0, 0, 0], [0, 1./2, 1./2], [1./2, 0, 1./2], [1./2, 1./2, 0]]
}
rotation_matrices = {
'1x': [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
'1y': [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
'1z': [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
'2x': [[1, 0, 0],
[0, -1, 0],
[0, 0, -1]],
'2y': [[-1, 0, 0],
[0, 1, 0],
[0, 0, -1]],
'2z': [[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]],
'3x': [[1, 0, 0],
[0, 0, -1],
[0, 1, -1]],
'3y': [[-1, 0, 1],
[0, 1, 0],
[-1, 0, 0]],
'3z': [[0, -1, 0],
[1, -1, 0],
[0, 0, 1]],
'4x': [[1, 0, 0],
[0, 0, -1],
[0, 1, 0]],
'4y': [[0, 0, 1],
[0, 1, 0],
[-1, 0, 0]],
'4z': [[0, -1, 0],
[1, 0, 0],
[0, 0, 1]],
'6x': [[1, 0, 0],
[0, 1, -1],
[0, 1, 0]],
'6y': [[0, 0, 1],
[0, 1, 0],
[-1, 0, 1]],
'6z': [[1, -1, 0],
[1, 0, 0],
[0, 0, 1]],
'2px': [[-1, 0, 0], # b-c
[0, 0, -1],
[0, -1, 0]],
'2ppx': [[-1, 0, 0], # b+c
[0, 0, 1],
[0, 1, 0]],
'2py': [[0, 0, -1], # a-c
[0, -1, 0],
[-1, 0, 0]],
'2ppy': [[0, 0, 1], # a+c
[0, -1, 0],
[1, 0, 0]],
'2pz': [[0, -1, 0], # a-b
[-1, 0, 0],
[0, 0, -1]],
'2ppz': [[0, 1, 0], # a+b
[1, 0, 0],
[0, 0, -1]],
'3*': [[0, 0, 1], # a+b+c
[1, 0, 0],
[0, 1, 0]]
}
translations = {
'a': [1./2, 0, 0],
'b': [0, 1./2, 0],
'c': [0, 0, 1./2],
'n': [1./2, 1./2, 1./2],
'u': [1./4, 0, 0],
'v': [0, 1./4, 0],
'w': [0, 0, 1./4],
'd': [1./4, 1./4, 1./4]
}
def read_spg_csv(filename="spg.csv"):
hall_symbols = []
for line in open(filename):
data = line.split(',')
hall_symbols.append([data[6], data[4]])
return hall_symbols
class HallSymbol:
def __init__(self, hall_symbol):
self.hall_symbol = hall_symbol.split()
self._decompose()
self._full_operations()
def get_LNV(self):
return self.L, self.N, self.V
def get_operations(self):
return self.G_R, self.G_T
def _full_operations(self):
gens_R, gens_T = self._generators()
E = np.array(rotation_matrices['1x'])
T0 = np.zeros(3, dtype=float)
if self.L[0] == '-':
G_R = [E, -E]
G_T = [T0, T0]
else:
G_R = [E]
G_T = [T0]
for r, t in zip(gens_R, gens_T):
G2_R, G2_T = self._get_group(r, t)
G_R, G_T = self._multiply_groups(G_R, G_T, G2_R, G2_T)
if self.V is not None:
G_T = self._change_of_basis(G_R, G_T)
G_R_with_centres = []
G_T_with_centred = []
for t in lattice_symbols[self.L[-1]]:
self._lattice_translation(G_R_with_centres,
G_T_with_centred,
G_R, G_T, t)
self.G_R = np.array(G_R_with_centres)
self.G_T = np.array(G_T_with_centred)
# Make sure the first operation has no rotation.
assert (self.G_R[0] == rotation_matrices['1x']).all()
# In Hall numbers 212, 213, 214, the first operation has non-zero
# translation. This translation is subtracted from all operations.
self.G_T -= self.G_T[0]
self.G_T -= np.rint(self.G_T)
cond = self.G_T < -1e-3
self.G_T[cond] += 1
def _change_of_basis(self, G_R, G_T):
G_T_new = []
v = self.V.astype(float) / 12
for r, t in zip(G_R, G_T):
G_T_new.append(-np.dot(r, v) + t + v)
return G_T_new
def _lattice_translation(self, G_R, G_T, G_R0, G_T0, translation):
for r, t in zip(G_R0, G_T0):
G_R.append(r.copy())
t_new = t + translation
G_T.append(t_new)
def _multiply_groups(self, G1_R, G1_T, G2_R, G2_T): # G2xG1
G_R = []
G_T = []
for r1, t1 in zip(G2_R, G2_T):
for r2, t2 in zip(G1_R, G1_T):
G_R.append(np.dot(r1, r2))
G_T.append(np.dot(r1, t2) + t1)
return G_R, G_T
def _get_group(self, r, t):
G_R = [r, ]
G_T = [t, ]
while not (G_R[-1] == rotation_matrices['1x']).all():
_r = np.dot(G_R[-1], r)
_t = np.dot(G_R[-1], t) + G_T[-1]
G_R.append(_r)
G_T.append(_t)
# Bring identity in front
_r = G_R.pop()
_t = G_T.pop()
G_R.insert(0, _r)
G_T.insert(0, _t)
return G_R, G_T
# def _get_group(self, r, t):
# G_R, G_T = self._get_group_recursive([np.array(r)], [np.array(t)])
# r = G_R.pop()
# t = G_T.pop()
# G_R.insert(0, r)
# G_T.insert(0, t)
# return G_R, G_T
# def _get_group_recursive(self, G_R, G_T):
# if not (G_R[-1] == rotation_matrices['1x']).all():
# r = np.dot(G_R[-1], G_R[0])
# t = np.dot(G_R[-1], G_T[0]) + G_T[-1]
# G_R.append(r)
# G_T.append(t)
# self._get_group_recursive(G_R, G_T)
# return G_R, G_T
def _generators(self):
R = []
T = []
for N in self.N:
rot = np.array(rotation_matrices[N[1] + N[2]])
if N[0] == '-':
rot = -rot
R.append(rot)
trans = np.zeros(3, dtype=float)
if N[3] is not None:
for t in N[3]:
if t in ('1', '2', '3', '4', '5'):
trans_screw = float(t) / int(N[1])
if N[2] == 'x':
trans[0] += trans_screw
elif N[2] == 'y':
trans[1] += trans_screw
elif N[2] == 'z':
trans[2] += trans_screw
else:
raise
else:
trans += np.array(translations[t])
T.append(trans)
return np.array(R, dtype=int), np.array(T, dtype=float)
def _rotation_matrix(self, str):
pass
# Decompose Hall symbol
# The following methods are used by _decompose().
def _decompose(self):
L = self.hall_symbol.pop(0)
N = []
V = None
precededN = 0
for i, ms in enumerate(self.hall_symbol):
if ms[0] == '(':
V = self._change_of_basis_symbol(self.hall_symbol[i + 2])
break
else:
N.append(self._matrix_symbol(ms, i, precededN))
precededN = int(N[-1][1][0])
self.L = L
self.N = N
self.V = V
def _matrix_symbol(self, N, i, precededN):
if N[0] == '-':
improper = '-'
N = N[1:]
else:
improper = None
N, R, A = self._rotation(N, i, precededN)
if len(N) > 0:
T = self._translation(N)
else:
T = None
return [improper, R, A, T]
def _rotation(self, N, i, precededN):
A = None
if N[0] == '2':
if len(N) > 1: # 2"
if N[1] == '=':
R = '2pp'
A = 'z'
N = N[2:]
if i == 1 and A is None:
if precededN == 2 or precededN == 4: # 2x
R = '2'
A = 'x'
N = N[1:]
elif precededN == 3 or precededN == 6: # 2'
R = '2p'
A = 'z'
N = N[1:]
elif N[0] == '3': # 3*
if i == 2:
R = '3'
A = '*'
N = N[1:]
elif len(N) > 1:
if N[1] == '*':
R = '3'
A = '*'
N = N[2:]
if A is None:
R = N[0]
N = N[1:]
if len(N) > 0 and i == 0:
N, A = self._principal_axis(N)
else:
A = 'z'
return N, R, A
def _principal_axis(self, N):
if N[0] == 'x':
return N[1:], 'x'
if N[0] == 'y':
return N[1:], 'y'
return N, 'z'
def _translation(self, N):
T = []
for i in range(len(N)):
T.append(N[i])
return T
def _change_of_basis_symbol(self, V):
if V[0] == '-':
return np.array([0, 0, -1])
else:
return np.array([0, 0, 1])
def dump_operations(filename):
hall_symbols = read_spg_csv(filename)
count = 0
print(" 0 , /* dummy */")
for i in range(530):
hs = HallSymbol(hall_symbols[i][0])
G_R, G_T = hs.get_operations()
for j, (r, t) in enumerate(zip(G_R, G_T)):
count += 1
r_encode = encode_rotation(r)
x = np.rint(t * 12).astype(int)
t_encode = x[0] * 144 + x[1] * 12 + x[2]
total = t_encode * 3 ** 9 + r_encode
text = " %-8d," % (total)
text += " /* %4d (%3d) [" % (count, i + 1)
text += "%2d," * 9 % tuple(decode_rotation(total % (3**9)))
text += "%2d,%2d,%2d] */" % tuple(decode_trans(total // (3**9)))
print(text)
def dump_operations_old(filename):
hall_symbols = read_spg_csv(filename)
count = 0
for i in range(530):
hs = HallSymbol(hall_symbols[i][0])
G_R, G_T = hs.get_operations()
for j, (r, t) in enumerate(zip(G_R, G_T)):
count += 1
text = "{%3d," % (i + 1)
text += "%2d,%2d,%2d,%2d,%2d,%2d,%2d,%2d,%2d," % tuple(r.ravel())
text += "%2d,%2d,%2d" % tuple((t * 12 + 0.1).astype(int))
text += "}, /* %4d */" % count
print(text)
# Ternary numerical system
def encode_rotation(r):
r_sum = 0
for i, x in enumerate(r.ravel()):
r_sum += (x + 1) * 3**(8 - i)
return r_sum
def decode_rotation(c):
r = []
for i in range(8, -1, -1):
r.append((c % (3**(i+1))) // (3**i) - 1)
return np.array(r)
def decode_trans(c):
return c // 144, (c % 144) // 12, (c % 12)
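# Added worked example: the identity rotation flattens to (1, 0, 0, 0, 1, 0, 0, 0, 1);
# encode_rotation adds 1 to every entry and reads (2, 1, 1, 1, 2, 1, 1, 1, 2) as a base-3
# number: 2*3**8 + 1*3**7 + 1*3**6 + 1*3**5 + 2*3**4 + 1*3**3 + 1*3**2 + 1*3 + 2 = 16484,
# and decode_rotation(16484) recovers the same nine entries.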
def get_reference_to_operations(filename):
hall_symbols = read_spg_csv(filename)
count = 0
for i in range(530):
hs = HallSymbol(hall_symbols[i][0])
G_R, G_T = hs.get_operations()
print(" {%4d,%5d}, /* %3d */ " % (len(G_R), count + 1, i + 1))
count += len(G_R)
def watch_hs(filename, number):
print(" { 0, 0}, /* 0 */")
num = number - 1
hall_symbols = read_spg_csv(filename)
hs = HallSymbol(hall_symbols[num][0])
for char, vals in zip(('L', 'N', 'V'), hs.get_LNV()):
print("%s: %s" % (char, vals))
G_R, G_T = hs.get_operations()
print(number, ":", hall_symbols[num][0], "(", len(G_R), ")")
for i, (r, t) in enumerate(zip(G_R, G_T)):
print("-----", i + 1, "-----")
print(r, t)
if __name__ == '__main__':
"""
Usage
-----
To watch symmetry operations of a Hall symbol,
% python hall2operations.py --hs spg.csv 213
To dump symmetry operations of all Hall symbols that are copied to
spglb_database.c,
% python hall2operations.py --dump spg.csv
To dump address of symmetry operation data that is copied to
spglb_database.c,
% python hall2operations.py --reference spg.csv
"""
from optparse import OptionParser
parser = OptionParser()
parser.set_defaults(watch_hs=False,
dump_operations=False,
shift=None,
origin=None)
parser.add_option("--hs", dest="watch_hs",
action="store_true",
help="spg.csv [spg NUM]")
parser.add_option("--dump", dest="is_dump",
action="store_true")
parser.add_option("--reference", dest="is_reference",
action="store_true")
(options, args) = parser.parse_args()
if options.is_dump:
dump_operations(args[0])
if options.is_reference:
get_reference_to_operations(args[0])
elif options.watch_hs:
watch_hs(args[0], int(args[1]))
|
[
"optparse.OptionParser",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.rint"
] |
[((10889, 10900), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (10897, 10900), True, 'import numpy as np\n'), ((12263, 12277), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (12275, 12277), False, 'from optparse import OptionParser\n'), ((2939, 2972), 'numpy.array', 'np.array', (["rotation_matrices['1x']"], {}), "(rotation_matrices['1x'])\n", (2947, 2972), True, 'import numpy as np\n'), ((2986, 3010), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (2994, 3010), True, 'import numpy as np\n'), ((3680, 3706), 'numpy.array', 'np.array', (['G_R_with_centres'], {}), '(G_R_with_centres)\n', (3688, 3706), True, 'import numpy as np\n'), ((3726, 3752), 'numpy.array', 'np.array', (['G_T_with_centred'], {}), '(G_T_with_centred)\n', (3734, 3752), True, 'import numpy as np\n'), ((4075, 4092), 'numpy.rint', 'np.rint', (['self.G_T'], {}), '(self.G_T)\n', (4082, 4092), True, 'import numpy as np\n'), ((5021, 5039), 'numpy.dot', 'np.dot', (['G_R[-1]', 'r'], {}), '(G_R[-1], r)\n', (5027, 5039), True, 'import numpy as np\n'), ((5975, 6015), 'numpy.array', 'np.array', (['rotation_matrices[N[1] + N[2]]'], {}), '(rotation_matrices[N[1] + N[2]])\n', (5983, 6015), True, 'import numpy as np\n'), ((6118, 6142), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (6126, 6142), True, 'import numpy as np\n'), ((6794, 6816), 'numpy.array', 'np.array', (['R'], {'dtype': 'int'}), '(R, dtype=int)\n', (6802, 6816), True, 'import numpy as np\n'), ((6818, 6842), 'numpy.array', 'np.array', (['T'], {'dtype': 'float'}), '(T, dtype=float)\n', (6826, 6842), True, 'import numpy as np\n'), ((9236, 9256), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (9244, 9256), True, 'import numpy as np\n'), ((9290, 9309), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9298, 9309), True, 'import numpy as np\n'), ((5057, 5075), 'numpy.dot', 'np.dot', (['G_R[-1]', 't'], {}), '(G_R[-1], t)\n', (5063, 5075), True, 'import numpy as np\n'), ((4780, 4794), 'numpy.dot', 'np.dot', (['r1', 'r2'], {}), '(r1, r2)\n', (4786, 4794), True, 'import numpy as np\n'), ((9677, 9692), 'numpy.rint', 'np.rint', (['(t * 12)'], {}), '(t * 12)\n', (9684, 9692), True, 'import numpy as np\n'), ((4823, 4837), 'numpy.dot', 'np.dot', (['r1', 't2'], {}), '(r1, t2)\n', (4829, 4837), True, 'import numpy as np\n'), ((6724, 6749), 'numpy.array', 'np.array', (['translations[t]'], {}), '(translations[t])\n', (6732, 6749), True, 'import numpy as np\n'), ((4318, 4330), 'numpy.dot', 'np.dot', (['r', 'v'], {}), '(r, v)\n', (4324, 4330), True, 'import numpy as np\n')]
|
"""Module for interacting with the Comet Observations Database (COBS)."""
from io import StringIO
import re
from pathlib import Path
from appdirs import user_cache_dir
from astropy.time import Time
import mechanize
import numpy as np
import pandas as pd
from . import PACKAGEDIR, log
# Where to store COBS data?
CACHEDIR = Path(user_cache_dir("cometcurve"))
# Column numbers of COBS/ICQ data fields
ICQ_COLUMNS = {
'comet': (0, 11),
'date': (11, 21),
'fractional_day': (21, 24),
'method': (26, 27),
'upper_limit': (27, 28),
'magnitude': (28, 32),
'poor': (32, 33),
'aperture': (35, 40),
'instrument': (40, 41),
'observer': (75, 80),
'comments': (130, -1)
}
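# Added note: each entry above is the (start, stop) character slice of one field in a
# fixed-width ICQ record (e.g. characters 11-20 hold the date and 28-31 the magnitude);
# _parse_icq below passes these spans to pandas.read_fwf as colspecs.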
class CometObservations():
"""Class to interact with the Comet Observation Database (COBS).
Parameters
----------
data : `pandas.DataFrame`
DataFrame returned by `read_cobs()`.
"""
def __init__(self, data=None):
if data is None:
self.data = read_cobs()
else:
self.data = data
def get_observer_list(self):
"""Returns a string listing all observer names.
The names are sorted by number of observations.
"""
return ", ".join(self.data.observer_name.value_counts().keys())
def read_cobs(years=(2020,), comet=None, start=None, stop=None,
allowed_methods=('S', 'B', 'M', 'I', 'E', 'Z', 'V', 'O'),):
"""Returns a `CometObservations` instance containing the COBS database."""
if years == 'all':
years = tuple(range(2018, 2020))
# Read the data
data = []
for yr in np.atleast_1d(years):
try:
dfyr = _get_cache_dataframe(yr)
except FileNotFoundError:
dfyr = download_cobs(yr)
data.append(dfyr)
df = pd.concat(data)
# Remove bad lines defined as follows:
# * date i.e. year does not start with the character 1 or 2 (indicative of ill-formatted ICQ)
# * the magnitude is not missing (character "-")
# * the "poor" column is empty (note: this removes a small number of entries
# for comet 1965S1 where magnitude -10.0 overflows into the poor column).
# * the magnitude is not an upper limit (`df.upper_limit.isna()`)
# * did not use a convential method (cf. `allowed_methods`), i.e. a method
# that does not yield something similar to a V-band integrated magnitude.
bad_data_mask = (df.date.str[0].isin(["1","2"])
& df.poor.isna()
& df.upper_limit.isna())
if df.magnitude.dtype is not float:
bad_data_mask &= df.magnitude != '-'
if allowed_methods != 'all':
bad_data_mask &= df.method.isin(allowed_methods)
df = df[bad_data_mask]
df['time'] = pd.to_datetime(df.date, utc=True) + pd.to_timedelta(df.fractional_day, unit='D')
df['jd'] = Time(df.time).jd
df['magnitude'] = df.magnitude.astype(float)
df['aperture'] = df.aperture.astype(float)
df['visual'] = df.method.isin(('S', 'M', 'B'))
df['binocular'] = df.instrument == 'B'
df['poor'] = df.poor.astype(str) == ":"
df['observer_name'] = df.comments.str.split(pat="[,;]", expand=True)[0]
# Optional data filtering
mask = np.ones(len(df), dtype=bool)
if comet is not None:
mask &= df.comet == comet.replace(" ", "")
if start is not None:
mask &= df.time > start
if stop is not None:
mask &= df.time < stop
df = df[mask]
# Add a column detailing the number of observations by each observer
df_counts = df.observer.value_counts().reset_index()
df_counts.columns = ['observer', 'observations']
df = pd.merge(df, df_counts, on="observer")
return CometObservations(df)
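# Minimal usage sketch (added; the comet designation is a hypothetical placeholder):
#   obs = read_cobs(years=(2020,), comet='2020 F3')
#   print(obs.get_observer_list())
#   print(obs.data[['time', 'magnitude', 'observer_name']].head())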
def _parse_icq(fileobj):
"""Parse a International Comet Quarterly (ICQ) format file."""
df = pd.read_fwf(fileobj, colspecs=list(ICQ_COLUMNS.values()),
names=ICQ_COLUMNS.keys(), header=None)
return df
def _get_cache_filename(year=2020):
"""Returns the `Path` to the COBS data file for a given year."""
return CACHEDIR / f'cobs{year}.feather'
def _get_cache_dataframe(year=2020):
fn = _get_cache_filename(year)
if fn.exists():
log.info(f"Loading {fn}")
return pd.read_feather(fn)
else:
raise FileNotFoundError(f"File not found: {fn}")
def download_cobs(year=2020, update=False):
"""Download a year of COBS data and save it in the cache."""
URL = "https://cobs.si/analysis"
cache_fn = _get_cache_filename(year)
if cache_fn.exists() and not update:
raise IOError(f"Data for {year} has already been downloaded. "
"Use `update=True` to download again.")
log.info(f"Retrieving {year} data from {URL}")
br = mechanize.Browser()
br.set_handle_robots(False)
br.open(URL)
br.select_form(nr=0)
br.form['START_DATE'] = f'{year}/01/01 00:00'
br.form['END_DATE'] = f'{year}/12/31 00:00'
br.submit(id="getobs")
resp = None
for link in br.links():
match = re.compile('.*lightcurve_.*.dat').search(link.url)
if match:
log.info(f"Downloading {link.url}")
resp = br.follow_link(link)
break
if resp is None:
raise IOError(f"Could not download COBS data for {year}.")
# Parse the format and save to a feather cache file
df = _parse_icq(StringIO(resp.get_data().decode()))
cache_fn.parent.mkdir(exist_ok=True)
log.info(f"Saving data to {cache_fn}")
df.to_feather(cache_fn)
return df
|
[
"pandas.read_feather",
"pandas.to_timedelta",
"appdirs.user_cache_dir",
"re.compile",
"pandas.merge",
"astropy.time.Time",
"mechanize.Browser",
"pandas.concat",
"pandas.to_datetime",
"numpy.atleast_1d"
] |
[((332, 360), 'appdirs.user_cache_dir', 'user_cache_dir', (['"""cometcurve"""'], {}), "('cometcurve')\n", (346, 360), False, 'from appdirs import user_cache_dir\n'), ((1681, 1701), 'numpy.atleast_1d', 'np.atleast_1d', (['years'], {}), '(years)\n', (1694, 1701), True, 'import numpy as np\n'), ((1866, 1881), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (1875, 1881), True, 'import pandas as pd\n'), ((3732, 3770), 'pandas.merge', 'pd.merge', (['df', 'df_counts'], {'on': '"""observer"""'}), "(df, df_counts, on='observer')\n", (3740, 3770), True, 'import pandas as pd\n'), ((4844, 4863), 'mechanize.Browser', 'mechanize.Browser', ([], {}), '()\n', (4861, 4863), False, 'import mechanize\n'), ((2828, 2861), 'pandas.to_datetime', 'pd.to_datetime', (['df.date'], {'utc': '(True)'}), '(df.date, utc=True)\n', (2842, 2861), True, 'import pandas as pd\n'), ((2864, 2908), 'pandas.to_timedelta', 'pd.to_timedelta', (['df.fractional_day'], {'unit': '"""D"""'}), "(df.fractional_day, unit='D')\n", (2879, 2908), True, 'import pandas as pd\n'), ((2924, 2937), 'astropy.time.Time', 'Time', (['df.time'], {}), '(df.time)\n', (2928, 2937), False, 'from astropy.time import Time\n'), ((4334, 4353), 'pandas.read_feather', 'pd.read_feather', (['fn'], {}), '(fn)\n', (4349, 4353), True, 'import pandas as pd\n'), ((5123, 5156), 're.compile', 're.compile', (['""".*lightcurve_.*.dat"""'], {}), "('.*lightcurve_.*.dat')\n", (5133, 5156), False, 'import re\n')]
|
import numpy as np
import h5py
import argparse
np.random.seed(2019)
parser = argparse.ArgumentParser(description="Generate the diff data")
parser.add_argument("--valid", action="store_true")
parser.add_argument("--use_random", action="store_true")
# specify the interval
parser.add_argument("--bound", default=1, type=int, required=False)
parser.add_argument("--use_previous", action="store_true", help="Specify whether to use previous frames or not")
parser.add_argument('--use_pre', action='store_true')
args = parser.parse_args()
# compute the difference of frames
bound = args.bound
is_train = not args.valid
use_random = args.use_random
use_previous = args.use_previous
use_pre = args.use_pre
# in_filename = "../data/kinetics_final.h5"
suffix = str(bound) if bound > 1 else ""
if use_random:
suffix += "_rand"
if use_pre:
suffix += "_pre"
in_filename = "../data/h36m_{}_pred3.h5".format("train" if is_train else "valid")
out_filename = "../data/h36m_{}_diff{}.h5".format("train" if is_train else "valid", suffix)
f = h5py.File(in_filename, "r")
names = [name.decode() for name in f['imagename'][:]]
joints_2d = np.array(f['joint_2d_gt' if not use_pre else "joint_2d_pre"])
f.close()
print("Load from", in_filename)
size = joints_2d.shape[0]
splits = [name.split('/') for name in names]
sequences = ['/'.join(split[:3]) for split in splits]
indices = [int(split[-1]) for split in splits]
# calculate the length of each sequence
seq_lens = {}
for split in splits:
seq = '/'.join(split[:3])
if seq not in seq_lens:
seq_lens[seq] = 0
seq_lens[seq] += 1
intervals = np.random.randint(1, bound + 1, (size, ))
if not use_random:
intervals.fill(bound)
if use_previous:
spec_indices = [i for i, index in enumerate(indices) if index < intervals[i]]
diff_indices = np.arange(0, size, 1) - intervals
diff_indices[spec_indices] += 2 * intervals[spec_indices]
else:
spec_indices = [i for i, index in enumerate(indices) if index >= seq_lens[sequences[i]] - intervals[i]]
diff_indices = np.arange(0, size, 1) + intervals
diff_indices[spec_indices] -= 2 * intervals[spec_indices]
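# Added example of the boundary handling above: with bound=1, the last frame of a sequence
# has no following frame, so its diff index is mirrored from N to N-2; with --use_previous,
# frame 0 is mirrored from -1 to +1, so every entry of diff_indices points at a valid
# neighbouring frame.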
# before_joints = np.concatenate((joints_2d[:1].copy(), joints_2d[:-1].copy()), axis=0)
# after_joints = np.concatenate((joints_2d[1:].copy(), joints_2d[-1:].copy()), axis=0)
# print(before_joints.shape, after_joints.shape)
# diff_before = joints_2d - before_joints
# diff_after = joints_2d - after_joints
# diff_before, diff_after = before_joints, after_joints
# diff_before, diff_after = diff_before[:, np.newaxis], diff_after[:, np.newaxis]
# finally process the special cases
# diff_before[start_indices] = diff_after[start_indices]
# diff_after[end_indices] = diff_before[end_indices]
# diff = np.concatenate((diff_before, diff_after), axis=1)
# print(diff.shape)
# diff_types = np.ones((len(diff), ), dtype=np.uint8)
# diff_types[start_indices] = 0
# diff_types[end_indices] = 2
diff = joints_2d[diff_indices]
dist = np.linalg.norm((joints_2d - diff).reshape(size, -1), axis=1).mean()
print("Mean distance bewteen diff and original: {:.3f}".format(dist))
f = h5py.File(out_filename, "w")
f['gt_diff'] = diff
# f['gt_diff_type'] = diff_types
f.close()
print("Saved to", out_filename)
|
[
"argparse.ArgumentParser",
"h5py.File",
"numpy.array",
"numpy.random.randint",
"numpy.random.seed",
"numpy.arange"
] |
[((51, 71), 'numpy.random.seed', 'np.random.seed', (['(2019)'], {}), '(2019)\n', (65, 71), True, 'import numpy as np\n'), ((81, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate the diff data"""'}), "(description='Generate the diff data')\n", (104, 142), False, 'import argparse\n'), ((1042, 1069), 'h5py.File', 'h5py.File', (['in_filename', '"""r"""'], {}), "(in_filename, 'r')\n", (1051, 1069), False, 'import h5py\n'), ((1136, 1197), 'numpy.array', 'np.array', (["f['joint_2d_gt' if not use_pre else 'joint_2d_pre']"], {}), "(f['joint_2d_gt' if not use_pre else 'joint_2d_pre'])\n", (1144, 1197), True, 'import numpy as np\n'), ((1612, 1652), 'numpy.random.randint', 'np.random.randint', (['(1)', '(bound + 1)', '(size,)'], {}), '(1, bound + 1, (size,))\n', (1629, 1652), True, 'import numpy as np\n'), ((3118, 3146), 'h5py.File', 'h5py.File', (['out_filename', '"""w"""'], {}), "(out_filename, 'w')\n", (3127, 3146), False, 'import h5py\n'), ((1819, 1840), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)'], {}), '(0, size, 1)\n', (1828, 1840), True, 'import numpy as np\n'), ((2049, 2070), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)'], {}), '(0, size, 1)\n', (2058, 2070), True, 'import numpy as np\n')]
|
from scipy.sparse import *
import numpy as np
import pickle
import random
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from tqdm import tqdm
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from operator import itemgetter
from scipy.sparse import csc_matrix as smatrix
import scipy
from scipy.sparse import *
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.neural_network import MLPClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.regularizers import l2
# parameters
random_state = 42
min_word_frequency = 100
# get all unique tokens in file
def get_words(filename):
data = open(filename, 'rb')
res = []
for line in tqdm(data):
line = line.strip().decode("utf-8").split(' ')
res += line
return list(set(res))
# join two token lists to token -> id mapping
def get_vocab(words_pos, words_neg):
all_words = list(set(words_pos + words_neg))
return {x: i for i, x in tqdm(enumerate(all_words))}
# file -> [[word_number_1_1, ..., word_number_1_K1], ..., [word_number_L_1, ..., word_number_L_KL]]
def file_to_word2numbers(filename, vocab):
data = open(filename, 'rb')
word2numbers_all = []
for line in tqdm(data):
line = line.strip().decode("utf-8").split(' ')
word2numbers = []
for word in line:
if word in vocab: word2numbers.append(vocab[word])
if word2numbers:
word2numbers_all.append(word2numbers)
return word2numbers_all
# number of word occurences as embeddings (basic embeddings)
def numbers_to_dataset(numbers, vocab):
arr = {}
for i, tweet in tqdm(enumerate(numbers)):
for number in tweet:
p = (i, number)
if p in arr: arr[p] += 1
else: arr[p] = 1
keys = list(arr.keys())
values = [arr[k] for k in keys]
return coo_matrix((values, ([x for x, y in keys], [y for x, y in keys])), shape=(len(numbers), len(vocab)))
# constructing X, y pair
def two_datasets_to_one(pos_data, neg_data):
assert pos_data.shape[1] == neg_data.shape[1]
X = scipy.sparse.vstack((pos_data, neg_data))
y = np.array([1] * pos_data.shape[0] + [0] * neg_data.shape[0])
assert len(y) == X.shape[0]
assert X.shape[0] == pos_data.shape[0] + neg_data.shape[0]
assert X.shape[1] == pos_data.shape[1]
return X, y
# returns vector of token frequencies
def get_word_count(fn, vocab):
data = open(fn, 'rb')
res = [0] * len(vocab)
for line in tqdm(data):
line = line.strip().decode("utf-8").split(' ')
for w in line:
if w in vocab:
res[vocab[w]] += 1
return np.array(res)
# obtain dataset from two files using functions above
def get_dataset(tweets_pos, tweets_neg, count_threshold = 100):
words_pos = get_words(tweets_pos)
words_neg = get_words(tweets_neg)
vocab = get_vocab(words_pos, words_neg)
# construct num -> word dict
reverse_dictionary = dict(zip(vocab.values(), vocab.keys()))
# removing non-frequent words from vocab
word_count = get_word_count(tweets_pos, vocab) + get_word_count(tweets_neg, vocab)
use_words = [reverse_dictionary[i] for i, x in enumerate(word_count) if x > count_threshold]
print('Using %d words out of %d' % (len(use_words), len(vocab)))
vocab = {x: i for i, x in tqdm(enumerate(use_words))}
# construct num -> word dict
reverse_dictionary = dict(zip(vocab.values(), vocab.keys()))
# loading data -> numbers of words
pos_numbers = file_to_word2numbers(tweets_pos, vocab)
neg_numbers = file_to_word2numbers(tweets_neg, vocab)
# applying it to numbers
pos_data = numbers_to_dataset(pos_numbers, vocab)
neg_data = numbers_to_dataset(neg_numbers, vocab)
# applying to datasets (pos & neg)
X, Y = two_datasets_to_one(pos_data, neg_data)
return vocab, reverse_dictionary, X, Y
# get full dataset
vocab_full, rev_full, X_full, Y_full = get_dataset('data/clean_train_pos.txt', 'data/clean_train_neg.txt', min_word_frequency)
vocab, reverse_dictionary, X, Y = vocab_full, rev_full, X_full, Y_full
# plot word length distribution
def plot_word_count_hist(vocab):
lengths = [len(x) for x in vocab.keys()]
pd.DataFrame(lengths).hist()
#plot_word_count_hist(vocab)
# split to train/val
test_size_percent = 0.01
x, x_val, y, y_val = train_test_split(X, Y, test_size=test_size_percent, random_state=random_state)
y = np.array(y).reshape(-1, 1)
y_val = np.array(y_val).reshape(-1, 1)
def batch_generator(X, y, batch_size, number_of_batches):
counter = 0
shuffle_index = np.arange(np.shape(y)[0])
np.random.shuffle(shuffle_index)
X = X[shuffle_index, :]
y = y[shuffle_index]
while 1:
index_batch = shuffle_index[batch_size*counter:batch_size*(counter+1)]
X_batch = X[index_batch,:].todense()
y_batch = y[index_batch]
counter += 1
yield(np.array(X_batch),y_batch)
if (counter >= number_of_batches):
np.random.shuffle(shuffle_index)
counter = 0
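# Design note (added): the training matrix is sparse, so the generator above densifies one
# batch at a time with .todense() instead of materialising the full dense matrix;
# fit_generator below consumes it with steps_per_epoch = x.shape[0] / batch_size.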
# training parameters
hidden_layers = (200, 50, 20)
model = Sequential()
np.random.seed(random_state)
# first hidden layer
model.add(Dense(hidden_layers[0], activation='relu', input_dim=x.shape[1]))
# adding regularization
model.add(Dropout(0.1))
# hidden layers
for neurons_n in hidden_layers[1:]:
model.add(Dense(neurons_n, activation='relu'))
# two for classification
model.add(Dense(2, activation='relu'))
#output layer
model.add(Dense(1, activation='sigmoid'))
# showing accuracy
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=['accuracy'])
# setting parameters
batch_size = 10000
nb_epoch = 10
nb_batches = x.shape[0] / batch_size
generator = batch_generator(x, y, batch_size, nb_batches)
# training model
model.fit_generator(generator = generator, epochs = nb_epoch,
steps_per_epoch = nb_batches,)
# print resulting validation loss/accuracy (the model above was trained via the batch generator)
val_loss, val_acc = model.evaluate(np.asarray(x_val.todense()), y_val)
print('Accuracy on validation: %.3f' % val_acc)
# plot ROC curve
def plot_ROC(x, y, model):
    # the sigmoid output of the Keras model has shape (n_samples, 1)
    fpr, tpr, _ = roc_curve(y, model.predict(np.asarray(x.todense()))[:, 0])
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
#plot_ROC(x, y, model)
# open train and predict
def test_to_dataset(filename):
data = open(filename, 'rb')
idxes = []
tweets_embeddings = []
for line in tqdm(data):
idx, line = line.strip().decode("utf-8").split(',', 1)
idxes.append(idx)
line = line.split(' ')
tweet = []
tweet_embeddings = np.zeros((len(vocab), ), dtype=np.float32)
for word in line:
if word in vocab:
tweet_embeddings[vocab[word]] += 1
tweets_embeddings.append(tweet_embeddings)
#return tweets_embeddings
tweets_embeddings = np.array(tweets_embeddings)
assert len(idxes) == tweets_embeddings.shape[0]
assert tweets_embeddings.shape[1] == len(vocab)
return idxes, tweets_embeddings
# write resulting clf predictions to output_filename
# using filename as tweets input (test)
def write_result(filename, clf, output_filename):
idx_test, X_test = test_to_dataset(filename)
    # threshold the sigmoid output at 0.5 and map {0, 1} -> {-1, +1} for the submission format
    y_predicted = np.where(clf.predict(X_test)[:, 0] > 0.5, 1, -1)
answers = sorted(zip(idx_test, y_predicted), key = lambda x: int(x[0]))
f = open(output_filename, 'w')
f.write("Id,Prediction\n")
for idx, ans in answers:
f.write("%s,%s\n" % (idx, ans))
f.close()
# 'filename' must point to the test tweets file ("id,tweet" per line); it is not defined in this script
write_result(filename, model, 'submission.txt')
|
[
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"numpy.array",
"keras.layers.Dense",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.shape",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"tqdm.tqdm",
"matplotlib.pyplot.figure",
"scipy.sparse.vstack",
"numpy.random.shuffle"
] |
[((4519, 4597), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': 'test_size_percent', 'random_state': 'random_state'}), '(X, Y, test_size=test_size_percent, random_state=random_state)\n', (4535, 4597), False, 'from sklearn.model_selection import train_test_split\n'), ((5288, 5300), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5298, 5300), False, 'from keras.models import Sequential\n'), ((5302, 5330), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (5316, 5330), True, 'import numpy as np\n'), ((817, 827), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (821, 827), False, 'from tqdm import tqdm\n'), ((1338, 1348), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (1342, 1348), False, 'from tqdm import tqdm\n'), ((2233, 2274), 'scipy.sparse.vstack', 'scipy.sparse.vstack', (['(pos_data, neg_data)'], {}), '((pos_data, neg_data))\n', (2252, 2274), False, 'import scipy\n'), ((2283, 2342), 'numpy.array', 'np.array', (['([1] * pos_data.shape[0] + [0] * neg_data.shape[0])'], {}), '([1] * pos_data.shape[0] + [0] * neg_data.shape[0])\n', (2291, 2342), True, 'import numpy as np\n'), ((2636, 2646), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (2640, 2646), False, 'from tqdm import tqdm\n'), ((2799, 2812), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2807, 2812), True, 'import numpy as np\n'), ((4794, 4826), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (4811, 4826), True, 'import numpy as np\n'), ((5363, 5427), 'keras.layers.Dense', 'Dense', (['hidden_layers[0]'], {'activation': '"""relu"""', 'input_dim': 'x.shape[1]'}), "(hidden_layers[0], activation='relu', input_dim=x.shape[1])\n", (5368, 5427), False, 'from keras.layers import Dense, Dropout\n'), ((5464, 5476), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (5471, 5476), False, 'from keras.layers import Dense, Dropout\n'), ((5618, 5645), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""relu"""'}), "(2, activation='relu')\n", (5623, 5645), False, 'from keras.layers import Dense, Dropout\n'), ((5676, 5706), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5681, 5706), False, 'from keras.layers import Dense, Dropout\n'), ((6350, 6363), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (6353, 6363), False, 'from sklearn.metrics import roc_curve, auc\n'), ((6369, 6381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6379, 6381), True, 'from matplotlib import pyplot as plt\n'), ((6397, 6491), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve (area = %0.2f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % roc_auc)\n", (6405, 6491), True, 'from matplotlib import pyplot as plt\n'), ((6491, 6552), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (6499, 6552), True, 'from matplotlib import pyplot as plt\n'), ((6557, 6577), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6565, 6577), True, 'from matplotlib import pyplot as plt\n'), ((6582, 6603), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (6590, 6603), True, 'from matplotlib import pyplot as plt\n'), ((6608, 6641), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (6618, 6641), True, 'from matplotlib import pyplot as plt\n'), ((6646, 6678), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (6656, 6678), True, 'from matplotlib import pyplot as plt\n'), ((6683, 6705), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curve"""'], {}), "('ROC curve')\n", (6692, 6705), True, 'from matplotlib import pyplot as plt\n'), ((6710, 6739), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6720, 6739), True, 'from matplotlib import pyplot as plt\n'), ((6744, 6754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6752, 6754), True, 'from matplotlib import pyplot as plt\n'), ((6929, 6939), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (6933, 6939), False, 'from tqdm import tqdm\n'), ((7406, 7433), 'numpy.array', 'np.array', (['tweets_embeddings'], {}), '(tweets_embeddings)\n', (7414, 7433), True, 'import numpy as np\n'), ((4603, 4614), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4611, 4614), True, 'import numpy as np\n'), ((4638, 4653), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (4646, 4653), True, 'import numpy as np\n'), ((5545, 5580), 'keras.layers.Dense', 'Dense', (['neurons_n'], {'activation': '"""relu"""'}), "(neurons_n, activation='relu')\n", (5550, 5580), False, 'from keras.layers import Dense, Dropout\n'), ((4392, 4413), 'pandas.DataFrame', 'pd.DataFrame', (['lengths'], {}), '(lengths)\n', (4404, 4413), True, 'import pandas as pd\n'), ((4774, 4785), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4782, 4785), True, 'import numpy as np\n'), ((5169, 5201), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (5186, 5201), True, 'import numpy as np\n'), ((5087, 5104), 'numpy.array', 'np.array', (['X_batch'], {}), '(X_batch)\n', (5095, 5104), True, 'import numpy as np\n')]
|
import numpy as np
class Renderer:
def __init__(self, height, width, config):
self.height = height
self.width = width
self.content = None
self.zbuffer = None
self.m = None
self.f = 1.0
self.resize(height, width)
self.colors = config.colors
self.bonds = config.bonds
self.btoggle = len(self.bonds) > 0
self.pos, self.sym = np.array(config.coordinates), config.symbols
self.ztoggle = True
self.zoom = 1.0
self.rot = np.identity(3)
self.rotcounter = [0, 0, 0]
self.draw_scene()
def draw_scene(self):
"""
A super simple rasterizer. For now, just draw single character atom symbols at their rounded x and y
positions.
:return: True if nothing bad happened.
"""
mx, my = self.m
rot = np.matmul(self.pos, self.rot)
self.clear()
# Draw bonds
for bond in self.bonds:
i, j = bond
# if bond is (i, j) with i == j, just draw the label (no bonds)
if i == j:
x, y, z = rot[i]
xp, yp = round(float(x) * self.f * self.zoom + mx), round(float(y) * self.zoom + my)
if 1 < xp < self.width - 2 and 1 < yp < self.height - 3 and float(z) < self.zbuffer[yp][xp]:
self.zbuffer[yp][xp] = float(z)
self.content[yp][xp] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
# else draw the bond with the labels at the end points
else:
# Draw the two labels at the end points
xa, ya, za = rot[i]
xa = float(xa) * self.f * self.zoom + mx
ya = float(ya) * self.zoom + my
xb, yb, zb = rot[j]
xb = float(xb) * self.f * self.zoom + mx
yb = float(yb) * self.zoom + my
xap, yap = round(xa), round(ya)
xbp, ybp = round(xb), round(yb)
if 1 < xap < self.width - 2 and 1 < yap < self.height - 3 and float(za) < self.zbuffer[yap][xap]:
self.zbuffer[yap][xap] = float(za)
self.content[yap][xap] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
if 1 < xbp < self.width - 2 and 1 < ybp < self.height - 3 and float(zb) < self.zbuffer[ybp][xbp]:
self.zbuffer[ybp][xbp] = float(zb)
self.content[ybp][xbp] = self.sym[j][0].upper() + "," + self.colors[self.sym[j].upper()]
if not self.btoggle:
continue
# Then start at xap+1 and go to xbp-1, drawing line segments
sy = -1 if ya > yb else 1
sx = -1 if xa > xb else 1
sz = -1 if za > zb else 1
dx = float((xb - xa) / (yb - ya)) if abs(yb - ya) > 0 else 0
dy = float((yb - ya) / (xb - xa)) if abs(xb - xa) > 0 else 0
dz = float((zb - za) / (xb - xa)) if abs(xb - xa) > 0 else 0
if abs(dy) <= 1:
for k in range(1, abs(xap - xbp)):
xk = xap + sx * k
yk = round(float(ya) + sx * k * dy)
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(xap - xbp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
self.content[yk][xk] = "·,%s" % col
else:
for k in range(1, abs(yap - ybp)):
xk = round((float(xa) + sy * k * dx))
yk = yap + sy * k
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(yap - ybp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
self.content[yk][xk] = "·,%s" % col
return True
def rotate(self, direction):
"""
Set an internal rotation matrix that is applied to the coordinates before every render.
:param direction: 1 and -1 are x and -x, 2 is either z/y, depending on whether the ztoggle is active or not
"""
if direction == 1:
self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, -0.0872], [0.0, 0.0872, 0.9962]])
if self.rotcounter[0] + 5 > 360:
self.rotcounter[0] = 0
self.rotcounter[0] += 5
elif direction == -1:
self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, 0.0872], [0.0, -0.0872, 0.9962]])
if self.rotcounter[0] - 5 < 0:
self.rotcounter[0] = 360
self.rotcounter[0] -= 5
elif direction == 2 and self.ztoggle:
self.rot = np.matmul(self.rot, [[0.9962, -0.0872, 0.0], [0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]])
if self.rotcounter[2] + 5 > 360:
self.rotcounter[2] = 0
else:
self.rotcounter[2] += 5
elif direction == -2 and self.ztoggle:
self.rot = np.matmul(self.rot, [[0.9962, 0.0872, 0.0], [-0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]])
if self.rotcounter[2] - 5 < 0:
self.rotcounter[2] = 360
else:
self.rotcounter[2] -= 5
elif direction == 2:
self.rot = np.matmul(self.rot, [[0.9962, 0.0, 0.0872], [0.0, 1.0, 0.0], [-0.0872, 0.0, 0.9962]])
if self.rotcounter[1] + 5 > 360:
self.rotcounter[1] = 0
else:
self.rotcounter[1] += 5
elif direction == -2:
self.rot = np.matmul(self.rot, [[0.9962, 0.0, -0.0872], [0.0, 1.0, 0.0], [0.0872, 0.0, 0.9962]])
if self.rotcounter[1] - 5 < 0:
self.rotcounter[1] = 360
else:
self.rotcounter[1] -= 5
def reset_view(self):
"""
Reset the view to the starting values.
"""
self.zoom = 1.0
self.rotcounter = [0, 0, 0]
self.rot = np.identity(3)
self.m = round(self.width / 2), round(self.height / 2)
def resize(self, height, width):
"""
Resize the screen. Known issue: crashes if the resize is faster than the framerate.
"""
self.height = height
self.width = width
self.content = [[" ,0"] * self.width for n in range(self.height - 2)]
self.zbuffer = [[10000.0] * self.width for n in range(self.height - 2)]
self.m = round(self.width / 2), round(self.height / 2)
# Since terminal characters are higher than wide, I correct for this by multiplying the x by f
# so that it appears wider. 2.25 is what looks good on my terminals, but might be
# nice to have a general way of determining the optimal value
self.f = 2
def clear(self):
"""
Clear the canvas and redraw the border.
"""
for i in range(self.height - 2):
for j in range(self.width):
self.zbuffer[i][j] = 10000.0
for i in range(self.height - 2):
for j in range(self.width):
if i == 0 and j == 0:
self.content[i][j] = "┌,0"
elif (i == 0 or i == self.height - 3) and 0 < j < self.width - 1:
self.content[i][j] = "─,0"
elif i == 0 and j == self.width - 1:
self.content[i][j] = "┐,0"
elif i < self.height - 3 and (j == 0 or j == self.width - 1):
self.content[i][j] = "│,0"
elif i == self.height - 3 and j == 0:
self.content[i][j] = "└,0"
elif i == self.height - 3 and j == self.width - 1:
self.content[i][j] = "┘,0"
else:
self.content[i][j] = " ,0"
|
[
"numpy.identity",
"numpy.array",
"numpy.matmul"
] |
[((534, 548), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (545, 548), True, 'import numpy as np\n'), ((876, 905), 'numpy.matmul', 'np.matmul', (['self.pos', 'self.rot'], {}), '(self.pos, self.rot)\n', (885, 905), True, 'import numpy as np\n'), ((6596, 6610), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (6607, 6610), True, 'import numpy as np\n'), ((417, 445), 'numpy.array', 'np.array', (['config.coordinates'], {}), '(config.coordinates)\n', (425, 445), True, 'import numpy as np\n'), ((4798, 4887), 'numpy.matmul', 'np.matmul', (['self.rot', '[[1.0, 0.0, 0.0], [0.0, 0.9962, -0.0872], [0.0, 0.0872, 0.9962]]'], {}), '(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, -0.0872], [0.0, 0.0872,\n 0.9962]])\n', (4807, 4887), True, 'import numpy as np\n'), ((5057, 5146), 'numpy.matmul', 'np.matmul', (['self.rot', '[[1.0, 0.0, 0.0], [0.0, 0.9962, 0.0872], [0.0, -0.0872, 0.9962]]'], {}), '(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, 0.0872], [0.0, -0.0872,\n 0.9962]])\n', (5066, 5146), True, 'import numpy as np\n'), ((5332, 5422), 'numpy.matmul', 'np.matmul', (['self.rot', '[[0.9962, -0.0872, 0.0], [0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]]'], {}), '(self.rot, [[0.9962, -0.0872, 0.0], [0.0872, 0.9962, 0.0], [0.0, \n 0.0, 1.0]])\n', (5341, 5422), True, 'import numpy as np\n'), ((5630, 5720), 'numpy.matmul', 'np.matmul', (['self.rot', '[[0.9962, 0.0872, 0.0], [-0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]]'], {}), '(self.rot, [[0.9962, 0.0872, 0.0], [-0.0872, 0.9962, 0.0], [0.0, \n 0.0, 1.0]])\n', (5639, 5720), True, 'import numpy as np\n'), ((5910, 5999), 'numpy.matmul', 'np.matmul', (['self.rot', '[[0.9962, 0.0, 0.0872], [0.0, 1.0, 0.0], [-0.0872, 0.0, 0.9962]]'], {}), '(self.rot, [[0.9962, 0.0, 0.0872], [0.0, 1.0, 0.0], [-0.0872, 0.0,\n 0.9962]])\n', (5919, 5999), True, 'import numpy as np\n'), ((6191, 6280), 'numpy.matmul', 'np.matmul', (['self.rot', '[[0.9962, 0.0, -0.0872], [0.0, 1.0, 0.0], [0.0872, 0.0, 0.9962]]'], {}), '(self.rot, [[0.9962, 0.0, -0.0872], [0.0, 1.0, 0.0], [0.0872, 0.0,\n 0.9962]])\n', (6200, 6280), True, 'import numpy as np\n')]
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import json
import random
import cv2
import numpy as np
import tensorflow as tf
from image_retrieval.common import preproces_image, depreprocess_image, fit_to_max_size, from_list
def blur(image):
kernel = np.ones((3, 3), np.float32) / 9
image = cv2.filter2D(image, -1, kernel)
return image
def gray_noise(image):
if np.mean(image) > 100:
gray = np.random.uniform(0.0, 100.0, image.shape[0:2])
gray3 = np.array([gray, gray, gray])
gray3 = np.transpose(gray3, (1, 2, 0))
gray3 = cv2.blur(gray3, ksize=(7, 7))
image -= gray3
image = np.clip(image, 0.0, 255.0)
return image
@tf.function
def tf_random_crop_and_resize(image, input_size):
min_size = tf.minimum(tf.shape(image)[0], tf.shape(image)[1])
crop_size = tf.random.uniform((), min_size // 2, min_size, dtype=tf.int32)
crop = tf.image.random_crop(image, (crop_size, crop_size, 3))
var_thr = 100
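    # Added note: nearly uniform crops are rejected and re-sampled (up to 10 times);
    # a crop is kept only once the sum of its per-channel pixel variances reaches var_thr.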
for _ in tf.range(10):
moments = tf.nn.moments(tf.reshape(crop, (-1, 3)), axes=0)
if tf.less(tf.reduce_sum(moments[1]), tf.constant(var_thr, dtype=tf.float32)):
crop = tf.image.random_crop(image, (crop_size, crop_size, 3))
else:
break
moments = tf.nn.moments(tf.reshape(crop, (-1, 3)), axes=0)
if tf.less(tf.reduce_sum(moments[1]), tf.constant(var_thr, dtype=tf.float32)):
crop = tf.image.random_crop(image, (tf.shape(image)[0], tf.shape(image)[1], 3))
crop = tf.cast(tf.expand_dims(crop, axis=0), tf.float32)
crop = tf.image.resize(crop, (input_size, input_size))
crop = tf.squeeze(crop, axis=0)
return crop
@tf.function
def tf_distort_color(image):
""" Distorts color. """
image = image / 255.0
image = image[:, :, ::-1]
brightness_max_delta = 16. / 255.
color_ordering = tf.random.uniform([], maxval=5, dtype=tf.int32)
if tf.equal(color_ordering, 0):
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif tf.equal(color_ordering, 1):
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.1)
elif tf.equal(color_ordering, 2):
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif tf.equal(color_ordering, 3):
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.clip_by_value(image, 0.0, 1.0)
image = image * 255
image = image[:, :, ::-1]
return image
class Dataset:
def __init__(self, images_paths, labels, is_real, input_size, batch_size, params,
return_original=False):
self.images_paths = images_paths
self.input_size = input_size
self.batch_size = batch_size
self.params = params
self.return_original = return_original
self.loaded_images = []
self.labels = Dataset.reassign_labels(labels)
self.is_real = is_real
if self.params['preload']:
self.preload()
if self.params['pretile']:
self.pretile()
self.images_indexes_per_class = collections.defaultdict(list)
for index, label in enumerate(self.labels):
self.images_indexes_per_class[label].append(index)
if self.params['weighted_sampling']:
self.calc_sampling_probs()
def calc_sampling_probs(self):
''' Counts number of images per class and returns probability distribution so that
distribution of images classes becomes uniform.
'''
frequency = {l: self.labels.count(l) for l in set(self.labels)}
probs = np.empty((len(self.labels)), dtype=np.float32)
for idx, l in enumerate(self.labels):
probs[idx] = 1.0 / frequency[l]
self.probs = probs / np.sum(probs)
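        # Added example: for labels [0, 0, 0, 1] the per-image weights are [1/3, 1/3, 1/3, 1];
        # after normalisation, class 0 and class 1 are each drawn with total probability 1/2.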
def preload(self):
''' Pre-loads images in RAM. '''
for image_path in self.images_paths:
self.loaded_images.append(cv2.imread(image_path))
def pretile(self):
''' Pre-tiles images in RAM. Makes training faster but requires huge amount of RAM. '''
tiled_labels = []
tiled_is_real = []
tiled_loaded_images = []
for read_image, label, real in zip(self.loaded_images, self.labels, self.is_real):
if not real:
for n in range(2, self.params['max_tiling'] + 1):
image = self.tile(read_image, n)
tiled_labels.append(label)
tiled_is_real.append(real)
tiled_loaded_images.append(image)
self.labels.extend(tiled_labels)
self.is_real.extend(tiled_is_real)
self.loaded_images.extend(tiled_loaded_images)
def tile(self, image, n):
''' Tiles an image, taking its aspect ratio into account. '''
aspect_ratio = image.shape[1] / image.shape[0]
if aspect_ratio < 1:
w_repeats = n
h_repeats = max(1 if n != self.params['max_tiling'] else 2, int(n * aspect_ratio))
else:
h_repeats = n
w_repeats = max(1 if n != self.params['max_tiling'] else 2, int(n / aspect_ratio))
image = np.tile(image, (h_repeats, w_repeats, 1))
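# Hypothetical example: a 100x200 image (aspect ratio 2.0) tiled with n=3 (and max_tiling > 3)
# gives h_repeats=3, w_repeats=1, i.e. a 300x200 result.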
fit_size = self.input_size * 3
if image.shape[0] > fit_size or image.shape[1] > fit_size:
image = fit_to_max_size(image, self.input_size * 3)
return image
def sample_index(self):
''' Samples image indices (optionally weighted for class balance), then duplicates each choice with same-class samples. '''
choices = list(range(len(self.labels)))
if self.params['weighted_sampling']:
choices = np.random.choice(choices, len(self.labels), p=self.probs)
elif self.params['shuffle']:
np.random.shuffle(choices)
# duplication (re-sampling from the same class) guarantees several samples per class, which is required for triplet loss at least.
duplicated_choices = []
for choice in choices:
for _ in range(self.params['duplicate_n_times']):
duplicated_choices.append(int(
np.random.choice(
self.images_indexes_per_class[self.labels[choice]],
1)))
for choice in duplicated_choices:
yield [choice]
def read(self, index):
''' Reads an image from RAM or disk and returns it with the corresponding class label. '''
if self.params['preload']:
image = self.loaded_images[index[0]].astype(np.float32)
else:
image = cv2.imread(self.images_paths[index[0]]).astype(np.float32)
if not self.params['pretile'] and not self.is_real[index[0]]:
n = random.randint(1, self.params['max_tiling'])
image = self.tile(image, n)
return image, self.labels[index[0]]
def cv2_rotate(self, image):
''' Rotates the image by a random angle using OpenCV. '''
c_xy = image.shape[1] / 2, image.shape[0] / 2
angle = random.uniform(-self.params['add_rot_angle'],
self.params['add_rot_angle']) * 57.2958
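# 57.2958 ~= 180 / pi: 'add_rot_angle' is assumed to be configured in radians, while
# cv2.getRotationMatrix2D below expects the angle in degrees.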
if self.params['rot90']:
angle += random.randint(0, 3) * 180
rotation_matrix = cv2.getRotationMatrix2D(c_xy, angle, 1)
img_rotation = cv2.warpAffine(image, rotation_matrix, (image.shape[1], image.shape[0]))
return img_rotation
def cv2_noise_and_blur(self, image):
''' Optionally adds gray noise (which darkens the image) and blur. '''
image = image.astype(np.float32)
if self.params['apply_gray_noise'] and np.random.choice([True, False]):
image = gray_noise(image)
if self.params['blur'] and np.random.choice([True, False]):
image = blur(image)
return image
def train_preprocess(self, choice):
''' Applies training preprocessing. '''
original, label = tf.numpy_function(self.read, [choice], [tf.float32, tf.int64])
image = tf_random_crop_and_resize(original, self.input_size)
image, = tf.numpy_function(self.cv2_noise_and_blur, [image], [tf.float32])
if self.params['horizontal_flip']:
image = tf.image.random_flip_left_right(image)
if self.params['vertical_flip']:
image = tf.image.random_flip_up_down(image)
if self.params['add_rot_angle'] > 0 or self.params['rot90']:
image, = tf.numpy_function(self.cv2_rotate, [image], [tf.float32])
image = tf_distort_color(image)
image = preproces_image(image)
if self.return_original:
return image, label, original
return image, label
def __call__(self, *args, **kwargs):
''' Returns a tf.data.Dataset instance as well as the number of classes in the training set. '''
dataset = tf.data.Dataset.from_generator(self.sample_index, (tf.int32),
(tf.TensorShape([1])))
dataset = dataset.map(self.train_preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if not self.return_original:
dataset = dataset.batch(self.batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.repeat()
return dataset, len(set(self.labels))
@staticmethod
def create_from_list(path, input_size, batch_size, params, return_original=False):
''' Creates a Dataset instance from a path to an images list.
The images list has the following format:
<relative_path_to_image> <class_label>
'''
impaths, labels, is_real, _ = from_list(path)
return Dataset(impaths, labels, is_real, input_size, batch_size, params, return_original)()
@staticmethod
def reassign_labels(labels):
''' Re-assigns class labels so that they start from 0 and end at (num_classes - 1). '''
unique_labels = list(set(labels))
return [unique_labels.index(l) for l in labels]
def main():
import argparse
import time
args = argparse.ArgumentParser()
args.add_argument('--gallery', required=True)
args.add_argument('--input_size', default=224, type=int)
args.add_argument('--augmentation_config', required=True)
args = args.parse_args()
with open(args.augmentation_config) as f:
augmentation_config = json.load(f)
dataset, _ = Dataset.create_from_list(args.gallery, args.input_size, 1,
augmentation_config, True)
t = time.time()
for preprocessed, label, original in dataset.take(1000):
cv2.imshow('preprocessed', depreprocess_image(preprocessed.numpy()))
cv2.imshow('original', original.numpy().astype(np.uint8))
print(label)
if cv2.waitKey(0) == 27:
break
print(time.time() - t)
if __name__ == '__main__':
main()
|
[
"numpy.clip",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.numpy_function",
"cv2.filter2D",
"numpy.array",
"tensorflow.image.random_saturation",
"image_retrieval.common.preproces_image",
"numpy.mean",
"argparse.ArgumentParser",
"tensorflow.image.random_crop",
"tensorflow.clip_by_value",
"cv2.waitKey",
"random.randint",
"cv2.blur",
"tensorflow.random.uniform",
"numpy.tile",
"random.uniform",
"cv2.warpAffine",
"numpy.ones",
"numpy.random.choice",
"tensorflow.range",
"tensorflow.image.random_hue",
"tensorflow.image.random_brightness",
"image_retrieval.common.fit_to_max_size",
"tensorflow.reshape",
"tensorflow.expand_dims",
"cv2.getRotationMatrix2D",
"numpy.transpose",
"time.time",
"tensorflow.image.random_contrast",
"cv2.imread",
"tensorflow.image.random_flip_left_right",
"tensorflow.image.resize",
"image_retrieval.common.from_list",
"tensorflow.image.random_flip_up_down",
"numpy.sum",
"tensorflow.constant",
"collections.defaultdict",
"numpy.random.uniform",
"json.load",
"tensorflow.squeeze",
"tensorflow.TensorShape",
"numpy.random.shuffle"
] |
[((858, 889), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel'], {}), '(image, -1, kernel)\n', (870, 889), False, 'import cv2\n'), ((1393, 1455), 'tensorflow.random.uniform', 'tf.random.uniform', (['()', '(min_size // 2)', 'min_size'], {'dtype': 'tf.int32'}), '((), min_size // 2, min_size, dtype=tf.int32)\n', (1410, 1455), True, 'import tensorflow as tf\n'), ((1468, 1522), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '(crop_size, crop_size, 3)'], {}), '(image, (crop_size, crop_size, 3))\n', (1488, 1522), True, 'import tensorflow as tf\n'), ((1556, 1568), 'tensorflow.range', 'tf.range', (['(10)'], {}), '(10)\n', (1564, 1568), True, 'import tensorflow as tf\n'), ((2139, 2186), 'tensorflow.image.resize', 'tf.image.resize', (['crop', '(input_size, input_size)'], {}), '(crop, (input_size, input_size))\n', (2154, 2186), True, 'import tensorflow as tf\n'), ((2198, 2222), 'tensorflow.squeeze', 'tf.squeeze', (['crop'], {'axis': '(0)'}), '(crop, axis=0)\n', (2208, 2222), True, 'import tensorflow as tf\n'), ((2430, 2477), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {'maxval': '(5)', 'dtype': 'tf.int32'}), '([], maxval=5, dtype=tf.int32)\n', (2447, 2477), True, 'import tensorflow as tf\n'), ((2485, 2512), 'tensorflow.equal', 'tf.equal', (['color_ordering', '(0)'], {}), '(color_ordering, 0)\n', (2493, 2512), True, 'import tensorflow as tf\n'), ((3769, 3802), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0.0)', '(1.0)'], {}), '(image, 0.0, 1.0)\n', (3785, 3802), True, 'import tensorflow as tf\n'), ((11333, 11358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11356, 11358), False, 'import argparse\n'), ((11806, 11817), 'time.time', 'time.time', ([], {}), '()\n', (11815, 11817), False, 'import time\n'), ((814, 841), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (821, 841), True, 'import numpy as np\n'), ((939, 953), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (946, 953), True, 'import numpy as np\n'), ((976, 1023), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(100.0)', 'image.shape[0:2]'], {}), '(0.0, 100.0, image.shape[0:2])\n', (993, 1023), True, 'import numpy as np\n'), ((1040, 1068), 'numpy.array', 'np.array', (['[gray, gray, gray]'], {}), '([gray, gray, gray])\n', (1048, 1068), True, 'import numpy as np\n'), ((1085, 1115), 'numpy.transpose', 'np.transpose', (['gray3', '(1, 2, 0)'], {}), '(gray3, (1, 2, 0))\n', (1097, 1115), True, 'import numpy as np\n'), ((1132, 1161), 'cv2.blur', 'cv2.blur', (['gray3'], {'ksize': '(7, 7)'}), '(gray3, ksize=(7, 7))\n', (1140, 1161), False, 'import cv2\n'), ((1201, 1227), 'numpy.clip', 'np.clip', (['image', '(0.0)', '(255.0)'], {}), '(image, 0.0, 255.0)\n', (1208, 1227), True, 'import numpy as np\n'), ((1860, 1885), 'tensorflow.reshape', 'tf.reshape', (['crop', '(-1, 3)'], {}), '(crop, (-1, 3))\n', (1870, 1885), True, 'import tensorflow as tf\n'), ((1910, 1935), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['moments[1]'], {}), '(moments[1])\n', (1923, 1935), True, 'import tensorflow as tf\n'), ((1937, 1975), 'tensorflow.constant', 'tf.constant', (['var_thr'], {'dtype': 'tf.float32'}), '(var_thr, dtype=tf.float32)\n', (1948, 1975), True, 'import tensorflow as tf\n'), ((2086, 2114), 'tensorflow.expand_dims', 'tf.expand_dims', (['crop'], {'axis': '(0)'}), '(crop, axis=0)\n', (2100, 2114), True, 'import tensorflow as tf\n'), ((2530, 2595), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], 
{'max_delta': 'brightness_max_delta'}), '(image, max_delta=brightness_max_delta)\n', (2556, 2595), True, 'import tensorflow as tf\n'), ((2612, 2667), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (2638, 2667), True, 'import tensorflow as tf\n'), ((2684, 2725), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': '(0.1)'}), '(image, max_delta=0.1)\n', (2703, 2725), True, 'import tensorflow as tf\n'), ((2742, 2795), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (2766, 2795), True, 'import tensorflow as tf\n'), ((2805, 2832), 'tensorflow.equal', 'tf.equal', (['color_ordering', '(1)'], {}), '(color_ordering, 1)\n', (2813, 2832), True, 'import tensorflow as tf\n'), ((4503, 4532), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4526, 4532), False, 'import collections\n'), ((6567, 6608), 'numpy.tile', 'np.tile', (['image', '(h_repeats, w_repeats, 1)'], {}), '(image, (h_repeats, w_repeats, 1))\n', (6574, 6608), True, 'import numpy as np\n'), ((8490, 8529), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['c_xy', 'angle', '(1)'], {}), '(c_xy, angle, 1)\n', (8513, 8529), False, 'import cv2\n'), ((8553, 8625), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rotation_matrix', '(image.shape[1], image.shape[0])'], {}), '(image, rotation_matrix, (image.shape[1], image.shape[0]))\n', (8567, 8625), False, 'import cv2\n'), ((9152, 9214), 'tensorflow.numpy_function', 'tf.numpy_function', (['self.read', '[choice]', '[tf.float32, tf.int64]'], {}), '(self.read, [choice], [tf.float32, tf.int64])\n', (9169, 9214), True, 'import tensorflow as tf\n'), ((9301, 9366), 'tensorflow.numpy_function', 'tf.numpy_function', (['self.cv2_noise_and_blur', '[image]', '[tf.float32]'], {}), '(self.cv2_noise_and_blur, [image], [tf.float32])\n', (9318, 9366), True, 'import tensorflow as tf\n'), ((9770, 9792), 'image_retrieval.common.preproces_image', 'preproces_image', (['image'], {}), '(image)\n', (9785, 9792), False, 'from image_retrieval.common import preproces_image, depreprocess_image, fit_to_max_size, from_list\n'), ((10904, 10919), 'image_retrieval.common.from_list', 'from_list', (['path'], {}), '(path)\n', (10913, 10919), False, 'from image_retrieval.common import preproces_image, depreprocess_image, fit_to_max_size, from_list\n'), ((11638, 11650), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11647, 11650), False, 'import json\n'), ((1337, 1352), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1345, 1352), True, 'import tensorflow as tf\n'), ((1357, 1372), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1365, 1372), True, 'import tensorflow as tf\n'), ((1602, 1627), 'tensorflow.reshape', 'tf.reshape', (['crop', '(-1, 3)'], {}), '(crop, (-1, 3))\n', (1612, 1627), True, 'import tensorflow as tf\n'), ((1657, 1682), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['moments[1]'], {}), '(moments[1])\n', (1670, 1682), True, 'import tensorflow as tf\n'), ((1684, 1722), 'tensorflow.constant', 'tf.constant', (['var_thr'], {'dtype': 'tf.float32'}), '(var_thr, dtype=tf.float32)\n', (1695, 1722), True, 'import tensorflow as tf\n'), ((1744, 1798), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '(crop_size, crop_size, 3)'], {}), '(image, (crop_size, crop_size, 3))\n', (1764, 1798), True, 'import tensorflow as 
tf\n'), ((2850, 2905), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (2876, 2905), True, 'import tensorflow as tf\n'), ((2922, 2987), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': 'brightness_max_delta'}), '(image, max_delta=brightness_max_delta)\n', (2948, 2987), True, 'import tensorflow as tf\n'), ((3004, 3057), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (3028, 3057), True, 'import tensorflow as tf\n'), ((3074, 3115), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': '(0.1)'}), '(image, max_delta=0.1)\n', (3093, 3115), True, 'import tensorflow as tf\n'), ((3125, 3152), 'tensorflow.equal', 'tf.equal', (['color_ordering', '(2)'], {}), '(color_ordering, 2)\n', (3133, 3152), True, 'import tensorflow as tf\n'), ((5188, 5201), 'numpy.sum', 'np.sum', (['probs'], {}), '(probs)\n', (5194, 5201), True, 'import numpy as np\n'), ((6736, 6779), 'image_retrieval.common.fit_to_max_size', 'fit_to_max_size', (['image', '(self.input_size * 3)'], {}), '(image, self.input_size * 3)\n', (6751, 6779), False, 'from image_retrieval.common import preproces_image, depreprocess_image, fit_to_max_size, from_list\n'), ((7968, 8012), 'random.randint', 'random.randint', (['(1)', "self.params['max_tiling']"], {}), "(1, self.params['max_tiling'])\n", (7982, 8012), False, 'import random\n'), ((8264, 8339), 'random.uniform', 'random.uniform', (["(-self.params['add_rot_angle'])", "self.params['add_rot_angle']"], {}), "(-self.params['add_rot_angle'], self.params['add_rot_angle'])\n", (8278, 8339), False, 'import random\n'), ((8842, 8873), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {}), '([True, False])\n', (8858, 8873), True, 'import numpy as np\n'), ((8949, 8980), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {}), '([True, False])\n', (8965, 8980), True, 'import numpy as np\n'), ((9430, 9468), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (9461, 9468), True, 'import tensorflow as tf\n'), ((9530, 9565), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['image'], {}), '(image)\n', (9558, 9565), True, 'import tensorflow as tf\n'), ((9656, 9713), 'tensorflow.numpy_function', 'tf.numpy_function', (['self.cv2_rotate', '[image]', '[tf.float32]'], {}), '(self.cv2_rotate, [image], [tf.float32])\n', (9673, 9713), True, 'import tensorflow as tf\n'), ((10164, 10183), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1]'], {}), '([1])\n', (10178, 10183), True, 'import tensorflow as tf\n'), ((12054, 12068), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (12065, 12068), False, 'import cv2\n'), ((12104, 12115), 'time.time', 'time.time', ([], {}), '()\n', (12113, 12115), False, 'import time\n'), ((3170, 3223), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (3194, 3223), True, 'import tensorflow as tf\n'), ((3240, 3281), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': '(0.1)'}), '(image, max_delta=0.1)\n', (3259, 3281), True, 'import tensorflow as tf\n'), ((3298, 3363), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': 'brightness_max_delta'}), 
'(image, max_delta=brightness_max_delta)\n', (3324, 3363), True, 'import tensorflow as tf\n'), ((3380, 3435), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (3406, 3435), True, 'import tensorflow as tf\n'), ((3445, 3472), 'tensorflow.equal', 'tf.equal', (['color_ordering', '(3)'], {}), '(color_ordering, 3)\n', (3453, 3472), True, 'import tensorflow as tf\n'), ((5351, 5373), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (5361, 5373), False, 'import cv2\n'), ((7087, 7113), 'numpy.random.shuffle', 'np.random.shuffle', (['choices'], {}), '(choices)\n', (7104, 7113), True, 'import numpy as np\n'), ((8436, 8456), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (8450, 8456), False, 'import random\n'), ((2022, 2037), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2030, 2037), True, 'import tensorflow as tf\n'), ((2042, 2057), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2050, 2057), True, 'import tensorflow as tf\n'), ((3490, 3531), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': '(0.1)'}), '(image, max_delta=0.1)\n', (3509, 3531), True, 'import tensorflow as tf\n'), ((3548, 3603), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (3574, 3603), True, 'import tensorflow as tf\n'), ((3620, 3673), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (3644, 3673), True, 'import tensorflow as tf\n'), ((3690, 3755), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': 'brightness_max_delta'}), '(image, max_delta=brightness_max_delta)\n', (3716, 3755), True, 'import tensorflow as tf\n'), ((7822, 7861), 'cv2.imread', 'cv2.imread', (['self.images_paths[index[0]]'], {}), '(self.images_paths[index[0]])\n', (7832, 7861), False, 'import cv2\n'), ((7368, 7439), 'numpy.random.choice', 'np.random.choice', (['self.images_indexes_per_class[self.labels[choice]]', '(1)'], {}), '(self.images_indexes_per_class[self.labels[choice]], 1)\n', (7384, 7439), True, 'import numpy as np\n')]
|
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import pytest
from neon import NervanaObject
from neon.backends import gen_backend
from neon.layers.layer import Pooling
import ngraph as ng
import ngraph.transformers as ngt
from ngraph.testing import RandomTensorGenerator, executor
from ngraph.frontends.neon import ax, ar
from ngraph.frontends.neon.layer import output_dim
rng = RandomTensorGenerator(0, np.float32)
NervanaObject.be = gen_backend()
class DummyDeltaBuffers(object):
"""
Dummy class for delta buffers needed by neon
"""
def __init__(self):
self.buffers = [None]
def test_wrong_input_shape_length():
"""
test wrong input shape length
"""
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W])
inputs = ng.placeholder(axes=ax_i)
pool_params = dict(op='max')
with pytest.raises(ValueError) as exinfo:
ng.pooling(pool_params, inputs, {})
assert str(exinfo.value) == 'pooling input shape must be length 5, found {}' \
.format(len(ax_i))
def test_wrong_op_name():
"""
test unsupported pooling op name
"""
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
inputs = ng.placeholder(axes=ax_i)
pooltype = 'min'
pool_params = dict(op=pooltype)
with pytest.raises(ValueError) as exinfo:
ng.pooling(pool_params, inputs, {})
assert str(exinfo.value) == "Unsupported pooling type: {pooltype}. Only max and avg " \
"pooling currently supported. ".format(pooltype=pooltype)
def test_pooling():
"""
test pooling forward and backward path
"""
N = 128
C = 3
D = 1
H = W = 32
J = T = 1
R = S = 2
ngt.make_transformer()
padding = dict(pad_d=0, pad_h=0, pad_w=0, pad_c=0)
strides = dict(str_d=1, str_h=1, str_w=1, str_c=1)
fshape = dict(J=J, T=T, R=R, S=S)
pool_params = dict(op='max')
pool_params.update(padding)
pool_params.update(strides)
pool_params.update(fshape)
ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
ax_i.set_shape((C, D, H, W, N))
inputs = ng.placeholder(axes=ax_i)
ax_o = ng.make_axes([
ng.make_axis(roles=[ar.features_input]).named('C'),
ng.make_axis(roles=[ar.features_0]).named('D'),
ng.make_axis(roles=[ar.features_1]).named('H'),
ng.make_axis(roles=[ar.features_2]).named('W'),
ax.N
])
ax_o[:-1].set_shape((
output_dim(C, J, padding['pad_c'], strides['str_c']),
output_dim(D, T, padding['pad_d'], strides['str_d']),
output_dim(H, R, padding['pad_h'], strides['str_h']),
output_dim(W, S, padding['pad_w'], strides['str_w']))
)
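# With these sizes (H = W = 32, R = S = 2, no padding, unit strides) the pooled output axes
# should come out to C=3, D=1, H=W=31 (i.e. (32 - 2) / 1 + 1), plus the batch axis N.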
# randomly initialize
input_value = rng.uniform(-1, 1, ax_i)
assert input_value.shape == ax_i.lengths
# compute pooling with the graph
output = ng.pooling(pool_params, inputs, axes=ax_o)
targets = ng.placeholder(axes=ax_o)
costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)
error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)
d_inputs = ng.deriv(error, inputs)
targets_value = rng.uniform(.1, 0.9, output.axes)
with executor([output, error, d_inputs], inputs, targets) as conv_executor:
result_ng, err_ng, gradI_ng = conv_executor(input_value, targets_value)
# Now compute reference values via NEON
NervanaObject.be.bsz = N
neon_layer = Pooling(fshape=fshape, padding=padding, strides=strides, op="max")
inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))
neon_layer.configure((C, H, W))
neon_layer.prev_layer = True
neon_layer.allocate()
neon_layer.set_deltas(DummyDeltaBuffers())
result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)
act_result_ne = 1. / (1.0 + np.exp(-result_ne))
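# 1 / (1 + exp(-x)) is the sigmoid, mirroring ng.sigmoid(output) in the graph's cross-entropy
# cost above, so the neon-side error fed into bprop matches the ngraph computation.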
err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))
gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)
# Compare fprop
ng.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)
# Compare bprop
ng.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)
|
[
"ngraph.testing.RandomTensorGenerator",
"ngraph.transformers.make_transformer",
"ngraph.make_axes",
"neon.backends.gen_backend",
"ngraph.testing.executor",
"ngraph.sum",
"ngraph.batch_size",
"ngraph.sigmoid",
"ngraph.make_axis",
"ngraph.deriv",
"ngraph.placeholder",
"numpy.exp",
"ngraph.pooling",
"neon.layers.layer.Pooling",
"pytest.raises",
"ngraph.testing.assert_allclose",
"ngraph.frontends.neon.layer.output_dim"
] |
[((1095, 1131), 'ngraph.testing.RandomTensorGenerator', 'RandomTensorGenerator', (['(0)', 'np.float32'], {}), '(0, np.float32)\n', (1116, 1131), False, 'from ngraph.testing import RandomTensorGenerator, executor\n'), ((1152, 1165), 'neon.backends.gen_backend', 'gen_backend', ([], {}), '()\n', (1163, 1165), False, 'from neon.backends import gen_backend\n'), ((1420, 1458), 'ngraph.make_axes', 'ng.make_axes', (['[ax.C, ax.D, ax.H, ax.W]'], {}), '([ax.C, ax.D, ax.H, ax.W])\n', (1432, 1458), True, 'import ngraph as ng\n'), ((1472, 1497), 'ngraph.placeholder', 'ng.placeholder', ([], {'axes': 'ax_i'}), '(axes=ax_i)\n', (1486, 1497), True, 'import ngraph as ng\n'), ((1833, 1877), 'ngraph.make_axes', 'ng.make_axes', (['[ax.C, ax.D, ax.H, ax.W, ax.N]'], {}), '([ax.C, ax.D, ax.H, ax.W, ax.N])\n', (1845, 1877), True, 'import ngraph as ng\n'), ((1891, 1916), 'ngraph.placeholder', 'ng.placeholder', ([], {'axes': 'ax_i'}), '(axes=ax_i)\n', (1905, 1916), True, 'import ngraph as ng\n'), ((2386, 2408), 'ngraph.transformers.make_transformer', 'ngt.make_transformer', ([], {}), '()\n', (2406, 2408), True, 'import ngraph.transformers as ngt\n'), ((2699, 2743), 'ngraph.make_axes', 'ng.make_axes', (['[ax.C, ax.D, ax.H, ax.W, ax.N]'], {}), '([ax.C, ax.D, ax.H, ax.W, ax.N])\n', (2711, 2743), True, 'import ngraph as ng\n'), ((2793, 2818), 'ngraph.placeholder', 'ng.placeholder', ([], {'axes': 'ax_i'}), '(axes=ax_i)\n', (2807, 2818), True, 'import ngraph as ng\n'), ((3541, 3583), 'ngraph.pooling', 'ng.pooling', (['pool_params', 'inputs'], {'axes': 'ax_o'}), '(pool_params, inputs, axes=ax_o)\n', (3551, 3583), True, 'import ngraph as ng\n'), ((3598, 3623), 'ngraph.placeholder', 'ng.placeholder', ([], {'axes': 'ax_o'}), '(axes=ax_o)\n', (3612, 3623), True, 'import ngraph as ng\n'), ((3767, 3790), 'ngraph.deriv', 'ng.deriv', (['error', 'inputs'], {}), '(error, inputs)\n', (3775, 3790), True, 'import ngraph as ng\n'), ((4098, 4164), 'neon.layers.layer.Pooling', 'Pooling', ([], {'fshape': 'fshape', 'padding': 'padding', 'strides': 'strides', 'op': '"""max"""'}), "(fshape=fshape, padding=padding, strides=strides, op='max')\n", (4105, 4164), False, 'from neon.layers.layer import Pooling\n'), ((4683, 4751), 'ngraph.testing.assert_allclose', 'ng.testing.assert_allclose', (['result_ng', 'result_ne'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(result_ng, result_ne, rtol=0, atol=1e-06)\n', (4709, 4751), True, 'import ngraph as ng\n'), ((4776, 4842), 'ngraph.testing.assert_allclose', 'ng.testing.assert_allclose', (['gradI_ng', 'gradI_ne'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(gradI_ng, gradI_ne, rtol=0, atol=1e-06)\n', (4802, 4842), True, 'import ngraph as ng\n'), ((1541, 1566), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1554, 1566), False, 'import pytest\n'), ((1586, 1621), 'ngraph.pooling', 'ng.pooling', (['pool_params', 'inputs', '{}'], {}), '(pool_params, inputs, {})\n', (1596, 1621), True, 'import ngraph as ng\n'), ((1984, 2009), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1997, 2009), False, 'import pytest\n'), ((2029, 2064), 'ngraph.pooling', 'ng.pooling', (['pool_params', 'inputs', '{}'], {}), '(pool_params, inputs, {})\n', (2039, 2064), True, 'import ngraph as ng\n'), ((3661, 3679), 'ngraph.sigmoid', 'ng.sigmoid', (['output'], {}), '(output)\n', (3671, 3679), True, 'import ngraph as ng\n'), ((3702, 3728), 'ngraph.sum', 'ng.sum', (['costs'], {'out_axes': '()'}), '(costs, out_axes=())\n', (3708, 3728), True, 'import ngraph as ng\n'), ((3731, 3751), 
'ngraph.batch_size', 'ng.batch_size', (['costs'], {}), '(costs)\n', (3744, 3751), True, 'import ngraph as ng\n'), ((3856, 3908), 'ngraph.testing.executor', 'executor', (['[output, error, d_inputs]', 'inputs', 'targets'], {}), '([output, error, d_inputs], inputs, targets)\n', (3864, 3908), False, 'from ngraph.testing import RandomTensorGenerator, executor\n'), ((3129, 3181), 'ngraph.frontends.neon.layer.output_dim', 'output_dim', (['C', 'J', "padding['pad_c']", "strides['str_c']"], {}), "(C, J, padding['pad_c'], strides['str_c'])\n", (3139, 3181), False, 'from ngraph.frontends.neon.layer import output_dim\n'), ((3191, 3243), 'ngraph.frontends.neon.layer.output_dim', 'output_dim', (['D', 'T', "padding['pad_d']", "strides['str_d']"], {}), "(D, T, padding['pad_d'], strides['str_d'])\n", (3201, 3243), False, 'from ngraph.frontends.neon.layer import output_dim\n'), ((3253, 3305), 'ngraph.frontends.neon.layer.output_dim', 'output_dim', (['H', 'R', "padding['pad_h']", "strides['str_h']"], {}), "(H, R, padding['pad_h'], strides['str_h'])\n", (3263, 3305), False, 'from ngraph.frontends.neon.layer import output_dim\n'), ((3315, 3367), 'ngraph.frontends.neon.layer.output_dim', 'output_dim', (['W', 'S', "padding['pad_w']", "strides['str_w']"], {}), "(W, S, padding['pad_w'], strides['str_w'])\n", (3325, 3367), False, 'from ngraph.frontends.neon.layer import output_dim\n'), ((4484, 4502), 'numpy.exp', 'np.exp', (['(-result_ne)'], {}), '(-result_ne)\n', (4490, 4502), True, 'import numpy as np\n'), ((2854, 2893), 'ngraph.make_axis', 'ng.make_axis', ([], {'roles': '[ar.features_input]'}), '(roles=[ar.features_input])\n', (2866, 2893), True, 'import ngraph as ng\n'), ((2914, 2949), 'ngraph.make_axis', 'ng.make_axis', ([], {'roles': '[ar.features_0]'}), '(roles=[ar.features_0])\n', (2926, 2949), True, 'import ngraph as ng\n'), ((2970, 3005), 'ngraph.make_axis', 'ng.make_axis', ([], {'roles': '[ar.features_1]'}), '(roles=[ar.features_1])\n', (2982, 3005), True, 'import ngraph as ng\n'), ((3026, 3061), 'ngraph.make_axis', 'ng.make_axis', ([], {'roles': '[ar.features_2]'}), '(roles=[ar.features_2])\n', (3038, 3061), True, 'import ngraph as ng\n')]
|
import multiprocessing
import os
import random
import numpy as np
import sacred
import torch
from capreolus.reranker.reranker import Reranker
from capreolus.collection import COLLECTIONS
from capreolus.benchmark import Benchmark
from capreolus.index import Index
from capreolus.searcher import Searcher
from capreolus.utils.common import params_to_string, forced_types, get_default_cache_dir, get_default_results_dir
from capreolus.reranker.common import pair_hinge_loss, pair_softmax_loss
from capreolus.utils.frozendict import FrozenDict
from capreolus.utils.loginit import get_logger
logger = get_logger(__name__) # pylint: disable=invalid-name
############################################################
# hack to allow config functions to return their default values
# (normally sacred does not allow config functions to return a value)
orig_dfb = sacred.config.config_scope.dedent_function_body
def _custom_config_dfb(*args, **kwargs):
config_skip_return = "return locals().copy() # ignored by sacred"
src = orig_dfb(*args, **kwargs)
filtered = [line for line in src.split("\n") if not line.strip() == config_skip_return]
return "\n".join(filtered)
sacred.config.config_scope.dedent_function_body = _custom_config_dfb
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append("CUDA_VISIBLE_DEVICES")
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append("USER")
############################################################
modules = ("collection", "index", "searcher", "benchmark", "reranker")
def module_config():
# default modules
collection = "robust04"
index = "anserini"
searcher = "bm25"
benchmark = "robust04.title.wsdm20demo"
reranker = "PACRR"
return locals().copy() # ignored by sacred
# config options that shouldn't be automatically added to the path
# (e.g., they don't affect model training or they're manually included somewhere in the path)
def stateless_config():
expid = "debug" # experiment id/name
predontrain = False
fold = "s1"
earlystopping = True
return locals().copy() # ignored by sacred
def pipeline_config():
# not working / disabled
# resume = False # resume from last existing weights, if any exist #TODO make this work with epoch preds
# saveall = True
# selfprediction = False
# uniformunk = True
# datamode = "basic"
maxdoclen = 800 # maximum document length (in number of terms after tokenization)
maxqlen = 4 # maximum query length (in number of terms after tokenization)
batch = 32 # batch size
niters = 150 # number of iterations to train for
itersize = 4096 # number of training instances in one iteration (epoch)
gradacc = 1 # number of batches to accumulate over before updating weights
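# e.g. batch=32 with gradacc=4 updates weights every 4 batches, i.e. an effective batch size of 128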
lr = 0.001 # learning rate
seed = 123_456 # random seed to use
sample = "simple"
softmaxloss = True # True to use softmax loss (over pairs) or False to use hinge loss
dataparallel = "none"
if sample not in ["simple"]:
raise RuntimeError(f"sample '{sample}' must be one of: simple")
# sanity checks
if niters <= 0:
raise RuntimeError("niters must be > 0")
if itersize < batch:
raise RuntimeError("itersize must be >= batch")
if gradacc < 1:
raise RuntimeError("gradacc must be >= 1")
return locals().copy() # ignored by sacred
class Pipeline:
def __init__(self, module_choices):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ex = sacred.Experiment("capreolus")
self.ex = ex
ex.path = "capreolus"
ex.captured_out_filter = sacred.utils.apply_backspaces_and_linefeeds
self.module2cls = self.get_module_to_class(module_choices)
# now the Modules to load have been determined, so we pass their configs to sacred and determine
# which modules each config key should be associated with (based on first module to set it).
# later modules can override keys, but the key remains associated with the initial module.
# this is in order of lowest to highest precedence since later values override earlier ones with sacred
self.parameters_to_module = self.get_parameters_to_module(ex)
self.parameter_types = self.get_parameter_types(ex)
self.parameters_to_module, self.parameter_types = self.get_parameters_to_module_for_missing_parameters(ex)
self.parameters_to_module, self.parameter_types = self.get_parameters_to_module_for_feature_parameters(ex)
self.module_to_parameters = self.get_module_to_parameters()
self.check_for_invalid_keys()
def check_for_invalid_keys(self):
invalid_keys = []
for k in self.parameters_to_module:
if "_" in k or "-" in k or "," in k:
invalid_keys.append(k)
if len(invalid_keys) > 0:
raise ValueError("config keys cannot contain '-' ',' or '_'\n\tinvalid keys: %s" % ", ".join(invalid_keys))
def get_module_to_parameters(self):
"""
Essentially the reverse of self.parameters_to_module: associates with each module the list of parameter
names that are valid for it.
"""
module_to_parameters = {module: [] for module in list(modules) + ["pipeline", "module", "stateless", "extractor"]}
for k, module in self.parameters_to_module.items():
module_to_parameters[module].append(k)
return module_to_parameters
def get_parameters_to_module_for_feature_parameters(self, ex):
"""
Adds config related to the extractor associated with the supplied NIR Reranker to the parameters_to_module dict.
Eg: See `EmbedText.config()`
"""
self.parameter_types["extractor"] = str
for feature_cls in self.module2cls["reranker"].EXTRACTORS:
for k, v in ex.config(feature_cls.config)().items():
if k in self.parameters_to_module:
raise RuntimeError(f"extractor {feature_cls} contains conflicting config key {k}")
self.parameters_to_module[k] = "extractor"
self.parameter_types[k] = forced_types.get(type(v), type(v))
return self.parameters_to_module, self.parameter_types
def get_parameters_to_module_for_missing_parameters(self, ex):
"""
Not all parameters are supplied by the user. This method determines which parameters were not supplied by the
user and plugs in sensible defaults for them
"""
# config keys for each module
for module in modules:
if module == "collection": # collections do not have their own configs
continue
for k, v in ex.config(self.module2cls[module].config)().items():
if k not in self.parameters_to_module:
self.parameters_to_module[k] = module
self.parameter_types[k] = forced_types.get(type(v), type(v))
return self.parameters_to_module, self.parameter_types
def get_module_to_class(self, module_choices):
# in order of highest to lowest precedence,
# - determine the class of each module based on explicit (eg CLI param) choices or the default
# - allow this module to override any module choices that were not explicit (i.e., set by user)
module2cls = {}
# collection < index < searcher < benchmark < Reranker
module_loaders = {
"collection": COLLECTIONS,
"index": Index.ALL,
"searcher": Searcher.ALL,
"benchmark": Benchmark.ALL,
"reranker": Reranker.ALL,
}
default_modules = module_config()
for module in reversed(modules):
# load user's choice or default module
if module_choices.get(module, None) is None:
module_choices[module] = default_modules[module]
module2cls[module] = module_loaders[module][module_choices[module]]
# collections do not have their own configs, so we stop early
if module == "collection":
continue
# TODO: Is this required anymore? I don't think module_choices are used anywhere down the line
# override any unset modules (which must have lower precedence)
for k, v in module2cls[module].config().items():
if k in modules and module_choices.get(k, None) is None:
logger.debug("%s config setting module %s = %s", module, k, v)
module_choices[k] = v
return module2cls
def get_parameter_types(self, ex):
"""
For each config() parameter specified in the codebase for each module, deduce the correct type.
Specifically, parameter_types[x] contains a function that we can call to cast the cmdline parameter to the correct type.
Eg: "none" should be cast to None
"""
parameter_types = {}
for k, v in ex.config(module_config)().items():
parameter_types[k] = type("string")
for k, v in ex.config(stateless_config)().items():
parameter_types[k] = forced_types.get(type(v), type(v))
parameter_types["pipeline"] = str
for k, v in ex.config(pipeline_config)().items():
parameter_types[k] = forced_types.get(type(v), type(v))
return parameter_types
def get_parameters_to_module(self, ex):
"""
Creates a dict that groups each of the supplied parameters under an umbrella "module"
"""
parameter_to_module = {}
for k, v in ex.config(module_config)().items():
parameter_to_module[k] = "module"
for k, v in ex.config(stateless_config)().items():
parameter_to_module[k] = "stateless"
for k, v in ex.config(pipeline_config)().items():
parameter_to_module[k] = "pipeline"
return parameter_to_module
def get_paths(self, config):
"""
Returns a dictionary of various paths
:param config: A sacred config
:return: A dict. Eg:
{
"collection_path": "path",
"base_path": "path",
"cache_path": "path",
"index_path": "path",
"run_path": "path",
"model_path": "path"
}
"""
expid = config["expid"]
collection_path = self.module2cls["collection"].basepath
base_path = os.environ.get("CAPREOLUS_RESULTS", get_default_results_dir())
cache_path = os.environ.get("CAPREOLUS_CACHE", get_default_cache_dir())
index_key = os.path.join(cache_path, config["collection"], self.module_key("index"))
index_path = os.path.join(index_key, "index")
run_path = os.path.join(index_key, "searcher", self.module_key("searcher"))
model_path = os.path.join(
base_path,
expid,
config["collection"],
self.module_key("index"),
self.module_key("searcher"),
self.module_key("benchmark"),
self.module_key("pipeline"),
self.module_key("reranker") + "_" + self.module_key("extractor"),
)
trained_weight_path = os.path.join(model_path, config["fold"], "weights", "dev")
return {
"collection_path": collection_path,
"base_path": base_path,
"cache_path": cache_path,
"index_path": index_path,
"index_key": index_key,
"run_path": run_path,
"model_path": model_path,
"trained_weight_path": trained_weight_path,
}
def initialize(self, cfg):
if hasattr(self, "cfg"):
raise RuntimeError("Pipeline has already been initialized")
cfg = {k: self.parameter_types[k](v) for k, v in cfg.items()}
maxthreads = int(os.environ.get("CAPREOLUS_THREADS", multiprocessing.cpu_count()))
if maxthreads <= 0:
logger.warning("changing invalid maxthreads value of '%s' to 8", maxthreads)
maxthreads = 8
cfg["maxthreads"] = maxthreads
self.cfg = FrozenDict(cfg)
random.seed(cfg["seed"])
np.random.seed(cfg["seed"])
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed_all(cfg["seed"])
path_dict = self.get_paths(cfg)
self.collection_path = path_dict["collection_path"]
self.base_path = path_dict["base_path"]
self.cache_path = path_dict["cache_path"]
self.index_key = path_dict["index_key"]
self.index_path = path_dict["index_path"]
self.run_path = path_dict["run_path"]
self.reranker_path = path_dict["model_path"]
# attempt to download the collection if it is missing and a URL is available
self.module2cls["collection"].download_if_missing(self.cache_path)
if cfg["softmaxloss"]:
self.lossf = pair_softmax_loss
else:
self.lossf = pair_hinge_loss
# IMPORTANT - The order of initialization matters. Also, we need self.cfg to be present when control reaches here
self.collection = self.module2cls["collection"]
self.index = self.module2cls["index"](self.collection, self.index_path, self.index_key)
self.searcher = self.module2cls["searcher"](self.index, self.collection, self.run_path, cfg)
self.benchmark = self.module2cls["benchmark"](self.searcher, self.collection, cfg)
self.benchmark.build()
self.extractors = []
self.initialize_extractors()
self.reranker = self.module2cls["reranker"](
self.extractors[0].embeddings, self.benchmark.reranking_runs[cfg["fold"]], cfg
)
self.reranker.build()
self.reranker.to(self.device)
self._anserini = None
self.benchmark.set_extractor(self.extractors[0])
def initialize_extractors(self):
for cls in self.module2cls["reranker"].EXTRACTORS:
cfg = {k: self.cfg[k] for k in cls.config()}
extractor_cache_dir = self.extractor_cache(cls)
extractor = cls(
self.cache_path,
extractor_cache_dir,
self.cfg,
benchmark=self.benchmark,
collection=self.collection,
index=self.index,
)
extractor.build_from_benchmark(**cfg)
self.extractors.append(extractor)
def extractor_cache(self, cls):
cfg = {k: self.cfg[k] for k in cls.config()}
cfg["extractor"] = cls.__name__
feature_key = params_to_string("extractor", cfg, self.parameter_types)
# HACK: set v=0 for keys that do not affect cache
real_cfg = self.cfg
self.cfg = {k: v for k, v in real_cfg.items()}
for k in ["batch", "lr", "gradacc"]:
self.cfg[k] = 0
benchmark_key = self.module_key("benchmark")
pipeline_key = self.module_key("pipeline")
self.cfg = real_cfg
s = os.path.join(
self.cache_path,
"features",
self.cfg["collection"],
self.module_key("index"),
self.module_key("searcher"),
benchmark_key,
pipeline_key,
self.cfg["fold"],
feature_key,
)
return s
def module_key(self, name):
"""
Creates a string based on all the parameters and their values associated with a given module.
This "key" can be used for caching, creating a directory structure e.t.c
"""
compcfg = {k: self.cfg[k] for k in self.module_to_parameters[name]}
# hack since the pipeline isn't an actual config option (and thus isn't included)
if name == "pipeline" or name == "extractor":
compcfg[name] = name
else:
compcfg[name] = self.cfg[name]
return params_to_string(name, compcfg, self.parameter_types)
def cli_module_choice(argv, module):
key = f"{module}="
choice = None
# if a key is repeated several times, we use the last value in order to match sacred's behavior
for arg in argv:
if arg.startswith(key):
choice = arg[len(key) :]
return choice
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"capreolus.utils.loginit.get_logger",
"os.path.join",
"sacred.Experiment",
"random.seed",
"multiprocessing.cpu_count",
"capreolus.utils.frozendict.FrozenDict",
"torch.cuda.is_available",
"numpy.random.seed",
"capreolus.utils.common.get_default_results_dir",
"capreolus.utils.common.get_default_cache_dir",
"sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append",
"capreolus.utils.common.params_to_string"
] |
[((599, 619), 'capreolus.utils.loginit.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (609, 619), False, 'from capreolus.utils.loginit import get_logger\n'), ((1251, 1320), 'sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append', 'sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (1296, 1320), False, 'import sacred\n'), ((1321, 1374), 'sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append', 'sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append', (['"""USER"""'], {}), "('USER')\n", (1366, 1374), False, 'import sacred\n'), ((3522, 3552), 'sacred.Experiment', 'sacred.Experiment', (['"""capreolus"""'], {}), "('capreolus')\n", (3539, 3552), False, 'import sacred\n'), ((10707, 10739), 'os.path.join', 'os.path.join', (['index_key', '"""index"""'], {}), "(index_key, 'index')\n", (10719, 10739), False, 'import os\n'), ((11215, 11273), 'os.path.join', 'os.path.join', (['model_path', "config['fold']", '"""weights"""', '"""dev"""'], {}), "(model_path, config['fold'], 'weights', 'dev')\n", (11227, 11273), False, 'import os\n'), ((12128, 12143), 'capreolus.utils.frozendict.FrozenDict', 'FrozenDict', (['cfg'], {}), '(cfg)\n', (12138, 12143), False, 'from capreolus.utils.frozendict import FrozenDict\n'), ((12153, 12177), 'random.seed', 'random.seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (12164, 12177), False, 'import random\n'), ((12186, 12213), 'numpy.random.seed', 'np.random.seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (12200, 12213), True, 'import numpy as np\n'), ((12222, 12252), 'torch.manual_seed', 'torch.manual_seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (12239, 12252), False, 'import torch\n'), ((12261, 12300), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["cfg['seed']"], {}), "(cfg['seed'])\n", (12287, 12300), False, 'import torch\n'), ((14582, 14638), 'capreolus.utils.common.params_to_string', 'params_to_string', (['"""extractor"""', 'cfg', 'self.parameter_types'], {}), "('extractor', cfg, self.parameter_types)\n", (14598, 14638), False, 'from capreolus.utils.common import params_to_string, forced_types, get_default_cache_dir, get_default_results_dir\n'), ((15881, 15934), 'capreolus.utils.common.params_to_string', 'params_to_string', (['name', 'compcfg', 'self.parameter_types'], {}), '(name, compcfg, self.parameter_types)\n', (15897, 15934), False, 'from capreolus.utils.common import params_to_string, forced_types, get_default_cache_dir, get_default_results_dir\n'), ((10486, 10511), 'capreolus.utils.common.get_default_results_dir', 'get_default_results_dir', ([], {}), '()\n', (10509, 10511), False, 'from capreolus.utils.common import params_to_string, forced_types, get_default_cache_dir, get_default_results_dir\n'), ((10568, 10591), 'capreolus.utils.common.get_default_cache_dir', 'get_default_cache_dir', ([], {}), '()\n', (10589, 10591), False, 'from capreolus.utils.common import params_to_string, forced_types, get_default_cache_dir, get_default_results_dir\n'), ((3471, 3496), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3494, 3496), False, 'import torch\n'), ((11895, 11922), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (11920, 11922), False, 'import multiprocessing\n')]
|
from bert_utils.pretrain_model import load_model
from bert_utils.tokenization import Tokenizer
import numpy as np
config_path = '../chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '../chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '../chinese_L-12_H-768_A-12/vocab.txt'
model = load_model(checkpoint_path, dict_path, is_pool=False)
tokenizer = Tokenizer(dict_path, do_lower_case=True)
token_id, segment_id = tokenizer.encode('我老婆是喻言')
out = model([np.array([token_id]), np.array([segment_id])])
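# With is_pool=False the model presumably returns per-token hidden states rather than a pooled
# sentence embedding (an assumption about the bert_utils API, not verified here).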
print(out)
|
[
"numpy.array",
"bert_utils.tokenization.Tokenizer",
"bert_utils.pretrain_model.load_model"
] |
[((298, 351), 'bert_utils.pretrain_model.load_model', 'load_model', (['checkpoint_path', 'dict_path'], {'is_pool': '(False)'}), '(checkpoint_path, dict_path, is_pool=False)\n', (308, 351), False, 'from bert_utils.pretrain_model import load_model\n'), ((365, 405), 'bert_utils.tokenization.Tokenizer', 'Tokenizer', (['dict_path'], {'do_lower_case': '(True)'}), '(dict_path, do_lower_case=True)\n', (374, 405), False, 'from bert_utils.tokenization import Tokenizer\n'), ((471, 491), 'numpy.array', 'np.array', (['[token_id]'], {}), '([token_id])\n', (479, 491), True, 'import numpy as np\n'), ((493, 515), 'numpy.array', 'np.array', (['[segment_id]'], {}), '([segment_id])\n', (501, 515), True, 'import numpy as np\n')]
|
"""Test mmd related functions."""
import numpy as np
import pytest
from sklearn.metrics.pairwise import euclidean_distances
from discern.mmd import mmd
def _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma):
# pylint: disable=too-many-locals
stat = np.zeros_like(scales)
n_x = np.float(dist_xx.shape[0])
n_y = np.float(dist_yy.shape[0])
for i, k in enumerate(scales):
val = k * sigma
k_xx = np.exp(-dist_xx / (2 * val))
np.fill_diagonal(k_xx, 0.0)
k_xxnd = np.sum(k_xx) / (n_x * n_x - n_x)
k_yy = np.exp(-dist_yy / (2 * val))
np.fill_diagonal(k_yy, 0.0)
k_yynd = np.sum(k_yy) / (n_y * n_y - n_y)
res1 = k_xxnd + k_yynd
res2 = np.exp(-dist_xy / (2 * val))
res2 = np.sum(res2) * 2. / (n_x * n_y)
stat[i] = res1 - res2
return np.max(stat)
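# Reference NumPy implementation: an unbiased multi-scale Gaussian-kernel MMD^2 estimate
# (self-similarity terms with the diagonal zeroed out, minus twice the cross term), maximized
# over the candidate kernel scales; the mmd module's implementations are checked against it below.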
@pytest.mark.parametrize("n_rows", [10, 25, 100, 500, 1000])
@pytest.mark.parametrize("n_cols", [10, 25, 100, 500, 1000])
def test_calculate_distances(n_rows, n_cols):
"""Test _calculate_distances function."""
x = np.random.rand(n_rows, n_cols) # pylint: disable=invalid-name
y = np.random.rand(n_rows, n_cols) # pylint: disable=invalid-name
expected = (euclidean_distances(x, y)**2, euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
got = mmd._calculate_distances(x, y) # pylint: disable=protected-access
np.testing.assert_allclose(got[0], expected[0])
np.testing.assert_allclose(got[1], expected[1])
np.testing.assert_allclose(got[2], expected[2])
@pytest.mark.parametrize("shape", [25, 100, 500, 1000])
@pytest.mark.parametrize("sigma", [0.1, 1., 5., 7.5, 15.])
def test_mmd_loop_py(shape, sigma):
"""Test _mmd_loop_py function."""
x = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(sigma)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_py(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
@pytest.mark.parametrize(
"shape", (1000, 2000, pytest.param(4000, marks=pytest.mark.slow)))
def test_mmd_loop_py_unbalanced(shape):
"""Test _mmd_loop_py function."""
x = np.random.rand(100, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(6.)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_py(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
@pytest.mark.skipif(not mmd.USE_C_IMPLEMENTATION,
reason="Testing C version required compiled binary")
@pytest.mark.parametrize("shape", [25, 100, 500, 1000])
@pytest.mark.parametrize("sigma", [0.1, 1., 5., 7.5, 15.])
def test_mmd_loop_c_version(shape, sigma):
"""Test _mmd_loop function."""
x = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(sigma)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_c(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
@pytest.mark.skipif(not mmd.USE_C_IMPLEMENTATION,
reason="Testing C version required compiled binary")
@pytest.mark.parametrize(
"shape", (1000, 2000, pytest.param(4000, marks=pytest.mark.slow)))
def test_mmd_loop_c_version_unbalanced(shape):
"""Test _mmd_loop function."""
x = np.random.rand(100, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(6.)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_c(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
def test_mmd():
"""Test if mmd_loss throws exception."""
np.random.seed(42)
x = np.random.rand(1000, 500).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(950, 500).astype(np.float32) # pylint: disable=invalid-name
got = mmd.mmd_loss(x, y, 5.0)
np.testing.assert_allclose(got, 1.418614e-06, rtol=0.0018)
np.random.seed(None) # To re-seed the generator for different functions
|
[
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"discern.mmd.mmd._calculate_distances",
"numpy.random.seed",
"pytest.mark.skipif",
"discern.mmd.mmd.mmd_loss",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.fill_diagonal",
"discern.mmd.mmd._mmd_loop_c",
"discern.mmd.mmd._mmd_loop_py",
"numpy.float",
"pytest.param",
"pytest.mark.parametrize",
"numpy.sum",
"numpy.zeros_like",
"numpy.float32"
] |
[((855, 914), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rows"""', '[10, 25, 100, 500, 1000]'], {}), "('n_rows', [10, 25, 100, 500, 1000])\n", (878, 914), False, 'import pytest\n'), ((916, 975), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_cols"""', '[10, 25, 100, 500, 1000]'], {}), "('n_cols', [10, 25, 100, 500, 1000])\n", (939, 975), False, 'import pytest\n'), ((1568, 1622), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[25, 100, 500, 1000]'], {}), "('shape', [25, 100, 500, 1000])\n", (1591, 1622), False, 'import pytest\n'), ((1624, 1684), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigma"""', '[0.1, 1.0, 5.0, 7.5, 15.0]'], {}), "('sigma', [0.1, 1.0, 5.0, 7.5, 15.0])\n", (1647, 1684), False, 'import pytest\n'), ((3297, 3403), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not mmd.USE_C_IMPLEMENTATION)'], {'reason': '"""Testing C version required compiled binary"""'}), "(not mmd.USE_C_IMPLEMENTATION, reason=\n 'Testing C version required compiled binary')\n", (3315, 3403), False, 'import pytest\n'), ((3420, 3474), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[25, 100, 500, 1000]'], {}), "('shape', [25, 100, 500, 1000])\n", (3443, 3474), False, 'import pytest\n'), ((3476, 3536), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigma"""', '[0.1, 1.0, 5.0, 7.5, 15.0]'], {}), "('sigma', [0.1, 1.0, 5.0, 7.5, 15.0])\n", (3499, 3536), False, 'import pytest\n'), ((4297, 4403), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not mmd.USE_C_IMPLEMENTATION)'], {'reason': '"""Testing C version required compiled binary"""'}), "(not mmd.USE_C_IMPLEMENTATION, reason=\n 'Testing C version required compiled binary')\n", (4315, 4403), False, 'import pytest\n'), ((261, 282), 'numpy.zeros_like', 'np.zeros_like', (['scales'], {}), '(scales)\n', (274, 282), True, 'import numpy as np\n'), ((293, 319), 'numpy.float', 'np.float', (['dist_xx.shape[0]'], {}), '(dist_xx.shape[0])\n', (301, 319), True, 'import numpy as np\n'), ((330, 356), 'numpy.float', 'np.float', (['dist_yy.shape[0]'], {}), '(dist_yy.shape[0])\n', (338, 356), True, 'import numpy as np\n'), ((839, 851), 'numpy.max', 'np.max', (['stat'], {}), '(stat)\n', (845, 851), True, 'import numpy as np\n'), ((1076, 1106), 'numpy.random.rand', 'np.random.rand', (['n_rows', 'n_cols'], {}), '(n_rows, n_cols)\n', (1090, 1106), True, 'import numpy as np\n'), ((1147, 1177), 'numpy.random.rand', 'np.random.rand', (['n_rows', 'n_cols'], {}), '(n_rows, n_cols)\n', (1161, 1177), True, 'import numpy as np\n'), ((1342, 1372), 'discern.mmd.mmd._calculate_distances', 'mmd._calculate_distances', (['x', 'y'], {}), '(x, y)\n', (1366, 1372), False, 'from discern.mmd import mmd\n'), ((1413, 1460), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got[0]', 'expected[0]'], {}), '(got[0], expected[0])\n', (1439, 1460), True, 'import numpy as np\n'), ((1465, 1512), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got[1]', 'expected[1]'], {}), '(got[1], expected[1])\n', (1491, 1512), True, 'import numpy as np\n'), ((1517, 1564), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got[2]', 'expected[2]'], {}), '(got[2], expected[2])\n', (1543, 1564), True, 'import numpy as np\n'), ((2132, 2179), 'numpy.linspace', 'np.linspace', (['(0.8)', '(1.5)'], {'num': '(23)', 'dtype': 'np.float32'}), '(0.8, 1.5, num=23, dtype=np.float32)\n', (2143, 2179), True, 'import numpy as np\n'), ((2192, 2209), 'numpy.float32', 'np.float32', 
(['sigma'], {}), '(sigma)\n', (2202, 2209), True, 'import numpy as np\n'), ((2287, 2345), 'discern.mmd.mmd._mmd_loop_py', 'mmd._mmd_loop_py', (['dist_xy', 'dist_xx', 'dist_yy', 'scales', 'sigma'], {}), '(dist_xy, dist_xx, dist_yy, scales, sigma)\n', (2303, 2345), False, 'from discern.mmd import mmd\n'), ((2386, 2439), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got', 'expected'], {'atol': '(1e-06)'}), '(got, expected, atol=1e-06)\n', (2412, 2439), True, 'import numpy as np\n'), ((2990, 3037), 'numpy.linspace', 'np.linspace', (['(0.8)', '(1.5)'], {'num': '(23)', 'dtype': 'np.float32'}), '(0.8, 1.5, num=23, dtype=np.float32)\n', (3001, 3037), True, 'import numpy as np\n'), ((3050, 3065), 'numpy.float32', 'np.float32', (['(6.0)'], {}), '(6.0)\n', (3060, 3065), True, 'import numpy as np\n'), ((3142, 3200), 'discern.mmd.mmd._mmd_loop_py', 'mmd._mmd_loop_py', (['dist_xy', 'dist_xx', 'dist_yy', 'scales', 'sigma'], {}), '(dist_xy, dist_xx, dist_yy, scales, sigma)\n', (3158, 3200), False, 'from discern.mmd import mmd\n'), ((3241, 3294), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got', 'expected'], {'atol': '(1e-06)'}), '(got, expected, atol=1e-06)\n', (3267, 3294), True, 'import numpy as np\n'), ((3988, 4035), 'numpy.linspace', 'np.linspace', (['(0.8)', '(1.5)'], {'num': '(23)', 'dtype': 'np.float32'}), '(0.8, 1.5, num=23, dtype=np.float32)\n', (3999, 4035), True, 'import numpy as np\n'), ((4048, 4065), 'numpy.float32', 'np.float32', (['sigma'], {}), '(sigma)\n', (4058, 4065), True, 'import numpy as np\n'), ((4143, 4200), 'discern.mmd.mmd._mmd_loop_c', 'mmd._mmd_loop_c', (['dist_xy', 'dist_xx', 'dist_yy', 'scales', 'sigma'], {}), '(dist_xy, dist_xx, dist_yy, scales, sigma)\n', (4158, 4200), False, 'from discern.mmd import mmd\n'), ((4241, 4294), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got', 'expected'], {'atol': '(1e-06)'}), '(got, expected, atol=1e-06)\n', (4267, 4294), True, 'import numpy as np\n'), ((4972, 5019), 'numpy.linspace', 'np.linspace', (['(0.8)', '(1.5)'], {'num': '(23)', 'dtype': 'np.float32'}), '(0.8, 1.5, num=23, dtype=np.float32)\n', (4983, 5019), True, 'import numpy as np\n'), ((5032, 5047), 'numpy.float32', 'np.float32', (['(6.0)'], {}), '(6.0)\n', (5042, 5047), True, 'import numpy as np\n'), ((5124, 5181), 'discern.mmd.mmd._mmd_loop_c', 'mmd._mmd_loop_c', (['dist_xy', 'dist_xx', 'dist_yy', 'scales', 'sigma'], {}), '(dist_xy, dist_xx, dist_yy, scales, sigma)\n', (5139, 5181), False, 'from discern.mmd import mmd\n'), ((5222, 5275), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got', 'expected'], {'atol': '(1e-06)'}), '(got, expected, atol=1e-06)\n', (5248, 5275), True, 'import numpy as np\n'), ((5342, 5360), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5356, 5360), True, 'import numpy as np\n'), ((5540, 5563), 'discern.mmd.mmd.mmd_loss', 'mmd.mmd_loss', (['x', 'y', '(5.0)'], {}), '(x, y, 5.0)\n', (5552, 5563), False, 'from discern.mmd import mmd\n'), ((5568, 5626), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['got', '(1.418614e-06)'], {'rtol': '(0.0018)'}), '(got, 1.418614e-06, rtol=0.0018)\n', (5594, 5626), True, 'import numpy as np\n'), ((5631, 5651), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (5645, 5651), True, 'import numpy as np\n'), ((431, 459), 'numpy.exp', 'np.exp', (['(-dist_xx / (2 * val))'], {}), '(-dist_xx / (2 * val))\n', (437, 459), True, 'import numpy as np\n'), ((468, 495), 'numpy.fill_diagonal', 
'np.fill_diagonal', (['k_xx', '(0.0)'], {}), '(k_xx, 0.0)\n', (484, 495), True, 'import numpy as np\n'), ((561, 589), 'numpy.exp', 'np.exp', (['(-dist_yy / (2 * val))'], {}), '(-dist_yy / (2 * val))\n', (567, 589), True, 'import numpy as np\n'), ((598, 625), 'numpy.fill_diagonal', 'np.fill_diagonal', (['k_yy', '(0.0)'], {}), '(k_yy, 0.0)\n', (614, 625), True, 'import numpy as np\n'), ((722, 750), 'numpy.exp', 'np.exp', (['(-dist_xy / (2 * val))'], {}), '(-dist_xy / (2 * val))\n', (728, 750), True, 'import numpy as np\n'), ((2493, 2535), 'pytest.param', 'pytest.param', (['(4000)'], {'marks': 'pytest.mark.slow'}), '(4000, marks=pytest.mark.slow)\n', (2505, 2535), False, 'import pytest\n'), ((4471, 4513), 'pytest.param', 'pytest.param', (['(4000)'], {'marks': 'pytest.mark.slow'}), '(4000, marks=pytest.mark.slow)\n', (4483, 4513), False, 'import pytest\n'), ((513, 525), 'numpy.sum', 'np.sum', (['k_xx'], {}), '(k_xx)\n', (519, 525), True, 'import numpy as np\n'), ((643, 655), 'numpy.sum', 'np.sum', (['k_yy'], {}), '(k_yy)\n', (649, 655), True, 'import numpy as np\n'), ((1226, 1251), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'y'], {}), '(x, y)\n', (1245, 1251), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((1256, 1281), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'x'], {}), '(x, x)\n', (1275, 1281), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((1302, 1327), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['y', 'y'], {}), '(y, y)\n', (1321, 1327), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((1764, 1791), 'numpy.random.rand', 'np.random.rand', (['shape', '(1000)'], {}), '(shape, 1000)\n', (1778, 1791), True, 'import numpy as np\n'), ((1851, 1878), 'numpy.random.rand', 'np.random.rand', (['shape', '(1000)'], {}), '(shape, 1000)\n', (1865, 1878), True, 'import numpy as np\n'), ((1963, 1988), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'y'], {}), '(x, y)\n', (1982, 1988), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2026, 2051), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'x'], {}), '(x, x)\n', (2045, 2051), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2089, 2114), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['y', 'y'], {}), '(y, y)\n', (2108, 2114), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2624, 2649), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1000)'], {}), '(100, 1000)\n', (2638, 2649), True, 'import numpy as np\n'), ((2709, 2736), 'numpy.random.rand', 'np.random.rand', (['shape', '(1000)'], {}), '(shape, 1000)\n', (2723, 2736), True, 'import numpy as np\n'), ((2821, 2846), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'y'], {}), '(x, y)\n', (2840, 2846), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2884, 2909), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'x'], {}), '(x, x)\n', (2903, 2909), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2947, 2972), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['y', 'y'], {}), '(y, y)\n', (2966, 2972), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((3620, 3647), 'numpy.random.rand', 'np.random.rand', (['shape', '(1000)'], {}), '(shape, 
1000)\n', (3634, 3647), True, 'import numpy as np\n'), ((3707, 3734), 'numpy.random.rand', 'np.random.rand', (['shape', '(1000)'], {}), '(shape, 1000)\n', (3721, 3734), True, 'import numpy as np\n'), ((3819, 3844), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'y'], {}), '(x, y)\n', (3838, 3844), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((3882, 3907), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'x'], {}), '(x, x)\n', (3901, 3907), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((3945, 3970), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['y', 'y'], {}), '(y, y)\n', (3964, 3970), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4606, 4631), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1000)'], {}), '(100, 1000)\n', (4620, 4631), True, 'import numpy as np\n'), ((4691, 4718), 'numpy.random.rand', 'np.random.rand', (['shape', '(1000)'], {}), '(shape, 1000)\n', (4705, 4718), True, 'import numpy as np\n'), ((4803, 4828), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'y'], {}), '(x, y)\n', (4822, 4828), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4866, 4891), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'x'], {}), '(x, x)\n', (4885, 4891), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4929, 4954), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['y', 'y'], {}), '(y, y)\n', (4948, 4954), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((5369, 5394), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(500)'], {}), '(1000, 500)\n', (5383, 5394), True, 'import numpy as np\n'), ((5454, 5478), 'numpy.random.rand', 'np.random.rand', (['(950)', '(500)'], {}), '(950, 500)\n', (5468, 5478), True, 'import numpy as np\n'), ((766, 778), 'numpy.sum', 'np.sum', (['res2'], {}), '(res2)\n', (772, 778), True, 'import numpy as np\n')]
|
from typing import Dict
import numpy as np
import edunet as net
from edunet.core import Operation
from edunet.core import Variable
EPSILON = 1e-6
SEED = 69696969
RANDOM_STATE = np.random.RandomState(SEED)
INPUT_DTYPE = np.float64
INPUT_DATA_SHAPE = (1, 6, 6, 3)
INPUT_LABELS_SHAPE = (1, 3, 1)
data_batch = RANDOM_STATE.uniform(0, 1, INPUT_DATA_SHAPE).astype(INPUT_DTYPE)
labels_batch = RANDOM_STATE.uniform(0, 1, INPUT_LABELS_SHAPE).astype(INPUT_DTYPE)
def build_model(layers: Dict[str, Operation]):
random_state = np.random.RandomState(SEED)
layers['input_data'] = net.Input(INPUT_DATA_SHAPE, INPUT_DTYPE)
layers['input_labels'] = net.Input(INPUT_LABELS_SHAPE, INPUT_DTYPE)
layers['conv_1'] = net.Convolution2D(layers['input_data'], 5, 2, strides=3, mode='valid', random_state=random_state)
layers['relu_1'] = net.Relu(layers['conv_1'])
layers['pool_1'] = net.AveragePool2D(layers['relu_1'], 2, mode='valid')
layers['flatten'] = net.Flatten(layers['pool_1'])
layers['dense_1'] = net.Dense(layers['flatten'], 5, random_state=random_state)
layers['relu'] = net.Relu6(layers['dense_1'])
layers['dense_2'] = net.Dense(layers['relu'], 3, random_state=random_state)
layers['softmax'] = net.SoftArgMax(layers['dense_2'], 1)
layers['loss'] = net.CrossEntropy(layers['softmax'], layers['input_labels'], 1)
layers['reduce_sum'] = net.ReduceSum(layers['loss'], 0)
explicit_model: Dict[str, Operation] = dict()
build_model(explicit_model)
implicit_model: Dict[str, Operation] = dict()
build_model(implicit_model)
def forward_pass(layers: Dict[str, Operation]):
layers['input_data'].feed(data_batch)
layers['input_labels'].feed(labels_batch)
layers['input_data'].run()
layers['input_labels'].run()
layers['conv_1'].run()
layers['relu_1'].run()
layers['pool_1'].run()
layers['flatten'].run()
layers['dense_1'].run()
layers['relu'].run()
layers['dense_2'].run()
layers['softmax'].run()
layers['loss'].run()
layers['reduce_sum'].run()
def backward_pass(layers: Dict[str, Operation], final_op_name: str, op_name: str):
layers['gradients'] = net.Gradients(layers[final_op_name], [layers[op_name]])
layers['gradients'].run()
def compute_explicit_gradients(layers: Dict[str, Operation], final_op_name: str, variable: Variable) -> np.ndarray:
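    # Central-difference check (descriptive note): each entry of the variable is
    # perturbed by -EPSILON and +EPSILON, the forward pass is rerun, and
    # d(loss)/d(entry) is approximated as (loss_2 - loss_1) / (2 * EPSILON).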
num_grads = np.empty(variable.shape, variable.dtype)
for i in range(variable.values.size):
base_value = variable.values.flat[i]
variable.values.flat[i] = base_value - EPSILON
forward_pass(layers)
loss_1 = layers[final_op_name].output.values
variable.values.flat[i] = base_value + EPSILON
forward_pass(layers)
loss_2 = layers[final_op_name].output.values
variable.values.flat[i] = base_value
num_grads.flat[i] = (loss_2 - loss_1) / (2 * EPSILON)
return num_grads
# # Var list numerical validation.
# i_var = 1
# forward_pass(explicit_model)
# variable = explicit_model['conv_1'].var_list[i_var]
# num_grads = compute_explicit_gradients(explicit_model, 'reduce_sum', variable)
#
# forward_pass(implicit_model)
# backward_pass(implicit_model, 'reduce_sum', 'conv_1')
# grads = implicit_model['conv_1'].grads_dict[implicit_model['conv_1'].var_list[i_var]].values
# Layer output variables validation.
forward_pass(explicit_model)
variable = explicit_model['input_data'].output
num_grads = compute_explicit_gradients(explicit_model, 'reduce_sum', variable)
forward_pass(implicit_model)
backward_pass(implicit_model, 'reduce_sum', 'conv_1')
grads = implicit_model['gradients'].output.values[0][implicit_model['input_data'].output].values
print(num_grads.shape)
print(grads.shape)
print()
grads_error = np.abs(num_grads.ravel() - grads.ravel())
print(num_grads.flatten())
print()
print(grads.flatten())
print()
print(grads_error)
print()
print(np.all(grads_error < EPSILON))
print()
|
[
"edunet.Gradients",
"edunet.ReduceSum",
"edunet.CrossEntropy",
"edunet.AveragePool2D",
"edunet.SoftArgMax",
"edunet.Relu6",
"edunet.Relu",
"numpy.empty",
"edunet.Convolution2D",
"edunet.Input",
"edunet.Flatten",
"edunet.Dense",
"numpy.all",
"numpy.random.RandomState"
] |
[((181, 208), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (202, 208), True, 'import numpy as np\n'), ((527, 554), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (548, 554), True, 'import numpy as np\n'), ((583, 623), 'edunet.Input', 'net.Input', (['INPUT_DATA_SHAPE', 'INPUT_DTYPE'], {}), '(INPUT_DATA_SHAPE, INPUT_DTYPE)\n', (592, 623), True, 'import edunet as net\n'), ((653, 695), 'edunet.Input', 'net.Input', (['INPUT_LABELS_SHAPE', 'INPUT_DTYPE'], {}), '(INPUT_LABELS_SHAPE, INPUT_DTYPE)\n', (662, 695), True, 'import edunet as net\n'), ((720, 821), 'edunet.Convolution2D', 'net.Convolution2D', (["layers['input_data']", '(5)', '(2)'], {'strides': '(3)', 'mode': '"""valid"""', 'random_state': 'random_state'}), "(layers['input_data'], 5, 2, strides=3, mode='valid',\n random_state=random_state)\n", (737, 821), True, 'import edunet as net\n'), ((841, 867), 'edunet.Relu', 'net.Relu', (["layers['conv_1']"], {}), "(layers['conv_1'])\n", (849, 867), True, 'import edunet as net\n'), ((891, 943), 'edunet.AveragePool2D', 'net.AveragePool2D', (["layers['relu_1']", '(2)'], {'mode': '"""valid"""'}), "(layers['relu_1'], 2, mode='valid')\n", (908, 943), True, 'import edunet as net\n'), ((968, 997), 'edunet.Flatten', 'net.Flatten', (["layers['pool_1']"], {}), "(layers['pool_1'])\n", (979, 997), True, 'import edunet as net\n'), ((1022, 1080), 'edunet.Dense', 'net.Dense', (["layers['flatten']", '(5)'], {'random_state': 'random_state'}), "(layers['flatten'], 5, random_state=random_state)\n", (1031, 1080), True, 'import edunet as net\n'), ((1102, 1130), 'edunet.Relu6', 'net.Relu6', (["layers['dense_1']"], {}), "(layers['dense_1'])\n", (1111, 1130), True, 'import edunet as net\n'), ((1155, 1210), 'edunet.Dense', 'net.Dense', (["layers['relu']", '(3)'], {'random_state': 'random_state'}), "(layers['relu'], 3, random_state=random_state)\n", (1164, 1210), True, 'import edunet as net\n'), ((1235, 1271), 'edunet.SoftArgMax', 'net.SoftArgMax', (["layers['dense_2']", '(1)'], {}), "(layers['dense_2'], 1)\n", (1249, 1271), True, 'import edunet as net\n'), ((1293, 1355), 'edunet.CrossEntropy', 'net.CrossEntropy', (["layers['softmax']", "layers['input_labels']", '(1)'], {}), "(layers['softmax'], layers['input_labels'], 1)\n", (1309, 1355), True, 'import edunet as net\n'), ((1383, 1415), 'edunet.ReduceSum', 'net.ReduceSum', (["layers['loss']", '(0)'], {}), "(layers['loss'], 0)\n", (1396, 1415), True, 'import edunet as net\n'), ((2155, 2210), 'edunet.Gradients', 'net.Gradients', (['layers[final_op_name]', '[layers[op_name]]'], {}), '(layers[final_op_name], [layers[op_name]])\n', (2168, 2210), True, 'import edunet as net\n'), ((2375, 2415), 'numpy.empty', 'np.empty', (['variable.shape', 'variable.dtype'], {}), '(variable.shape, variable.dtype)\n', (2383, 2415), True, 'import numpy as np\n'), ((3867, 3896), 'numpy.all', 'np.all', (['(grads_error < EPSILON)'], {}), '(grads_error < EPSILON)\n', (3873, 3896), True, 'import numpy as np\n')]
|
"""
This module encompasses the code for analyzing the molecular dynamics generated by the simulations as .dcd files.
It also takes care of the CV generation for feeding into the MLTSA pipeline.
"""
import numpy as np
import mdtraj as md
from itertools import combinations
from itertools import permutations
import re
class CVs(object):
"""
This class is a container of the Collective Variables defined to calculate on mdtraj later.
It contains different methods for CV definition to later on calculate for the MLTSA.
"""
def __init__(self, top):
"""
        The definition of this class needs only the topology, which will be used later on to define the CVs needed.
:param top: str Path to the relevant topology file that will be used (.pdb or .psf format, can also work on
other topologies compatible with mdtraj)
"""
        #TODO Implement a way to use multiple topologies; for now the only way to make it work is to call the CVs class each time.
self.topology_file = md.load(top)
self.topology_path = top
self.top = self.topology_file.top
return
def define_variables(self, CV_type, custom_selection_string=None, CV_indices=None):
"""
        This method defines the variables depending on the type of CV passed; a label such as 'all', for example, will
calculate every interatomic distance between all atoms. Custom CVs can be passed in with CV_indices=list
and CV_type = "custom_CVs" as pairs of atom indices to calculate distances on. A custom_selection_string
atom selection using mdtraj's syntax can be passed with custom_selection_string= to select atoms to
        calculate all distances from, by using CV_type="custom_selection".
:param CV_type: str Label to specify the CV definition, it can be "all" for all atoms, "Calpha_water" for ligand
+water+Calpha atoms, "Calpha" for ligand+Calpha atoms, "all_closest_atoms" for all close atoms between
residues, "all_closest_heavy_atoms" for all closest heavy inter-residue atoms, "bubble_ligand" for all
distances between ligand and protein for a 6 Angstroms bubble around the ligand. "custom_CVs" for a
selected set of CV_indices to be passed, and "custom_selection" to pass a custom_selection_string to use on mdtraj
        as an atom selection syntax.
:param custom_selection_string: str Atom selection from mdtraj's atom selection reference syntax which will
select the atom indices and use them for CV definition.
:param CV_indices: list, array CVs can be defined outside of this class and passed here as atom indices.
:return:
"""
self.CV_type = CV_type
if self.CV_type == "all":
            # TODO Label yet to test if it works, please do consider double checking that it works before using
print("You selected the option 'all', this can be very computationally expensive")
print("please, proceed with caution")
relevant_atoms = list(self.top.atoms)
CV_indices = []
for a,b in combinations(relevant_atoms, 2):
CV_indices.append([a.index, b.index])
print(len(CV_indices), " CV features defined, is this the correct number?")
self.CV_indices = CV_indices
elif self.CV_type == "Calpha_water":
            # TODO Label yet to test if it works, please do consider double checking that it works before using
relevant_atoms = list(self.top.select("name == CA or water or resname LIG"))
CV_indices = []
for a,b in combinations(relevant_atoms, 2):
CV_indices.append([a, b])
print(len(CV_indices), " CV features defined, is this the correct number?")
self.CV_indices = CV_indices
elif self.CV_type == "Calpha":
            # TODO Label yet to test if it works, please do consider double checking that it works before using
relevant_atoms = list(self.top.select("name == CA or resname LIG"))
CV_indices = []
for a,b in combinations(relevant_atoms, 2):
CV_indices.append([a, b])
print(len(CV_indices), " CV features defined, is this the correct number?")
self.CV_indices = CV_indices
elif self.CV_type == "all_closest_atoms":
            # TODO Label yet to test if it works, please do consider double checking that it works before using
relevant_residues = list(self.top.residues)
CV_indices = []
for a, b in combinations(relevant_residues, 2):
CV_indices.append([a.index, b.index])
dist, CV_indices = md.compute_contacts(self.topology_file,
contacts=relevant_residues,
scheme="closest")
self.CV_indices = CV_indices
elif self.CV_type == "all_closest_heavy_atoms":
            # TODO Label yet to test if it works, please do consider double checking that it works before using
relevant_residues = list(self.top.residues)
CV_indices = []
for a, b in combinations(relevant_residues, 2):
CV_indices.append([a.index, b.index])
dist, CV_indices = md.compute_contacts(self.topology_file,
contacts=relevant_residues,
scheme="closest-heavy")
self.CV_indices = CV_indices
elif self.CV_type == "custom_selection":
            # TODO Label yet to test if it works, please do consider double checking that it works before using
"""" For now if you wish to find all interatomic between multiple selections, then put all selections under
one string using 'or' to keep adding selections, otherwise if you want to define pairs manually give them as
lists of [[atom1,atom2], [atom3.atom4]] """
if isinstance(custom_selection_string, list):
CV_indices = []
for pair in custom_selection_string:
idx1 = self.top.select(pair[0])[0]
idx2 = self.top.select(pair[1])[0]
CV_indices.append([idx1, idx2])
self.CV_indices = CV_indices
else:
relevant_atoms = list(self.top.select(custom_selection_string))
CV_indices = []
for a, b in combinations(relevant_atoms, 2):
CV_indices.append([a, b])
self.CV_indices = CV_indices
elif self.CV_type == "bubble_ligand":
# Checked label, works fine
relevant_atoms = list(self.top.select("resname LIG"))
#Cutoff distance is in Nanometers as mdtraj expects, 0.6nm == 6 Angstroms
close_atoms = list(md.compute_neighbors(self.topology_file, 0.6, relevant_atoms)[0])
all_combinations = [[i, j] for i in relevant_atoms for j in close_atoms]
self.CV_indices = all_combinations
elif self.CV_type == "custom_CVs":
# TODO Label yet to test if it works, please do consider double checking that it works before using
self.CV_indices = CV_indices
return
class MDs(object):
"""
    Analyzer wrapper based on mdtraj that can generate distances out of a previously defined CVs object with
calculate_CVs(). It can also make use of a list of dcd files and topology along with a set of selection strings
and upper/lower values to check for an automatic labeling of simulations with label_simulations().
"""
def __init__(self):
"""
It does not need anything to initialize.
"""
print("Setting up MD analyzer")
def calculate_CVs(self, CVs, dcd_paths, loading="normal"):
"""
        Method for calculating the Collective Variables previously defined, by passing on a CVs object along with the
        list of trajectories to use for calculating the data. It has different methods for loading depending on the
complexity of the dataset to analyze.
        :param CVs: class CVs object previously defined, with a set of CVs already specified. It will be used to
calculate the distances.
:param dcd_paths: list List of strings containing the paths to the different .dcd/trajectory files.
:param loading: str Label for the type of trajectory loading to use, it can affect the performance.
:return:
"""
self.topology = CVs.topology_path
self.dcd_list = dcd_paths
self.type = dcd_paths[0][-3:]
if loading == "normal":
distances = []
for dcd in dcd_paths:
traj = md.load(dcd, top=self.topology, stride=1)
dists = md.compute_distances(traj, CVs.CV_indices)
distances.append(dists)
return distances
if loading == "optimized":
print()
if loading == "iterload":
print()
def label_simulations(self, top, dcd_paths, selection_strings_to_label, upper_lim, lower_lim,
loading="normal", end_perc=0.25, get_sum=True, plotting_sum=False,
plotting_all=False, show_plots=False, save_labels=False, save_plots=False, save_path=""):
"""
        Method for the labeling of a given set of trajectory files, using the desired string selections to check and the
upper/lower limit to classify. It can also plot figures with the values for each of the distances throughout
the trajectories and save them in the specified path.
:param top: str Path to the topology file to use (.pdb/.psf) or any mdtraj compatible topology file.
:param dcd_paths: list List containing the paths to the trajectory files (.dcd/other)
:param selection_strings_to_label: str String selection using mdtraj's atom selection reference syntax.
:param upper_lim: float Upper limit which sets the OUT label for the trajectories when labeled. Anything bigger
than this will be considered as OUT. Anything smaller than this and bigger than lower_lim will be labeled as
UCL.
:param lower_lim: float Lower limit which sets the IN label for the trajectories when labeled. Anything smaller
        than this will be considered as IN. Anything bigger than this and smaller than upper_lim will be labeled as
UCL.
:param loading: str Label to specify the loading procedure, affects performance.
        :param plotting_sum: boolean Determines whether to plot in matplotlib the summed labeling distances throughout
        the trajectories. Figures will be saved in the given save_path, one per simulation.
        :param plotting_all: boolean Determines whether to plot each individual labeling distance throughout the
        trajectories. Figures will be saved in the given save_path, one per distance and simulation.
        :param end_perc: float Fraction at the end of each trajectory over which the mean distances are computed for
        the labeling.
        :param get_sum: boolean Determines whether to also return the summed mean distance computed for each
        trajectory.
        :param show_plots: boolean Whether to show the plots in the current session or not. If this is False and
        plotting is enabled and save_plots=True it will still save them without showing them.
:param save_labels: boolean Determines if the labels should be saved in a file on the desired destination with
save_path.
        :param save_plots: boolean Determines whether to save the plots or not. Figures will be saved in the given
save_path, one per simulation.
        :param save_path: str Path to save the figures generated by the labelling if plotting is enabled. If not specified it
saves in the working directory.
        :return: list Returns the list of labelled simulations as ["IN", "OUT", etc.] for each trajectory passed in the
        dcd_paths list, together with the summed distances per trajectory when get_sum=True.
"""
#Prepare everything to calculate the data needed.
if isinstance(top, list):
clf_distances = []
for t, dcd in zip(top, dcd_paths):
topo = md.load_pdb(str(t))
pairs = []
for sel1, sel2 in selection_strings_to_label:
idx1 = topo.topology.select(sel1)
idx2 = topo.topology.select(sel2)
pairs.append([idx1[0], idx2[0]])
vars = CVs(t)
CV_indices = np.array(pairs)
vars.define_variables(CV_type="custom_CVs", CV_indices=CV_indices)
dists = self.calculate_CVs(vars, [dcd])
clf_distances.append(dists[0])
else:
topo = md.load(top)
pairs = []
for sel1, sel2 in selection_strings_to_label:
idx1 = topo.topology.select(sel1)
idx2 = topo.topology.select(sel2)
pairs.append([idx1[0], idx2[0]])
vars = CVs(top)
CV_indices = np.array(pairs)
vars.define_variables(CV_type="custom_CVs", CV_indices=CV_indices)
clf_distances = self.calculate_CVs(vars, dcd_paths)
if plotting_all == True or plotting_sum == True:
print("Plotting values, make sure you want to do this to either show them or save them somewhere.")
import matplotlib.pyplot as plt
md_labels = []
sums_traj = []
for n, traj in enumerate(clf_distances):
sums = []
sum_plot = []
if plotting_all == True:
plt.figure()
for d in range(len(selection_strings_to_label)):
values = np.array(traj).T[d]
if plotting_all == True:
filename = re.split('/', dcd_paths[n])[-1]
plt.title("Sim: "+filename)
plt.plot(values,
label="CV {}: {} - {}".format(d, selection_strings_to_label[d][0],
selection_strings_to_label[d][1] ))
plt.xlabel("Frame")
plt.ylabel("Distance(A)")
plt.legend()
if show_plots == True:
plt.show()
if save_plots == True:
plt.savefig(str(save_path)+filename+"CV"+str(d)+"_label.svg")
plt.close()
sum_plot.append(values)
                #This fetches the mean value found over the last "end_perc" fraction of the trajectory
sums.append(np.mean(values[int(len(values)*(1-end_perc)):]))
if plotting_sum == True:
filename = re.split('/', dcd_paths[n])[-1]
plt.figure()
plt.title("Sum of Distances Trajectory "+filename)
plt.plot(np.sum(sum_plot, axis=0), label="Sum of distances")
label_data = np.sum(sum_plot, axis=0)
label_data[0:int(len(label_data)*(1-end_perc))] = np.NaN
plt.plot(label_data, label="Range used for labelling", linewidth=2)
plt.xlabel("Frame")
plt.ylabel("Distance(A)")
plt.legend()
if show_plots == True:
plt.show()
if save_plots == True:
plt.savefig(str(save_path) + filename + "_sum_label.svg")
plt.close()
#This evaluates the sum of the means of each value to classify
sums = np.sum(sums)
sums_traj.append(sums)
if sums < lower_lim:
md_labels.append("IN")
elif sums > upper_lim:
md_labels.append("OUT")
elif lower_lim < sums < upper_lim:
md_labels.append("UCL")
if save_labels == True:
print("Saving labels at ", str(save_path)+"labels.dat")
with open(str(save_path)+"labels.dat", "w") as f:
for d, label in zip(dcd_paths, md_labels):
f.write(str(d)+"\t"+str(label)+"\n")
f.close()
if get_sum == True:
print("Returning labels and the sum of values for each trajectory")
return md_labels, sums_traj
else:
return md_labels
if __name__ == '__main__':
print()
|
[
"re.split",
"matplotlib.pyplot.title",
"mdtraj.compute_distances",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"mdtraj.compute_contacts",
"itertools.combinations",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"mdtraj.compute_neighbors",
"mdtraj.load",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1041, 1053), 'mdtraj.load', 'md.load', (['top'], {}), '(top)\n', (1048, 1053), True, 'import mdtraj as md\n'), ((3158, 3189), 'itertools.combinations', 'combinations', (['relevant_atoms', '(2)'], {}), '(relevant_atoms, 2)\n', (3170, 3189), False, 'from itertools import combinations\n'), ((12528, 12540), 'mdtraj.load', 'md.load', (['top'], {}), '(top)\n', (12535, 12540), True, 'import mdtraj as md\n'), ((12824, 12839), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (12832, 12839), True, 'import numpy as np\n'), ((15330, 15342), 'numpy.sum', 'np.sum', (['sums'], {}), '(sums)\n', (15336, 15342), True, 'import numpy as np\n'), ((3673, 3704), 'itertools.combinations', 'combinations', (['relevant_atoms', '(2)'], {}), '(relevant_atoms, 2)\n', (3685, 3704), False, 'from itertools import combinations\n'), ((8890, 8931), 'mdtraj.load', 'md.load', (['dcd'], {'top': 'self.topology', 'stride': '(1)'}), '(dcd, top=self.topology, stride=1)\n', (8897, 8931), True, 'import mdtraj as md\n'), ((8956, 8998), 'mdtraj.compute_distances', 'md.compute_distances', (['traj', 'CVs.CV_indices'], {}), '(traj, CVs.CV_indices)\n', (8976, 8998), True, 'import mdtraj as md\n'), ((12293, 12308), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (12301, 12308), True, 'import numpy as np\n'), ((13396, 13408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13406, 13408), True, 'import matplotlib.pyplot as plt\n'), ((14544, 14556), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14554, 14556), True, 'import matplotlib.pyplot as plt\n'), ((14573, 14625), 'matplotlib.pyplot.title', 'plt.title', (["('Sum of Distances Trajectory ' + filename)"], {}), "('Sum of Distances Trajectory ' + filename)\n", (14582, 14625), True, 'import matplotlib.pyplot as plt\n'), ((14730, 14754), 'numpy.sum', 'np.sum', (['sum_plot'], {'axis': '(0)'}), '(sum_plot, axis=0)\n', (14736, 14754), True, 'import numpy as np\n'), ((14844, 14911), 'matplotlib.pyplot.plot', 'plt.plot', (['label_data'], {'label': '"""Range used for labelling"""', 'linewidth': '(2)'}), "(label_data, label='Range used for labelling', linewidth=2)\n", (14852, 14911), True, 'import matplotlib.pyplot as plt\n'), ((14928, 14947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frame"""'], {}), "('Frame')\n", (14938, 14947), True, 'import matplotlib.pyplot as plt\n'), ((14964, 14989), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance(A)"""'], {}), "('Distance(A)')\n", (14974, 14989), True, 'import matplotlib.pyplot as plt\n'), ((15006, 15018), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15016, 15018), True, 'import matplotlib.pyplot as plt\n'), ((15222, 15233), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15231, 15233), True, 'import matplotlib.pyplot as plt\n'), ((4161, 4192), 'itertools.combinations', 'combinations', (['relevant_atoms', '(2)'], {}), '(relevant_atoms, 2)\n', (4173, 4192), False, 'from itertools import combinations\n'), ((13639, 13668), 'matplotlib.pyplot.title', 'plt.title', (["('Sim: ' + filename)"], {}), "('Sim: ' + filename)\n", (13648, 13668), True, 'import matplotlib.pyplot as plt\n'), ((13888, 13907), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frame"""'], {}), "('Frame')\n", (13898, 13907), True, 'import matplotlib.pyplot as plt\n'), ((13928, 13953), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance(A)"""'], {}), "('Distance(A)')\n", (13938, 13953), True, 'import matplotlib.pyplot as plt\n'), ((13974, 13986), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', 
(13984, 13986), True, 'import matplotlib.pyplot as plt\n'), ((14214, 14225), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14223, 14225), True, 'import matplotlib.pyplot as plt\n'), ((14496, 14523), 're.split', 're.split', (['"""/"""', 'dcd_paths[n]'], {}), "('/', dcd_paths[n])\n", (14504, 14523), False, 'import re\n'), ((14649, 14673), 'numpy.sum', 'np.sum', (['sum_plot'], {'axis': '(0)'}), '(sum_plot, axis=0)\n', (14655, 14673), True, 'import numpy as np\n'), ((15078, 15088), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15086, 15088), True, 'import matplotlib.pyplot as plt\n'), ((4637, 4671), 'itertools.combinations', 'combinations', (['relevant_residues', '(2)'], {}), '(relevant_residues, 2)\n', (4649, 4671), False, 'from itertools import combinations\n'), ((4758, 4848), 'mdtraj.compute_contacts', 'md.compute_contacts', (['self.topology_file'], {'contacts': 'relevant_residues', 'scheme': '"""closest"""'}), "(self.topology_file, contacts=relevant_residues, scheme=\n 'closest')\n", (4777, 4848), True, 'import mdtraj as md\n'), ((13495, 13509), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (13503, 13509), True, 'import numpy as np\n'), ((13587, 13614), 're.split', 're.split', (['"""/"""', 'dcd_paths[n]'], {}), "('/', dcd_paths[n])\n", (13595, 13614), False, 'import re\n'), ((14054, 14064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14062, 14064), True, 'import matplotlib.pyplot as plt\n'), ((5263, 5297), 'itertools.combinations', 'combinations', (['relevant_residues', '(2)'], {}), '(relevant_residues, 2)\n', (5275, 5297), False, 'from itertools import combinations\n'), ((5384, 5480), 'mdtraj.compute_contacts', 'md.compute_contacts', (['self.topology_file'], {'contacts': 'relevant_residues', 'scheme': '"""closest-heavy"""'}), "(self.topology_file, contacts=relevant_residues, scheme=\n 'closest-heavy')\n", (5403, 5480), True, 'import mdtraj as md\n'), ((6592, 6623), 'itertools.combinations', 'combinations', (['relevant_atoms', '(2)'], {}), '(relevant_atoms, 2)\n', (6604, 6623), False, 'from itertools import combinations\n'), ((6986, 7047), 'mdtraj.compute_neighbors', 'md.compute_neighbors', (['self.topology_file', '(0.6)', 'relevant_atoms'], {}), '(self.topology_file, 0.6, relevant_atoms)\n', (7006, 7047), True, 'import mdtraj as md\n')]
|
import csv
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import numpy as np
# read the data
csvfile=open("weightedX.csv", 'r')
x = list(csv.reader(csvfile))
csvfile=open("weightedY.csv", 'r')
y = list(csv.reader(csvfile))
m=len(x)
n=1
x3=[]
y2=[]
for i in range(m):
x3.append(float(x[i][0]))
y2.append(float(y[i][0]))
# normalise the data
meanx=sum(x3)/m
v=0 # variance
for i in range(m):
t=x3[i]-meanx
v+=t*t
v/=m
v=math.sqrt(v)
for i in range(m):
x3[i]=(x3[i]-meanx)/v
x2=[]
for i in range(m):
x2.append(np.array([1,x3[i]]))
X=np.array(x2)
Y=np.array(y2)
xvalues=np.linspace(min(x3),max(x3),100)
plt.ion()
fig=plt.figure()
ax1 = fig.add_subplot(1,1,1)
# plots Training data &
# straight line from linear regression
def pl(th):
ax1.clear()
ax1.scatter(x3, y2, label= "Training Data", color= "r",
marker= ".", s=10)
the=list(th)
yvalues=the[1]*xvalues+the[0]
ax1.plot(xvalues, yvalues, label="Hypothesis function learned",color ='b')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Q2 (a)')
plt.show()
plt.pause(0.001)
# All weights same
# theta= inv(X'*X)*X'*Y
theta = np.dot(np.dot(np.linalg.inv(np.dot(X.T ,X)) , np.transpose(X)) , Y)
print("theta=",theta)
plt.ioff()
pl(theta)
# Part (b)
fig=plt.figure()
ax1 = fig.add_subplot(1,1,1)
# change value of tau for part (c)
tau=0.8
tau2=tau*tau
# plots the hypothesis function learned
def plot_2():
ax1.clear()
ax1.scatter(x3, y2, label= "Training Data", color= "r",
marker= ".", s=10)
    # calculate the y-axis values for the corresponding x-axis values
yvalues=[]
for i in range(len(xvalues)):
weights=[]
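        # locally weighted regression: each training point j gets weight
        # w_j = exp(-(x_query - x_j)^2 / (2*tau^2))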
for j in range(m):
c=xvalues[i]-X[j][1]
power=-(c*c)/(2*tau2)
weights.append(math.exp(power))
# convert np array to diagonal matrix
# W is m*m matrix
W=np.diag(np.array(weights))
# theta=inv(X'*W*X)*X'*W*Y
the = np.dot(np.dot(np.dot(np.linalg.inv(np.dot(np.dot(X.T ,W),X)) , X.T), W) , Y)
yvalues.append(the[1]*xvalues[i]+the[0])
ax1.plot(xvalues, yvalues, label="Hypothesis function learned",color ='b')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Q2 tau={}'.format(tau))
plt.show()
plt.pause(0.001)
plt.ioff()
plot_2()
|
[
"numpy.transpose",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"math.sqrt",
"matplotlib.pyplot.ioff",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.dot",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"math.exp",
"matplotlib.pyplot.pause",
"csv.reader",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((489, 501), 'math.sqrt', 'math.sqrt', (['v'], {}), '(v)\n', (498, 501), False, 'import math\n'), ((612, 624), 'numpy.array', 'np.array', (['x2'], {}), '(x2)\n', (620, 624), True, 'import numpy as np\n'), ((627, 639), 'numpy.array', 'np.array', (['y2'], {}), '(y2)\n', (635, 639), True, 'import numpy as np\n'), ((683, 692), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (690, 692), True, 'import matplotlib.pyplot as plt\n'), ((697, 709), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (707, 709), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1330), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1328, 1330), True, 'import matplotlib.pyplot as plt\n'), ((1357, 1369), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1367, 1369), True, 'import matplotlib.pyplot as plt\n'), ((2375, 2385), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (2383, 2385), True, 'import matplotlib.pyplot as plt\n'), ((188, 207), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (198, 207), False, 'import csv\n'), ((253, 272), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (263, 272), False, 'import csv\n'), ((1063, 1078), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1073, 1078), True, 'import matplotlib.pyplot as plt\n'), ((1083, 1098), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1093, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1115), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1113, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1120, 1139), 'matplotlib.pyplot.title', 'plt.title', (['"""Q2 (a)"""'], {}), "('Q2 (a)')\n", (1129, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1152, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1175), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (1168, 1175), True, 'import matplotlib.pyplot as plt\n'), ((2245, 2260), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2255, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2265, 2280), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2275, 2280), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2297), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2295, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2350, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2357, 2373), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (2366, 2373), True, 'import matplotlib.pyplot as plt\n'), ((587, 607), 'numpy.array', 'np.array', (['[1, x3[i]]'], {}), '([1, x3[i]])\n', (595, 607), True, 'import numpy as np\n'), ((1275, 1290), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (1287, 1290), True, 'import numpy as np\n'), ((1257, 1271), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (1263, 1271), True, 'import numpy as np\n'), ((1965, 1982), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (1973, 1982), True, 'import numpy as np\n'), ((1873, 1888), 'math.exp', 'math.exp', (['power'], {}), '(power)\n', (1881, 1888), False, 'import math\n'), ((2076, 2090), 'numpy.dot', 'np.dot', (['X.T', 'W'], {}), '(X.T, W)\n', (2082, 2090), True, 'import numpy as np\n')]
|
import math
import numpy as np
import matlab.engine
from pyomo.environ import *
from pyomo.dae import *
from pyomo.gdp import *
from pyomo.gdp.plugins.chull import ConvexHull_Transformation
from pyomo.gdp.plugins.bigm import BigM_Transformation
from pyomo.core import Var
from pyomo.dae.plugins.finitedifference import Finite_Difference_Transformation
import hopperUtil
class Hopper:
def __init__(self, N, eng, matlabHopper, name=''):
self.model_disc = []
self.positionMax = 10
self.rotationMax = 2*np.pi
self.velocityMax = 10
self.angularVelocityMax = 10
self.forceMax = 10
self.N = N
self.r = []
self.v = []
self.F = []
self.th = []
self.w = []
self.T = []
self.p = []
self.pd = []
self.R = []
self.dtBounds = (0.05, 0.2)
self.dtNom = 0.1
self.c = []
self.p_MDT = -2
self.P_MDT = -1
self.regions = []
self.base = 10
self.tf = 1
self.nOrientationSectors = 1
self.bodyRadius = 0.25
self.mdt_precision = 1
self.eng = eng
self.matlabHopper = matlabHopper
self.momentOfInertia = self.eng.getDimensionlessMomentOfInertia(self.matlabHopper)
self.hipOffset = self.eng.getHipInBody(self.matlabHopper)
self.footnames = self.hipOffset.keys()
def addPlatform(self, platform_start, platform_end, platform_height, mu, platform_left, platform_right):
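        # Descriptive note: a platform is added as a friction region whose x-range is
        # shrunk by 0.1 at each end and whose z is fixed to the platform height, with
        # an upward contact normal.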
self.addRegion(A=np.matrix('-1., 0.,; 1., 0.'),
b=np.matrix('%f; %f' % (-(platform_start+0.1), platform_end-0.1)),
Aeq=np.array([0., 1.]), beq=platform_height, normal=np.matrix('0.; 1.'),
mu=mu)
self.eng.addPlatform(self.matlabHopper, platform_start, platform_end, platform_height, platform_left, platform_right, nargout=0)
def addFreeBlock(self, left=None, right=None, top=None, bottom=None):
Arows = []
brows = []
if left is not None:
Arows.append(np.matrix('-1., 0.'))
brows.append(np.matrix(-left))
if right is not None:
Arows.append(np.matrix('1., 0.'))
brows.append(np.matrix(right))
if top is not None:
Arows.append(np.matrix('0., 1.'))
brows.append(np.matrix(top))
if bottom is not None:
Arows.append(np.matrix('0., -1.'))
brows.append(np.matrix(-bottom))
self.addRegion(A=np.vstack(Arows), b=np.vstack(brows))
def addRegion(self, **kwargs):
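        # Descriptive note: each region is stored as {x : A*x <= b, Aeq*x == beq}
        # together with its contact normal and friction coefficient mu; regions with
        # mu == 0 are treated as contact-free space elsewhere in the model.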
self.regions.append(dict.fromkeys(['A', 'b', 'Aeq', 'beq', 'normal', 'mu']))
self.regions[-1]['normal'] = np.matrix('0.; 0.')
self.regions[-1]['mu'] = 0.
for key, value in kwargs.iteritems():
for key2 in self.regions[-1].keys():
if key == key2:
self.regions[-1][key] = value
forMatlab = dict(self.regions[-1])
for key, value in forMatlab.iteritems():
if isinstance(value, type(np.array(0))):
forMatlab[key] = matlab.double(value.tolist())
if value is None:
forMatlab[key] = matlab.double([])
self.eng.addRegion(self.matlabHopper, forMatlab, nargout=0)
def constructVisualizer(self):
self.eng.constructVisualizer(self.matlabHopper, nargout=0)
def playback(self, speed=1.):
self.eng.playback(self.matlabHopper, speed, nargout=0)
def extractTime(self, m):
return np.cumsum([0.]+[m.dt[ti].value for ti in m.t][:-1])
def extractPostition(self, m):
return np.vstack([np.array([m.r[xz, ti].value for ti in m.t]) for xz in m.R2_INDEX])
def extractVelocity(self, m):
return np.vstack([np.array([m.v[xz, ti].value for ti in m.t]) for xz in m.R2_INDEX])
def extractTotalForce(self, m):
return np.vstack([np.array([m.F[xz, ti].value for ti in m.t]) for xz in m.R2_INDEX])
def extractOrientation(self, m):
return np.atleast_2d(np.array([m.th[ti].value for ti in m.t]))
def extractHipPosition(self, m):
return np.dstack([np.vstack([np.array([m.hip[foot, xz, ti].value for ti in m.t]) for xz in m.R2_INDEX]) for foot in m.feet])
def extractRelativeFootPosition(self, m):
return np.dstack([np.vstack([np.array([m.p[foot, xz, ti].value for ti in m.t]) for xz in m.R2_INDEX]) for foot in m.feet])
def extractFootForce(self, m):
return np.dstack([np.vstack([np.array([m.f[foot, xz, ti].value for ti in m.t]) for xz in m.R2_INDEX]) for foot in m.feet])
def extractTotalTorque(self, m):
return np.atleast_2d(np.array([m.T[ti].value for ti in m.t]))
    def extractAngularMomentum(self, m):
return np.atleast_2d(np.array([self.momentOfInertia*m.w[ti].value for ti in m.t]))
def extractRegionIndicators(self, m):
return np.dstack([np.vstack([np.array([getattr(m, '%sindicator_var' % m.footRegionConstraints[region, foot, ti].cname()).value for ti in m.t]) for region in m.REGION_INDEX]) for foot in m.feet])
def extractBodyRegionIndicators(self, m):
def extractIndicatorForRegion(region):
if self.regions[region]['mu'] == 0.0:
return np.array([getattr(m, '%sindicator_var' % m.bodyRegionConstraints[region, ti].cname()).value for ti in m.t])
else:
return np.zeros([1, len(m.t)])
return np.vstack([extractIndicatorForRegion(region) for region in m.REGION_INDEX])
def loadResults(self, m):
data = dict()
data['t'] = matlab.double(self.extractTime(m).tolist())
data['r'] = matlab.double(self.extractPostition(m).tolist())
data['v'] = matlab.double(self.extractVelocity(m).tolist())
data['F'] = matlab.double(self.extractTotalForce(m).tolist())
data['th'] = matlab.double(self.extractOrientation(m).tolist())
data['r_hip'] = matlab.double(self.extractHipPosition(m).tolist())
data['p'] = matlab.double(self.extractRelativeFootPosition(m).tolist())
data['f'] = matlab.double(self.extractFootForce(m).tolist())
data['T'] = matlab.double(self.extractTotalTorque(m).tolist())
data['k'] = matlab.double(self.extractAngularMomentum(m).tolist())
data['region_indicators'] = matlab.double(self.extractRegionIndicators(m).tolist())
data['body_region_indicators'] = matlab.double(self.extractBodyRegionIndicators(m).tolist())
self.eng.loadResults(self.matlabHopper, data, nargout=0)
def constructPyomoModel(self):
model = ConcreteModel()
model.R2_INDEX = Set(initialize=['x', 'z'])
model.feet = Set(initialize=self.footnames)
model.REGION_INDEX = RangeSet(0, len(self.regions)-1)
model.t = RangeSet(1, self.N)
model.BV_INDEX = RangeSet(0, 1)
def _bvRule(m, region, bv, xz):
# v = rot(+-atan(mu))*normal
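            # i.e. the two edges of the planar friction cone; contact forces are later
            # written as nonnegative combinations of these basis vectors (see
            # _contactForceConstraint below).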
mu = self.regions[region]['mu']
if bv == m.BV_INDEX[1]:
theta = np.arctan(mu)
else:
theta = -np.arctan(mu)
R = np.matrix([[cos(theta), -sin(theta)],
[sin(theta), cos(theta)]])
vec = R*(self.regions[region]['normal'])
if xz == 'x':
return float(vec[0])
else:
return float(vec[1])
model.basisVectors = Param(model.REGION_INDEX, model.BV_INDEX, model.R2_INDEX, initialize=_bvRule)
def _hipOffsetRule(m, foot, xz):
return self.hipOffset[foot][xz]
model.hipOffset = Param(model.feet, model.R2_INDEX, initialize=_hipOffsetRule)
model.dt = Var(model.t, bounds=self.dtBounds, initialize=self.dtNom)
model.r = Var(model.R2_INDEX, model.t, bounds=(-self.positionMax, self.positionMax))
model.v = Var(model.R2_INDEX, model.t, bounds=(-self.velocityMax, self.velocityMax))
model.th = Var(model.t, bounds=(-self.rotationMax, self.rotationMax))
model.w = Var(model.t, bounds=(-self.angularVelocityMax, self.angularVelocityMax))
model.F = Var(model.R2_INDEX, model.t, bounds=(-self.forceMax, self.forceMax))
model.f = Var(model.feet, model.R2_INDEX, model.t, bounds=(-self.forceMax, self.forceMax))
model.hipTorque = Var(model.feet, model.t, bounds=(-self.forceMax, self.forceMax))
model.beta = Var(model.feet, model.BV_INDEX, model.t, within=NonNegativeReals, bounds=(0, self.forceMax))
model.T = Var(model.t, bounds=(-self.forceMax, self.forceMax))
lb = {'x': -0.5, 'z': -1}
ub = {'x': 0.5, 'z': -0.85}
def _pBounds(m, foot, i, t):
return (math.sqrt(2)/2*lb[i], math.sqrt(2)/2*ub[i])
model.p = Var(model.feet, model.R2_INDEX, model.t, bounds=_pBounds)
model.pd = Var(model.feet, model.R2_INDEX, model.t, bounds=(-self.velocityMax/2, self.velocityMax/2))
model.pdd = Var(model.feet, model.R2_INDEX, model.t, bounds=(-self.velocityMax, self.velocityMax))
model.hip = Var(model.feet, model.R2_INDEX, model.t, bounds=(-1, 1))
model.footRelativeToCOM = Var(model.feet, model.R2_INDEX, model.t, bounds=(-1, 1))
model.foot = Var(model.feet, model.R2_INDEX, model.t, bounds=(-self.positionMax, self.positionMax))
model.cth = Var(model.t, bounds=(-1,1))
model.sth = Var(model.t, bounds=(-1,1))
# Fix final dt to zero
model.dt[model.t[-1]].value = 0.0
model.dt[model.t[-1]].fixed = True
# Constraints for BRF vectors
# to avoid warnings, we set breakpoints at or beyond the bounds
numPieces = self.nOrientationSectors
bpts = []
for i in range(numPieces+2):
bpts.append(float(-self.rotationMax + (i*2*self.rotationMax)/numPieces))
def _cos(model, t, th):
return cos(th)
def _sin(model, t, th):
return sin(th)
model.pwCos = Piecewise(model.t, model.cth, model.th, pw_pts=bpts, pw_constr_type='EQ', pw_repn='CC', f_rule=_cos)
model.pwSin = Piecewise(model.t, model.sth, model.th, pw_pts=bpts, pw_constr_type='EQ', pw_repn='CC', f_rule=_sin)
def _momentRule(m, t):
return m.T[t] == -sum(m.footRelativeToCOM[foot,'x',t]*m.f[foot,'z',t] - m.footRelativeToCOM[foot,'z',t]*m.f[foot, 'x',t] for foot in m.feet)
#return m.T[t] == sum(m.footRelativeToCOM[foot,'x',t]*m.f[foot,'z',t] - m.footRelativeToCOM[foot,'z',t]*m.f[foot, 'x',t] for foot in m.feet)
model.momentAbountCOM = Constraint(model.t, rule=_momentRule)
#def _hipTorqueRule(m, foot, t):
#return m.hipTorque[foot, t] == m.p[foot,'x',t]*m.f[foot,'z',t] - m.p[foot,'z',t]*m.f[foot, 'x',t]
#model.hipTorqueConstraint = Constraint(model.feet, model.t, rule=_hipTorqueRule)
def _forceRule(m, i, t):
g = -1 if i == 'z' else 0
return m.F[i,t] == sum(m.f[foot, i, t] for foot in m.feet) + g
model.totalForce = Constraint(model.R2_INDEX, model.t, rule=_forceRule)
# def _legLengthRule(m, foot, t):
# return m.p[foot, 'x', t]**2 + m.p[foot, 'z', t]**2 <= 1
#model.legLengthConstraint = Constraint(model.feet, model.t, rule=_legLengthRule)
# Translational dynamics
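        # (backward-Euler style update: the state at t+1 is propagated with the
        # velocity/force evaluated at t+1 rather than at t)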
def _positionRule(m, i, t):
if t == self.N:
return Constraint.Skip
else:
return m.r[i,t+1] == m.r[i, t] + m.dt[t]*m.v[i, t + 1]
#v_mid = m.v[i,t] + m.dt[t]/2*m.F[i,t]
#return m.r[i,t+1] == m.r[i, t] + m.dt[t]*v_mid
model.positionConstraint = Constraint(model.R2_INDEX, model.t, rule=_positionRule)
def _footPositionDefinition(m, foot, i, t):
return m.foot[foot, i, t] == m.p[foot, i, t] + m.hip[foot, i, t] + m.r[i, t]
model.footPositionDefinition = Constraint(model.feet, model.R2_INDEX, model.t, rule=_footPositionDefinition)
def _footPositionRule(m, foot, i, t):
if t == self.N:
return Constraint.Skip
else:
return m.foot[foot, i, t+1] == m.foot[foot, i, t] + m.dt[t]*m.pd[foot, i, t+1]
model.footPositionConstraint = Constraint(model.feet, model.R2_INDEX, model.t, rule=_footPositionRule)
def _footRelativeToCOMDefinition(m, foot, xz, t):
return m.footRelativeToCOM[foot, xz, t] == m.p[foot, xz, t] + m.hip[foot, xz, t]
model.footRelativeToCOMDefinition = Constraint(model.feet, model.R2_INDEX, model.t, rule=_footRelativeToCOMDefinition)
# Hip position
# r_hip == [cth, sth; -sth, cth]*hip
# r_hip == [cth*hip(1) + sth*hip(2); -sth*hip(1) + cth*hip(2)]
# r_hip == [hip(1), hip(2); hip(2), -hip(1)]*[cth; sth]
def _hipPositionRule(m, foot, xz, t):
if xz == 'x':
return m.hip[foot, xz, t] == m.hipOffset[foot, 'x']*m.cth[t] + m.hipOffset[foot, 'z']*m.sth[t]
else:
return m.hip[foot, xz, t] == m.hipOffset[foot, 'z']*m.cth[t] - m.hipOffset[foot, 'x']*m.sth[t]
model.hipPositionConstraint = Constraint(model.feet, model.R2_INDEX, model.t, rule=_hipPositionRule)
def _footVelocityRule(m, foot, xz, t):
if t == self.N:
return Constraint.Skip
else:
return m.pd[foot, xz, t + 1] == m.pd[foot, xz, t] + m.dt[t]*m.pdd[foot, xz, t+1]
#return m.pd[foot, xz, t + 1] == m.pd[foot, xz, t] + 0.5*m.dt[t]*(m.pdd[foot, xz, t] + m.pdd[foot, xz, t + 1])
model.footVelocityConstraint = Constraint(model.feet, model.R2_INDEX, model.t, rule=_footVelocityRule)
def _velocityRule(m, i, t):
if t == self.N:
return Constraint.Skip
else:
return m.v[i,t+1] == m.v[i,t] + m.dt[t]*m.F[i,t+1]
#return m.v[i,t+1] == m.v[i,t] + m.dt[t]/2*(m.F[i,t] + m.F[i,t+1])
#v_mid = m.v[i,t] + m.dt[t]/2*m.F[i,t]
#return m.v[i,t+1] == v_mid + m.dt[t]/2*m.F[i,t+1]
model.velocityConstraint = Constraint(model.R2_INDEX, model.t, rule=_velocityRule)
def _angularVelocityRule(m, t):
if t == self.N:
return Constraint.Skip
else:
return m.w[t+1] == m.w[t] + m.dt[t]/(self.momentOfInertia)*m.T[t+1]
#w_mid = m.w[t] + m.dt[t]/(2*self.momentOfInertia)*m.T[t]
#return m.w[t+1] == w_mid + m.dt[t]/(2*self.momentOfInertia)*m.T[t+1]
model.angularVelocityConstraint = Constraint(model.t, rule=_angularVelocityRule)
def _orientationRule(m, t):
if t == self.N:
return Constraint.Skip
else:
return m.th[t+1] == m.th[t] + m.dt[t]*m.w[t+1]
#w_mid = m.w[t] + m.dt[t]/(2*self.momentOfInertia)*m.T[t]
#return m.th[t+1] == m.th[t] + m.dt[t]*w_mid
model.orientationConstraint = Constraint(model.t, rule=_orientationRule)
def _footRegionConstraints(disjunct, region, foot, t):
m = disjunct.model()
A = None
if self.regions[region]['A'] is not None:
A = self.regions[region]['A']
b = self.regions[region]['b']
if self.regions[region]['Aeq'] is not None:
Aeq = self.regions[region]['Aeq']
beq = self.regions[region]['beq']
if A is not None:
A = np.vstack((A, Aeq, -Aeq))
b = np.vstack((b, beq, -beq))
else:
A = np.vstack((Aeq, -Aeq))
b = np.vstack((beq, -beq))
A = np.atleast_2d(A)
b = np.atleast_1d(b)
def _contactPositionConstraint(disjunctData, i):
m = disjunctData.model()
return A[i,0]*m.foot[foot, 'x', t] + A[i,1]*m.foot[foot, 'z', t] <= float(b[i])
disjunct.contactPositionConstraint = Constraint(range(A.shape[0]), rule=_contactPositionConstraint)
def _footCollisionAvoidanceConstraint(disjunctData, i, pm1):
m = disjunctData.model()
if self.regions[region]['mu'] == 0. and t != m.t[-1] and t != m.t[1]:
return A[i,0]*m.foot[foot, 'x', t+pm1] + A[i,1]*m.foot[foot, 'z', t+pm1] <= float(b[i])
else:
return Constraint.Skip
disjunct.footCollisionAvoidanceConstraint = Constraint(range(A.shape[0]), [-1, 1], rule=_footCollisionAvoidanceConstraint)
def _hipPositionConstraint(disjunctData, i):
if self.regions[region]['mu'] == 0.:
m = disjunctData.model()
return A[i,0]*(m.r['x', t] + m.hip[foot, 'x', t]) + A[i,1]*(m.r['z', t] + m.hip[foot, 'z', t]) <= float(b[i])
else:
return Constraint.Skip
disjunct.hipPositionConstraint = Constraint(range(A.shape[0]), rule=_hipPositionConstraint)
def _contactForceConstraint(disjunctData, xz):
m = disjunctData.model()
return m.f[foot, xz, t] == sum(m.beta[foot, bv, t]*m.basisVectors[region, bv, xz] for bv in m.BV_INDEX)
disjunct.contactForceConstraint = Constraint(m.R2_INDEX, rule=_contactForceConstraint)
#disjunct.contactForceConstraint1 = Constraint(expr=m.f[foot, 'x', t] <= self.regions[region]['mu']*m.f[foot, 'z', t])
#disjunct.contactForceConstraint2 = Constraint(expr=m.f[foot, 'x', t] >= -self.regions[region]['mu']*m.f[foot, 'z', t])
#def _contactForceConstraint3(disjunctData, xz):
#m = disjunctData.model()
#if self.regions[region]['mu'] == 0.:
#return m.f[foot, xz, t] == 0
#else:
#return Constraint.Skip
#disjunct.contactForceConstraint3 = Constraint(m.R2_INDEX, rule=_contactForceConstraint3)
def _stationaryFootConstraint(disjunctData, xz):
m = disjunctData.model()
if self.regions[region]['mu'] != 0.:
if xz == 'x':
return m.pd[foot, xz, t] == 0
else:
return Constraint.Skip
else:
return Constraint.Skip
disjunct.stationaryFootConstraint = Constraint(m.R2_INDEX, rule=_stationaryFootConstraint)
model.footRegionConstraints = Disjunct(model.REGION_INDEX, model.feet, model.t, rule=_footRegionConstraints)
# Define the disjunction
def _footRegionDisjunction(m, foot, t):
disjunctList = []
for region in m.REGION_INDEX:
disjunctList.append(m.footRegionConstraints[region, foot, t])
return disjunctList
model.footRegionDisjunction = Disjunction(model.feet, model.t, rule=_footRegionDisjunction)
def _bodyRegionConstraints(disjunct, region, t):
if self.regions[region]['mu'] != 0.:
return Constraint.Skip
else:
m = disjunct.model()
A = None
if self.regions[region]['A'] is not None:
A = self.regions[region]['A']
b = self.regions[region]['b'] - self.bodyRadius
A = np.atleast_2d(A)
b = np.atleast_1d(b)
def _bodyPositionConstraint(disjunctData, i):
m = disjunctData.model()
return A[i,0]*m.r['x', t] + A[i,1]*m.r['z', t] <= float(b[i])
disjunct.bodyPositionConstraint = Constraint(range(A.shape[0]), rule=_bodyPositionConstraint)
model.bodyRegionConstraints = Disjunct(model.REGION_INDEX, model.t, rule=_bodyRegionConstraints)
# Define the disjunction
def _bodyRegionDisjunction(m, t):
disjunctList = []
for region in m.REGION_INDEX:
if self.regions[region]['mu'] == 0.:
disjunctList.append(m.bodyRegionConstraints[region, t])
return disjunctList
model.bodyRegionDisjunction = Disjunction(model.t, rule=_bodyRegionDisjunction)
disjunctionTransform = ConvexHull_Transformation()
# disjunctionTransform = BigM_Transformation()
disjunctionTransform.apply_to(model)
def _stanceDurationRule(m, foot, region, t):
window = 2
if self.regions[region]['mu'] != 0.:
t_start = max(1, t - window)
t_end = min(m.t[-1], t + window) + 1
indicators = [getattr(m, 'footRegionConstraints[%d,%s,%d]indicator_var' % (region, foot, ti))
for ti in range(t_start, t_end)]
current_indicator = getattr(m, 'footRegionConstraints[%d,%s,%d]indicator_var' % (region, foot, t))
bigM = window + 1
return -sum(indicators) <= -bigM + bigM*(1 - current_indicator)
else:
return Constraint.Skip
#model.stanceDurationConstraint = Constraint(model.feet, model.REGION_INDEX, model.t, rule=_stanceDurationRule)
def _initialStance(m, foot, region):
if self.regions[region]['mu'] == 0.:
current_indicator = getattr(m, 'footRegionConstraints[%d,%s,1]indicator_var' % (region, foot))
return current_indicator == 0
else:
return Constraint.Skip
model.initialStance = Constraint(model.feet, model.REGION_INDEX, rule=_initialStance)
def _finalStance(m, foot, region):
if self.regions[region]['mu'] == 0.:
current_indicator = getattr(m, 'footRegionConstraints[%d,%s,%d]indicator_var' % (region, foot, m.t[-1]))
return current_indicator == 0
else:
return Constraint.Skip
model.finalStance = Constraint(model.feet, model.REGION_INDEX, rule=_finalStance)
return model
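# Hedged usage sketch (not part of the original file): the transformed model
# returned above can be handed to a solver through Pyomo's SolverFactory. The
# solver name ('gurobi', hinted at by the commented-out test code below) and the
# tee option are assumptions, not values taken from this project.
def solveHopperModel(hopper):
    from pyomo.opt import SolverFactory
    model = hopper.constructPyomoModel()
    opt = SolverFactory('gurobi')
    results = opt.solve(model, tee=True)  # tee=True echoes the solver log
    return model, results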
#def testHopper(hopper, r0, rf, legLength):
#hopper.constructPyomoModel()
#m_nlp = hopper.model
#def objRule(m):
# return sum(m.beta[foot, bv, ti]**2 for foot in m.feet for bv in m.BV_INDEX for ti in m.t)
# + sum(m.pdd[foot, i, j]**2 for foot in m.feet for i in m.R2_INDEX for j in m.t)
#return sum(m.f[foot, i, j]**2 + m.pdd[foot, i, j]**2 for foot in m.feet for i in m.R2_INDEX for j in m.t) + sum(m.T[ti]**2 for ti in m.t)
#m_nlp.Obj = Objective(rule=objRule, sense=minimize)
#m_nlp.rx0 = Constraint(expr=m_nlp.r['x',m_nlp.t[1]] == r0[0]/legLength)
#m_nlp.rz0 = Constraint(expr=m_nlp.r['z',m_nlp.t[1]] <= 1)
#m_nlp.th0 = Constraint(expr=m_nlp.th[m_nlp.t[1]] == 0)
#m_nlp.vx0 = Constraint(expr=m_nlp.v['x',m_nlp.t[1]] == 0)
#m_nlp.vz0 = Constraint(expr=m_nlp.v['z',m_nlp.t[1]] == 0)
#m_nlp.w0 = Constraint(expr=m_nlp.w[m_nlp.t[1]] == 0)
#m_nlp.Fx0 = Constraint(expr=m_nlp.F['x', m_nlp.t[1]] == 0)
#m_nlp.Fz0 = Constraint(expr=m_nlp.F['z', m_nlp.t[1]] == 0)
#m_nlp.T0 = Constraint(expr=m_nlp.T[m_nlp.t[1]] == 0)
#m_nlp.rxf = Constraint(expr=m_nlp.r['x',m_nlp.t[-1]] >= rf[0]/legLength)
#m_nlp.rzf = Constraint(expr=m_nlp.r['z',m_nlp.t[-1]] == m_nlp.r['z', m_nlp.t[1]])
#m_nlp.thf = Constraint(expr=m_nlp.th[m_nlp.t[-1]] == 0)
#m_nlp.vxf = Constraint(expr=m_nlp.v['x',m_nlp.t[-1]] == m_nlp.v['x',m_nlp.t[1]])
#m_nlp.vzf = Constraint(expr=m_nlp.v['z',m_nlp.t[-1]] == 0)
#m_nlp.wf = Constraint(expr=m_nlp.w[m_nlp.t[-1]] == 0)
#m_nlp.Fxf = Constraint(expr=m_nlp.F['x', m_nlp.t[-1]] == 0)
#m_nlp.Fzf = Constraint(expr=m_nlp.F['z', m_nlp.t[-1]] == 0)
#m_nlp.Tf = Constraint(expr=m_nlp.T[m_nlp.t[-1]] == 0)
#def _maxVerticalVelocityRule(m, t):
#return m.v['z', t] <= 0.5
# m_nlp.maxVerticalVelocityConstraint = Constraint(m_nlp.t, rule=_maxVerticalVelocityRule)
#def _periodicFootPosition(m, foot, xz):
#return m.p[foot, xz, m.t[1]] == m.p[foot, xz, m.t[-1]]
#m_nlp.periodicFootPosition = Constraint(m_nlp.feet, m_nlp.R2_INDEX, rule=_periodicFootPosition)
#return m_nlp
#opt = SolverFactory('_gurobi_direct')
# opt.set_options('mipgap=0.05')
#if timeout > 0:
#opt.set_options('TimeLimit=%f' % timeout)
#opt.set_options('Threads=%f' % threads)
# opt.set_options('Seed=0')
#opt.set_options('Presolve=2')
|
[
"numpy.atleast_2d",
"pyomo.core.Var",
"math.sqrt",
"pyomo.gdp.plugins.chull.ConvexHull_Transformation",
"numpy.array",
"numpy.arctan",
"numpy.vstack",
"numpy.cumsum",
"numpy.matrix",
"numpy.atleast_1d"
] |
[((2724, 2743), 'numpy.matrix', 'np.matrix', (['"""0.; 0."""'], {}), "('0.; 0.')\n", (2733, 2743), True, 'import numpy as np\n'), ((3561, 3615), 'numpy.cumsum', 'np.cumsum', (['([0.0] + [m.dt[ti].value for ti in m.t][:-1])'], {}), '([0.0] + [m.dt[ti].value for ti in m.t][:-1])\n', (3570, 3615), True, 'import numpy as np\n'), ((7872, 7929), 'pyomo.core.Var', 'Var', (['model.t'], {'bounds': 'self.dtBounds', 'initialize': 'self.dtNom'}), '(model.t, bounds=self.dtBounds, initialize=self.dtNom)\n', (7875, 7929), False, 'from pyomo.core import Var\n'), ((7948, 8022), 'pyomo.core.Var', 'Var', (['model.R2_INDEX', 'model.t'], {'bounds': '(-self.positionMax, self.positionMax)'}), '(model.R2_INDEX, model.t, bounds=(-self.positionMax, self.positionMax))\n', (7951, 8022), False, 'from pyomo.core import Var\n'), ((8041, 8115), 'pyomo.core.Var', 'Var', (['model.R2_INDEX', 'model.t'], {'bounds': '(-self.velocityMax, self.velocityMax)'}), '(model.R2_INDEX, model.t, bounds=(-self.velocityMax, self.velocityMax))\n', (8044, 8115), False, 'from pyomo.core import Var\n'), ((8135, 8193), 'pyomo.core.Var', 'Var', (['model.t'], {'bounds': '(-self.rotationMax, self.rotationMax)'}), '(model.t, bounds=(-self.rotationMax, self.rotationMax))\n', (8138, 8193), False, 'from pyomo.core import Var\n'), ((8212, 8284), 'pyomo.core.Var', 'Var', (['model.t'], {'bounds': '(-self.angularVelocityMax, self.angularVelocityMax)'}), '(model.t, bounds=(-self.angularVelocityMax, self.angularVelocityMax))\n', (8215, 8284), False, 'from pyomo.core import Var\n'), ((8303, 8371), 'pyomo.core.Var', 'Var', (['model.R2_INDEX', 'model.t'], {'bounds': '(-self.forceMax, self.forceMax)'}), '(model.R2_INDEX, model.t, bounds=(-self.forceMax, self.forceMax))\n', (8306, 8371), False, 'from pyomo.core import Var\n'), ((8390, 8475), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '(-self.forceMax, self.forceMax)'}), '(model.feet, model.R2_INDEX, model.t, bounds=(-self.forceMax, self.forceMax)\n )\n', (8393, 8475), False, 'from pyomo.core import Var\n'), ((8497, 8561), 'pyomo.core.Var', 'Var', (['model.feet', 'model.t'], {'bounds': '(-self.forceMax, self.forceMax)'}), '(model.feet, model.t, bounds=(-self.forceMax, self.forceMax))\n', (8500, 8561), False, 'from pyomo.core import Var\n'), ((8583, 8679), 'pyomo.core.Var', 'Var', (['model.feet', 'model.BV_INDEX', 'model.t'], {'within': 'NonNegativeReals', 'bounds': '(0, self.forceMax)'}), '(model.feet, model.BV_INDEX, model.t, within=NonNegativeReals, bounds=(0,\n self.forceMax))\n', (8586, 8679), False, 'from pyomo.core import Var\n'), ((8694, 8746), 'pyomo.core.Var', 'Var', (['model.t'], {'bounds': '(-self.forceMax, self.forceMax)'}), '(model.t, bounds=(-self.forceMax, self.forceMax))\n', (8697, 8746), False, 'from pyomo.core import Var\n'), ((8937, 8994), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '_pBounds'}), '(model.feet, model.R2_INDEX, model.t, bounds=_pBounds)\n', (8940, 8994), False, 'from pyomo.core import Var\n'), ((9014, 9113), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '(-self.velocityMax / 2, self.velocityMax / 2)'}), '(model.feet, model.R2_INDEX, model.t, bounds=(-self.velocityMax / 2, \n self.velocityMax / 2))\n', (9017, 9113), False, 'from pyomo.core import Var\n'), ((9125, 9216), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '(-self.velocityMax, self.velocityMax)'}), '(model.feet, model.R2_INDEX, model.t, bounds=(-self.velocityMax, self.\n 
velocityMax))\n', (9128, 9216), False, 'from pyomo.core import Var\n'), ((9232, 9288), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '(-1, 1)'}), '(model.feet, model.R2_INDEX, model.t, bounds=(-1, 1))\n', (9235, 9288), False, 'from pyomo.core import Var\n'), ((9323, 9379), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '(-1, 1)'}), '(model.feet, model.R2_INDEX, model.t, bounds=(-1, 1))\n', (9326, 9379), False, 'from pyomo.core import Var\n'), ((9401, 9492), 'pyomo.core.Var', 'Var', (['model.feet', 'model.R2_INDEX', 'model.t'], {'bounds': '(-self.positionMax, self.positionMax)'}), '(model.feet, model.R2_INDEX, model.t, bounds=(-self.positionMax, self.\n positionMax))\n', (9404, 9492), False, 'from pyomo.core import Var\n'), ((9508, 9536), 'pyomo.core.Var', 'Var', (['model.t'], {'bounds': '(-1, 1)'}), '(model.t, bounds=(-1, 1))\n', (9511, 9536), False, 'from pyomo.core import Var\n'), ((9556, 9584), 'pyomo.core.Var', 'Var', (['model.t'], {'bounds': '(-1, 1)'}), '(model.t, bounds=(-1, 1))\n', (9559, 9584), False, 'from pyomo.core import Var\n'), ((20454, 20481), 'pyomo.gdp.plugins.chull.ConvexHull_Transformation', 'ConvexHull_Transformation', ([], {}), '()\n', (20479, 20481), False, 'from pyomo.gdp.plugins.chull import ConvexHull_Transformation\n'), ((4067, 4107), 'numpy.array', 'np.array', (['[m.th[ti].value for ti in m.t]'], {}), '([m.th[ti].value for ti in m.t])\n', (4075, 4107), True, 'import numpy as np\n'), ((4692, 4731), 'numpy.array', 'np.array', (['[m.T[ti].value for ti in m.t]'], {}), '([m.T[ti].value for ti in m.t])\n', (4700, 4731), True, 'import numpy as np\n'), ((4805, 4869), 'numpy.array', 'np.array', (['[(self.momentOfInertia * m.w[ti].value) for ti in m.t]'], {}), '([(self.momentOfInertia * m.w[ti].value) for ti in m.t])\n', (4813, 4869), True, 'import numpy as np\n'), ((15890, 15906), 'numpy.atleast_2d', 'np.atleast_2d', (['A'], {}), '(A)\n', (15903, 15906), True, 'import numpy as np\n'), ((15923, 15939), 'numpy.atleast_1d', 'np.atleast_1d', (['b'], {}), '(b)\n', (15936, 15939), True, 'import numpy as np\n'), ((1529, 1558), 'numpy.matrix', 'np.matrix', (['"""-1., 0.,; 1., 0."""'], {}), "('-1., 0.,; 1., 0.')\n", (1538, 1558), True, 'import numpy as np\n'), ((1585, 1652), 'numpy.matrix', 'np.matrix', (["('%f; %f' % (-(platform_start + 0.1), platform_end - 0.1))"], {}), "('%f; %f' % (-(platform_start + 0.1), platform_end - 0.1))\n", (1594, 1652), True, 'import numpy as np\n'), ((1677, 1697), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1685, 1697), True, 'import numpy as np\n'), ((1725, 1744), 'numpy.matrix', 'np.matrix', (['"""0.; 1."""'], {}), "('0.; 1.')\n", (1734, 1744), True, 'import numpy as np\n'), ((2080, 2100), 'numpy.matrix', 'np.matrix', (['"""-1., 0."""'], {}), "('-1., 0.')\n", (2089, 2100), True, 'import numpy as np\n'), ((2127, 2143), 'numpy.matrix', 'np.matrix', (['(-left)'], {}), '(-left)\n', (2136, 2143), True, 'import numpy as np\n'), ((2200, 2219), 'numpy.matrix', 'np.matrix', (['"""1., 0."""'], {}), "('1., 0.')\n", (2209, 2219), True, 'import numpy as np\n'), ((2246, 2262), 'numpy.matrix', 'np.matrix', (['right'], {}), '(right)\n', (2255, 2262), True, 'import numpy as np\n'), ((2317, 2336), 'numpy.matrix', 'np.matrix', (['"""0., 1."""'], {}), "('0., 1.')\n", (2326, 2336), True, 'import numpy as np\n'), ((2363, 2377), 'numpy.matrix', 'np.matrix', (['top'], {}), '(top)\n', (2372, 2377), True, 'import numpy as np\n'), ((2435, 2455), 'numpy.matrix', 'np.matrix', 
(['"""0., -1."""'], {}), "('0., -1.')\n", (2444, 2455), True, 'import numpy as np\n'), ((2482, 2500), 'numpy.matrix', 'np.matrix', (['(-bottom)'], {}), '(-bottom)\n', (2491, 2500), True, 'import numpy as np\n'), ((2527, 2543), 'numpy.vstack', 'np.vstack', (['Arows'], {}), '(Arows)\n', (2536, 2543), True, 'import numpy as np\n'), ((2547, 2563), 'numpy.vstack', 'np.vstack', (['brows'], {}), '(brows)\n', (2556, 2563), True, 'import numpy as np\n'), ((3675, 3718), 'numpy.array', 'np.array', (['[m.r[xz, ti].value for ti in m.t]'], {}), '([m.r[xz, ti].value for ti in m.t])\n', (3683, 3718), True, 'import numpy as np\n'), ((3803, 3846), 'numpy.array', 'np.array', (['[m.v[xz, ti].value for ti in m.t]'], {}), '([m.v[xz, ti].value for ti in m.t])\n', (3811, 3846), True, 'import numpy as np\n'), ((3933, 3976), 'numpy.array', 'np.array', (['[m.F[xz, ti].value for ti in m.t]'], {}), '([m.F[xz, ti].value for ti in m.t])\n', (3941, 3976), True, 'import numpy as np\n'), ((7222, 7235), 'numpy.arctan', 'np.arctan', (['mu'], {}), '(mu)\n', (7231, 7235), True, 'import numpy as np\n'), ((19565, 19581), 'numpy.atleast_2d', 'np.atleast_2d', (['A'], {}), '(A)\n', (19578, 19581), True, 'import numpy as np\n'), ((19602, 19618), 'numpy.atleast_1d', 'np.atleast_1d', (['b'], {}), '(b)\n', (19615, 19618), True, 'import numpy as np\n'), ((3087, 3098), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3095, 3098), True, 'import numpy as np\n'), ((7279, 7292), 'numpy.arctan', 'np.arctan', (['mu'], {}), '(mu)\n', (7288, 7292), True, 'import numpy as np\n'), ((15682, 15707), 'numpy.vstack', 'np.vstack', (['(A, Aeq, -Aeq)'], {}), '((A, Aeq, -Aeq))\n', (15691, 15707), True, 'import numpy as np\n'), ((15732, 15757), 'numpy.vstack', 'np.vstack', (['(b, beq, -beq)'], {}), '((b, beq, -beq))\n', (15741, 15757), True, 'import numpy as np\n'), ((15804, 15826), 'numpy.vstack', 'np.vstack', (['(Aeq, -Aeq)'], {}), '((Aeq, -Aeq))\n', (15813, 15826), True, 'import numpy as np\n'), ((15851, 15873), 'numpy.vstack', 'np.vstack', (['(beq, -beq)'], {}), '((beq, -beq))\n', (15860, 15873), True, 'import numpy as np\n'), ((4184, 4235), 'numpy.array', 'np.array', (['[m.hip[foot, xz, ti].value for ti in m.t]'], {}), '([m.hip[foot, xz, ti].value for ti in m.t])\n', (4192, 4235), True, 'import numpy as np\n'), ((4364, 4413), 'numpy.array', 'np.array', (['[m.p[foot, xz, ti].value for ti in m.t]'], {}), '([m.p[foot, xz, ti].value for ti in m.t])\n', (4372, 4413), True, 'import numpy as np\n'), ((4531, 4580), 'numpy.array', 'np.array', (['[m.f[foot, xz, ti].value for ti in m.t]'], {}), '([m.f[foot, xz, ti].value for ti in m.t])\n', (4539, 4580), True, 'import numpy as np\n'), ((8875, 8887), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (8884, 8887), False, 'import math\n'), ((8897, 8909), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (8906, 8909), False, 'import math\n')]
|
import numpy as np
from scipy.spatial import distance
from Quaternions import Quaternions
import Animation
import AnimationStructure
def constrain(positions, constraints):
"""
Constrain animation positions given
a number of VerletParticles constrains
Parameters
----------
positions : (F, J, 3) ndarray
array of joint positions for
F frames and J joints
constraints : [(int, int, float, float, float)]
A list of constraints in the format:
(Joint1, Joint2, Masses1, Masses2, Lengths)
Returns
-------
positions : (F, J, 3) ndarray
joint positions for F
frames and J joints constrained
using the supplied constraints
"""
from VerletParticles import VerletParticles
particles = VerletParticles(positions, gravity=0.0, timestep=0.0)
for i, j, w0, w1, l in constraints:
particles.add_length_constraint(i, j, w0, w1, l)
return particles.constrain()
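# Hedged usage sketch: the joint indices, masses and rest length below are made
# up for illustration; they follow the (Joint1, Joint2, Masses1, Masses2,
# Lengths) layout described in the docstring above.
def _constrain_example():
    positions = np.random.randn(120, 31, 3)       # F=120 frames, J=31 joints
    constraints = [(0, 1, 1.0, 1.0, 0.25)]         # keep joints 0 and 1 a fixed 0.25 apart
    return constrain(positions, constraints)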
def extremities(positions, count, **kwargs):
"""
List of most extreme frame indices
Parameters
----------
positions : (F, J, 3) ndarray
array of joint positions for
F frames and J joints
count : int
Number of indices to return,
does not include first and last
frame which are always included
static : bool
Find extremities where root
translation has been removed
Returns
-------
indices : (C) ndarray
Returns C frame indices of the
most extreme frames including
the first and last frames.
        Therefore if `count` is specified
        as `4`, an array of `6` indices
        will be returned.
"""
if kwargs.pop('static', False):
positions = positions - positions[:,0][:,np.newaxis,:]
positions = positions.reshape((len(positions), -1))
distance_matrix = distance.squareform(distance.pdist(positions))
keys = [0]
for _ in range(count-1):
keys.append(int(np.argmax(np.min(distance_matrix[keys], axis=0))))
return np.array(keys)
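# Hedged usage sketch: picking the most extreme poses of a random motion clip;
# the array shape and the `count`/`static` values are illustrative only.
def _extremities_example():
    positions = np.random.randn(240, 31, 3)       # F frames, J joints, xyz
    keys = extremities(positions, 4, static=True)  # frame indices of extreme poses
    return keys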
def load_to_maya(positions, names=None, parents=None, color=None, radius=0.1, thickness=5.0):
import pymel.core as pm
import maya.mel as mel
if names is None:
names = ['joint_%i' % i for i in xrange(positions.shape[1])]
if color is None:
color = (0.5, 0.5, 0.5)
mpoints = []
frames = range(1, len(positions)+1)
for i, name in enumerate(names):
#try:
# point = pm.PyNode(name)
#except pm.MayaNodeError:
# point = pm.sphere(p=(0,0,0), n=name, radius=radius)[0]
point = pm.sphere(p=(0,0,0), n=name, radius=radius)[0]
jpositions = positions[:,i]
for j,attr,attr_name in zip(xrange(3),
[point.tx, point.ty, point.tz],
["_translateX", "_translateY", "_translateZ"]):
conn = attr.listConnections()
if len(conn) == 0:
curve = pm.nodetypes.AnimCurveTU(n=name + attr_name)
pm.connectAttr(curve.output, attr)
else:
curve = conn[0]
curve.addKeys(frames, jpositions[:,j])
mpoints.append(point)
    if parents is not None:
for i, p in enumerate(parents):
if p == -1: continue
pointname = names[i]
parntname = names[p]
conn = pm.PyNode(pointname).t.listConnections()
if len(conn) != 0: continue
curve = pm.curve(p=[[0,0,0],[0,1,0]], d=1, n=names[i]+'_curve')
pm.connectAttr(pointname+'.t', names[i]+'_curve.cv[0]')
pm.connectAttr(parntname+'.t', names[i]+'_curve.cv[1]')
pm.select(curve)
pm.runtime.AttachBrushToCurves()
stroke = pm.selected()[0]
brush = pm.listConnections(stroke.getChildren()[0]+'.brush')[0]
pm.setAttr(brush+'.color1', color)
pm.setAttr(brush+'.globalScale', thickness)
pm.setAttr(brush+'.endCaps', 1)
pm.setAttr(brush+'.tubeSections', 20)
mel.eval('doPaintEffectsToPoly(1,0,0,1,100000);')
mpoints += [stroke, curve]
return pm.group(mpoints, n='AnimationPositions'), mpoints
def load_from_maya(root, start, end):
import pymel.core as pm
def rig_joints_list(s, js):
for c in s.getChildren():
if 'Geo' in c.name(): continue
if isinstance(c, pm.nodetypes.Joint): js = rig_joints_list(c, js); continue
if isinstance(c, pm.nodetypes.Transform): js = rig_joints_list(c, js); continue
return [s] + js
joints = rig_joints_list(root, [])
names = map(lambda j: j.name(), joints)
positions = np.empty((end - start, len(names), 3))
original_time = pm.currentTime(q=True)
pm.currentTime(start)
for i in range(start, end):
pm.currentTime(i)
for j in joints: positions[i-start, names.index(j.name())] = j.getTranslation(space='world')
pm.currentTime(original_time)
return positions, names
def loop(positions, forward='z'):
fid = 'xyz'.index(forward)
data = positions.copy()
trajectory = data[:,0:1,fid].copy()
data[:,:,fid] -= trajectory
diff = data[0] - data[-1]
data += np.linspace(
0, 1, len(data))[:,np.newaxis,np.newaxis] * diff[np.newaxis]
data[:,:,fid] += trajectory
return data
def extend(positions, length, forward='z'):
fid = 'xyz'.index(forward)
data = positions.copy()
while len(data) < length:
next = positions[1:].copy()
next[:,:,fid] += data[-1,0,fid]
data = np.concatenate([data, next], axis=0)
return data[:length]
def redirect(positions, joint0, joint1, forward='z'):
forwarddir = {
'x': np.array([[[1,0,0]]]),
'y': np.array([[[0,1,0]]]),
'z': np.array([[[0,0,1]]]),
}[forward]
direction = (positions[:,joint0] - positions[:,joint1]).mean(axis=0)[np.newaxis,np.newaxis]
direction = direction / np.sqrt(np.sum(direction**2))
rotation = Quaternions.between(direction, forwarddir).constrained_y()
return rotation * positions
|
[
"pymel.core.nodetypes.AnimCurveTU",
"numpy.array",
"pymel.core.runtime.AttachBrushToCurves",
"pymel.core.connectAttr",
"VerletParticles.VerletParticles",
"pymel.core.selected",
"numpy.concatenate",
"numpy.min",
"pymel.core.setAttr",
"scipy.spatial.distance.pdist",
"pymel.core.select",
"Quaternions.Quaternions.between",
"maya.mel.eval",
"pymel.core.PyNode",
"pymel.core.currentTime",
"pymel.core.sphere",
"numpy.sum",
"pymel.core.curve",
"pymel.core.group"
] |
[((861, 914), 'VerletParticles.VerletParticles', 'VerletParticles', (['positions'], {'gravity': '(0.0)', 'timestep': '(0.0)'}), '(positions, gravity=0.0, timestep=0.0)\n', (876, 914), False, 'from VerletParticles import VerletParticles\n'), ((2233, 2247), 'numpy.array', 'np.array', (['keys'], {}), '(keys)\n', (2241, 2247), True, 'import numpy as np\n'), ((5174, 5196), 'pymel.core.currentTime', 'pm.currentTime', ([], {'q': '(True)'}), '(q=True)\n', (5188, 5196), True, 'import pymel.core as pm\n'), ((5202, 5223), 'pymel.core.currentTime', 'pm.currentTime', (['start'], {}), '(start)\n', (5216, 5223), True, 'import pymel.core as pm\n'), ((5413, 5442), 'pymel.core.currentTime', 'pm.currentTime', (['original_time'], {}), '(original_time)\n', (5427, 5442), True, 'import pymel.core as pm\n'), ((2066, 2091), 'scipy.spatial.distance.pdist', 'distance.pdist', (['positions'], {}), '(positions)\n', (2080, 2091), False, 'from scipy.spatial import distance\n'), ((4520, 4561), 'pymel.core.group', 'pm.group', (['mpoints'], {'n': '"""AnimationPositions"""'}), "(mpoints, n='AnimationPositions')\n", (4528, 4561), True, 'import pymel.core as pm\n'), ((5282, 5299), 'pymel.core.currentTime', 'pm.currentTime', (['i'], {}), '(i)\n', (5296, 5299), True, 'import pymel.core as pm\n'), ((6125, 6161), 'numpy.concatenate', 'np.concatenate', (['[data, next]'], {'axis': '(0)'}), '([data, next], axis=0)\n', (6139, 6161), True, 'import numpy as np\n'), ((2858, 2903), 'pymel.core.sphere', 'pm.sphere', ([], {'p': '(0, 0, 0)', 'n': 'name', 'radius': 'radius'}), '(p=(0, 0, 0), n=name, radius=radius)\n', (2867, 2903), True, 'import pymel.core as pm\n'), ((3804, 3866), 'pymel.core.curve', 'pm.curve', ([], {'p': '[[0, 0, 0], [0, 1, 0]]', 'd': '(1)', 'n': "(names[i] + '_curve')"}), "(p=[[0, 0, 0], [0, 1, 0]], d=1, n=names[i] + '_curve')\n", (3812, 3866), True, 'import pymel.core as pm\n'), ((3873, 3932), 'pymel.core.connectAttr', 'pm.connectAttr', (["(pointname + '.t')", "(names[i] + '_curve.cv[0]')"], {}), "(pointname + '.t', names[i] + '_curve.cv[0]')\n", (3887, 3932), True, 'import pymel.core as pm\n'), ((3942, 4001), 'pymel.core.connectAttr', 'pm.connectAttr', (["(parntname + '.t')", "(names[i] + '_curve.cv[1]')"], {}), "(parntname + '.t', names[i] + '_curve.cv[1]')\n", (3956, 4001), True, 'import pymel.core as pm\n'), ((4011, 4027), 'pymel.core.select', 'pm.select', (['curve'], {}), '(curve)\n', (4020, 4027), True, 'import pymel.core as pm\n'), ((4041, 4073), 'pymel.core.runtime.AttachBrushToCurves', 'pm.runtime.AttachBrushToCurves', ([], {}), '()\n', (4071, 4073), True, 'import pymel.core as pm\n'), ((4203, 4239), 'pymel.core.setAttr', 'pm.setAttr', (["(brush + '.color1')", 'color'], {}), "(brush + '.color1', color)\n", (4213, 4239), True, 'import pymel.core as pm\n'), ((4251, 4296), 'pymel.core.setAttr', 'pm.setAttr', (["(brush + '.globalScale')", 'thickness'], {}), "(brush + '.globalScale', thickness)\n", (4261, 4296), True, 'import pymel.core as pm\n'), ((4308, 4341), 'pymel.core.setAttr', 'pm.setAttr', (["(brush + '.endCaps')", '(1)'], {}), "(brush + '.endCaps', 1)\n", (4318, 4341), True, 'import pymel.core as pm\n'), ((4353, 4392), 'pymel.core.setAttr', 'pm.setAttr', (["(brush + '.tubeSections')", '(20)'], {}), "(brush + '.tubeSections', 20)\n", (4363, 4392), True, 'import pymel.core as pm\n'), ((4404, 4453), 'maya.mel.eval', 'mel.eval', (['"""doPaintEffectsToPoly(1,0,0,1,100000);"""'], {}), "('doPaintEffectsToPoly(1,0,0,1,100000);')\n", (4412, 4453), True, 'import maya.mel as mel\n'), ((6301, 6324), 'numpy.array', 
'np.array', (['[[[1, 0, 0]]]'], {}), '([[[1, 0, 0]]])\n', (6309, 6324), True, 'import numpy as np\n'), ((6338, 6361), 'numpy.array', 'np.array', (['[[[0, 1, 0]]]'], {}), '([[[0, 1, 0]]])\n', (6346, 6361), True, 'import numpy as np\n'), ((6375, 6398), 'numpy.array', 'np.array', (['[[[0, 0, 1]]]'], {}), '([[[0, 0, 1]]])\n', (6383, 6398), True, 'import numpy as np\n'), ((6554, 6576), 'numpy.sum', 'np.sum', (['(direction ** 2)'], {}), '(direction ** 2)\n', (6560, 6576), True, 'import numpy as np\n'), ((6598, 6640), 'Quaternions.Quaternions.between', 'Quaternions.between', (['direction', 'forwarddir'], {}), '(direction, forwarddir)\n', (6617, 6640), False, 'from Quaternions import Quaternions\n'), ((3252, 3296), 'pymel.core.nodetypes.AnimCurveTU', 'pm.nodetypes.AnimCurveTU', ([], {'n': '(name + attr_name)'}), '(n=name + attr_name)\n', (3276, 3296), True, 'import pymel.core as pm\n'), ((3314, 3348), 'pymel.core.connectAttr', 'pm.connectAttr', (['curve.output', 'attr'], {}), '(curve.output, attr)\n', (3328, 3348), True, 'import pymel.core as pm\n'), ((4096, 4109), 'pymel.core.selected', 'pm.selected', ([], {}), '()\n', (4107, 4109), True, 'import pymel.core as pm\n'), ((2180, 2217), 'numpy.min', 'np.min', (['distance_matrix[keys]'], {'axis': '(0)'}), '(distance_matrix[keys], axis=0)\n', (2186, 2217), True, 'import numpy as np\n'), ((3687, 3707), 'pymel.core.PyNode', 'pm.PyNode', (['pointname'], {}), '(pointname)\n', (3696, 3707), True, 'import pymel.core as pm\n')]
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for activation function layers."""
from absl.testing import absltest
import numpy as np
import trax.layers as tl
class ActivationFnsTest(absltest.TestCase):
def test_relu(self):
layer = tl.Relu()
x = np.array([-2.0, -1.0, 0.0, 2.0, 3.0, 5.0])
y = layer(x)
self.assertEqual(tl.to_list(y), [0.0, 0.0, 0.0, 2.0, 3.0, 5.0])
def test_parametric_relu(self):
layer = tl.ParametricRelu(a=.25)
x = np.array([-2.0, -1.0, 0.0, 2.0, 3.0, 5.0])
y = layer(x)
self.assertEqual(tl.to_list(y), [0.0, 0.0, 0.0, .5, .75, 1.25])
def test_leaky_relu(self):
layer = tl.LeakyRelu(a=.125)
x = np.array([-2.0, -1.0, 0.0, 2.0, 3.0, 5.0])
y = layer(x)
self.assertEqual(tl.to_list(y), [-.25, -.125, 0.0, 2.0, 3.0, 5.0])
def test_hard_sigmoid(self):
layer = tl.HardSigmoid()
x = np.array([-1.5, -.5, -.25, 0.0, .25, .5, 1.5])
y = layer(x)
self.assertEqual(tl.to_list(y), [0.0, 0.5, 0.75, 1.0, 1.0, 1.0, 1.0])
def test_hard_tanh(self):
layer = tl.HardTanh()
x = np.array([-1.5, -.5, -.25, 0.0, .25, .5, 1.5])
y = layer(x)
self.assertEqual(tl.to_list(y), [-1.0, -.5, -.25, 0.0, .25, .5, 1.0])
if __name__ == '__main__':
absltest.main()
|
[
"trax.layers.to_list",
"trax.layers.ParametricRelu",
"absl.testing.absltest.main",
"numpy.array",
"trax.layers.Relu",
"trax.layers.LeakyRelu",
"trax.layers.HardTanh",
"trax.layers.HardSigmoid"
] |
[((1817, 1832), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (1830, 1832), False, 'from absl.testing import absltest\n'), ((822, 831), 'trax.layers.Relu', 'tl.Relu', ([], {}), '()\n', (829, 831), True, 'import trax.layers as tl\n'), ((840, 882), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 2.0, 3.0, 5.0]'], {}), '([-2.0, -1.0, 0.0, 2.0, 3.0, 5.0])\n', (848, 882), True, 'import numpy as np\n'), ((1015, 1040), 'trax.layers.ParametricRelu', 'tl.ParametricRelu', ([], {'a': '(0.25)'}), '(a=0.25)\n', (1032, 1040), True, 'import trax.layers as tl\n'), ((1048, 1090), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 2.0, 3.0, 5.0]'], {}), '([-2.0, -1.0, 0.0, 2.0, 3.0, 5.0])\n', (1056, 1090), True, 'import numpy as np\n'), ((1218, 1239), 'trax.layers.LeakyRelu', 'tl.LeakyRelu', ([], {'a': '(0.125)'}), '(a=0.125)\n', (1230, 1239), True, 'import trax.layers as tl\n'), ((1247, 1289), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 2.0, 3.0, 5.0]'], {}), '([-2.0, -1.0, 0.0, 2.0, 3.0, 5.0])\n', (1255, 1289), True, 'import numpy as np\n'), ((1422, 1438), 'trax.layers.HardSigmoid', 'tl.HardSigmoid', ([], {}), '()\n', (1436, 1438), True, 'import trax.layers as tl\n'), ((1447, 1497), 'numpy.array', 'np.array', (['[-1.5, -0.5, -0.25, 0.0, 0.25, 0.5, 1.5]'], {}), '([-1.5, -0.5, -0.25, 0.0, 0.25, 0.5, 1.5])\n', (1455, 1497), True, 'import numpy as np\n'), ((1626, 1639), 'trax.layers.HardTanh', 'tl.HardTanh', ([], {}), '()\n', (1637, 1639), True, 'import trax.layers as tl\n'), ((1648, 1698), 'numpy.array', 'np.array', (['[-1.5, -0.5, -0.25, 0.0, 0.25, 0.5, 1.5]'], {}), '([-1.5, -0.5, -0.25, 0.0, 0.25, 0.5, 1.5])\n', (1656, 1698), True, 'import numpy as np\n'), ((921, 934), 'trax.layers.to_list', 'tl.to_list', (['y'], {}), '(y)\n', (931, 934), True, 'import trax.layers as tl\n'), ((1129, 1142), 'trax.layers.to_list', 'tl.to_list', (['y'], {}), '(y)\n', (1139, 1142), True, 'import trax.layers as tl\n'), ((1328, 1341), 'trax.layers.to_list', 'tl.to_list', (['y'], {}), '(y)\n', (1338, 1341), True, 'import trax.layers as tl\n'), ((1532, 1545), 'trax.layers.to_list', 'tl.to_list', (['y'], {}), '(y)\n', (1542, 1545), True, 'import trax.layers as tl\n'), ((1733, 1746), 'trax.layers.to_list', 'tl.to_list', (['y'], {}), '(y)\n', (1743, 1746), True, 'import trax.layers as tl\n')]
|
'''
This program is free software: you can use, modify and/or redistribute it
under the terms of the simplified BSD License. You should have received a
copy of this license along this program.
Copyright 2020, <NAME> <<EMAIL>>
All rights reserved.
'''
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
from pathlib import Path
from PIL import Image
from scipy.ndimage.filters import convolve as filter2
from utilities.classicalUtils import classicalUtilitiesHS as cuhs
from utilities.classicalUtils import classicalUtilitiesPY as cupy
from utilities.cameraUtils import Camera
#main testing function for debugging the issues in different modules
#testing classicalUtils
img1 = cv2.imread('./data/foreman/frame5.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('./data/foreman/frame7.png', cv2.IMREAD_GRAYSCALE)
img1 = img1.astype(np.float32)
img2 = img2.astype(np.float32)
img1, img2 = cupy.normalize(img1,img2)
img1 = cupy.gaussianSmoothing(img1)
img2 = cupy.gaussianSmoothing(img2)
cupy.scaling(img1)
'''Nothing To Do'''
#testing utils
taxis_frames = list(Path('./data/taxi').iterdir())
#for i in taxis_frames:
# print(i)
taxi1 = Image.open('./data/foreman/frame1.png')
taxi2 = Image.open('./data/foreman/frame3.png')
taxi33 = Image.open('./data/foreman/frame33.png')
taxi35 = Image.open('./data/foreman/frame35.png')
flow = cv2.calcOpticalFlowFarneback(np.array(taxi1), np.array(taxi2) , None, 0.5, 3, 15, 3, 5, 1.2, 0)
flow_ = cv2.calcOpticalFlowFarneback(np.array(taxi33), np.array(taxi35) , None, 0.5, 3, 15, 3, 5, 1.2, 0)
step = 5
plt.figure()
plt.quiver(np.arange(0, flow.shape[1], step), np.arange(flow.shape[0], -1, -step), flow[::step, ::step, 0], flow[::step, ::step, 1])
plt.savefig('taxis1-2')
# use a fresh figure so the second flow field is not drawn on top of the first
plt.figure()
plt.quiver(np.arange(0, flow_.shape[1], step), np.arange(flow_.shape[0], -1, -step), flow_[::step, ::step, 0], flow_[::step, ::step, 1])
plt.savefig('taxis33-35')
#testing test
Camera.cameraMod()
'''Nothing To Do'''
#itesting models
'''Nothing To Do'''
|
[
"PIL.Image.open",
"matplotlib.pyplot.savefig",
"pathlib.Path",
"utilities.classicalUtils.classicalUtilitiesPY.normalize",
"utilities.classicalUtils.classicalUtilitiesPY.gaussianSmoothing",
"numpy.array",
"utilities.cameraUtils.Camera.cameraMod",
"utilities.classicalUtils.classicalUtilitiesPY.scaling",
"numpy.arange",
"cv2.imread"
] |
[((702, 763), 'cv2.imread', 'cv2.imread', (['"""./data/foreman/frame5.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('./data/foreman/frame5.png', cv2.IMREAD_GRAYSCALE)\n", (712, 763), False, 'import cv2\n'), ((771, 832), 'cv2.imread', 'cv2.imread', (['"""./data/foreman/frame7.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('./data/foreman/frame7.png', cv2.IMREAD_GRAYSCALE)\n", (781, 832), False, 'import cv2\n'), ((910, 936), 'utilities.classicalUtils.classicalUtilitiesPY.normalize', 'cupy.normalize', (['img1', 'img2'], {}), '(img1, img2)\n', (924, 936), True, 'from utilities.classicalUtils import classicalUtilitiesPY as cupy\n'), ((944, 972), 'utilities.classicalUtils.classicalUtilitiesPY.gaussianSmoothing', 'cupy.gaussianSmoothing', (['img1'], {}), '(img1)\n', (966, 972), True, 'from utilities.classicalUtils import classicalUtilitiesPY as cupy\n'), ((980, 1008), 'utilities.classicalUtils.classicalUtilitiesPY.gaussianSmoothing', 'cupy.gaussianSmoothing', (['img2'], {}), '(img2)\n', (1002, 1008), True, 'from utilities.classicalUtils import classicalUtilitiesPY as cupy\n'), ((1010, 1028), 'utilities.classicalUtils.classicalUtilitiesPY.scaling', 'cupy.scaling', (['img1'], {}), '(img1)\n', (1022, 1028), True, 'from utilities.classicalUtils import classicalUtilitiesPY as cupy\n'), ((1159, 1198), 'PIL.Image.open', 'Image.open', (['"""./data/foreman/frame1.png"""'], {}), "('./data/foreman/frame1.png')\n", (1169, 1198), False, 'from PIL import Image\n'), ((1207, 1246), 'PIL.Image.open', 'Image.open', (['"""./data/foreman/frame3.png"""'], {}), "('./data/foreman/frame3.png')\n", (1217, 1246), False, 'from PIL import Image\n'), ((1257, 1297), 'PIL.Image.open', 'Image.open', (['"""./data/foreman/frame33.png"""'], {}), "('./data/foreman/frame33.png')\n", (1267, 1297), False, 'from PIL import Image\n'), ((1307, 1347), 'PIL.Image.open', 'Image.open', (['"""./data/foreman/frame35.png"""'], {}), "('./data/foreman/frame35.png')\n", (1317, 1347), False, 'from PIL import Image\n'), ((1839, 1862), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""taxis1-2"""'], {}), "('taxis1-2')\n", (1850, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1888), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""taxis33-35"""'], {}), "('taxis33-35')\n", (1874, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1922), 'utilities.cameraUtils.Camera.cameraMod', 'Camera.cameraMod', ([], {}), '()\n', (1920, 1922), False, 'from utilities.cameraUtils import Camera\n'), ((1385, 1400), 'numpy.array', 'np.array', (['taxi1'], {}), '(taxi1)\n', (1393, 1400), True, 'import numpy as np\n'), ((1402, 1417), 'numpy.array', 'np.array', (['taxi2'], {}), '(taxi2)\n', (1410, 1417), True, 'import numpy as np\n'), ((1489, 1505), 'numpy.array', 'np.array', (['taxi33'], {}), '(taxi33)\n', (1497, 1505), True, 'import numpy as np\n'), ((1507, 1523), 'numpy.array', 'np.array', (['taxi35'], {}), '(taxi35)\n', (1515, 1523), True, 'import numpy as np\n'), ((1579, 1612), 'numpy.arange', 'np.arange', (['(0)', 'flow.shape[1]', 'step'], {}), '(0, flow.shape[1], step)\n', (1588, 1612), True, 'import numpy as np\n'), ((1614, 1649), 'numpy.arange', 'np.arange', (['flow.shape[0]', '(-1)', '(-step)'], {}), '(flow.shape[0], -1, -step)\n', (1623, 1649), True, 'import numpy as np\n'), ((1712, 1746), 'numpy.arange', 'np.arange', (['(0)', 'flow_.shape[1]', 'step'], {}), '(0, flow_.shape[1], step)\n', (1721, 1746), True, 'import numpy as np\n'), ((1748, 1784), 'numpy.arange', 'np.arange', (['flow_.shape[0]', '(-1)', '(-step)'], {}), '(flow_.shape[0], -1, -step)\n', 
(1757, 1784), True, 'import numpy as np\n'), ((1085, 1104), 'pathlib.Path', 'Path', (['"""./data/taxi"""'], {}), "('./data/taxi')\n", (1089, 1104), False, 'from pathlib import Path\n')]
|
# This is a sample Python script.
import time
import functools
import sys
import numpy as np
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
sys.setrecursionlimit(10 ** 9)
def find_squares_opt(array):
arr = np.array(array)
return arr**2+1
def find_squares(array):
a_list = []
for i in array:
a_list.append(i ** 2 + 1)
return a_list
@functools.lru_cache
def fibonacci_fast(n):
if n <= 1:
return 1
else:
return fibonacci_fast(n - 1) + fibonacci_fast(n - 2)
def fibonacci(n):
if n <= 1:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
n = 1000
# print("Fibonacci slow")
# start = time.time()
# print(fibonacci(n))
# print(time.time() -start)
print("Squares not-optimized")
start = time.time()
find_squares(range(n))
# print(find_squares(range(n)))
t1 = time.time() - start
print(t1)
print("Squares optimized")
start = time.time()
find_squares_opt(range(n))
# print(find_squares_opt(range(n)))
t2 = time.time() - start
print(t2)
print(t1/t2)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
[
"sys.setrecursionlimit",
"numpy.array",
"time.time"
] |
[((255, 285), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 9)'], {}), '(10 ** 9)\n', (276, 285), False, 'import sys\n'), ((326, 341), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (334, 341), True, 'import numpy as np\n'), ((1003, 1014), 'time.time', 'time.time', ([], {}), '()\n', (1012, 1014), False, 'import time\n'), ((1164, 1175), 'time.time', 'time.time', ([], {}), '()\n', (1173, 1175), False, 'import time\n'), ((1087, 1098), 'time.time', 'time.time', ([], {}), '()\n', (1096, 1098), False, 'import time\n'), ((1256, 1267), 'time.time', 'time.time', ([], {}), '()\n', (1265, 1267), False, 'import time\n')]
|
import numpy as np
import torch
from torch.utils.data import Dataset
from ad3 import factor_graph as fg
try:
import cPickle as pickle
except:
import pickle
from tqdm import tqdm
import time
from .random_pgm_data import RandomPGMData, worker_init_fn
DATASET_SIZE = 100000  # default number of samples; avoid shadowing the built-in len
class RandomPGMHop(Dataset):
    def __init__(self, chain_length, hop_order=9, ret_efeature_pw=True, size=DATASET_SIZE):
self.chain_length = chain_length
        self.hop_order = hop_order if hop_order & 1 else hop_order + 1  # force an odd hop order so the window has a centre
self.half_hop = self.hop_order >> 1
self.ret_efeature_pw = ret_efeature_pw
self.size = size
def __len__(self):
return self.size
def _generate_graph(self):
g = fg.PFactorGraph()
var_list = []
for i in range(self.chain_length):
v = g.create_multi_variable(2)
v.set_log_potentials(self.lops[i])
var_list.append(v)
for i in range(self.chain_length - 1):
g.create_factor_dense([var_list[i], var_list[i + 1]], self.pws[i][0])
for i in range(self.chain_length - self.hop_order + 1):
v_list = [
var_list[j].get_state(1) for j in range(i, i + self.hop_order)
]
g.create_factor_budget(v_list, self.cap[i + self.half_hop])
return g
def _get_node_feature(self):
self.lops = np.random.uniform(0.0, 1.0, (self.chain_length, 2))
return np.transpose(self.lops.astype(np.float32), [1, 0])
def _get_edge_feature_pw(self):
self.pws = np.zeros(shape=[self.chain_length, 2, 4], dtype=np.float32)
for i in range(self.chain_length - 1):
# pws_to_right = np.random.randn(2, 2)
pws_to_right = np.zeros([2, 2])
pws_to_right[1, 1] = np.random.uniform(0, 2)
pws_to_left = np.transpose(pws_to_right)
self.pws[i] = [list(pws_to_right.reshape(-1)), list(pws_to_left.reshape(-1))]
efeature = np.zeros(shape=[self.chain_length, 3, 4], dtype=np.float32)
for i in range(self.chain_length):
e_self = np.zeros(4)
e_left = self.pws[i-1][1] if i > 0 else e_self.copy()
e_right = self.pws[i][0] if i < self.chain_length-1 else e_self.copy()
efeature[i, 0] = e_left
efeature[i, 1] = e_self
efeature[i, 2] = e_right
return np.transpose(efeature, [2, 0, 1])
def _generate_edge_feature_hop(self):
self.cap = list(np.random.randint(low=1, high=self.hop_order, size=self.chain_length))
half_hop = self.hop_order >> 1
max_cap = np.zeros(self.hop_order)
max_cap[self.hop_order-1] = 1
efeature = np.zeros(shape=[self.chain_length, self.hop_order], dtype=np.float32)
for i in range(half_hop, self.chain_length - half_hop):
efeature[i, self.cap[i]] = 1
for i in range(half_hop):
efeature[i, self.hop_order-1] = 1
for i in range(self.chain_length - half_hop, self.chain_length):
efeature[i, self.hop_order-1] = 1
return np.expand_dims(np.transpose(efeature, [1, 0]), -1)
''' passing info from node to factor '''
efeature = np.zeros(shape=[self.chain_length, self.hop_order, self.hop_order], dtype=np.float32)
for i in range(self.chain_length):
cur_cap = np.zeros(self.hop_order)
cur_cap[self.cap[i]] = 1
for j in range(i - half_hop, i + half_hop + 1):
efeature[i, j-i+half_hop] = max_cap.copy() \
if j < 0 or j >= self.chain_length else cur_cap.copy()
return np.transpose(efeature, [2, 0, 1])
''' passing info from factor to node '''
efeature2 = np.zeros(shape=[self.chain_length, self.hop_order, self.hop_order], dtype=np.float32)
for i in range(self.chain_length):
for j in range(i-half_hop, i+half_hop+1):
if j < 0 or j >= self.chain_length:
efeature2[i, j-i+half_hop] = max_cap.copy()
else:
cur_cap = np.zeros(self.hop_order)
cur_cap[self.cap[j]] = 1
efeature2[i, j-i+half_hop] = cur_cap
return np.transpose(efeature, [2, 0, 1]), np.transpose(efeature2, [2, 0, 1])
def __getitem__(self, index):
node_feature = self._get_node_feature()
efeature_pw = self._get_edge_feature_pw()
efeature_hop = self._generate_edge_feature_hop()
''' exact solution '''
g = self._generate_graph()
val, post, _, stat = g.solve(tol=1e-6, branch_and_bound=True)
post = np.reshape(np.asarray(post), [self.chain_length, 2])
assign = np.argmax(post, axis=1)
''' approx solution '''
g = self._generate_graph()
val1, post1, _, status = g.solve(branch_and_bound=False)
post1 = np.reshape(np.asarray(post1), [self.chain_length, 2])
assign1 = np.argmax(post1, axis=1)
if self.ret_efeature_pw:
return node_feature, efeature_pw, efeature_hop, assign, assign1
else:
pws = np.expand_dims(np.transpose(self.pws[:, 0, :], [1, 0]), -1)
return node_feature, pws, efeature_hop, assign, assign1
if __name__ == '__main__':
rpgm = RandomPGMHop(chain_length=6, hop_order=5, ret_efeature_pw=False)
node_feature, efeature_pw, efeature_hop, assign, assign1 = rpgm[0]
print('node_feature', node_feature.shape, np.transpose(node_feature, [1, 0]))
print('efeature_pw', efeature_pw.shape, np.transpose(efeature_pw, [1, 2, 0]))
print('efeature_hop', efeature_hop.shape, np.transpose(efeature_hop, [1, 2, 0]))
print('assign', assign)
print('assign1', assign1)
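    # Hedged usage sketch: batching the dataset with a torch DataLoader; the
    # batch size is an arbitrary choice and worker_init_fn is the helper already
    # imported from random_pgm_data at the top of this file.
    from torch.utils.data import DataLoader
    loader = DataLoader(rpgm, batch_size=4, worker_init_fn=worker_init_fn)
    batch = next(iter(loader))
    print('batched node_feature', batch[0].shape)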
|
[
"numpy.asarray",
"numpy.argmax",
"ad3.factor_graph.PFactorGraph",
"numpy.zeros",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.transpose"
] |
[((726, 743), 'ad3.factor_graph.PFactorGraph', 'fg.PFactorGraph', ([], {}), '()\n', (741, 743), True, 'from ad3 import factor_graph as fg\n'), ((1385, 1436), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(self.chain_length, 2)'], {}), '(0.0, 1.0, (self.chain_length, 2))\n', (1402, 1436), True, 'import numpy as np\n'), ((1559, 1618), 'numpy.zeros', 'np.zeros', ([], {'shape': '[self.chain_length, 2, 4]', 'dtype': 'np.float32'}), '(shape=[self.chain_length, 2, 4], dtype=np.float32)\n', (1567, 1618), True, 'import numpy as np\n'), ((1981, 2040), 'numpy.zeros', 'np.zeros', ([], {'shape': '[self.chain_length, 3, 4]', 'dtype': 'np.float32'}), '(shape=[self.chain_length, 3, 4], dtype=np.float32)\n', (1989, 2040), True, 'import numpy as np\n'), ((2390, 2423), 'numpy.transpose', 'np.transpose', (['efeature', '[2, 0, 1]'], {}), '(efeature, [2, 0, 1])\n', (2402, 2423), True, 'import numpy as np\n'), ((2620, 2644), 'numpy.zeros', 'np.zeros', (['self.hop_order'], {}), '(self.hop_order)\n', (2628, 2644), True, 'import numpy as np\n'), ((2703, 2772), 'numpy.zeros', 'np.zeros', ([], {'shape': '[self.chain_length, self.hop_order]', 'dtype': 'np.float32'}), '(shape=[self.chain_length, self.hop_order], dtype=np.float32)\n', (2711, 2772), True, 'import numpy as np\n'), ((3213, 3303), 'numpy.zeros', 'np.zeros', ([], {'shape': '[self.chain_length, self.hop_order, self.hop_order]', 'dtype': 'np.float32'}), '(shape=[self.chain_length, self.hop_order, self.hop_order], dtype=\n np.float32)\n', (3221, 3303), True, 'import numpy as np\n'), ((3638, 3671), 'numpy.transpose', 'np.transpose', (['efeature', '[2, 0, 1]'], {}), '(efeature, [2, 0, 1])\n', (3650, 3671), True, 'import numpy as np\n'), ((3742, 3832), 'numpy.zeros', 'np.zeros', ([], {'shape': '[self.chain_length, self.hop_order, self.hop_order]', 'dtype': 'np.float32'}), '(shape=[self.chain_length, self.hop_order, self.hop_order], dtype=\n np.float32)\n', (3750, 3832), True, 'import numpy as np\n'), ((4720, 4743), 'numpy.argmax', 'np.argmax', (['post'], {'axis': '(1)'}), '(post, axis=1)\n', (4729, 4743), True, 'import numpy as np\n'), ((4966, 4990), 'numpy.argmax', 'np.argmax', (['post1'], {'axis': '(1)'}), '(post1, axis=1)\n', (4975, 4990), True, 'import numpy as np\n'), ((5485, 5519), 'numpy.transpose', 'np.transpose', (['node_feature', '[1, 0]'], {}), '(node_feature, [1, 0])\n', (5497, 5519), True, 'import numpy as np\n'), ((5565, 5601), 'numpy.transpose', 'np.transpose', (['efeature_pw', '[1, 2, 0]'], {}), '(efeature_pw, [1, 2, 0])\n', (5577, 5601), True, 'import numpy as np\n'), ((5649, 5686), 'numpy.transpose', 'np.transpose', (['efeature_hop', '[1, 2, 0]'], {}), '(efeature_hop, [1, 2, 0])\n', (5661, 5686), True, 'import numpy as np\n'), ((1744, 1760), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (1752, 1760), True, 'import numpy as np\n'), ((1794, 1817), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (1811, 1817), True, 'import numpy as np\n'), ((1844, 1870), 'numpy.transpose', 'np.transpose', (['pws_to_right'], {}), '(pws_to_right)\n', (1856, 1870), True, 'import numpy as np\n'), ((2105, 2116), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2113, 2116), True, 'import numpy as np\n'), ((2491, 2560), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': 'self.hop_order', 'size': 'self.chain_length'}), '(low=1, high=self.hop_order, size=self.chain_length)\n', (2508, 2560), True, 'import numpy as np\n'), ((3108, 3138), 'numpy.transpose', 'np.transpose', (['efeature', 
'[1, 0]'], {}), '(efeature, [1, 0])\n', (3120, 3138), True, 'import numpy as np\n'), ((3364, 3388), 'numpy.zeros', 'np.zeros', (['self.hop_order'], {}), '(self.hop_order)\n', (3372, 3388), True, 'import numpy as np\n'), ((4236, 4269), 'numpy.transpose', 'np.transpose', (['efeature', '[2, 0, 1]'], {}), '(efeature, [2, 0, 1])\n', (4248, 4269), True, 'import numpy as np\n'), ((4271, 4305), 'numpy.transpose', 'np.transpose', (['efeature2', '[2, 0, 1]'], {}), '(efeature2, [2, 0, 1])\n', (4283, 4305), True, 'import numpy as np\n'), ((4661, 4677), 'numpy.asarray', 'np.asarray', (['post'], {}), '(post)\n', (4671, 4677), True, 'import numpy as np\n'), ((4905, 4922), 'numpy.asarray', 'np.asarray', (['post1'], {}), '(post1)\n', (4915, 4922), True, 'import numpy as np\n'), ((5148, 5187), 'numpy.transpose', 'np.transpose', (['self.pws[:, 0, :]', '[1, 0]'], {}), '(self.pws[:, 0, :], [1, 0])\n', (5160, 5187), True, 'import numpy as np\n'), ((4093, 4117), 'numpy.zeros', 'np.zeros', (['self.hop_order'], {}), '(self.hop_order)\n', (4101, 4117), True, 'import numpy as np\n')]
|
import os
import csv
import shutil
import hashlib
import tempfile
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem, MACCSkeys
from rdkit.Chem import MolFromSmiles
from padelpy import padeldescriptor # required to calculate KlekotaRothFingerPrint
from metstab_shap.config import csv_section, utils_section
DATA = 'DATA'
test = 'test'
def load_data(data_config, fingerprint, morgan_nbits=None):
datasets = []
indices = []
this_start = 0
for path in sorted(data_config[DATA].values()):
x, y, smiles = preprocess_dataset(path=path, data_config=data_config,
fingerprint=fingerprint, morgan_nbits=morgan_nbits)
datasets.append((x, y, smiles))
indices.append((this_start, this_start+len(y)))
this_start += len(y)
x = np.vstack([el[0] for el in datasets])
y = np.hstack([el[1] for el in datasets])
smiles = np.hstack([el[2] for el in datasets])
cv_split = get_cv_split(indices)
# test set
test_x, test_y, test_smiles = preprocess_dataset(path=data_config[utils_section][test],
data_config=data_config,
fingerprint=fingerprint,
morgan_nbits=morgan_nbits)
return x, y, cv_split, test_x, test_y, smiles, test_smiles
def load_data_from_df(dataset_paths, smiles_index, y_index, skip_line=False, delimiter=',', scale=None, average=None):
"""
Load multiple files from csvs, concatenate and return smiles and ys
:param dataset_paths: list: paths to csv files with data
:param smiles_index: int: index of the column with smiles
:param y_index: int: index of the column with the label
:param skip_line: boolean: True if the first line of the file contains column names, False otherwise
    :param delimiter: delimiter used in the csv
:param scale: should y be scaled? (useful with skewed distributions of y)
:param average: if the same SMILES appears multiple times how should its values be averaged?
:return: (smiles, labels) - np.arrays
"""
# column names present in files?
header = 0 if skip_line else None
# load all files
dfs = []
for data_path in dataset_paths:
dfs.append(pd.read_csv(data_path, delimiter=delimiter, header=header))
# merge
data_df = pd.concat(dfs)
# scaling ys
if scale is not None:
if 'sqrt' == scale.lower().strip():
data_df.iloc[:, y_index] = np.sqrt(data_df.iloc[:, y_index])
elif 'log' == scale.lower().strip():
data_df.iloc[:, y_index] = np.log(1 + data_df.iloc[:, y_index])
else:
raise NotImplementedError(f"Scale {scale} is not implemented.")
# averaging when one smiles has multiple values
if average is not None:
smiles_col = data_df.iloc[:, smiles_index].name
y_col = data_df.iloc[:, y_index].name
data_df = data_df.loc[:, [smiles_col, y_col]] # since now: smiles is 0, y_col is 1, dropping other columns
smiles_index = 0
y_index = 1
if 'median' == average.lower().strip():
data_df[y_col] = data_df[y_col].groupby(data_df[smiles_col]).transform('median')
else:
raise NotImplementedError(f"Averaging {average} is not implemented.")
# breaking into x and y
data_df = data_df.values
data_x = data_df[:, smiles_index]
data_y = data_df[:, y_index]
if data_y.dtype == np.float64:
data_y = data_y.astype(np.float32)
return data_x, data_y
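# Hedged usage sketch: the file name, column indices and scaling/averaging
# choices below are assumptions for illustration; the real values come from the
# csv section of the data config.
def _load_from_df_example():
    return load_data_from_df(['data/metstab_train.csv'], smiles_index=0, y_index=1,
                             skip_line=True, delimiter=',', scale='log', average='median')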
def preprocess_dataset(path, data_config, fingerprint, morgan_nbits=None):
"""Calculate representation for each smiles in the dataset."""
if fingerprint == 'morgan':
assert morgan_nbits is not None, 'Parameter `morgan_nbits` must be set when using Morgan fingerprint.'
smiles, labels = load_data_from_df([path,], **data_config[csv_section])
x = []
y = []
calculated_smiles = []
# we go smiles by smiles because some compounds make rdkit throw errors
for this_smiles, this_label in zip(smiles, labels):
try:
mol = Chem.MolFromSmiles(this_smiles)
if fingerprint == 'morgan':
fp = AllChem.GetMorganFingerprintAsBitVect(mol, 6, nBits=morgan_nbits)
fp = [int(i) for i in fp.ToBitString()]
elif fingerprint == 'maccs':
fp = MACCSkeys.GenMACCSKeys(mol)
fp = np.array(fp)[1:] # index 0 is unset
elif fingerprint == 'krfp':
fp = krfp(this_smiles)
else:
                raise ValueError(f"Unknown fingerprint: {fingerprint}")  # fail loudly instead of silently reusing a stale fp
x.append(fp)
y.append(this_label)
calculated_smiles.append(this_smiles)
except Exception as e:
print('exp', e)
return np.array(x), np.array(y), calculated_smiles
def krfp(smi):
"""Calculate Klekota-Roth fingerprint using padelpy."""
# Warning: as this function uses padel it requires descriptors.xml to be
# in the running directory and have KlekotaRothFingerprinter set to true
# we don't want to copy and remove the descriptors.xml file for each smiles
# separately, so we check if it exists and if it has the proper content
cwd = os.getcwd()
descriptors_filename = 'descriptors.xml'
descriptors_hash = 'f6145f57ff346599b907b044316c4e71'
try:
with open(os.path.join(cwd, descriptors_filename), 'r') as desc_file:
desc_file_content = desc_file.read()
m = hashlib.md5()
m.update(desc_file_content.encode('utf-8'))
if m.hexdigest() == descriptors_hash:
pass # descriptors.xml exists and has the right content
else:
# the file exists but it has a wrong content
raise RuntimeError("The descriptors.xml was found in the running directory but its content doesn't match the prototype content. Aborting.")
except FileNotFoundError:
# the file doesn't exist, we have to create it
src_directory = os.path.dirname(os.path.realpath(__file__))
shutil.copyfile(os.path.join(src_directory, descriptors_filename),
os.path.join(cwd, descriptors_filename))
# # #
# # # descriptors.xml exists and looks good, we can continue with calculating the representation
# on prometheus we use SCRATCH, everywhere else the default location is fine
with tempfile.TemporaryDirectory(dir=os.getenv('SCRATCH', None)) as tmpdirname:
smi_file = os.path.join(tmpdirname, "molecules.smi")
with open(smi_file, 'w') as sf:
sf.write(smi)
out = os.path.join(tmpdirname, "out.csv")
padeldescriptor(mol_dir=smi_file, d_file=out, fingerprints=True, retainorder=True)
fp = pd.read_csv(out).values[:,1:].reshape((-1)).astype(int)
return fp
def get_cv_split(indices):
iterator = []
for val_indices in indices:
train_indices = []
for idxs in [list(range(*i)) for i in indices if i != val_indices]:
train_indices.extend(idxs)
val_indices = list(range(*val_indices))
assert len(train_indices) + len(val_indices) == len(set(train_indices + val_indices))
iterator.append((np.array(train_indices), np.array(val_indices)))
return iterator
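# Hedged illustration of get_cv_split: with two datasets covering index ranges
# (0, 3) and (3, 5), each fold holds one dataset out as the validation part.
def _cv_split_example():
    split = get_cv_split([(0, 3), (3, 5)])
    # split[0] == (array([3, 4]), array([0, 1, 2]))
    # split[1] == (array([0, 1, 2]), array([3, 4]))
    return split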
def log_stability(values):
if isinstance(values, (list, tuple)):
return [np.log(1+v) for v in values]
else:
# for int, float, np.array it'll work, for else - IDK
return np.log(1+values)
def unlog_stability(values):
if isinstance(values, (list, tuple)):
return [np.exp(v)-1 for v in values]
else:
return np.exp(values) - 1
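# Hedged illustration: log_stability and unlog_stability form a log1p/expm1
# pair, matching the 'log' scaling option handled in load_data_from_df above.
def _scaling_roundtrip_example():
    return unlog_stability(log_stability(2.32))  # ~2.32, up to float error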
def cutoffs_metstabon(values, log_scale):
"""Changes regression to classification according to cutoffs from
MetStabOn - Online Platform for Metabolic Stability Predictions (Podlewska & Kafel)
values - np.array of metabolic stabilities
log_scale - boolean indicating if the stability values are in log-scale (True) or not (False)
"""
# y <= 0.6 - low
# 0.6 < y <= 2.32 - medium
# 2.32 < y - high
low = 0
medium = 1
high = 2
bottom_threshold = 0.6
top_threshold = 2.32
if log_scale:
bottom_threshold = log_stability(bottom_threshold)
top_threshold = log_stability(top_threshold)
if isinstance(values, np.ndarray):
classification = np.ones(values.shape, dtype=int)
classification[values<=bottom_threshold] = low
classification[values>top_threshold] = high
elif isinstance(values, float):
if values <= bottom_threshold:
return low
else:
return medium if values <= top_threshold else high
else:
raise NotImplementedError(f"Supported types for `values` are numpy.ndarray and float, is {type(values)}.")
return classification
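# Hedged illustration of the MetStabOn cutoffs: stability values of 0.5, 1.0
# and 3.0 (not log-scaled) fall into the low, medium and high classes.
def _cutoffs_example():
    return cutoffs_metstabon(np.array([0.5, 1.0, 3.0]), log_scale=False)  # -> [0, 1, 2]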
|
[
"numpy.sqrt",
"hashlib.md5",
"numpy.ones",
"numpy.hstack",
"pandas.read_csv",
"os.getenv",
"rdkit.Chem.MACCSkeys.GenMACCSKeys",
"numpy.log",
"os.path.join",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"os.getcwd",
"numpy.exp",
"numpy.array",
"os.path.realpath",
"numpy.vstack",
"padelpy.padeldescriptor",
"pandas.concat"
] |
[((853, 890), 'numpy.vstack', 'np.vstack', (['[el[0] for el in datasets]'], {}), '([el[0] for el in datasets])\n', (862, 890), True, 'import numpy as np\n'), ((899, 936), 'numpy.hstack', 'np.hstack', (['[el[1] for el in datasets]'], {}), '([el[1] for el in datasets])\n', (908, 936), True, 'import numpy as np\n'), ((950, 987), 'numpy.hstack', 'np.hstack', (['[el[2] for el in datasets]'], {}), '([el[2] for el in datasets])\n', (959, 987), True, 'import numpy as np\n'), ((2445, 2459), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (2454, 2459), True, 'import pandas as pd\n'), ((5350, 5361), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5359, 5361), False, 'import os\n'), ((4908, 4919), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4916, 4919), True, 'import numpy as np\n'), ((4921, 4932), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4929, 4932), True, 'import numpy as np\n'), ((5614, 5627), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (5625, 5627), False, 'import hashlib\n'), ((6607, 6648), 'os.path.join', 'os.path.join', (['tmpdirname', '"""molecules.smi"""'], {}), "(tmpdirname, 'molecules.smi')\n", (6619, 6648), False, 'import os\n'), ((6729, 6764), 'os.path.join', 'os.path.join', (['tmpdirname', '"""out.csv"""'], {}), "(tmpdirname, 'out.csv')\n", (6741, 6764), False, 'import os\n'), ((6773, 6859), 'padelpy.padeldescriptor', 'padeldescriptor', ([], {'mol_dir': 'smi_file', 'd_file': 'out', 'fingerprints': '(True)', 'retainorder': '(True)'}), '(mol_dir=smi_file, d_file=out, fingerprints=True,\n retainorder=True)\n', (6788, 6859), False, 'from padelpy import padeldescriptor\n'), ((7601, 7619), 'numpy.log', 'np.log', (['(1 + values)'], {}), '(1 + values)\n', (7607, 7619), True, 'import numpy as np\n'), ((8500, 8532), 'numpy.ones', 'np.ones', (['values.shape'], {'dtype': 'int'}), '(values.shape, dtype=int)\n', (8507, 8532), True, 'import numpy as np\n'), ((2358, 2416), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'delimiter': 'delimiter', 'header': 'header'}), '(data_path, delimiter=delimiter, header=header)\n', (2369, 2416), True, 'import pandas as pd\n'), ((2587, 2620), 'numpy.sqrt', 'np.sqrt', (['data_df.iloc[:, y_index]'], {}), '(data_df.iloc[:, y_index])\n', (2594, 2620), True, 'import numpy as np\n'), ((4226, 4257), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['this_smiles'], {}), '(this_smiles)\n', (4244, 4257), False, 'from rdkit import Chem\n'), ((7485, 7498), 'numpy.log', 'np.log', (['(1 + v)'], {}), '(1 + v)\n', (7491, 7498), True, 'import numpy as np\n'), ((7761, 7775), 'numpy.exp', 'np.exp', (['values'], {}), '(values)\n', (7767, 7775), True, 'import numpy as np\n'), ((2705, 2741), 'numpy.log', 'np.log', (['(1 + data_df.iloc[:, y_index])'], {}), '(1 + data_df.iloc[:, y_index])\n', (2711, 2741), True, 'import numpy as np\n'), ((4319, 4384), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['mol', '(6)'], {'nBits': 'morgan_nbits'}), '(mol, 6, nBits=morgan_nbits)\n', (4356, 4384), False, 'from rdkit.Chem import AllChem, MACCSkeys\n'), ((5493, 5532), 'os.path.join', 'os.path.join', (['cwd', 'descriptors_filename'], {}), '(cwd, descriptors_filename)\n', (5505, 5532), False, 'import os\n'), ((6143, 6169), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (6159, 6169), False, 'import os\n'), ((6195, 6244), 'os.path.join', 'os.path.join', (['src_directory', 'descriptors_filename'], {}), '(src_directory, descriptors_filename)\n', (6207, 6244), False, 'import os\n'), ((6270, 6309), 
'os.path.join', 'os.path.join', (['cwd', 'descriptors_filename'], {}), '(cwd, descriptors_filename)\n', (6282, 6309), False, 'import os\n'), ((6545, 6571), 'os.getenv', 'os.getenv', (['"""SCRATCH"""', 'None'], {}), "('SCRATCH', None)\n", (6554, 6571), False, 'import os\n'), ((7329, 7352), 'numpy.array', 'np.array', (['train_indices'], {}), '(train_indices)\n', (7337, 7352), True, 'import numpy as np\n'), ((7354, 7375), 'numpy.array', 'np.array', (['val_indices'], {}), '(val_indices)\n', (7362, 7375), True, 'import numpy as np\n'), ((7707, 7716), 'numpy.exp', 'np.exp', (['v'], {}), '(v)\n', (7713, 7716), True, 'import numpy as np\n'), ((4503, 4530), 'rdkit.Chem.MACCSkeys.GenMACCSKeys', 'MACCSkeys.GenMACCSKeys', (['mol'], {}), '(mol)\n', (4525, 4530), False, 'from rdkit.Chem import AllChem, MACCSkeys\n'), ((4552, 4564), 'numpy.array', 'np.array', (['fp'], {}), '(fp)\n', (4560, 4564), True, 'import numpy as np\n'), ((6869, 6885), 'pandas.read_csv', 'pd.read_csv', (['out'], {}), '(out)\n', (6880, 6885), True, 'import pandas as pd\n')]
|
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm # Displays a progress bar
import sys,os
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import pickle
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold, StratifiedKFold,train_test_split, cross_val_score
from sklearn import preprocessing
from sklearn.decomposition import PCA, sparse_encode
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import os
from itertools import combinations
import argparse
sys.path.append('.')
from utils import *
from dataset import EnableDataset
def run_classifier(args):
"""
Main function runs training and testing of Heuristic based machine
learning models (SVM, LDA)
Input: argument passes through argparse. Each argument is described
in the --help of each arguments.
Output: No return, but generates a .txt file results of testing
including accuracy of the models.
"""
########## PRAMETER SETTINGS ##############
MODE = args.laterality
CLASSIFIER = args.classifiers
SENSOR = args.sensors
############################################
sensor_str='_'.join(SENSOR)
RESULT_NAME= './results/'+CLASSIFIER+'/'+CLASSIFIER+'_'+MODE+'_'+sensor_str+'_subjects_accuracy.txt'
SAVE_NAME= './checkpoints/'+CLASSIFIER+'/'+CLASSIFIER +'_'+MODE+'_'+sensor_str+'_subjects.pkl'
if not os.path.exists('./results/'+CLASSIFIER):
os.makedirs('./results/'+CLASSIFIER)
subjects = ['156','185','186','188','189','190', '191', '192', '193', '194']
subject_data = []
# Loading/saving the ENABL3S dataset
if args.data_saving:
print("Loading datasets...")
for subject in subjects:
subject_data.append(EnableDataset(subject_list= [subject],model_type=CLASSIFIER,sensors=SENSOR,mode=MODE))
save_object(subject_data,SAVE_NAME)
else:
with open(SAVE_NAME, 'rb') as input:
subject_data = pickle.load(input)
correct=0
steady_state_correct = 0
tot_steady_state = 0
transitional_correct = 0
tot_transitional = 0
# Define cross-validation parameters
skf = KFold(n_splits = len(subject_data), shuffle = True)
# Define PCA parameters
scale = preprocessing.StandardScaler()
pca = PCA()
scale_PCA = Pipeline([('norm',scale),('dimred',pca)])
if CLASSIFIER == 'LDA':
model = LinearDiscriminantAnalysis()
elif CLASSIFIER == 'SVM':
model = SVC(kernel = 'linear', C = 10)
accuracies =[]
ss_accuracies=[]
tr_accuracies=[]
subject_numb = []
i = 0
# main training/testing loop
for train_index, test_index in skf.split(subject_data):
print("**************FOLD {}*********".format(i+1))
print(train_index, test_index)
train_set = [subject_data[i] for i in train_index]
test_set = [subject_data[i] for i in test_index]
BIO_train = torch.utils.data.ConcatDataset(train_set)
wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))
for batch, label, dtype in tqdm(wholeloader):
X_train = batch
y_train = label
types_train = dtype
BIO_test = torch.utils.data.ConcatDataset(test_set)
		wholeloader = DataLoader(BIO_test, batch_size=len(BIO_test))  # load the whole test set in a single batch
for batch, label, dtype in tqdm(wholeloader):
X_test = batch
y_test = label
types_test = dtype
if CLASSIFIER == 'LDA':
scale_PCA.fit(X_train)
feats_train_PCA = scale_PCA.transform(X_train)
feats_test_PCA = scale_PCA.transform(X_test)
pcaexplainedvar = np.cumsum(scale_PCA.named_steps['dimred'].explained_variance_ratio_)
pcanumcomps = min(min(np.where(pcaexplainedvar > 0.95))) + 1
unique_modes = np.unique(y_train)
model.set_params(priors = np.ones(len(unique_modes))/len(unique_modes))
model.fit(feats_train_PCA, y_train)
y_pred = model.predict(feats_test_PCA)
elif CLASSIFIER == 'SVM':
scale.fit(X_train)
feats_train_norm = scale.transform(X_train)
feats_test_norm = scale.transform(X_test )
model.fit(feats_train_norm, y_train)
y_pred = model.predict(feats_test_norm)
# append model performance metrics
correct = (y_pred==np.array(y_test)).sum().item()
tot = len(y_test)
steady_state_correct = (np.logical_and(y_pred==np.array(y_test), types_test == 1)).sum().item()
tot_steady_state = (types_test == 1).sum().item()
transitional_correct = (np.logical_and(y_pred==np.array(y_test), types_test == 0)).sum().item()
tot_transitional = (types_test == 0).sum().item()
accuracies.append(accuracy_score(y_test, y_pred))
tot_acc = correct/tot
ss_acc = steady_state_correct/tot_steady_state if tot_steady_state != 0 else "No steady state samples used"
tr_acc = transitional_correct/tot_transitional if tot_transitional != 0 else "No transitional samples used"
ss_accuracies.append(ss_acc) if tot_steady_state != 0 else "No steady state samples used"
tr_accuracies.append(tr_acc) if tot_transitional != 0 else "No transitional samples used"
subject_numb.append(test_index[0])
print("Total accuracy: {}".format(accuracy_score(y_test, y_pred)))
print("Total correct: {}, number: {}, accuracy: {}".format(correct,tot,tot_acc))
print("Steady-state correct: {}, number: {}, accuracy: {}".format(steady_state_correct,tot_steady_state,ss_acc))
print("Transistional correct: {}, number: {}, accuracy: {}".format(transitional_correct,tot_transitional,tr_acc))
i +=1
print('********************SUMMARY*****************************')
print('Accuracy_,mean:', np.mean(accuracies),'Accuracy_std: ', np.std(accuracies))
print('SR Accuracy_,mean:', np.mean(ss_accuracies),'Accuracy_std: ', np.std(ss_accuracies))
print('TR Accuracy_,mean:', np.mean(tr_accuracies),'Accuracy_std: ', np.std(tr_accuracies))
print('writing...')
with open(RESULT_NAME, 'w') as f:
f.write('total ')
for item in accuracies:
f.write("%s " % item)
f.write('\n')
f.write('steadystate ')
for item in ss_accuracies:
f.write("%s " % item)
f.write('\n')
f.write('transitional ')
for item in tr_accuracies:
f.write("%s " % item)
f.write('\n')
f.write('subject_numb ')
for item in subject_numb:
f.write("%s " % item)
f.close()
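# Illustrative sketch (not part of the original script): how run_classifier() derives the
# number of PCA components above. `explained_ratios` stands in for
# scale_PCA.named_steps['dimred'].explained_variance_ratio_; the helper name is hypothetical.
def _num_components_for_95pct_variance(explained_ratios):
	cumvar = np.cumsum(explained_ratios)           # cumulative explained variance
	return min(min(np.where(cumvar > 0.95))) + 1  # smallest index crossing 95%, as a component count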
"""This block parses command line arguments and runs the main code"""
p = argparse.ArgumentParser()
p.add_argument("--classifiers", default="LDA", help="classifier types: LDA, SVM")
p.add_argument("--sensors", nargs="+", default=["imu","emg","gon"], help="select combinations of sensor modality types: img, emg, gonio")
p.add_argument("--all_comb", dest='all_comb', action='store_true', help="loop through all combinations")
p.add_argument("--laterality", default='bilateral', type=str, help="select laterality types, bilateral, ipsilateral, contralateral")
p.add_argument("--data_skip", dest='data_saving', action='store_false', help="skip the dataset saving/loading")
args = p.parse_args()
p.set_defaults(data_saving=True)
p.set_defaults(all_comb=False)
comb_number = len(args.sensors)
if args.all_comb:
print('looping through all combinations, overriding sensor selection')
args.sensors = ["imu","emg","gon"]
comb_number = 1
for i in range(comb_number,4):
print('Number of sensors range:' , i ,'to',len(args.sensors))
for combo in combinations(args.sensors,i):
sensor = [item for item in combo]
print("Classifer type: ", args.classifiers)
print("Sensor modality: ", sensor)
print("Sensor laterality: ", args.laterality)
run_classifier(args)
|
[
"torch.utils.data.ConcatDataset",
"numpy.array",
"sys.path.append",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"numpy.where",
"dataset.EnableDataset",
"pickle.load",
"numpy.std",
"sklearn.pipeline.Pipeline",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC",
"numpy.unique",
"os.makedirs",
"tqdm.tqdm",
"sklearn.preprocessing.StandardScaler",
"itertools.combinations",
"numpy.cumsum",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
] |
[((577, 597), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (592, 597), False, 'import sys, os\n'), ((6110, 6135), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6133, 6135), False, 'import argparse\n'), ((2167, 2197), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (2195, 2197), False, 'from sklearn import preprocessing\n'), ((2205, 2210), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (2208, 2210), False, 'from sklearn.decomposition import PCA, sparse_encode\n'), ((2224, 2268), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('norm', scale), ('dimred', pca)]"], {}), "([('norm', scale), ('dimred', pca)])\n", (2232, 2268), False, 'from sklearn.pipeline import Pipeline\n'), ((7080, 7109), 'itertools.combinations', 'combinations', (['args.sensors', 'i'], {}), '(args.sensors, i)\n', (7092, 7109), False, 'from itertools import combinations\n'), ((1398, 1439), 'os.path.exists', 'os.path.exists', (["('./results/' + CLASSIFIER)"], {}), "('./results/' + CLASSIFIER)\n", (1412, 1439), False, 'import os\n'), ((1441, 1479), 'os.makedirs', 'os.makedirs', (["('./results/' + CLASSIFIER)"], {}), "('./results/' + CLASSIFIER)\n", (1452, 1479), False, 'import os\n'), ((2302, 2330), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (2328, 2330), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((2775, 2816), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['train_set'], {}), '(train_set)\n', (2805, 2816), False, 'import torch\n'), ((2911, 2928), 'tqdm.tqdm', 'tqdm', (['wholeloader'], {}), '(wholeloader)\n', (2915, 2928), False, 'from tqdm import tqdm\n'), ((3005, 3045), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['test_set'], {}), '(test_set)\n', (3035, 3045), False, 'import torch\n'), ((3139, 3156), 'tqdm.tqdm', 'tqdm', (['wholeloader'], {}), '(wholeloader)\n', (3143, 3156), False, 'from tqdm import tqdm\n'), ((5362, 5381), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (5369, 5381), True, 'import numpy as np\n'), ((5400, 5418), 'numpy.std', 'np.std', (['accuracies'], {}), '(accuracies)\n', (5406, 5418), True, 'import numpy as np\n'), ((5449, 5471), 'numpy.mean', 'np.mean', (['ss_accuracies'], {}), '(ss_accuracies)\n', (5456, 5471), True, 'import numpy as np\n'), ((5490, 5511), 'numpy.std', 'np.std', (['ss_accuracies'], {}), '(ss_accuracies)\n', (5496, 5511), True, 'import numpy as np\n'), ((5542, 5564), 'numpy.mean', 'np.mean', (['tr_accuracies'], {}), '(tr_accuracies)\n', (5549, 5564), True, 'import numpy as np\n'), ((5583, 5604), 'numpy.std', 'np.std', (['tr_accuracies'], {}), '(tr_accuracies)\n', (5589, 5604), True, 'import numpy as np\n'), ((1907, 1925), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (1918, 1925), False, 'import pickle\n'), ((2368, 2394), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'C': '(10)'}), "(kernel='linear', C=10)\n", (2371, 2394), False, 'from sklearn.svm import SVC\n'), ((3390, 3458), 'numpy.cumsum', 'np.cumsum', (["scale_PCA.named_steps['dimred'].explained_variance_ratio_"], {}), "(scale_PCA.named_steps['dimred'].explained_variance_ratio_)\n", (3399, 3458), True, 'import numpy as np\n'), ((3541, 3559), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (3550, 3559), True, 'import numpy as np\n'), ((4376, 4406), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), 
'(y_test, y_pred)\n', (4390, 4406), False, 'from sklearn.metrics import accuracy_score\n'), ((1718, 1809), 'dataset.EnableDataset', 'EnableDataset', ([], {'subject_list': '[subject]', 'model_type': 'CLASSIFIER', 'sensors': 'SENSOR', 'mode': 'MODE'}), '(subject_list=[subject], model_type=CLASSIFIER, sensors=SENSOR,\n mode=MODE)\n', (1731, 1809), False, 'from dataset import EnableDataset\n'), ((4913, 4943), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4927, 4943), False, 'from sklearn.metrics import accuracy_score\n'), ((3484, 3516), 'numpy.where', 'np.where', (['(pcaexplainedvar > 0.95)'], {}), '(pcaexplainedvar > 0.95)\n', (3492, 3516), True, 'import numpy as np\n'), ((4005, 4021), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4013, 4021), True, 'import numpy as np\n'), ((4105, 4121), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4113, 4121), True, 'import numpy as np\n'), ((4255, 4271), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4263, 4271), True, 'import numpy as np\n')]
|
from numpy import arcsin, cos, exp, pi, sin
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW14
A SlotW14 object
Returns
-------
point_dict: dict
A dict of the slot point coordinates
"""
Rbo = self.get_Rbo()
hssp = pi / self.Zs
# alpha is the angle to rotate P0 so ||P1,P9|| = W0
alpha = arcsin(self.W0 / (2 * Rbo))
Harc = float(Rbo * (1 - cos(alpha)))
Z1 = Rbo * exp(-1j * alpha)
if self.is_outwards():
R1 = Rbo - Harc + self.H0 + self.H1
Z2 = Z1 + self.H0
else:
R1 = Rbo - Harc - self.H0 - self.H1
Z2 = Z1 - self.H0
# In the slot ref: Z3=x7+j*y7 with x7 = R1
# In the tooth ref: Zt7 = xt7 + j*yt7 with yt7 = W3/2
# Zt7 = Z3 * exp(1j*hsp) = (x7+jy7)*(cos(hsp)+1j*sin(hsp))
# yt7 = W3/2 = x7*sin(hsp)+y7*cos(hsp)
Z3 = R1 + 1j * (self.W3 / 2 - R1 * sin(hssp)) / cos(hssp)
# Z3t is Z3 in tooth ref
Z3t = Z3 * exp(1j * hssp)
if self.is_outwards():
Z4t = Z3t + self.H3
else:
Z4t = Z3t - self.H3
# In the slot ref: Z5=x5+j*y5 with y5 = 0
# In the tooth ref: Zt5 = xt5 + j*yt5 with xt5 = xt6
# Zt5 = Z5 * exp(1j*hsp) = x5*cos(hsp)+1j*x5*sin(hsp)
# x5 = real(Z4t)/cos(hsp)
Z5 = Z4t.real / cos(hssp)
Z4 = Z4t * exp(-1j * hssp)
point_dict = dict()
    # symmetry
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
point_dict["Z5"] = Z5
point_dict["Z6"] = Z4.conjugate()
point_dict["Z7"] = Z3.conjugate()
point_dict["Z8"] = Z2.conjugate()
point_dict["Z9"] = Z1.conjugate()
return point_dict
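# Illustrative check (not part of pyleecan): Z3 above comes from rotating into the tooth
# reference frame with exp(1j*hssp) and requiring the imaginary part to equal W3/2:
#   imag(Z3*exp(1j*hssp)) = x3*sin(hssp) + y3*cos(hssp) = W3/2,  with x3 = R1,
# hence y3 = (W3/2 - R1*sin(hssp)) / cos(hssp). Quick numerical sanity check with
# hypothetical values of R1, W3 and hssp:
def _check_tooth_frame(R1=0.05, W3=0.01, hssp=pi / 36):
    Z3 = R1 + 1j * (W3 / 2 - R1 * sin(hssp)) / cos(hssp)
    assert abs((Z3 * exp(1j * hssp)).imag - W3 / 2) < 1e-12
    return Z3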
|
[
"numpy.exp",
"numpy.sin",
"numpy.arcsin",
"numpy.cos"
] |
[((434, 461), 'numpy.arcsin', 'arcsin', (['(self.W0 / (2 * Rbo))'], {}), '(self.W0 / (2 * Rbo))\n', (440, 461), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((519, 537), 'numpy.exp', 'exp', (['(-1.0j * alpha)'], {}), '(-1.0j * alpha)\n', (522, 537), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((1031, 1047), 'numpy.exp', 'exp', (['(1.0j * hssp)'], {}), '(1.0j * hssp)\n', (1034, 1047), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((1351, 1360), 'numpy.cos', 'cos', (['hssp'], {}), '(hssp)\n', (1354, 1360), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((1376, 1393), 'numpy.exp', 'exp', (['(-1.0j * hssp)'], {}), '(-1.0j * hssp)\n', (1379, 1393), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((976, 985), 'numpy.cos', 'cos', (['hssp'], {}), '(hssp)\n', (979, 985), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((490, 500), 'numpy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (493, 500), False, 'from numpy import arcsin, cos, exp, pi, sin\n'), ((963, 972), 'numpy.sin', 'sin', (['hssp'], {}), '(hssp)\n', (966, 972), False, 'from numpy import arcsin, cos, exp, pi, sin\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from kf_v4 import f
from simulated_observation import ls_of_observations_v4, real_state_v4
plt.ion()
plt.figure()
# assume the picture is 300 units wide (x) and 200 units tall (y)
real_state = real_state_v4
f.x = ls_of_observations_v4[0]
NUMSTEPS = 10 # number of loops to run the algorithm
delta_x = 1 # suppose the user turns his head at a constant speed
delta_y = -0.5
var = 1 ** 2
con1s = []
real_con1s = []
x1s = []
real_x1s = []
y1s = []
real_y1s = []
w1s = []
real_w1s = []
h1s = []
real_h1s = []
for step in range(NUMSTEPS):
real_state[1] += delta_x
real_state[2] += delta_y
real_state[6] += delta_x
real_state[7] += delta_y
real_state[21] += delta_x
real_state[22] += delta_y
f.predict(u=np.array([[delta_x], [delta_y]]))
f.update(z=ls_of_observations_v4[step])
con1s.append(f.x[20])
print(f.x[20])
real_con1s.append(real_state[20])
x1s.append(f.x[21])
real_x1s.append(real_state[21])
y1s.append(f.x[22])
real_y1s.append(real_state[22])
w1s.append(f.x[23])
real_w1s.append(real_state[23])
h1s.append(f.x[24])
real_h1s.append(real_state[24])
plt.subplot(5, 2, 1)
plt.title('Confidence 1')
plt.plot(con1s, 'r') # red is the one recursively calculated, expected to converge to blue
plt.plot(real_con1s, 'b') # blue is the real state
plt.subplot(5, 2, 3)
plt.title('x1')
plt.plot(x1s, 'r')
plt.plot(real_x1s, 'b')
plt.subplot(5, 2, 5)
plt.title('y1')
plt.plot(y1s, 'r')
plt.plot(real_y1s, 'b')
plt.subplot(5, 2, 7)
plt.title('width1')
plt.plot(w1s, 'r')
plt.plot(real_w1s, 'b')
plt.subplot(5, 2, 9)
plt.title('heights1')
plt.plot(h1s, 'r')
plt.plot(real_h1s, 'b')
plt.show()
plt.ginput(timeout=300)
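# Note (added for clarity): the state-vector indices used above follow the layout assumed
# by kf_v4 / simulated_observation, in which f.x[20:25] holds (confidence, x, y, width,
# height) of the box being plotted; that layout is an assumption of this script.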
|
[
"kf_v4.f.update",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ginput",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((145, 154), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (152, 154), True, 'import matplotlib.pyplot as plt\n'), ((155, 167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (165, 167), True, 'import matplotlib.pyplot as plt\n'), ((1182, 1202), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(1)'], {}), '(5, 2, 1)\n', (1193, 1202), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1228), 'matplotlib.pyplot.title', 'plt.title', (['"""Confidence 1"""'], {}), "('Confidence 1')\n", (1212, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1249), 'matplotlib.pyplot.plot', 'plt.plot', (['con1s', '"""r"""'], {}), "(con1s, 'r')\n", (1237, 1249), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1346), 'matplotlib.pyplot.plot', 'plt.plot', (['real_con1s', '"""b"""'], {}), "(real_con1s, 'b')\n", (1329, 1346), True, 'import matplotlib.pyplot as plt\n'), ((1374, 1394), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(3)'], {}), '(5, 2, 3)\n', (1385, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1395, 1410), 'matplotlib.pyplot.title', 'plt.title', (['"""x1"""'], {}), "('x1')\n", (1404, 1410), True, 'import matplotlib.pyplot as plt\n'), ((1411, 1429), 'matplotlib.pyplot.plot', 'plt.plot', (['x1s', '"""r"""'], {}), "(x1s, 'r')\n", (1419, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1453), 'matplotlib.pyplot.plot', 'plt.plot', (['real_x1s', '"""b"""'], {}), "(real_x1s, 'b')\n", (1438, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(5)'], {}), '(5, 2, 5)\n', (1466, 1475), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1491), 'matplotlib.pyplot.title', 'plt.title', (['"""y1"""'], {}), "('y1')\n", (1485, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1492, 1510), 'matplotlib.pyplot.plot', 'plt.plot', (['y1s', '"""r"""'], {}), "(y1s, 'r')\n", (1500, 1510), True, 'import matplotlib.pyplot as plt\n'), ((1511, 1534), 'matplotlib.pyplot.plot', 'plt.plot', (['real_y1s', '"""b"""'], {}), "(real_y1s, 'b')\n", (1519, 1534), True, 'import matplotlib.pyplot as plt\n'), ((1536, 1556), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(7)'], {}), '(5, 2, 7)\n', (1547, 1556), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1576), 'matplotlib.pyplot.title', 'plt.title', (['"""width1"""'], {}), "('width1')\n", (1566, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1595), 'matplotlib.pyplot.plot', 'plt.plot', (['w1s', '"""r"""'], {}), "(w1s, 'r')\n", (1585, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1619), 'matplotlib.pyplot.plot', 'plt.plot', (['real_w1s', '"""b"""'], {}), "(real_w1s, 'b')\n", (1604, 1619), True, 'import matplotlib.pyplot as plt\n'), ((1621, 1641), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(9)'], {}), '(5, 2, 9)\n', (1632, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1663), 'matplotlib.pyplot.title', 'plt.title', (['"""heights1"""'], {}), "('heights1')\n", (1651, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1682), 'matplotlib.pyplot.plot', 'plt.plot', (['h1s', '"""r"""'], {}), "(h1s, 'r')\n", (1672, 1682), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1706), 'matplotlib.pyplot.plot', 'plt.plot', (['real_h1s', '"""b"""'], {}), "(real_h1s, 'b')\n", (1691, 1706), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1719), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1717, 1719), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1743), 
'matplotlib.pyplot.ginput', 'plt.ginput', ([], {'timeout': '(300)'}), '(timeout=300)\n', (1730, 1743), True, 'import matplotlib.pyplot as plt\n'), ((816, 855), 'kf_v4.f.update', 'f.update', ([], {'z': 'ls_of_observations_v4[step]'}), '(z=ls_of_observations_v4[step])\n', (824, 855), False, 'from kf_v4 import f\n'), ((778, 810), 'numpy.array', 'np.array', (['[[delta_x], [delta_y]]'], {}), '([[delta_x], [delta_y]])\n', (786, 810), True, 'import numpy as np\n')]
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from kern import CombinationKernel
from ...util.caching import Cache_this
import itertools
def numpy_invalid_op_as_exception(func):
"""
A decorator that allows catching numpy invalid operations
as exceptions (the default behaviour is raising warnings).
"""
def func_wrapper(*args, **kwargs):
np.seterr(invalid='raise')
result = func(*args, **kwargs)
np.seterr(invalid='warn')
return result
return func_wrapper
class Prod(CombinationKernel):
"""
Computes the product of 2 kernels
:param k1, k2: the kernels to multiply
:type k1, k2: Kern
    :param tensor: The kernels are either multiplied as functions defined on the same input space (default) or on the product of the input spaces
:type tensor: Boolean
:rtype: kernel object
"""
def __init__(self, kernels, name='mul'):
for i, kern in enumerate(kernels[:]):
if isinstance(kern, Prod):
del kernels[i]
for part in kern.parts[::-1]:
kern.unlink_parameter(part)
kernels.insert(i, part)
super(Prod, self).__init__(kernels, name)
@Cache_this(limit=2, force_kwargs=['which_parts'])
def K(self, X, X2=None, which_parts=None):
if which_parts is None:
which_parts = self.parts
elif not isinstance(which_parts, (list, tuple)):
# if only one part is given
which_parts = [which_parts]
return reduce(np.multiply, (p.K(X, X2) for p in which_parts))
@Cache_this(limit=2, force_kwargs=['which_parts'])
def Kdiag(self, X, which_parts=None):
if which_parts is None:
which_parts = self.parts
return reduce(np.multiply, (p.Kdiag(X) for p in which_parts))
@numpy_invalid_op_as_exception
def update_gradients_full(self, dL_dK, X, X2=None):
k = self.K(X,X2)*dL_dK
try:
for p in self.parts:
p.update_gradients_full(k/p.K(X,X2),X,X2)
except FloatingPointError:
for combination in itertools.combinations(self.parts, len(self.parts) - 1):
prod = reduce(np.multiply, [p.K(X, X2) for p in combination])
to_update = list(set(self.parts) - set(combination))[0]
to_update.update_gradients_full(dL_dK * prod, X, X2)
def update_gradients_diag(self, dL_dKdiag, X):
k = self.Kdiag(X)*dL_dKdiag
for p in self.parts:
p.update_gradients_diag(k/p.Kdiag(X),X)
@numpy_invalid_op_as_exception
def gradients_X(self, dL_dK, X, X2=None):
target = np.zeros(X.shape)
k = self.K(X,X2)*dL_dK
try:
for p in self.parts:
target += p.gradients_X(k/p.K(X,X2),X,X2)
except FloatingPointError:
for combination in itertools.combinations(self.parts, len(self.parts) - 1):
prod = reduce(np.multiply, [p.K(X, X2) for p in combination])
to_update = list(set(self.parts) - set(combination))[0]
target += to_update.gradients_X(dL_dK * prod, X, X2)
return target
def gradients_X_diag(self, dL_dKdiag, X):
target = np.zeros(X.shape)
k = self.Kdiag(X)*dL_dKdiag
for p in self.parts:
target += p.gradients_X_diag(k/p.Kdiag(X),X)
return target
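# Illustrative sketch (not part of GPy): what the decorator above changes. With
# np.seterr(invalid='raise'), an invalid operation such as 0/0 raises FloatingPointError
# instead of only warning, which is how the decorated gradient methods detect a zero
# kernel value and fall back to the explicit leave-one-out products.
def _demo_invalid_as_exception():
    np.seterr(invalid='raise')
    try:
        np.zeros(2) / np.zeros(2)  # 0/0 -> 'invalid' floating point operation
    except FloatingPointError:
        return True
    finally:
        np.seterr(invalid='warn')
    return False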
|
[
"numpy.zeros",
"numpy.seterr"
] |
[((455, 481), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""raise"""'}), "(invalid='raise')\n", (464, 481), True, 'import numpy as np\n'), ((529, 554), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""warn"""'}), "(invalid='warn')\n", (538, 554), True, 'import numpy as np\n'), ((2766, 2783), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (2774, 2783), True, 'import numpy as np\n'), ((3347, 3364), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (3355, 3364), True, 'import numpy as np\n')]
|
import os
from glob import glob
import time
import json
from PIL import Image
import pandas as pd
import numpy as np
import torchvision as tv
from rsp.data import bilinear_upsample, BANDS
from tifffile import imread as tiffread
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import base as metadata_base
from kf_d3m_primitives.remote_sensing.featurizer.remote_sensing_pretrained import (
RemoteSensingPretrainedPrimitive,
Hyperparams as rs_hp
)
from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval import (
ImageRetrievalPrimitive,
Hyperparams as ir_hp
)
from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline import ImageRetrievalPipeline
amdim_path = '/static_volumes/8946fea864c29ed785e00a9cbaa9a50295eb5a334b014f27ba20927104b07f46'
moco_path = '/static_volumes/fcc8a5a05fa7dbad8fc55584a77fc5d2c407e03a88610267860b45208e152f1f'
def load_nwpu(data_dir: str = '/NWPU-RESISC45', n_imgs = 200):
paths = sorted(glob(os.path.join(data_dir, '*/*')))
paths = [os.path.abspath(p) for p in paths]
imgs = [Image.open(p) for p in paths[:n_imgs]]
labels = [os.path.basename(os.path.dirname(p)) for p in paths[:n_imgs]]
transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(
mean = (0.3680, 0.3810, 0.3436),
std = (0.2034, 0.1854, 0.1848),
)
])
imgs = [transform(img) for img in imgs]
imgs = d3m_DataFrame(pd.DataFrame({'imgs': imgs}))
labels = np.array(labels)
return imgs, labels
def load_patch(imname):
patch = [
tiffread(f'{imname}_{band}.tif')
for band in BANDS
]
patch = np.stack([bilinear_upsample(xx) for xx in patch])
return patch
def load_big_earthnet():
fnames = sorted(glob('/test_data/bigearth-100-single/*/*.tif'))
imnames = sorted(list(set(['_'.join(f.split('_')[:-1]) for f in fnames])))
imgs = [
load_patch(img_path).astype(np.float32)
for img_path in imnames
]
imgs_df = pd.DataFrame({'image_col': imgs, 'index': range(len(imgs))})
imgs_df = d3m_DataFrame(imgs_df)
imgs_df.metadata = imgs_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, 1),
'https://metadata.datadrivendiscovery.org/types/PrimaryKey'
)
y = [i.split('/')[3] for i in imnames]
return imgs_df, np.array(y)
def iterative_labeling(features, labels, seed_idx = 2, n_rounds = 5):
# initial query image
    y = (labels == labels[seed_idx]).astype(int)
annotations = np.zeros(features.shape[0]) - 1
annotations[seed_idx] = 1
n_pos, n_neg = 1, 0
for i in range(n_rounds):
print(f'round {i}')
# generate ranking by similarity
sampler = ImageRetrievalPrimitive(
hyperparams=ir_hp(
ir_hp.defaults(),
reduce_dimension=256
)
)
sampler.set_training_data(
inputs = features,
outputs = d3m_DataFrame(pd.DataFrame({'annotations': annotations}))
)
sampler.fit()
ranking_df = sampler.produce(inputs = features).value
assert ranking_df.shape[0] == features.shape[0] - i - 1
exc_labeled = ranking_df['index'].values
inc_labeled = np.concatenate((sampler.pos_idxs, exc_labeled))
# simulate human labeling
next_idx = exc_labeled[0]
next_label = y[next_idx]
annotations[next_idx] = next_label
if next_label == 1:
n_pos += 1
else:
n_neg += 1
# evaluate ranking against ground truth
results = {
'round': i + 1,
'next_idx': int(next_idx),
'next_label': next_label,
'n_pos': n_pos,
'n_neg': n_neg,
'a_p': [
float(y[inc_labeled[:k]].mean())
for k in 2 ** np.arange(11)
], # precision, including labeled
'u_p': [
float(y[exc_labeled[:k]].mean())
for k in 2 ** np.arange(11)
], # precision, excluding labeled
'r_p': [
float(y[inc_labeled[:k]].sum()/y.sum())
for k in 2**np.arange(11)
], # recall, including labeled
}
print()
print(results)
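# Note on the metrics above (added for clarity): 'a_p' and 'u_p' are precision@k for
# k = 1, 2, 4, ..., 1024, computed over the ranking including / excluding the already
# labeled items, and 'r_p' is recall@k including labeled items (the fraction of all
# positives recovered in the top k).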
# def test_nwpu():
# train_inputs, labels = load_nwpu()
# featurizer = RemoteSensingPretrainedPrimitive(
# hyperparams=rs_hp(
# rs_hp.defaults(),
# inference_model = 'moco',
# use_columns = [0],
# ),
# volumes = {'amdim_weights': amdim_path, 'moco_weights': moco_path}
# )
# features = featurizer.produce(inputs = train_inputs).value
# #features.to_pickle("dummy.pkl")
# #features = pd.read_pickle("dummy.pkl")
# iterative_labeling(features, labels)
# def test_big_earthnet():
# train_inputs, labels = load_big_earthnet()
# featurizer = RemoteSensingPretrainedPrimitive(
# hyperparams=rs_hp(
# rs_hp.defaults(),
# inference_model = 'moco',
# use_columns = [0],
# ),
# volumes = {'amdim_weights': amdim_path, 'moco_weights': moco_path}
# )
# features = featurizer.produce(inputs = train_inputs).value
# features.to_pickle("dummy.pkl")
# #features = pd.read_pickle("dummy.pkl")
# iterative_labeling(features, labels)
def test_fixed_value_pipeline(
dataset = 'LL1_bigearth_landuse_detection',
n_rows = 2188,
):
pipeline = ImageRetrievalPipeline(annotations = [1], dataset = dataset)
annotations = pipeline.make_annotations_dataset(n_rows)
pipeline = ImageRetrievalPipeline(annotations = annotations, dataset = dataset)
pipeline.write_pipeline()
pipeline.fit_produce()
pipeline.delete_pipeline()
pipeline.delete_annotations_dataset()
def test_two_inputs_pipeline(
dataset = 'LL1_bigearth_landuse_detection',
n_rows = 2188,
n_rounds = 2,
):
pipeline = ImageRetrievalPipeline(dataset = dataset)
pipeline.write_pipeline()
for i in range(n_rounds):
print(f'Running round {i} pipeline...')
pipeline.make_annotations_dataset(n_rows, round_num = i)
pipeline.fit_produce()
pipeline.delete_pipeline()
pipeline.delete_annotations_dataset()
|
[
"kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline.ImageRetrievalPipeline",
"PIL.Image.open",
"tifffile.imread",
"d3m.container.DataFrame",
"pandas.DataFrame",
"kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval.Hyperparams.defaults",
"numpy.arange",
"os.path.join",
"rsp.data.bilinear_upsample",
"numpy.array",
"numpy.zeros",
"os.path.dirname",
"numpy.concatenate",
"torchvision.transforms.Normalize",
"os.path.abspath",
"torchvision.transforms.ToTensor",
"glob.glob"
] |
[((1542, 1558), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1550, 1558), True, 'import numpy as np\n'), ((2138, 2160), 'd3m.container.DataFrame', 'd3m_DataFrame', (['imgs_df'], {}), '(imgs_df)\n', (2151, 2160), True, 'from d3m.container import DataFrame as d3m_DataFrame\n'), ((5576, 5632), 'kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline.ImageRetrievalPipeline', 'ImageRetrievalPipeline', ([], {'annotations': '[1]', 'dataset': 'dataset'}), '(annotations=[1], dataset=dataset)\n', (5598, 5632), False, 'from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline import ImageRetrievalPipeline\n'), ((5712, 5776), 'kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline.ImageRetrievalPipeline', 'ImageRetrievalPipeline', ([], {'annotations': 'annotations', 'dataset': 'dataset'}), '(annotations=annotations, dataset=dataset)\n', (5734, 5776), False, 'from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline import ImageRetrievalPipeline\n'), ((6046, 6085), 'kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline.ImageRetrievalPipeline', 'ImageRetrievalPipeline', ([], {'dataset': 'dataset'}), '(dataset=dataset)\n', (6068, 6085), False, 'from kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval_pipeline import ImageRetrievalPipeline\n'), ((1051, 1069), 'os.path.abspath', 'os.path.abspath', (['p'], {}), '(p)\n', (1066, 1069), False, 'import os\n'), ((1098, 1111), 'PIL.Image.open', 'Image.open', (['p'], {}), '(p)\n', (1108, 1111), False, 'from PIL import Image\n'), ((1499, 1527), 'pandas.DataFrame', 'pd.DataFrame', (["{'imgs': imgs}"], {}), "({'imgs': imgs})\n", (1511, 1527), True, 'import pandas as pd\n'), ((1630, 1662), 'tifffile.imread', 'tiffread', (['f"""{imname}_{band}.tif"""'], {}), "(f'{imname}_{band}.tif')\n", (1638, 1662), True, 'from tifffile import imread as tiffread\n'), ((1822, 1868), 'glob.glob', 'glob', (['"""/test_data/bigearth-100-single/*/*.tif"""'], {}), "('/test_data/bigearth-100-single/*/*.tif')\n", (1826, 1868), False, 'from glob import glob\n'), ((2400, 2411), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2408, 2411), True, 'import numpy as np\n'), ((2580, 2607), 'numpy.zeros', 'np.zeros', (['features.shape[0]'], {}), '(features.shape[0])\n', (2588, 2607), True, 'import numpy as np\n'), ((3314, 3361), 'numpy.concatenate', 'np.concatenate', (['(sampler.pos_idxs, exc_labeled)'], {}), '((sampler.pos_idxs, exc_labeled))\n', (3328, 3361), True, 'import numpy as np\n'), ((1006, 1035), 'os.path.join', 'os.path.join', (['data_dir', '"""*/*"""'], {}), "(data_dir, '*/*')\n", (1018, 1035), False, 'import os\n'), ((1169, 1187), 'os.path.dirname', 'os.path.dirname', (['p'], {}), '(p)\n', (1184, 1187), False, 'import os\n'), ((1263, 1287), 'torchvision.transforms.ToTensor', 'tv.transforms.ToTensor', ([], {}), '()\n', (1285, 1287), True, 'import torchvision as tv\n'), ((1297, 1384), 'torchvision.transforms.Normalize', 'tv.transforms.Normalize', ([], {'mean': '(0.368, 0.381, 0.3436)', 'std': '(0.2034, 0.1854, 0.1848)'}), '(mean=(0.368, 0.381, 0.3436), std=(0.2034, 0.1854, \n 0.1848))\n', (1320, 1384), True, 'import torchvision as tv\n'), ((1717, 1738), 'rsp.data.bilinear_upsample', 'bilinear_upsample', (['xx'], {}), '(xx)\n', (1734, 1738), False, 'from rsp.data import bilinear_upsample, BANDS\n'), ((2858, 2874), 'kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval.Hyperparams.defaults', 'ir_hp.defaults', ([], {}), '()\n', (2872, 2874), True, 'from 
kf_d3m_primitives.remote_sensing.image_retrieval.image_retrieval import ImageRetrievalPrimitive, Hyperparams as ir_hp\n'), ((3040, 3082), 'pandas.DataFrame', 'pd.DataFrame', (["{'annotations': annotations}"], {}), "({'annotations': annotations})\n", (3052, 3082), True, 'import pandas as pd\n'), ((3927, 3940), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (3936, 3940), True, 'import numpy as np\n'), ((4088, 4101), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (4097, 4101), True, 'import numpy as np\n'), ((4254, 4267), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (4263, 4267), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 10:44:31 2019
@author: hasee
"""
from pystruct.models import EdgeFeatureGraphCRF
from pystruct.learners import FrankWolfeSSVM, OneSlackSSVM
from sklearn.metrics import precision_score, recall_score, f1_score
#from sklearn.externals import joblib
from threading import Thread
from queue import Queue
import pandas as pd
import numpy as np
import random
import time
import pickle
import os
import gc
'''
# Choosing the learner:
#   when the dataset has n_samples >= 5000, FrankWolfeSSVM performs better
#   when the dataset has n_samples <  5000, OneSlackSSVM performs better
'''
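# Illustrative helper (not part of the original script): the rule above, written out.
# The C / tol / max_iter values mirror the ones used in model_test() below; this script
# instead fixes the choice through the LEARNER constant.
def choose_learner(crf, n_samples):
    if n_samples >= 5000:
        return FrankWolfeSSVM(crf, C=.1, tol=.1, max_iter=100)
    return OneSlackSSVM(crf, C=.1, tol=.1, max_iter=100, switch_to='ad3')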
K_FOLD = 10
DELTA = 0.5
K = 100
ALPHA = 2.0
CAMPLEN = [2]#range(10, K+1) #train a classifier for each campaign length and predict on that length; range(10, K+1) means MARS of length >= 10 are trained together
FAKE_CAMP = 2#CAMPLEN[0]*0.2
NODE_FEATURES = [0,1,2,4,5,6,7] #0:RD, 1:RANK, 2:EXT, 3:CAMPAIGN_SIZE, 4:WORDSCOUNT, 5:PP1, 6:DAYS (gap between the current review time and the first review time), 7:BURST
EDGE_FEATURES = [0,1,2,3] #0:RATE_DIFF, 1:DATE_DIFF, 2:TEXT_JACCARD_SIMILARITY, 3:FRT,first_review_time
CLASS_WEIGHT = [0.1, 0.9] #class weights; DELTA = 0.0 => #NYC [0.2, 0.8] if len >= 8 else [0.1, 0.9] #Zip [0.2, 0.8] if len >= 8 else [0.1, 0.9]
LEARNER = "OneSlackSSVM" #FrankWolfeSSVM OneSlackSSVM
DATASET = "NYC" #NYC, Zip
if DATASET == "NYC":
PID_LIST = [p for p in range(0, 923)] #NYC
else:
PID_LIST = [p for p in range(0, 5044)] #Zip
GSFG_HOME = "SourceData/Yelp{}/".format(DATASET) #source dataset directory
RESULT_FILE = "ResultData/{}_d{}_K{}_A{}/".format(DATASET, DELTA, K, ALPHA) #directory for saving result data
FILE_META = "metadata"
FILE_REVIEWCONTENT = "reviewContent"
def loadReview():
    # read the review data
file_meta = GSFG_HOME + FILE_META
fp1 = open(file_meta)
for lines in fp1:
lines = lines.replace("\n", "")
lines = lines.split('\t')
globalLabel.append([0, int(lines[1]), 1 if lines[3] == "-1" else 0, 0]) #pred pid label istrain
fp1.close()
def loadPickle(pid, campaignLength):
    '''
    Load the data needed for CRF training for the given pid and campaign length,
    i.e. ((features, edges, edge_features), originalIndexs).
    originalIndexs is the list of original data indices, mapping one-to-one onto globalReviews.
    input:
        pid: id of the product whose data is requested
        campaignLength: all campaigns under this pid whose length equals campaignLength
    output:
        campaignsList -> [((features, edges, edge_features), originalIndexs), ...]
    '''
    pidFileDirectly = RESULT_FILE+str(pid)#+"_1" #subdirectory of the current pid
    campDataFile = pidFileDirectly + '/PrepareData_length{}.pkl'.format(campaignLength) #file with all campaigns of length campaignLength under the current pid
if not os.path.exists(pidFileDirectly):
# print("当前pid:{}文件不存在".format(pid), end=",")
return None
elif not os.path.exists(campDataFile):
# print("当前pid:{}不存在长度为{}的群组".format(pid, campaignLength))
return None
else:
        #the file exists: read it with pickle and return all campaigns of length campaignLength under the current pid -> data
with open(campDataFile, 'rb') as pkl_file:
data = pickle.load(pkl_file)
return data
def loadFeature(campdata):
"""
读取特征
"""
featureData = list()
label = list()
campvid = list()
for camp in campdata:
feature, oidList = camp
label.append(np.array([globalLabel[i][2] for i in oidList]))
node_feature = feature[0][:, NODE_FEATURES]
edge = feature[1]
        feature[2][:, 2] = np.nan_to_num(feature[2][:, 2]) #fix NaN values produced by the cosine text similarity
edge_feature = feature[2][:, EDGE_FEATURES]
featureData.append((node_feature, edge, edge_feature))
campvid.append(oidList)
return featureData, label, campvid
def loadPIDFeature():
"""
    Load the features of all products
"""
for p in PID_LIST:
for camplen in CAMPLEN:
campdata = loadPickle(p, camplen)
if campdata:
feature, label, vidList = loadFeature(campdata)
X_data.extend(feature)
Y_data.extend(label)
Y_camp.extend(vidList)
def statistic_result(y_pred, y_test, camptest):
"""
    Aggregate the review-level results
"""
df_data = pd.DataFrame([], columns = ["oid", "pred", "label"])
camp_arr = np.hstack(camptest)
pred_arr = np.hstack(y_pred)
label_arr = np.hstack(y_test)
df_data["oid"] = camp_arr
df_data["pred"] = pred_arr
df_data["label"] = label_arr
df_result = df_data.groupby(by="oid").max().reset_index()
del pred_arr, camp_arr, label_arr, df_data
return df_result
def statistic_campaign_result(y_pred, y_test):
"""
    Aggregate the campaign-level results
"""
camps_pred = list()
camps_lbl = list()
for ypds, lbls in zip(y_pred, y_test):
camps_pred.append(1 if ypds.sum() >= FAKE_CAMP else 0)
camps_lbl.append(1 if lbls.sum() >= FAKE_CAMP else 0)
return camps_pred, camps_lbl
def model_test(k, head, tail):
"""
    CRF training and prediction
"""
    each_fold_time = time.time() #start timing
#divide train set and test set
train_id = dataId[head : tail]
test_id = dataId[:head] + dataId[tail:]
X_train = X_arr[train_id, :]
Y_train = Y_arr[train_id]
X_test = X_arr[test_id, :]
Y_test = Y_arr[test_id]
campTest = Camp_arr[test_id]
#ends divide train set and test set
if len(X_train) > 0:
        #instantiate the CRF
EFGCRF = EdgeFeatureGraphCRF(inference_method='qpbo', class_weight=CLASS_WEIGHT)
if LEARNER == "OneSlackSSVM":
            #fit the model parameters with OneSlackSSVM
ssvm = OneSlackSSVM(EFGCRF, C=.1, tol=.1, max_iter=100, switch_to='ad3')
elif LEARNER == "FrankWolfeSSVM":
            #fit the model parameters with FrankWolfeSSVM
ssvm = FrankWolfeSSVM(EFGCRF, C=.1, tol=.1, max_iter=100)
else:
            #no classifier selected, do nothing
pass
ssvm.fit(X_train, Y_train)
Y_pred = ssvm.predict(X_test)
df_result = statistic_result(Y_pred, Y_test, campTest)
V_precision = precision_score(df_result["label"], df_result["pred"])
V_recall = recall_score(df_result["label"], df_result["pred"])
V_f1 = f1_score(df_result["label"], df_result["pred"])
camps_pred, camps_lbl = statistic_campaign_result(Y_pred, Y_test)
C_precision = precision_score(camps_lbl, camps_pred)
C_recall = recall_score(camps_lbl, camps_pred)
C_f1 = f1_score(camps_lbl, camps_pred)
result_Queue.put([V_precision, V_recall, V_f1, C_precision, C_recall, C_f1])
else:
print("TRAIN SET is NULL")
print("the {}th fold using time: {:.4f} min".format(k+1, (time.time() - each_fold_time) / 60) )
del X_train, Y_train, X_test, Y_test, Y_pred, campTest
if __name__ == "__main__": # main entry point
print("DELTA:{}, K:{}, ALPHA:{}".format(DELTA, K, ALPHA))
print("DATASET:{} LEARNER:{}".format(DATASET, LEARNER))
print("CALSS_WEIGHT:", CLASS_WEIGHT)
print("FEATURE", NODE_FEATURES, EDGE_FEATURES)
    globalLabel = list() #pred pid label istrain: prediction, PID, ground-truth label, whether the sample is in the training set (0: no, 1: yes)
readReviewTime = time.time()
    loadReview() #load the reviews
print("load reviews time: {:.4f} min".format((time.time()-readReviewTime)/60))
    X_data = list() #feature list
    Y_data = list() #label list
    Y_camp = list() #review id list
    loadPIDFeature() #load the features
print("load feature time: {:.4f} min".format((time.time()-readReviewTime)/60))
X_arr, Y_arr, Camp_arr = np.array(X_data), np.array(Y_data), np.array(Y_camp)
    campaigns_num = len(X_data) #number of campaigns
    Vset = set(np.hstack(Y_camp)) #set of review ids
    Vs_num = len(Vset) #number of reviews
    fake_Vs_num = 0 #number of fake reviews
for vid in Vset:
fake_Vs_num += globalLabel[vid][2]
    fake_campaigns_num = 0 #number of fake campaigns
for d in Y_data:
if sum(d) >= FAKE_CAMP: fake_campaigns_num += 1
del X_data, Y_data, Y_camp, globalLabel
lenData = len(X_arr)
    dataId = [i for i in range(lenData)] #dataset indices
    #shuffle the indices
random.shuffle(dataId)
#K_FOLD_CROSS_VALIDATION
each_fold_size = (lenData + 1) / K_FOLD
    #stores the result of each fold
result_Queue = Queue() #[V_precision, V_recall, V_f1, C_precision, C_recall, f1]
    threadList = list() #thread pool holding all worker threads
for k in range(K_FOLD): #each fold
# print("the {}th fold =====".format(k+1))
head, tail = int(k * each_fold_size), int((k + 1) * each_fold_size)
        thr = Thread(target=model_test, args=(k, head, tail)) #run each fold in its own thread
        threadList.append(thr) #add the thread to the pool
        thr.start() #start the current thread
    for t in threadList: #block the main thread until all child threads have finished
t.join()
all_result = np.zeros((K_FOLD,6), dtype="float64")
i = 0
    while not result_Queue.empty(): #collect the results
all_result[i,:] = result_Queue.get()
i += 1
avg_V_p, avg_V_r, avg_V_f = np.mean(all_result[:, 0]), np.mean(all_result[:, 1]), np.mean(all_result[:, 2])
avg_C_p, avg_C_r, avg_C_f = np.mean(all_result[:, 3]), np.mean(all_result[:, 4]), np.mean(all_result[:, 5])
print("campaign_length: ", CAMPLEN[0])
print("{}_fold_cross_validation result: ".format(K_FOLD))
print("reviews: ")
print("avg_precision||avg_recall||avg_f1_score||Vs_num||fake_Vs_num")
print("%.4f" % avg_V_p, "%.4f" % avg_V_r, "%.4f" % avg_V_f, Vs_num, fake_Vs_num, sep="\t||")
print("std_precision||std_recall||std_f1_score")
print("%.4f" % np.std(all_result[:, 0]), "%.4f" % np.std(all_result[:, 1]), "%.4f" % np.std(all_result[:, 2]), sep="\t||")
print("campaigns: ")
print("avg_precision||avg_recall||avg_f1_score||campaigns_num||fake_campaigns_num")
print("%.4f" % avg_C_p, "%.4f" % avg_C_r, "%.4f" % avg_C_f, campaigns_num, fake_campaigns_num, sep="\t||")
print("std_precision||std_recall||std_f1_score")
print("%.4f" % np.std(all_result[:, 3]), "%.4f" % np.std(all_result[:, 4]), "%.4f" % np.std(all_result[:, 5]), sep="\t||")
print(" " + str(CAMPLEN[0]) if CAMPLEN[0] != 10 else ">=10", "%.4f" % avg_V_p, "%.4f" % avg_V_r, "%.4f" % avg_V_f, "%.4f" % np.std(all_result[:, 0]), "%.4f" % np.std(all_result[:, 1]), "%.4f" % np.std(all_result[:, 2]), sep=",")
print(" " + str(CAMPLEN[0]) if CAMPLEN[0] != 10 else ">=10", "%.4f" % avg_C_p, "%.4f" % avg_C_r, "%.4f" % avg_C_f, "%.4f" % np.std(all_result[:, 3]), "%.4f" % np.std(all_result[:, 4]), "%.4f" % np.std(all_result[:, 5]), sep=",")
    #to persist the model, simply use joblib
del all_result
gc.collect()
print("total running time: {:.4f} min".format((time.time()-readReviewTime)/60))
|
[
"numpy.hstack",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"os.path.exists",
"numpy.mean",
"pystruct.learners.FrankWolfeSSVM",
"pandas.DataFrame",
"pystruct.models.EdgeFeatureGraphCRF",
"random.shuffle",
"pickle.load",
"gc.collect",
"numpy.std",
"time.time",
"sklearn.metrics.f1_score",
"pystruct.learners.OneSlackSSVM",
"numpy.zeros",
"threading.Thread",
"queue.Queue",
"numpy.nan_to_num"
] |
[((4091, 4141), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['oid', 'pred', 'label']"}), "([], columns=['oid', 'pred', 'label'])\n", (4103, 4141), True, 'import pandas as pd\n'), ((4160, 4179), 'numpy.hstack', 'np.hstack', (['camptest'], {}), '(camptest)\n', (4169, 4179), True, 'import numpy as np\n'), ((4196, 4213), 'numpy.hstack', 'np.hstack', (['y_pred'], {}), '(y_pred)\n', (4205, 4213), True, 'import numpy as np\n'), ((4231, 4248), 'numpy.hstack', 'np.hstack', (['y_test'], {}), '(y_test)\n', (4240, 4248), True, 'import numpy as np\n'), ((4934, 4945), 'time.time', 'time.time', ([], {}), '()\n', (4943, 4945), False, 'import time\n'), ((7090, 7101), 'time.time', 'time.time', ([], {}), '()\n', (7099, 7101), False, 'import time\n'), ((8025, 8047), 'random.shuffle', 'random.shuffle', (['dataId'], {}), '(dataId)\n', (8039, 8047), False, 'import random\n'), ((8175, 8182), 'queue.Queue', 'Queue', ([], {}), '()\n', (8180, 8182), False, 'from queue import Queue\n'), ((8743, 8781), 'numpy.zeros', 'np.zeros', (['(K_FOLD, 6)'], {'dtype': '"""float64"""'}), "((K_FOLD, 6), dtype='float64')\n", (8751, 8781), True, 'import numpy as np\n'), ((10554, 10566), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10564, 10566), False, 'import gc\n'), ((2547, 2578), 'os.path.exists', 'os.path.exists', (['pidFileDirectly'], {}), '(pidFileDirectly)\n', (2561, 2578), False, 'import os\n'), ((3368, 3399), 'numpy.nan_to_num', 'np.nan_to_num', (['feature[2][:, 2]'], {}), '(feature[2][:, 2])\n', (3381, 3399), True, 'import numpy as np\n'), ((5341, 5412), 'pystruct.models.EdgeFeatureGraphCRF', 'EdgeFeatureGraphCRF', ([], {'inference_method': '"""qpbo"""', 'class_weight': 'CLASS_WEIGHT'}), "(inference_method='qpbo', class_weight=CLASS_WEIGHT)\n", (5360, 5412), False, 'from pystruct.models import EdgeFeatureGraphCRF\n'), ((5946, 6000), 'sklearn.metrics.precision_score', 'precision_score', (["df_result['label']", "df_result['pred']"], {}), "(df_result['label'], df_result['pred'])\n", (5961, 6000), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((6021, 6072), 'sklearn.metrics.recall_score', 'recall_score', (["df_result['label']", "df_result['pred']"], {}), "(df_result['label'], df_result['pred'])\n", (6033, 6072), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((6089, 6136), 'sklearn.metrics.f1_score', 'f1_score', (["df_result['label']", "df_result['pred']"], {}), "(df_result['label'], df_result['pred'])\n", (6097, 6136), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((6245, 6283), 'sklearn.metrics.precision_score', 'precision_score', (['camps_lbl', 'camps_pred'], {}), '(camps_lbl, camps_pred)\n', (6260, 6283), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((6304, 6339), 'sklearn.metrics.recall_score', 'recall_score', (['camps_lbl', 'camps_pred'], {}), '(camps_lbl, camps_pred)\n', (6316, 6339), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((6356, 6387), 'sklearn.metrics.f1_score', 'f1_score', (['camps_lbl', 'camps_pred'], {}), '(camps_lbl, camps_pred)\n', (6364, 6387), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((7455, 7471), 'numpy.array', 'np.array', (['X_data'], {}), '(X_data)\n', (7463, 7471), True, 'import numpy as np\n'), ((7473, 7489), 'numpy.array', 'np.array', (['Y_data'], {}), '(Y_data)\n', (7481, 7489), True, 'import numpy as np\n'), ((7491, 7507), 'numpy.array', 'np.array', (['Y_camp'], {}), 
'(Y_camp)\n', (7499, 7507), True, 'import numpy as np\n'), ((7569, 7586), 'numpy.hstack', 'np.hstack', (['Y_camp'], {}), '(Y_camp)\n', (7578, 7586), True, 'import numpy as np\n'), ((8480, 8527), 'threading.Thread', 'Thread', ([], {'target': 'model_test', 'args': '(k, head, tail)'}), '(target=model_test, args=(k, head, tail))\n', (8486, 8527), False, 'from threading import Thread\n'), ((8940, 8965), 'numpy.mean', 'np.mean', (['all_result[:, 0]'], {}), '(all_result[:, 0])\n', (8947, 8965), True, 'import numpy as np\n'), ((8967, 8992), 'numpy.mean', 'np.mean', (['all_result[:, 1]'], {}), '(all_result[:, 1])\n', (8974, 8992), True, 'import numpy as np\n'), ((8994, 9019), 'numpy.mean', 'np.mean', (['all_result[:, 2]'], {}), '(all_result[:, 2])\n', (9001, 9019), True, 'import numpy as np\n'), ((9053, 9078), 'numpy.mean', 'np.mean', (['all_result[:, 3]'], {}), '(all_result[:, 3])\n', (9060, 9078), True, 'import numpy as np\n'), ((9080, 9105), 'numpy.mean', 'np.mean', (['all_result[:, 4]'], {}), '(all_result[:, 4])\n', (9087, 9105), True, 'import numpy as np\n'), ((9107, 9132), 'numpy.mean', 'np.mean', (['all_result[:, 5]'], {}), '(all_result[:, 5])\n', (9114, 9132), True, 'import numpy as np\n'), ((2669, 2697), 'os.path.exists', 'os.path.exists', (['campDataFile'], {}), '(campDataFile)\n', (2683, 2697), False, 'import os\n'), ((3212, 3258), 'numpy.array', 'np.array', (['[globalLabel[i][2] for i in oidList]'], {}), '([globalLabel[i][2] for i in oidList])\n', (3220, 3258), True, 'import numpy as np\n'), ((5507, 5574), 'pystruct.learners.OneSlackSSVM', 'OneSlackSSVM', (['EFGCRF'], {'C': '(0.1)', 'tol': '(0.1)', 'max_iter': '(100)', 'switch_to': '"""ad3"""'}), "(EFGCRF, C=0.1, tol=0.1, max_iter=100, switch_to='ad3')\n", (5519, 5574), False, 'from pystruct.learners import FrankWolfeSSVM, OneSlackSSVM\n'), ((9511, 9535), 'numpy.std', 'np.std', (['all_result[:, 0]'], {}), '(all_result[:, 0])\n', (9517, 9535), True, 'import numpy as np\n'), ((9546, 9570), 'numpy.std', 'np.std', (['all_result[:, 1]'], {}), '(all_result[:, 1])\n', (9552, 9570), True, 'import numpy as np\n'), ((9581, 9605), 'numpy.std', 'np.std', (['all_result[:, 2]'], {}), '(all_result[:, 2])\n', (9587, 9605), True, 'import numpy as np\n'), ((9920, 9944), 'numpy.std', 'np.std', (['all_result[:, 3]'], {}), '(all_result[:, 3])\n', (9926, 9944), True, 'import numpy as np\n'), ((9955, 9979), 'numpy.std', 'np.std', (['all_result[:, 4]'], {}), '(all_result[:, 4])\n', (9961, 9979), True, 'import numpy as np\n'), ((9990, 10014), 'numpy.std', 'np.std', (['all_result[:, 5]'], {}), '(all_result[:, 5])\n', (9996, 10014), True, 'import numpy as np\n'), ((10159, 10183), 'numpy.std', 'np.std', (['all_result[:, 0]'], {}), '(all_result[:, 0])\n', (10165, 10183), True, 'import numpy as np\n'), ((10194, 10218), 'numpy.std', 'np.std', (['all_result[:, 1]'], {}), '(all_result[:, 1])\n', (10200, 10218), True, 'import numpy as np\n'), ((10229, 10253), 'numpy.std', 'np.std', (['all_result[:, 2]'], {}), '(all_result[:, 2])\n', (10235, 10253), True, 'import numpy as np\n'), ((10395, 10419), 'numpy.std', 'np.std', (['all_result[:, 3]'], {}), '(all_result[:, 3])\n', (10401, 10419), True, 'import numpy as np\n'), ((10430, 10454), 'numpy.std', 'np.std', (['all_result[:, 4]'], {}), '(all_result[:, 4])\n', (10436, 10454), True, 'import numpy as np\n'), ((10465, 10489), 'numpy.std', 'np.std', (['all_result[:, 5]'], {}), '(all_result[:, 5])\n', (10471, 10489), True, 'import numpy as np\n'), ((2951, 2972), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', 
(2962, 2972), False, 'import pickle\n'), ((5664, 5716), 'pystruct.learners.FrankWolfeSSVM', 'FrankWolfeSSVM', (['EFGCRF'], {'C': '(0.1)', 'tol': '(0.1)', 'max_iter': '(100)'}), '(EFGCRF, C=0.1, tol=0.1, max_iter=100)\n', (5678, 5716), False, 'from pystruct.learners import FrankWolfeSSVM, OneSlackSSVM\n'), ((6614, 6625), 'time.time', 'time.time', ([], {}), '()\n', (6623, 6625), False, 'import time\n'), ((7184, 7195), 'time.time', 'time.time', ([], {}), '()\n', (7193, 7195), False, 'import time\n'), ((7386, 7397), 'time.time', 'time.time', ([], {}), '()\n', (7395, 7397), False, 'import time\n'), ((10619, 10630), 'time.time', 'time.time', ([], {}), '()\n', (10628, 10630), False, 'import time\n')]
|
import numpy as np
from sklearn.linear_model import LinearRegression
x = np.array([29,59,119,238,464,659]).reshape(-1,1)
y = np.array([0.004,0.009,0.027,0.027,0.051,0.165])
model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
print('coefficient of determination:', r_sq)
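# Note (added): the fitted slope and intercept are available as model.coef_ and
# model.intercept_, and predictions for new loads can be made with model.predict,
# e.g. model.predict(np.array([[300]])).
print('slope:', model.coef_[0], 'intercept:', model.intercept_)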
|
[
"numpy.array",
"sklearn.linear_model.LinearRegression"
] |
[((125, 177), 'numpy.array', 'np.array', (['[0.004, 0.009, 0.027, 0.027, 0.051, 0.165]'], {}), '([0.004, 0.009, 0.027, 0.027, 0.051, 0.165])\n', (133, 177), True, 'import numpy as np\n'), ((73, 111), 'numpy.array', 'np.array', (['[29, 59, 119, 238, 464, 659]'], {}), '([29, 59, 119, 238, 464, 659])\n', (81, 111), True, 'import numpy as np\n'), ((181, 199), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (197, 199), False, 'from sklearn.linear_model import LinearRegression\n')]
|
import csv
import os
import random
import uuid
import pickle
from multiprocessing import Pool
from collections import Counter
import numpy as np
import imgaug.augmenters as iaa
from PIL import Image
def rotate_save(img, flip, angle, label, new_label_dict, out_dir):
filename = str(uuid.uuid4()) + ".png"
new_label_dict[filename] = label
if not flip:
img.rotate(angle, expand=True).save(os.path.join(out_dir, filename))
else:
img.rotate(angle, expand=True).transpose(Image.FLIP_LEFT_RIGHT).save(os.path.join(out_dir, filename))
def process_image(image_filename, in_dir, out_dir, label_dict, count):
new_label_dict = {}
img = Image.open(os.path.join(in_dir, image_filename))
config = [
(False, 0),
(False, 90),
(False, 180),
(False, 270),
(True, 0),
(True, 90),
(True, 180),
(True, 270)
]
aug = iaa.Sequential([
iaa.OneOf([
iaa.Affine(scale={"x": (0.7, 1.3), "y": (0.7, 1.3)}),
iaa.Affine(rotate=(-25, 25))
])
])
while count > 0:
flip, angle = config[(count - 1) % len(config)]
rotate_save(Image.fromarray(aug(images=[np.array(img)])[0]), flip, angle, label_dict[image_filename[:-4]], new_label_dict, out_dir)
count -= 1
return new_label_dict
def main(in_dir, out_dir, labels_file):
label_dict = {}
with open(labels_file, "r") as f:
csvfile = csv.reader(f)
# Skip column description
next(csvfile)
for row in csvfile:
label_dict[row[0]] = row[1:].index("1.0")
new_label_dict = {}
counter = {}
files = os.listdir(in_dir)
random.shuffle(files)
for f in files:
counter[label_dict[f[:-4]]] = counter.get(label_dict[f[:-4]], 0) + 1
print(counter)
desired_counts = {k:int(max(0.5*(max(counter.values()) - n) + n, n)) for k, n in counter.items()}
print(desired_counts)
print(len(files))
p = Pool(16)
dicts = p.starmap(
process_image,
[
(
image_filename,
in_dir,
out_dir,
label_dict,
int(desired_counts[label_dict[image_filename[:-4]]] / counter[label_dict[image_filename[:-4]]])
)
for image_filename in files
]
)
combined_dict = {}
for d in dicts:
combined_dict.update(d)
with open("label_dict.pkl", "wb") as f:
pickle.dump(combined_dict, f)
if __name__ == "__main__":
main("train/", "train_aug/", "ISIC2018_Task3_Training_GroundTruth.csv")
|
[
"os.listdir",
"pickle.dump",
"random.shuffle",
"imgaug.augmenters.Affine",
"os.path.join",
"uuid.uuid4",
"numpy.array",
"multiprocessing.Pool",
"csv.reader"
] |
[((1648, 1666), 'os.listdir', 'os.listdir', (['in_dir'], {}), '(in_dir)\n', (1658, 1666), False, 'import os\n'), ((1671, 1692), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (1685, 1692), False, 'import random\n'), ((1972, 1980), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (1976, 1980), False, 'from multiprocessing import Pool\n'), ((679, 715), 'os.path.join', 'os.path.join', (['in_dir', 'image_filename'], {}), '(in_dir, image_filename)\n', (691, 715), False, 'import os\n'), ((1440, 1453), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1450, 1453), False, 'import csv\n'), ((2471, 2500), 'pickle.dump', 'pickle.dump', (['combined_dict', 'f'], {}), '(combined_dict, f)\n', (2482, 2500), False, 'import pickle\n'), ((288, 300), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (298, 300), False, 'import uuid\n'), ((409, 440), 'os.path.join', 'os.path.join', (['out_dir', 'filename'], {}), '(out_dir, filename)\n', (421, 440), False, 'import os\n'), ((529, 560), 'os.path.join', 'os.path.join', (['out_dir', 'filename'], {}), '(out_dir, filename)\n', (541, 560), False, 'import os\n'), ((955, 1007), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': (0.7, 1.3), 'y': (0.7, 1.3)}"}), "(scale={'x': (0.7, 1.3), 'y': (0.7, 1.3)})\n", (965, 1007), True, 'import imgaug.augmenters as iaa\n'), ((1014, 1042), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': '(-25, 25)'}), '(rotate=(-25, 25))\n', (1024, 1042), True, 'import imgaug.augmenters as iaa\n'), ((1186, 1199), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1194, 1199), True, 'import numpy as np\n')]
|