code (stringlengths 31-1.05M) | apis (list) | extract_api (stringlengths 97-1.91M) |
---|---|---|
# -*- coding: utf-8 -*-
# python 3.5
import numpy as np
import cv2
import os
import random
BKroot = "/home/shinpoi/dataset/backgrounds"
ITEMroot = "/home/shinpoi/dataset/humans/"
def histogram_matching(srcArr, dstArr, srcPNG=True):
src_HSV = cv2.cvtColor(srcArr, cv2.COLOR_RGB2HSV)
srcHist = cv2.calcHist((src_HSV,), (2,), None, (256,), (0, 256)).reshape((-1,))
if srcPNG:
srcHist[0] = 0
srcHist /= sum(srcHist)
srcHistMap = np.zeros(256, dtype=np.float32)
for i in range(len(srcHist)):
srcHistMap[i] = sum(srcHist[:i])
dst_HSV = cv2.cvtColor(dstArr, cv2.COLOR_RGB2HSV)
dstHist = cv2.calcHist((dst_HSV,), (2,), None, (256,), (0, 256)).reshape((-1,))
dstHist /= sum(dstHist)
dstHistMap = np.zeros(256, dtype=np.float32)
for i in range(len(dstHist)):
dstHistMap[i] = sum(dstHist[:i])
HistMap = np.zeros(256, dtype=np.uint8)
for i in range(256):
minMap = 1
minTag = None
for j in range(256):
if minMap > abs(srcHistMap[i] - dstHistMap[j]):
minMap = abs(srcHistMap[i] - dstHistMap[j])
minTag = j
HistMap[i] = minTag
# optional flattening of the lookup map (unreachable here: i stays in range(256), so it never exceeds 100000)
if i > 100000:
if HistMap[i] < HistMap[i-1]:
HistMap[i] = HistMap[i-1]
if HistMap[i] == HistMap[i-1] == HistMap[i-2] == HistMap[i-3]:
HistMap[i] += 1
for i in range(src_HSV.shape[0]):
for j in range(src_HSV.shape[1]):
if src_HSV[i, j, 2] == 0:
continue
else:
src_HSV[i, j, 2] = HistMap[src_HSV[i, j, 2]]
return cv2.cvtColor(src_HSV, cv2.COLOR_HSV2RGB)
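# reshape_item: rescales an item image by a random factor (between min_t and
# roughly 0.8*maxsize divided by the shorter side) and flips it horizontally
# with probability flip_rate.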
def reshape_item(imgArr, maxsize=448, min_t=0.4, flip_rate=0.5):
if imgArr.shape[0] > imgArr.shape[1]:
max_t = (maxsize/imgArr.shape[1])*0.8 - min_t
else:
max_t = (maxsize/imgArr.shape[0])*0.8 - min_t
times = min_t + random.random()*max_t
imgArr = cv2.resize(imgArr, (int(imgArr.shape[1]*times), int(imgArr.shape[0]*times)))
# flip
if random.random() < flip_rate:
imgArr = cv2.flip(imgArr, 1)
return imgArr
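# ImageGenerator: builds synthetic training data by pasting RGBA item cut-outs
# onto random crops of background photos and returning image batches together
# with bounding-box / class targets.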
class ImageGenerator(object):
def __init__(self, bk_root, item_root, max_img_size=448, batch_size=16):
self.bk_root = bk_root
self.item_root = item_root
self.batch_size = batch_size
self.max_img_size = max_img_size
self.bk_list = []
self.images_pool = None
self.items_pool_rgba = []
self.n_bk = 0
self.BKindex = None
def init_all(self, backup=None):
if not backup:
self.init_backgrounds_pool()
self.init_BKimages_pool(backup=backup)
self.init_items_pool()
self.init_BKindex()
def init_backgrounds_pool(self):
for i in os.walk(self.bk_root):
root, folder, files = i
for f in files:
if f.endswith(".jpg"):
dir_ = root + "/" + f
self.bk_list.append(dir_)
self.n_bk = len(self.bk_list)
print("init_backgrounds_pool() end")
# special
def init_BKimages_pool(self, backup=None):
self.bk_list = np.random.permutation(self.bk_list)
if backup:
bk = np.load(backup)
self.n_bk = len(bk)
return bk
self.n_bk = 32000 # test mode
max_img_size = self.max_img_size
self.images_pool = np.zeros((self.n_bk, max_img_size, max_img_size, 3), dtype=np.uint8)
n = 0
for dir_ in self.bk_list[:self.n_bk]:
img = cv2.imread(dir_)
if img.shape[0] == 270:
st = random.randint(0, 210) # 480-270
img = cv2.resize(img[:, st: st+270], (max_img_size, max_img_size))
elif img.shape[0] == 720:
st = random.randint(0, 560) # 1280-720
img = cv2.resize(img[:, st: st+720], (max_img_size, max_img_size))
else:
print("ignore %s. shape:(%d, %d)" % (dir_, img.shape[0], img.shape[1]))
continue
self.images_pool[n] = img
n += 1
if n%500 == 0:
print("%d/%d" % (n, self.n_bk))
if n != self.n_bk:
print("has igroned images!!")
self.n_bk = n
self.images_pool = self.images_pool[:n]
print("init_BKimages_pool() end, get %d images" % n)
# special
def init_items_pool(self):
item_list = []
for i in os.walk(self.item_root):
root, folder, files = i
for f in files:
if f.endswith(".png"):
dir_ = root + "/" + f
item_list.append(dir_)
self.items_pool_rgba = [[] for i in range(10)]
for dir_ in item_list:
if "blue_1" in dir_:
self.items_pool_rgba[0].append(cv2.imread(dir_, -1))
elif "blue_2" in dir_:
self.items_pool_rgba[1].append(cv2.imread(dir_, -1))
elif "blue_3" in dir_ or "blue_4" in dir_:
self.items_pool_rgba[2].append(cv2.imread(dir_, -1))
elif "blue_5" in dir_:
self.items_pool_rgba[3].append(cv2.imread(dir_, -1))
elif "blue_6" in dir_:
self.items_pool_rgba[4].append(cv2.imread(dir_, -1))
elif "orange_1" in dir_:
self.items_pool_rgba[5].append(cv2.imread(dir_, -1))
elif "orange_2" in dir_:
self.items_pool_rgba[6].append(cv2.imread(dir_, -1))
elif "orange_3" in dir_ or "orange_4" in dir_:
self.items_pool_rgba[7].append(cv2.imread(dir_, -1))
elif "orange_5" in dir_:
self.items_pool_rgba[8].append(cv2.imread(dir_, -1))
elif "orange_6" in dir_:
self.items_pool_rgba[9].append(cv2.imread(dir_, -1))
print("init_items_pool() end")
def compose_img(self, item, background, hm_rate=0.8):
rgba = item.copy()
bk = background.copy()
bk_rows, bk_cols, ch = background.shape
if random.random() <= hm_rate:
rgba[:, :, :3] = histogram_matching(rgba[:, :, :3], background)
# insert coordinate
hum_rows, hum_cols, ch = rgba.shape
lim_rows = int((bk_rows - hum_rows)/2)
lim_cols = bk_cols - hum_cols
row_start = int(lim_rows*random.random()) + lim_rows
col_start = int(lim_cols*random.random())
# create mask
mask = cv2.GaussianBlur(rgba[:, :, 3], (1, 1), 1)
mask_inv = cv2.bitwise_not(mask)
mask = np.array(mask, dtype=np.float32)/255
mask_inv = np.array(mask_inv, dtype=np.float32)/255
mask.resize((hum_rows, hum_cols, 1))
mask_inv.resize((hum_rows, hum_cols, 1))
mask = np.concatenate((mask, mask, mask), axis=2)
mask_inv = np.concatenate((mask_inv, mask_inv, mask_inv), axis=2)
# insert
# print(row_start, col_start, hum_rows, hum_cols)
bk_part = bk[row_start:row_start+hum_rows, col_start:col_start+hum_cols]
bk[row_start:row_start + hum_rows, col_start:col_start + hum_cols] = \
np.array(bk_part * mask_inv + rgba[:, :, :3] * mask, dtype=np.uint8)
# target: normalized centre coordinates and size of the pasted item (x along rows, y along columns)
x = (2*row_start + hum_rows)/bk_rows/2
y = (2*col_start + hum_cols)/bk_cols/2
h = hum_rows/bk_rows
w = hum_cols/bk_cols
t = [{'x':x, 'y':y, 'w':w, 'h':h, 'label':None, 'one_hot_label':None }]
return bk, t
def init_BKindex(self):
self.BKindex = np.arange(0, self.n_bk, 1, dtype=np.int32)
self.BKindex = np.random.permutation(self.BKindex)
def create_batch(self, batch_size=None, index=0, reuse=0, reuserate=2, img_size=None):
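# Builds one batch: for each selected background a random item class is drawn,
# an item is composed onto it, and the result is returned as a
# (batch_size, 3, img_size, img_size) float array scaled to [0, 1] plus target dicts.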
if not img_size:
img_size = self.max_img_size
if not batch_size:
batch_size = self.batch_size
if 2*batch_size > self.n_bk:
self.init_BKindex()
if (reuse != 0) and (reuse % reuserate == 0):
self.init_BKimages_pool()
index = 0
reuse += 1
for i in range(batch_size, img_size):
batch = np.zeros((batch_size, 3, img_size, img_size), dtype=np.float32)
tt = []
BKimg_list = self.BKindex[index: index+batch_size]
index += batch_size
n = 0
for i in BKimg_list:
cla = random.randint(0, 9)
item = random.sample(self.items_pool_rgba[cla], 1)
x, t = self.compose_img(item[0], self.images_pool[i])
if not x.shape[0] == img_size:
x = cv2.resize(x, (img_size, img_size))
batch[n] = x.transpose((2, 0, 1))
t[0]['label'] = cla
oh = np.zeros(10, dtype=np.float32)
oh[cla] = 1
t[0]['one_hot_label'] = oh
tt.append(t)
n += 1
return batch/255, tt
"""
def save_imgspool_backup(self):
print("start backup")
np.save("img_pools.npy", self.images_pool)
print("backup: img_pools.npy")
"""
"""
import image_generator_2 as ig
gen = ig.ImageGenerator(ig.BKroot, ig.ITEMroot)
gen.init_all()
x, t = gen.create_batch()
"""
|
[
"cv2.resize",
"cv2.GaussianBlur",
"numpy.load",
"cv2.bitwise_not",
"random.randint",
"cv2.cvtColor",
"cv2.calcHist",
"random.sample",
"os.walk",
"numpy.zeros",
"random.random",
"cv2.imread",
"numpy.array",
"numpy.arange",
"numpy.random.permutation",
"cv2.flip",
"numpy.concatenate"
] |
[((259, 298), 'cv2.cvtColor', 'cv2.cvtColor', (['srcArr', 'cv2.COLOR_RGB2HSV'], {}), '(srcArr, cv2.COLOR_RGB2HSV)\n', (271, 298), False, 'import cv2\n'), ((471, 502), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.float32'}), '(256, dtype=np.float32)\n', (479, 502), True, 'import numpy as np\n'), ((597, 636), 'cv2.cvtColor', 'cv2.cvtColor', (['dstArr', 'cv2.COLOR_RGB2HSV'], {}), '(dstArr, cv2.COLOR_RGB2HSV)\n', (609, 636), False, 'import cv2\n'), ((769, 800), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.float32'}), '(256, dtype=np.float32)\n', (777, 800), True, 'import numpy as np\n'), ((895, 924), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.uint8'}), '(256, dtype=np.uint8)\n', (903, 924), True, 'import numpy as np\n'), ((1696, 1736), 'cv2.cvtColor', 'cv2.cvtColor', (['src_HSV', 'cv2.COLOR_HSV2RGB'], {}), '(src_HSV, cv2.COLOR_HSV2RGB)\n', (1708, 1736), False, 'import cv2\n'), ((2132, 2147), 'random.random', 'random.random', ([], {}), '()\n', (2145, 2147), False, 'import random\n'), ((2180, 2199), 'cv2.flip', 'cv2.flip', (['imgArr', '(1)'], {}), '(imgArr, 1)\n', (2188, 2199), False, 'import cv2\n'), ((2917, 2938), 'os.walk', 'os.walk', (['self.bk_root'], {}), '(self.bk_root)\n', (2924, 2938), False, 'import os\n'), ((3314, 3349), 'numpy.random.permutation', 'np.random.permutation', (['self.bk_list'], {}), '(self.bk_list)\n', (3335, 3349), True, 'import numpy as np\n'), ((3572, 3640), 'numpy.zeros', 'np.zeros', (['(self.n_bk, max_img_size, max_img_size, 3)'], {'dtype': 'np.uint8'}), '((self.n_bk, max_img_size, max_img_size, 3), dtype=np.uint8)\n', (3580, 3640), True, 'import numpy as np\n'), ((4668, 4691), 'os.walk', 'os.walk', (['self.item_root'], {}), '(self.item_root)\n', (4675, 4691), False, 'import os\n'), ((6742, 6784), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['rgba[:, :, 3]', '(1, 1)', '(1)'], {}), '(rgba[:, :, 3], (1, 1), 1)\n', (6758, 6784), False, 'import cv2\n'), ((6805, 6826), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (6820, 6826), False, 'import cv2\n'), ((7053, 7095), 'numpy.concatenate', 'np.concatenate', (['(mask, mask, mask)'], {'axis': '(2)'}), '((mask, mask, mask), axis=2)\n', (7067, 7095), True, 'import numpy as np\n'), ((7116, 7170), 'numpy.concatenate', 'np.concatenate', (['(mask_inv, mask_inv, mask_inv)'], {'axis': '(2)'}), '((mask_inv, mask_inv, mask_inv), axis=2)\n', (7130, 7170), True, 'import numpy as np\n'), ((7413, 7481), 'numpy.array', 'np.array', (['(bk_part * mask_inv + rgba[:, :, :3] * mask)'], {'dtype': 'np.uint8'}), '(bk_part * mask_inv + rgba[:, :, :3] * mask, dtype=np.uint8)\n', (7421, 7481), True, 'import numpy as np\n'), ((7821, 7863), 'numpy.arange', 'np.arange', (['(0)', 'self.n_bk', '(1)'], {'dtype': 'np.int32'}), '(0, self.n_bk, 1, dtype=np.int32)\n', (7830, 7863), True, 'import numpy as np\n'), ((7888, 7923), 'numpy.random.permutation', 'np.random.permutation', (['self.BKindex'], {}), '(self.BKindex)\n', (7909, 7923), True, 'import numpy as np\n'), ((314, 368), 'cv2.calcHist', 'cv2.calcHist', (['(src_HSV,)', '(2,)', 'None', '(256,)', '(0, 256)'], {}), '((src_HSV,), (2,), None, (256,), (0, 256))\n', (326, 368), False, 'import cv2\n'), ((652, 706), 'cv2.calcHist', 'cv2.calcHist', (['(dst_HSV,)', '(2,)', 'None', '(256,)', '(0, 256)'], {}), '((dst_HSV,), (2,), None, (256,), (0, 256))\n', (664, 706), False, 'import cv2\n'), ((1997, 2012), 'random.random', 'random.random', ([], {}), '()\n', (2010, 2012), False, 'import random\n'), ((3388, 3403), 'numpy.load', 'np.load', (['backup'], {}), '(backup)\n', 
(3395, 3403), True, 'import numpy as np\n'), ((3722, 3738), 'cv2.imread', 'cv2.imread', (['dir_'], {}), '(dir_)\n', (3732, 3738), False, 'import cv2\n'), ((6322, 6337), 'random.random', 'random.random', ([], {}), '()\n', (6335, 6337), False, 'import random\n'), ((6843, 6875), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.float32'}), '(mask, dtype=np.float32)\n', (6851, 6875), True, 'import numpy as np\n'), ((6900, 6936), 'numpy.array', 'np.array', (['mask_inv'], {'dtype': 'np.float32'}), '(mask_inv, dtype=np.float32)\n', (6908, 6936), True, 'import numpy as np\n'), ((8448, 8511), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, img_size, img_size)'], {'dtype': 'np.float32'}), '((batch_size, 3, img_size, img_size), dtype=np.float32)\n', (8456, 8511), True, 'import numpy as np\n'), ((3798, 3820), 'random.randint', 'random.randint', (['(0)', '(210)'], {}), '(0, 210)\n', (3812, 3820), False, 'import random\n'), ((3854, 3915), 'cv2.resize', 'cv2.resize', (['img[:, st:st + 270]', '(max_img_size, max_img_size)'], {}), '(img[:, st:st + 270], (max_img_size, max_img_size))\n', (3864, 3915), False, 'import cv2\n'), ((6684, 6699), 'random.random', 'random.random', ([], {}), '()\n', (6697, 6699), False, 'import random\n'), ((8706, 8726), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (8720, 8726), False, 'import random\n'), ((8751, 8794), 'random.sample', 'random.sample', (['self.items_pool_rgba[cla]', '(1)'], {}), '(self.items_pool_rgba[cla], 1)\n', (8764, 8794), False, 'import random\n'), ((9085, 9115), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float32'}), '(10, dtype=np.float32)\n', (9093, 9115), True, 'import numpy as np\n'), ((3976, 3998), 'random.randint', 'random.randint', (['(0)', '(560)'], {}), '(0, 560)\n', (3990, 3998), False, 'import random\n'), ((4033, 4094), 'cv2.resize', 'cv2.resize', (['img[:, st:st + 720]', '(max_img_size, max_img_size)'], {}), '(img[:, st:st + 720], (max_img_size, max_img_size))\n', (4043, 4094), False, 'import cv2\n'), ((5058, 5078), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5068, 5078), False, 'import cv2\n'), ((6622, 6637), 'random.random', 'random.random', ([], {}), '()\n', (6635, 6637), False, 'import random\n'), ((8939, 8974), 'cv2.resize', 'cv2.resize', (['x', '(img_size, img_size)'], {}), '(x, (img_size, img_size))\n', (8949, 8974), False, 'import cv2\n'), ((5164, 5184), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5174, 5184), False, 'import cv2\n'), ((5290, 5310), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5300, 5310), False, 'import cv2\n'), ((5396, 5416), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5406, 5416), False, 'import cv2\n'), ((5502, 5522), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5512, 5522), False, 'import cv2\n'), ((5610, 5630), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5620, 5630), False, 'import cv2\n'), ((5718, 5738), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5728, 5738), False, 'import cv2\n'), ((5848, 5868), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5858, 5868), False, 'import cv2\n'), ((5956, 5976), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (5966, 5976), False, 'import cv2\n'), ((6064, 6084), 'cv2.imread', 'cv2.imread', (['dir_', '(-1)'], {}), '(dir_, -1)\n', (6074, 6084), False, 'import cv2\n')]
|
# function that returns [dEi/dt, dX/dt, dR/dt, dp/dt]
import math
import numpy as np
import copy
from scipy.integrate import odeint
class viscoPlastic2D:
def __init__(self, E, v, R1, k, K, a, b, c, n, trial, Emax):
self.E = E
self.v = v
self.R1 = R1
self.k = k
self.K = K
self.a = a
self.b = b
self.c = c
self.n = n
self.trial = trial
self.Emax = Emax
# function that returns de/dt (strain rate)
def total_strain(self, t):
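# Triangular strain cycle with period tc: ramps from 0 to Emax over the first
# quarter of the cycle, down to Emin by the three-quarter point, then back
# towards 0 at the end of the cycle.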
tc = 20.0
if (self.trial) == 'xy':
Emax = self.Emax # previous was 0.003
else:
Emax = self.Emax # previous was 0.001
Emin = -Emax
tcicle = t - tc*math.floor(t/tc)
if tcicle <= tc/4.0:
return 4.0*(Emax/tc)*tcicle
if tc/4.0 < tcicle <= (3.0/4.0)*tc:
return (-((4.0*Emax))/tc) * tcicle + (2.0) * Emax
if (3.0/4.0)*tc < tcicle <= tc:
return ((-4.0*Emin)/tc) * tcicle + 4.0*Emin
def model(self, z, t, i, stiff, ET, ann, scaler_x, scaler_y):
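# State vector z = [Ei_xx, Ei_yy, Ei_xy, X_xx, X_yy, X_xy, R, p]; the trained
# ANN predicts the corresponding rates from the scaled current state and
# stress, and this function returns dz/dt for the ODE solver.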
# To verify: whether the same results are obtained without using S'
Ei = z[:3].reshape(3, 1) # Inelastic strain tensor
X = z[3:6].reshape(3, 1) # Back stress tensor
R = copy.deepcopy(z[6]) # Drag stress
p = copy.deepcopy(z[7]) # Plastic strain
ET = ET.reshape(3, 1) # Total strain
# Calculate Stress
stress = np.matmul(stiff, ET-Ei)
if (self.trial) == 'xx': # X axis traction
stress[1] = 0 # StressY = 0
elif (self.trial) == 'yy': # Y axis traction
stress[0] = 0 # StressX = 0
# input = scaler_x.transform([[Ei[0, 0], Ei[1, 0], R, stress[0, 0],
# X[0, 0], X[1, 0], p]])
input = scaler_x.transform([[Ei[0, 0], Ei[2, 0], Ei[1, 0], R,
stress[0, 0], stress[2, 0], stress[1, 0],
X[0, 0], X[2, 0], X[1, 0], p]])
output = scaler_y.inverse_transform((ann.predict(input)))
dEIdt = np.array([[output[0][0]], [output[0][2]], [output[0][1]]])
dRdt = output[0][3]
dXdt = np.array([[output[0][4]], [output[0][6]], [output[0][5]]])
dpdt = output[0][7]
# dEIdt = np.array([[output[0][0]], [output[0][1]], [0]])
# dRdt = output[0][2]
# dXdt = np.array([[output[0][3]], [output[0][4]], [0]])
# dpdt = output[0][5]
# Store solutions
self.stress[i, 0] = stress[0, 0]
self.stress[i, 1] = stress[1, 0]
self.stress[i, 2] = stress[2, 0]
self.dEi[i, 0] = dEIdt[0, 0]
self.dEi[i, 1] = dEIdt[1, 0]
self.dEi[i, 2] = dEIdt[2, 0]
self.dX[i, 0] = dXdt[0, 0]
self.dX[i, 1] = dXdt[1, 0]
self.dX[i, 2] = dXdt[2, 0]
self.dp[i] = dpdt
self.dR[i] = dRdt
dzdt = [dEIdt[0][0], dEIdt[1][0], dEIdt[2][0], dXdt[0, 0], dXdt[1, 0],
dXdt[2, 0], dRdt, dpdt]
return dzdt
def solve(self, n, z0, t, ann, scaler_x, scaler_y):
self.ET = np.zeros((n, 3))
self.Ei = np.zeros((n, 3))
self.X = np.zeros((n, 3))
self.p = np.zeros(n)
self.R = np.zeros(n)
self.dEi = np.zeros((n, 3))
self.dX = np.zeros((n, 3))
self.dp = np.zeros(n)
self.dR = np.zeros(n)
self.stress = np.zeros((n, 3))
self.input = np.zeros((n, 12))
self.score = np.zeros(n)
# record initial conditions
self.Ei[0, 0] = z0[0] # Inelastic strain xx direction
self.Ei[0, 1] = z0[1] # Inelastic strain yy direction
self.Ei[0, 2] = z0[2] # Inelastic strain xy direction
self.X[0, 0] = z0[3] # Back stress xx direction
self.X[0, 1] = z0[4] # Back stress yy direction
self.X[0, 2] = z0[5] # Back stress xy direction
self.R[0] = z0[6] # Drag stress
self.p[0] = z0[7] # Plastic strain
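# Plane-stress stiffness matrix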
stiff = self.E/(1-self.v**2) * np.array([[1, self.v, 0 ],
[self.v, 1 , 0 ],
[0, 0, (1-self.v)/2]])
# Calculate total strain
# TODO
for i in range(1, n):
# Calculate Strain
if (self.trial) == 'xx':
self.ET[i, 0] = self.total_strain(t[i])
self.ET[i, 1] = -self.v * self.ET[i, 0]
elif (self.trial) == 'yy':
self.ET[i, 1] = self.total_strain(t[i])
self.ET[i, 0] = -self.v * self.ET[i, 1]
elif (self.trial)=='xy':
self.ET[i, 2] = self.total_strain(t[i])
# self.ET[i, 1] = self.total_strain(t[i])
# span for next time step
tspan = [t[i-1], t[i]]
# solves for next step
z = odeint(self.model, z0, tspan,
args=(i, stiff, self.ET[i, :], ann, scaler_x,
scaler_y))
#print (z[1][0])
# store solution for plotting
self.Ei[i, 0] = z[1][0]
self.Ei[i, 1] = z[1][1]
self.Ei[i, 2] = z[1][2]
#print ("Ei x -> ", z[1][0],"Ei y -> ", z[1][1], "Ei xy -> ", z[1][2])
self.X[i, 0] = z[1][3]
self.X[i, 1] = z[1][4]
self.X[i, 2] = z[1][5]
#print ("\nX x -> ", z[1][3],"X y -> ", z[1][4], "X xy -> ", z[1][5])
self.R[i] = z[1][6]
#print ("\nR -> ", z[1][6])
self.p[i] = z[1][7]
#print ("\np -> ", z[1][7])
# next initial condition
z0 = z[1]
|
[
"copy.deepcopy",
"scipy.integrate.odeint",
"numpy.zeros",
"math.floor",
"numpy.array",
"numpy.matmul"
] |
[((1323, 1342), 'copy.deepcopy', 'copy.deepcopy', (['z[6]'], {}), '(z[6])\n', (1336, 1342), False, 'import copy\n'), ((1376, 1395), 'copy.deepcopy', 'copy.deepcopy', (['z[7]'], {}), '(z[7])\n', (1389, 1395), False, 'import copy\n'), ((1518, 1543), 'numpy.matmul', 'np.matmul', (['stiff', '(ET - Ei)'], {}), '(stiff, ET - Ei)\n', (1527, 1543), True, 'import numpy as np\n'), ((2237, 2295), 'numpy.array', 'np.array', (['[[output[0][0]], [output[0][2]], [output[0][1]]]'], {}), '([[output[0][0]], [output[0][2]], [output[0][1]]])\n', (2245, 2295), True, 'import numpy as np\n'), ((2339, 2397), 'numpy.array', 'np.array', (['[[output[0][4]], [output[0][6]], [output[0][5]]]'], {}), '([[output[0][4]], [output[0][6]], [output[0][5]]])\n', (2347, 2397), True, 'import numpy as np\n'), ((3249, 3265), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3257, 3265), True, 'import numpy as np\n'), ((3284, 3300), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3292, 3300), True, 'import numpy as np\n'), ((3318, 3334), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3326, 3334), True, 'import numpy as np\n'), ((3352, 3363), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3360, 3363), True, 'import numpy as np\n'), ((3381, 3392), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3389, 3392), True, 'import numpy as np\n'), ((3412, 3428), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3420, 3428), True, 'import numpy as np\n'), ((3447, 3463), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3455, 3463), True, 'import numpy as np\n'), ((3482, 3493), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3490, 3493), True, 'import numpy as np\n'), ((3512, 3523), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3520, 3523), True, 'import numpy as np\n'), ((3546, 3562), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3554, 3562), True, 'import numpy as np\n'), ((3584, 3601), 'numpy.zeros', 'np.zeros', (['(n, 12)'], {}), '((n, 12))\n', (3592, 3601), True, 'import numpy as np\n'), ((3623, 3634), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3631, 3634), True, 'import numpy as np\n'), ((4214, 4282), 'numpy.array', 'np.array', (['[[1, self.v, 0], [self.v, 1, 0], [0, 0, (1 - self.v) / 2]]'], {}), '([[1, self.v, 0], [self.v, 1, 0], [0, 0, (1 - self.v) / 2]])\n', (4222, 4282), True, 'import numpy as np\n'), ((5139, 5229), 'scipy.integrate.odeint', 'odeint', (['self.model', 'z0', 'tspan'], {'args': '(i, stiff, self.ET[i, :], ann, scaler_x, scaler_y)'}), '(self.model, z0, tspan, args=(i, stiff, self.ET[i, :], ann, scaler_x,\n scaler_y))\n', (5145, 5229), False, 'from scipy.integrate import odeint\n'), ((748, 766), 'math.floor', 'math.floor', (['(t / tc)'], {}), '(t / tc)\n', (758, 766), False, 'import math\n')]
|
import chainer
from chainer import training
from chainer.training import extensions, ParallelUpdater
from chainer.training.triggers import ManualScheduleTrigger
from chainer.datasets import TransformDataset
from chainercv.datasets import VOCBboxDataset, voc_bbox_label_names
from chainercv import transforms
from chainercv.transforms.image.resize import resize
import argparse
import numpy as np
import time
#from mask_rcnn_vgg import MaskRCNNVGG16
from mask_rcnn_resnet import MaskRCNNResNet
from coco_dataset import COCODataset
from mask_rcnn_train_chain import MaskRCNNTrainChain
from utils.bn_utils import freeze_bn, bn_to_affine
from utils.cocoapi_evaluator import COCOAPIEvaluator
from utils.detection_coco_evaluator import DetectionCOCOEvaluator
import logging
import traceback
from utils.updater import SubDivisionUpdater
import cv2
def resize_bbox(bbox, in_size, out_size):
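# Converts (x, y, width, height) boxes (COCO-style) into
# (y_min, x_min, y_max, x_max) boxes (ChainerCV ordering) rescaled from in_size to out_size.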
bbox_o = bbox.copy()
y_scale = float(out_size[0]) / in_size[0]
x_scale = float(out_size[1]) / in_size[1]
bbox_o[:, 0] = y_scale * bbox[:, 1]
bbox_o[:, 2] = y_scale * (bbox[:, 1]+bbox[:, 3])
bbox_o[:, 1] = x_scale * bbox[:, 0]
bbox_o[:, 3] = x_scale * (bbox[:, 0]+bbox[:, 2])
return bbox_o
def parse():
parser = argparse.ArgumentParser(
description='Mask RCNN trainer')
parser.add_argument('--dataset', choices=('coco2017',),
default='coco2017')
parser.add_argument('--extractor', choices=('resnet50','resnet101'),
default='resnet50', help='extractor network')
parser.add_argument('--gpu', '-g', type=int, default=0)
parser.add_argument('--lr', '-l', type=float, default=1e-4)
parser.add_argument('--batchsize', '-b', type=int, default=8)
parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze batchnorm gamma/beta')
parser.add_argument('--bn2affine', action='store_true', default=False, help='batchnorm to affine')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--roialign', action='store_false', default=True, help='default: True')
parser.add_argument('--lr_step', '-ls', type=int, default=120000)
parser.add_argument('--lr_initialchange', '-li', type=int, default=400)
parser.add_argument('--pretrained', '-p', type=str, default='imagenet')
parser.add_argument('--snapshot', type=int, default=4000)
parser.add_argument('--validation', type=int, default=30000)
parser.add_argument('--resume', type=str)
parser.add_argument('--iteration', '-i', type=int, default=180000)
parser.add_argument('--roi_size', '-r', type=int, default=14, help='ROI size for mask head input')
parser.add_argument('--gamma', type=float, default=1, help='mask loss weight')
return parser.parse_args()
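# Transform: per-sample preprocessing - prepares the image for the network,
# rescales boxes and masks to the new image size and, during training, applies
# a random horizontal flip to image, boxes and mask.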
class Transform(object):
def __init__(self, net, labelids):
self.net = net
self.labelids = labelids
def __call__(self, in_data):
if len(in_data)==5:
img, label, bbox, mask, i = in_data
elif len(in_data)==4:
img, bbox, label, i= in_data
label = [self.labelids.index(l) for l in label]
_, H, W = img.shape
if chainer.config.train:
img = self.net.prepare(img)
_, o_H, o_W = img.shape
scale = o_H / H
if len(bbox)==0:
return img, [],[],1
bbox = resize_bbox(bbox, (H, W), (o_H, o_W))
mask = resize(mask,(o_H, o_W))
if chainer.config.train:
#horizontal flip
img, params = transforms.random_flip(
img, x_random=True, return_param=True)
bbox = transforms.flip_bbox(
bbox, (o_H, o_W), x_flip=params['x_flip'])
mask = transforms.flip(mask, x_flip=params['x_flip'])
return img, bbox, label, scale, mask, i
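# Batch collation for the updater: concat_examples pads the variable-length
# entries (boxes, labels, masks) with -1.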
def convert(batch, device):
return chainer.dataset.convert.concat_examples(batch, device, padding=-1)
def main():
args = parse()
np.random.seed(args.seed)
print('arguments: ', args)
# Model setup
if args.dataset == 'coco2017':
train_data = COCODataset()
test_data = COCODataset(json_file='instances_val2017.json', name='val2017', id_list_file='val2017.txt')
train_class_ids =train_data.class_ids
test_ids = test_data.ids
cocoanns = test_data.coco
if args.extractor=='vgg16':
mask_rcnn = MaskRCNNVGG16(n_fg_class=80, pretrained_model=args.pretrained, roi_size=args.roi_size, roi_align = args.roialign)
elif args.extractor=='resnet50':
mask_rcnn = MaskRCNNResNet(n_fg_class=80, pretrained_model=args.pretrained,roi_size=args.roi_size, n_layers=50, roi_align = args.roialign, class_ids=train_class_ids)
elif args.extractor=='resnet101':
mask_rcnn = MaskRCNNResNet(n_fg_class=80, pretrained_model=args.pretrained,roi_size=args.roi_size, n_layers=101, roi_align = args.roialign, class_ids=train_class_ids)
mask_rcnn.use_preset('evaluate')
model = MaskRCNNTrainChain(mask_rcnn, gamma=args.gamma, roi_size=args.roi_size)
# Trainer setup
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
#optimizer = chainer.optimizers.Adam()#alpha=0.001, beta1=0.9, beta2=0.999 , eps=0.00000001)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0001))
train_data=TransformDataset(train_data, Transform(mask_rcnn, train_class_ids))
test_data=TransformDataset(test_data, Transform(mask_rcnn, train_class_ids))
train_iter = chainer.iterators.SerialIterator(
train_data, batch_size=args.batchsize)
test_iter = chainer.iterators.SerialIterator(
test_data, batch_size=1, repeat=False, shuffle=False)
updater = SubDivisionUpdater(train_iter, optimizer, device=args.gpu, subdivisions=args.batchsize)
#updater = ParallelUpdater(train_iter, optimizer, devices={"main": 0, "second": 1}, converter=convert ) #for training with multiple GPUs
trainer = training.Trainer(
updater, (args.iteration, 'iteration'), out=args.out)
# Extensions
trainer.extend(
extensions.snapshot_object(model.mask_rcnn, 'snapshot_model.npz'),
trigger=(args.snapshot, 'iteration'))
trainer.extend(extensions.ExponentialShift('lr', 10),
trigger=ManualScheduleTrigger(
[args.lr_initialchange], 'iteration'))
trainer.extend(extensions.ExponentialShift('lr', 0.1),
trigger=(args.lr_step, 'iteration'))
if args.resume is not None:
chainer.serializers.load_npz(args.resume, model.mask_rcnn)
if args.freeze_bn:
freeze_bn(model.mask_rcnn)
if args.bn2affine:
bn_to_affine(model.mask_rcnn)
log_interval = 40, 'iteration'
plot_interval = 160, 'iteration'
print_interval = 40, 'iteration'
#trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu), trigger=(args.validation, 'iteration'))
#trainer.extend(DetectionCOCOEvaluator(test_iter, model.mask_rcnn), trigger=(args.validation, 'iteration')) #COCO AP Evaluator with VOC metric
trainer.extend(COCOAPIEvaluator(test_iter, model.mask_rcnn, test_ids, cocoanns), trigger=(args.validation, 'iteration')) #COCO AP Evaluator
trainer.extend(chainer.training.extensions.observe_lr(),
trigger=log_interval)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport(
['iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss',
'main/avg_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'main/roi_mask_loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'validation/main/loss',
'validation/main/map',
]), trigger=print_interval)
trainer.extend(extensions.ProgressBar(update_interval=1000))
#trainer.extend(extensions.dump_graph('main/loss'))
try:
trainer.run()
except:
traceback.print_exc()
if __name__ == '__main__':
main()
|
[
"chainercv.transforms.flip",
"chainer.training.extensions.observe_lr",
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.cocoapi_evaluator.COCOAPIEvaluator",
"chainer.iterators.SerialIterator",
"chainercv.transforms.image.resize.resize",
"chainer.training.extensions.LogReport",
"traceback.print_exc",
"coco_dataset.COCODataset",
"chainer.serializers.load_npz",
"chainer.training.extensions.ExponentialShift",
"utils.bn_utils.bn_to_affine",
"chainer.training.extensions.snapshot_object",
"chainer.dataset.convert.concat_examples",
"utils.bn_utils.freeze_bn",
"mask_rcnn_train_chain.MaskRCNNTrainChain",
"chainer.optimizer.WeightDecay",
"chainer.training.extensions.PrintReport",
"chainercv.transforms.flip_bbox",
"mask_rcnn_resnet.MaskRCNNResNet",
"chainer.training.Trainer",
"chainer.optimizers.MomentumSGD",
"utils.updater.SubDivisionUpdater",
"chainer.training.extensions.ProgressBar",
"chainercv.transforms.random_flip",
"chainer.training.triggers.ManualScheduleTrigger",
"chainer.cuda.get_device_from_id"
] |
[((1268, 1324), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mask RCNN trainer"""'}), "(description='Mask RCNN trainer')\n", (1291, 1324), False, 'import argparse\n'), ((4069, 4135), 'chainer.dataset.convert.concat_examples', 'chainer.dataset.convert.concat_examples', (['batch', 'device'], {'padding': '(-1)'}), '(batch, device, padding=-1)\n', (4108, 4135), False, 'import chainer\n'), ((4176, 4201), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4190, 4201), True, 'import numpy as np\n'), ((4344, 4439), 'coco_dataset.COCODataset', 'COCODataset', ([], {'json_file': '"""instances_val2017.json"""', 'name': '"""val2017"""', 'id_list_file': '"""val2017.txt"""'}), "(json_file='instances_val2017.json', name='val2017',\n id_list_file='val2017.txt')\n", (4355, 4439), False, 'from coco_dataset import COCODataset\n'), ((5187, 5258), 'mask_rcnn_train_chain.MaskRCNNTrainChain', 'MaskRCNNTrainChain', (['mask_rcnn'], {'gamma': 'args.gamma', 'roi_size': 'args.roi_size'}), '(mask_rcnn, gamma=args.gamma, roi_size=args.roi_size)\n', (5205, 5258), False, 'from mask_rcnn_train_chain import MaskRCNNTrainChain\n'), ((5404, 5460), 'chainer.optimizers.MomentumSGD', 'chainer.optimizers.MomentumSGD', ([], {'lr': 'args.lr', 'momentum': '(0.9)'}), '(lr=args.lr, momentum=0.9)\n', (5434, 5460), False, 'import chainer\n'), ((5841, 5912), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['train_data'], {'batch_size': 'args.batchsize'}), '(train_data, batch_size=args.batchsize)\n', (5873, 5912), False, 'import chainer\n'), ((5940, 6030), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['test_data'], {'batch_size': '(1)', 'repeat': '(False)', 'shuffle': '(False)'}), '(test_data, batch_size=1, repeat=False,\n shuffle=False)\n', (5972, 6030), False, 'import chainer\n'), ((6052, 6144), 'utils.updater.SubDivisionUpdater', 'SubDivisionUpdater', (['train_iter', 'optimizer'], {'device': 'args.gpu', 'subdivisions': 'args.batchsize'}), '(train_iter, optimizer, device=args.gpu, subdivisions=\n args.batchsize)\n', (6070, 6144), False, 'from utils.updater import SubDivisionUpdater\n'), ((6297, 6367), 'chainer.training.Trainer', 'training.Trainer', (['updater', "(args.iteration, 'iteration')"], {'out': 'args.out'}), "(updater, (args.iteration, 'iteration'), out=args.out)\n", (6313, 6367), False, 'from chainer import training\n'), ((3613, 3637), 'chainercv.transforms.image.resize.resize', 'resize', (['mask', '(o_H, o_W)'], {}), '(mask, (o_H, o_W))\n', (3619, 3637), False, 'from chainercv.transforms.image.resize import resize\n'), ((4313, 4326), 'coco_dataset.COCODataset', 'COCODataset', ([], {}), '()\n', (4324, 4326), False, 'from coco_dataset import COCODataset\n'), ((5611, 5653), 'chainer.optimizer.WeightDecay', 'chainer.optimizer.WeightDecay', ([], {'rate': '(0.0001)'}), '(rate=0.0001)\n', (5640, 5653), False, 'import chainer\n'), ((6428, 6493), 'chainer.training.extensions.snapshot_object', 'extensions.snapshot_object', (['model.mask_rcnn', '"""snapshot_model.npz"""'], {}), "(model.mask_rcnn, 'snapshot_model.npz')\n", (6454, 6493), False, 'from chainer.training import extensions, ParallelUpdater\n'), ((6562, 6599), 'chainer.training.extensions.ExponentialShift', 'extensions.ExponentialShift', (['"""lr"""', '(10)'], {}), "('lr', 10)\n", (6589, 6599), False, 'from chainer.training import extensions, ParallelUpdater\n'), ((6742, 6780), 'chainer.training.extensions.ExponentialShift', 'extensions.ExponentialShift', 
(['"""lr"""', '(0.1)'], {}), "('lr', 0.1)\n", (6769, 6780), False, 'from chainer.training import extensions, ParallelUpdater\n'), ((6881, 6939), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.resume', 'model.mask_rcnn'], {}), '(args.resume, model.mask_rcnn)\n', (6909, 6939), False, 'import chainer\n'), ((6973, 6999), 'utils.bn_utils.freeze_bn', 'freeze_bn', (['model.mask_rcnn'], {}), '(model.mask_rcnn)\n', (6982, 6999), False, 'from utils.bn_utils import freeze_bn, bn_to_affine\n'), ((7033, 7062), 'utils.bn_utils.bn_to_affine', 'bn_to_affine', (['model.mask_rcnn'], {}), '(model.mask_rcnn)\n', (7045, 7062), False, 'from utils.bn_utils import freeze_bn, bn_to_affine\n'), ((7463, 7527), 'utils.cocoapi_evaluator.COCOAPIEvaluator', 'COCOAPIEvaluator', (['test_iter', 'model.mask_rcnn', 'test_ids', 'cocoanns'], {}), '(test_iter, model.mask_rcnn, test_ids, cocoanns)\n', (7479, 7527), False, 'from utils.cocoapi_evaluator import COCOAPIEvaluator\n'), ((7608, 7648), 'chainer.training.extensions.observe_lr', 'chainer.training.extensions.observe_lr', ([], {}), '()\n', (7646, 7648), False, 'import chainer\n'), ((7712, 7754), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {'trigger': 'log_interval'}), '(trigger=log_interval)\n', (7732, 7754), False, 'from chainer.training import extensions, ParallelUpdater\n'), ((7776, 8039), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['iteration', 'epoch', 'elapsed_time', 'lr', 'main/loss', 'main/avg_loss',\n 'main/roi_loc_loss', 'main/roi_cls_loss', 'main/roi_mask_loss',\n 'main/rpn_loc_loss', 'main/rpn_cls_loss', 'validation/main/loss',\n 'validation/main/map']"], {}), "(['iteration', 'epoch', 'elapsed_time', 'lr',\n 'main/loss', 'main/avg_loss', 'main/roi_loc_loss', 'main/roi_cls_loss',\n 'main/roi_mask_loss', 'main/rpn_loc_loss', 'main/rpn_cls_loss',\n 'validation/main/loss', 'validation/main/map'])\n", (7798, 8039), False, 'from chainer.training import extensions, ParallelUpdater\n'), ((8185, 8229), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {'update_interval': '(1000)'}), '(update_interval=1000)\n', (8207, 8229), False, 'from chainer.training import extensions, ParallelUpdater\n'), ((3728, 3789), 'chainercv.transforms.random_flip', 'transforms.random_flip', (['img'], {'x_random': '(True)', 'return_param': '(True)'}), '(img, x_random=True, return_param=True)\n', (3750, 3789), False, 'from chainercv import transforms\n'), ((3828, 3891), 'chainercv.transforms.flip_bbox', 'transforms.flip_bbox', (['bbox', '(o_H, o_W)'], {'x_flip': "params['x_flip']"}), "(bbox, (o_H, o_W), x_flip=params['x_flip'])\n", (3848, 3891), False, 'from chainercv import transforms\n'), ((3930, 3976), 'chainercv.transforms.flip', 'transforms.flip', (['mask'], {'x_flip': "params['x_flip']"}), "(mask, x_flip=params['x_flip'])\n", (3945, 3976), False, 'from chainercv import transforms\n'), ((4767, 4929), 'mask_rcnn_resnet.MaskRCNNResNet', 'MaskRCNNResNet', ([], {'n_fg_class': '(80)', 'pretrained_model': 'args.pretrained', 'roi_size': 'args.roi_size', 'n_layers': '(50)', 'roi_align': 'args.roialign', 'class_ids': 'train_class_ids'}), '(n_fg_class=80, pretrained_model=args.pretrained, roi_size=\n args.roi_size, n_layers=50, roi_align=args.roialign, class_ids=\n train_class_ids)\n', (4781, 4929), False, 'from mask_rcnn_resnet import MaskRCNNResNet\n'), ((6633, 6692), 'chainer.training.triggers.ManualScheduleTrigger', 'ManualScheduleTrigger', (['[args.lr_initialchange]', '"""iteration"""'], {}), 
"([args.lr_initialchange], 'iteration')\n", (6654, 6692), False, 'from chainer.training.triggers import ManualScheduleTrigger\n'), ((8343, 8364), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8362, 8364), False, 'import traceback\n'), ((4981, 5144), 'mask_rcnn_resnet.MaskRCNNResNet', 'MaskRCNNResNet', ([], {'n_fg_class': '(80)', 'pretrained_model': 'args.pretrained', 'roi_size': 'args.roi_size', 'n_layers': '(101)', 'roi_align': 'args.roialign', 'class_ids': 'train_class_ids'}), '(n_fg_class=80, pretrained_model=args.pretrained, roi_size=\n args.roi_size, n_layers=101, roi_align=args.roialign, class_ids=\n train_class_ids)\n', (4995, 5144), False, 'from mask_rcnn_resnet import MaskRCNNResNet\n'), ((5315, 5356), 'chainer.cuda.get_device_from_id', 'chainer.cuda.get_device_from_id', (['args.gpu'], {}), '(args.gpu)\n', (5346, 5356), False, 'import chainer\n')]
|
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer, normalize
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from imblearn.over_sampling import SMOTE
import matplotlib.pyplot as plt
import numpy as np
from deployml.sklearn.deploy.base import DeploymentBase
class TrainingBase(DeploymentBase):
def __init__(self, selected_model, tensor=False, keras=False, batch_size=None, steps=None):
"""
Base training functionality. This class is usually inherited by a machine
learning model class, so it is rarely instantiated directly.
:param selected_model: represents machine learning model. Usually passed by a
machine learning model object inheriting this class
"""
super().__init__()
if tensor:
self.tensor = True
self.keras = False
elif keras:
self.tensor = False
self.keras = True
else:
self.tensor = False
self.keras = False
self.batch_size = batch_size
self.steps = steps
self.auc = 0
self.cross_val = 0
self.model = selected_model
self.data = None
self.outcome_pointer = None
self.X = None
self.scaled_inputs = False
self.scaling_tool = None
self.y = None
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.outcome_metrics = None
self.train_errors = []
self.test_errors = []
self.predictions = None
self.trained = False
self.learning_curve = False
# self.penalty = 'l1'
self.grid = 0
self.X_report = None
self.y_report = None
self.general_report = "General Report not generated when model was trained"
self.scaling_title = None
self.input_order = None
self.support_vector = False
self.best_epoch = None
self.best_model = None
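# plot_learning_curve (below) repeatedly re-fits the model on growing slices of
# the training set and records train/test mean squared error so the two curves
# can be plotted to diagnose over- or under-fitting.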
def plot_learning_curve(self, batch_size=100, starting_point=100, scale=False, scaling_tool='standard',
resample=False, resample_ratio=1, early_stopping=False, cut_off=30):
"""
Generates lists of training and testing error through the training process
which can be plotted to check for over fitting
:param batch_size: How many data points get trained in each cycle (cannot be zero)
:param starting_point: first batch to be trained (cannot be zero)
:param scale: if set True, the input data is scaled
:param scaling_tool: defines the type of scaling tool used when pre-processing data
:param resample: if set True, the training set is oversampled with SMOTE
:param resample_ratio: sampling ratio passed to SMOTE
:param early_stopping: if set True, training stops once cut_off error points have been recorded
:param cut_off: number of error measurements to record before early stopping
:return: trained model with a learning curve
"""
self.train_errors = []
self.test_errors = []
self.scaled_inputs = True
self.X = self.data.drop(self.outcome_pointer, axis=1)
self.input_order = list(self.X.columns.values)
self.y = self.data[self.outcome_pointer]
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.33,
# random_state=101
)
if resample:
sm = SMOTE(ratio=resample_ratio)
self.X_train, self.y_train = sm.fit_sample(self.X_train, self.y_train)
# self.X_train = sampling_data.drop(self.outcome_pointer, axis=1)
# self.y_train = sampling_data[self.outcome_pointer]
self.X_report = np.array(self.X_test)
self.y_report = np.array(self.y_test)
if scale:
self.scaled_inputs = True
self.scaling_title = scaling_tool
if scaling_tool == 'standard':
self.scaled_inputs = True
self.scaling_tool = StandardScaler()
self.scaling_tool.fit(self.X_train)
self.X_train = self.scaling_tool.transform(self.X_train)
self.X_test = self.scaling_tool.transform(self.X_test)
elif scaling_tool == 'min max':
self.scaled_inputs = True
self.scaling_tool = MinMaxScaler()
self.scaling_tool.fit(self.X_train)
self.X_train = self.scaling_tool.transform(self.X_train)
self.X_test = self.scaling_tool.transform(self.X_test)
elif scaling_tool == 'normalize':
self.scaling_tool = Normalizer()
self.scaling_tool.fit(self.X_train)
self.X_train = self.scaling_tool.transform(self.X_train)
self.X_test = self.scaling_tool.transform(self.X_test)
else:
self.scaled_inputs = False
if self.tensor:
for i in range(starting_point, len(self.X_train), batch_size):
self.model.fit(self.X_train[:i], self.y_train[:i])
y_train_predict = self.model.predict(self.X_train[:i], batch_size=self.batch_size,
)
y_test_predict = self.model.predict(self.X_test)
self.train_errors.append(mean_squared_error(y_train_predict, self.y_train[:i]))
self.test_errors.append(mean_squared_error(y_test_predict, self.y_test))
else:
if early_stopping:
for i in range(starting_point, len(self.X_train), batch_size):
self.model.fit(self.X_train[:i], self.y_train[:i])
y_train_predict = self.model.predict(self.X_train[:i])
y_test_predict = self.model.predict(self.X_test)
self.train_errors.append(mean_squared_error(y_train_predict, self.y_train[:i]))
self.test_errors.append(mean_squared_error(y_test_predict, self.y_test))
if len(self.train_errors) == cut_off:
break
else:
for i in range(starting_point, len(self.X_train), batch_size):
self.model.fit(self.X_train[:i], self.y_train[:i])
y_train_predict = self.model.predict(self.X_train[:i])
y_test_predict = self.model.predict(self.X_test)
self.train_errors.append(mean_squared_error(y_train_predict, self.y_train[:i]))
self.test_errors.append(mean_squared_error(y_test_predict, self.y_test))
def quick_train(self, scale=False, scaling_tool='standard',
resample=False, resample_ratio=1, epochs=1, batch_size=None):
"""
Trains a model quickly
:param scale: if set True, the input data is scaled
:param scaling_tool: defines the type of scaling tool used when pre-processing data
:param resample: if set True, the training set is oversampled with SMOTE
:param resample_ratio: sampling ratio passed to SMOTE
:param epochs: number of training epochs (Keras models only)
:param batch_size: mini-batch size (Keras models only)
:return: a trained model with no learning curve
"""
self.learning_curve = False
self.X = self.data.drop(self.outcome_pointer, axis=1)
self.y = self.data[self.outcome_pointer]
self.input_order = list(self.X.columns.values)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.33,
# random_state=101
)
if resample:
sm = SMOTE(ratio=resample_ratio)
self.X_train, self.y_train = sm.fit_sample(self.X_train, self.y_train)
# self.X_train = sampling_data.drop(self.outcome_pointer, axis=1)
# self.y_train = sampling_data[self.outcome_pointer]
self.X_report = np.array(self.X_test)
self.y_report = np.array(self.y_test)
if scale:
self.scaled_inputs = True
if scaling_tool == 'standard':
self.scaling_tool = StandardScaler()
elif scaling_tool == 'min max':
self.scaling_tool = MinMaxScaler()
elif scaling_tool == 'normalize':
self.scaling_tool = Normalizer()
self.scaling_tool.fit(self.X_train)
self.X_train = self.scaling_tool.transform(self.X_train)
self.X_test = self.scaling_tool.transform(self.X_test)
else:
self.scaled_inputs = False
if self.tensor:
self.model.fit(self.X_train, self.y_train, batch_size=self.batch_size,
steps=self.steps)
elif self.keras:
self.model.fit(self.X_train, self.y_train,
epochs=epochs, batch_size=batch_size)
else:
self.model.fit(self.X_train, self.y_train)
def show_learning_curve(self, save=False):
"""
:param save: if set to True plot will be saved as file
Plots the learning curve of test and train sets
"""
plt.figure(figsize=(15, 7))
plt.plot(np.sqrt(self.train_errors), "r-+", linewidth=2, label="train")
plt.plot(np.sqrt(self.test_errors), "b-", linewidth=3, label="val")
plt.xlabel("Iterations")
plt.ylabel('Error')
plt.title('Learning Curve for {}'.format(self.model_title))
plt.legend(loc='upper right')
if save:
plt.savefig('learning_curve')
plt.show()
def show_roc_curve(self, save=False):
"""
Plots the ROC curve to see True and False positive trade off
:param save: if set to True plot will be saved as file
:return: self.auc which can be used as a score
"""
logit_roc_auc = roc_auc_score(self.y_test, self.model.predict(self.X_test))
self.auc = logit_roc_auc
fpr, tpr, thresholds = roc_curve(self.y_test, self.model.predict_proba(self.X_test)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label='ROC Curve (area = {:0.2f})'.format(logit_roc_auc))
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
if save:
plt.savefig('ROC')
plt.show()
def evaluate_outcome(self, best=False):
"""
Prints classification report of finished model
:return: list of predictions from the X_test data subset
"""
if best:
self.predictions = self.best_model.predict(self.X_test)
elif self.keras:
self.predictions = self.model.predict_classes(self.X_test)
else:
self.predictions = self.model.predict(self.X_test)
self.general_report = classification_report(self.y_test, self.predictions)
print(self.general_report)
def evaluate_cross_validation(self, n_splits=10, random_state=7):
"""
Performs a cross validation score evaluating how the model performs in different subsets
of the data, model needs to be trained first
:return: average value of all 10 scores
"""
k_fold = KFold(n_splits=n_splits, random_state=random_state)
scoring = 'accuracy'
self.cross_val = cross_val_score(self.model, self.X_train, self.y_train, cv=k_fold, scoring=scoring)
print("{}-fold cross validation average accuracy: {}".format(n_splits, self.cross_val.mean()))
def grid_search(self):
"""
override this in your machine learning model class
:return: Nothing, supposed to be overridden in parent class
"""
self.grid = 1
def calculate(self, input_array, happening=True, override=False):
"""
Calculates probability of outcome
WARNING [CANNOT BE USED ONCE MODEL IS PICKLED]
:param input_array: array of inputs (should be same order as training data)
:param happening: if set False, returns probability of event not happening
:param override: set to True if you want to override scaling
:return: float between 0 and 1
"""
if self.scaled_inputs and not override:
input_array = self.scaling_tool.transform(input_array)
if happening:
return self.model.predict_proba([input_array])[0][1]
else:
return self.model.predict_proba([input_array])[0][0]
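"""
Illustrative usage sketch (not part of the original module). It assumes a
hypothetical concrete subclass `MyModel` of TrainingBase that supplies
`selected_model`, plus a pandas DataFrame `df` containing an 'outcome' column:
model = MyModel()
model.data = df
model.outcome_pointer = 'outcome'
model.quick_train(scale=True, scaling_tool='standard')
model.evaluate_outcome()
"""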
|
[
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"imblearn.over_sampling.SMOTE",
"sklearn.preprocessing.normalize",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"sklearn.model_selection.KFold",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((3258, 3306), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'test_size': '(0.33)'}), '(self.X, self.y, test_size=0.33)\n', (3274, 3306), False, 'from sklearn.model_selection import train_test_split, cross_val_score, KFold\n'), ((3807, 3828), 'numpy.array', 'np.array', (['self.X_test'], {}), '(self.X_test)\n', (3815, 3828), True, 'import numpy as np\n'), ((3853, 3874), 'numpy.array', 'np.array', (['self.y_test'], {}), '(self.y_test)\n', (3861, 3874), True, 'import numpy as np\n'), ((7187, 7235), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'test_size': '(0.33)'}), '(self.X, self.y, test_size=0.33)\n', (7203, 7235), False, 'from sklearn.model_selection import train_test_split, cross_val_score, KFold\n'), ((7736, 7757), 'numpy.array', 'np.array', (['self.X_test'], {}), '(self.X_test)\n', (7744, 7757), True, 'import numpy as np\n'), ((7782, 7803), 'numpy.array', 'np.array', (['self.y_test'], {}), '(self.y_test)\n', (7790, 7803), True, 'import numpy as np\n'), ((8950, 8977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (8960, 8977), True, 'import matplotlib.pyplot as plt\n'), ((9142, 9166), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (9152, 9166), True, 'import matplotlib.pyplot as plt\n'), ((9175, 9194), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (9185, 9194), True, 'import matplotlib.pyplot as plt\n'), ((9271, 9300), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (9281, 9300), True, 'import matplotlib.pyplot as plt\n'), ((9368, 9378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9376, 9378), True, 'import matplotlib.pyplot as plt\n'), ((9857, 9869), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9867, 9869), True, 'import matplotlib.pyplot as plt\n'), ((9962, 9993), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (9970, 9993), True, 'import matplotlib.pyplot as plt\n'), ((10002, 10022), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (10010, 10022), True, 'import matplotlib.pyplot as plt\n'), ((10031, 10052), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (10039, 10052), True, 'import matplotlib.pyplot as plt\n'), ((10061, 10094), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (10071, 10094), True, 'import matplotlib.pyplot as plt\n'), ((10103, 10135), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (10113, 10135), True, 'import matplotlib.pyplot as plt\n'), ((10144, 10190), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver operating characteristic"""'], {}), "('Receiver operating characteristic')\n", (10153, 10190), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10228), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (10209, 10228), True, 'import matplotlib.pyplot as plt\n'), ((10285, 10295), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10293, 10295), True, 'import matplotlib.pyplot as plt\n'), ((10773, 10825), 'sklearn.metrics.classification_report', 'classification_report', (['self.y_test', 'self.predictions'], {}), '(self.y_test, self.predictions)\n', (10794, 10825), False, 
'from sklearn.metrics import classification_report\n'), ((11171, 11222), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'random_state': 'random_state'}), '(n_splits=n_splits, random_state=random_state)\n', (11176, 11222), False, 'from sklearn.model_selection import train_test_split, cross_val_score, KFold\n'), ((11277, 11365), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['self.model', 'self.X_train', 'self.y_train'], {'cv': 'k_fold', 'scoring': 'scoring'}), '(self.model, self.X_train, self.y_train, cv=k_fold, scoring=\n scoring)\n', (11292, 11365), False, 'from sklearn.model_selection import train_test_split, cross_val_score, KFold\n'), ((3527, 3554), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'ratio': 'resample_ratio'}), '(ratio=resample_ratio)\n', (3532, 3554), False, 'from imblearn.over_sampling import SMOTE\n'), ((7456, 7483), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'ratio': 'resample_ratio'}), '(ratio=resample_ratio)\n', (7461, 7483), False, 'from imblearn.over_sampling import SMOTE\n'), ((8995, 9021), 'numpy.sqrt', 'np.sqrt', (['self.train_errors'], {}), '(self.train_errors)\n', (9002, 9021), True, 'import numpy as np\n'), ((9075, 9100), 'numpy.sqrt', 'np.sqrt', (['self.test_errors'], {}), '(self.test_errors)\n', (9082, 9100), True, 'import numpy as np\n'), ((9330, 9359), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""learning_curve"""'], {}), "('learning_curve')\n", (9341, 9359), True, 'import matplotlib.pyplot as plt\n'), ((10258, 10276), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ROC"""'], {}), "('ROC')\n", (10269, 10276), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4115), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4113, 4115), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, normalize\n'), ((7940, 7956), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7954, 7956), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, normalize\n'), ((4434, 4448), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4446, 4448), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, normalize\n'), ((5235, 5288), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train_predict', 'self.y_train[:i]'], {}), '(y_train_predict, self.y_train[:i])\n', (5253, 5288), False, 'from sklearn.metrics import mean_squared_error\n'), ((5330, 5377), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test_predict', 'self.y_test'], {}), '(y_test_predict, self.y_test)\n', (5348, 5377), False, 'from sklearn.metrics import mean_squared_error\n'), ((8037, 8051), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (8049, 8051), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, normalize\n'), ((4727, 4750), 'sklearn.preprocessing.normalize', 'normalize', (['self.X_train'], {}), '(self.X_train)\n', (4736, 4750), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, normalize\n'), ((5767, 5820), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train_predict', 'self.y_train[:i]'], {}), '(y_train_predict, self.y_train[:i])\n', (5785, 5820), False, 'from sklearn.metrics import mean_squared_error\n'), ((5866, 5913), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test_predict', 'self.y_test'], {}), '(y_test_predict, self.y_test)\n', (5884, 5913), False, 'from sklearn.metrics import 
mean_squared_error\n'), ((6364, 6417), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train_predict', 'self.y_train[:i]'], {}), '(y_train_predict, self.y_train[:i])\n', (6382, 6417), False, 'from sklearn.metrics import mean_squared_error\n'), ((6463, 6510), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test_predict', 'self.y_test'], {}), '(y_test_predict, self.y_test)\n', (6481, 6510), False, 'from sklearn.metrics import mean_squared_error\n'), ((8134, 8145), 'sklearn.preprocessing.normalize', 'normalize', ([], {}), '()\n', (8143, 8145), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, normalize\n')]
|
"""test_3_mac_receive_jpg.py -- receive & display jpg stream.
A simple test program that uses imagezmq to receive an image jpg stream from a
Raspberry Pi and display it as a video stream.
1. Run this program in its own terminal window on the mac:
python test_3_mac_receive_jpg.py
This "receive and display images" program must be running before starting the
RPi sending program.
2. Run the jpg sending program on the RPi:
python test_3_rpi_send_jpg.py
A cv2.imshow() window will appear on the Mac showing the transmitted images as
a video stream. You can repeat Step 2 and start the test_3_rpi_send_jpg.py on
multiple RPis and each one will cause a new cv2.imshow() window to open.
To end the programs, press Ctrl-C in the terminal window of the RPi first.
Then press Ctrl-C in the terminal window of the receiving program. You may
have to press Ctrl-C in the display window as well.
"""
import sys
import numpy as np
import cv2
import imagezmq
image_hub = imagezmq.ImageHub()
while True: # show streamed images until Ctrl-C
rpi_name, jpg_buffer = image_hub.recv_jpg()
image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
# see opencv docs for info on -1 parameter
cv2.imshow(rpi_name, image) # 1 window for each RPi
cv2.waitKey(1)
image_hub.send_reply(b'OK')
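# A hedged sketch (not part of the original test): wrapping the receive loop in
# try/except would let a single Ctrl-C close the display windows cleanly, e.g.
#   try:
#       while True:
#           ...  # recv_jpg / imdecode / imshow / send_reply as above
#   except KeyboardInterrupt:
#       pass
#   finally:
#       cv2.destroyAllWindows()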
|
[
"cv2.waitKey",
"numpy.frombuffer",
"imagezmq.ImageHub",
"cv2.imshow"
] |
[((964, 983), 'imagezmq.ImageHub', 'imagezmq.ImageHub', ([], {}), '()\n', (981, 983), False, 'import imagezmq\n'), ((1203, 1230), 'cv2.imshow', 'cv2.imshow', (['rpi_name', 'image'], {}), '(rpi_name, image)\n', (1213, 1230), False, 'import cv2\n'), ((1260, 1274), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1271, 1274), False, 'import cv2\n'), ((1106, 1146), 'numpy.frombuffer', 'np.frombuffer', (['jpg_buffer'], {'dtype': '"""uint8"""'}), "(jpg_buffer, dtype='uint8')\n", (1119, 1146), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import os
from pathlib import Path
import numpy as np
import tensorflow as tf
import cv2
from cv_bridge import CvBridge, CvBridgeError
import rospy
from hrl_object_detection.msg import FloatList
from sensor_msgs.msg import Image
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1
# Patch the location of gfile
tf.gfile = tf.io.gfile
# Bridge to convert images
bridge = CvBridge()
### VARIABLES
path_to_labels_platypus = "/home/user/sanchezs/catkin_ws/src/hrl_object_detection/src/platypus/platypus-detection.pbtxt"
path_to_labels_unicorn = "/home/user/sanchezs/catkin_ws/src/hrl_object_detection/src/unicorn/unicorn_object-detection.pbtxt"
path_to_labels_teddy_monster = "/home/user/sanchezs/catkin_ws/src/hrl_object_detection/src/teddy_monster/teddy_monster_object-detection.pbtxt"
model_name_platypus = "/home/user/sanchezs/catkin_ws/src/hrl_object_detection/src/platypus/platypus_inference_graph"
model_name_unicorn = "/home/user/sanchezs/catkin_ws/src/hrl_object_detection/src/unicorn/unicorn_graph"
model_name_teddy_monster = "/home/user/sanchezs/catkin_ws/src/hrl_object_detection/src/teddy_monster/teddy_monster_inference_graph"
class_to_detect_platypus = "platypus"
class_to_detect_unicorn = "unicorn"
class_to_detect_teddy_monster = "teddy monster"
detected_top_camera = 0
#dominant_colors_unicorn = np.float32(np.array([[20.04054069519043, 16.291893005371094, 22.2891902923584],
#[77.27994537353516, 72.75, 86.98204040527344],
#[110.84375, 104.1611328125, 113.310546875],
#[167.5294952392578, 153.17245483398438, 180.65354919433594],
#[213.70997619628906, 211.31106567382812, 214.26806640625]]))
#dominant_colors_platypus = np.float32(np.array([[7.902225017547607, 10.378686904907227, 18.599586486816406],
#[39.54244613647461, 47.89728546142578, 65.26145935058594],
#[64.30582427978516, 99.69781494140625, 105.12933349609375],
#[104.90493774414062, 104.2941665649414, 135.60922241210938],
#[129.13047790527344, 164.89979553222656, 190.93841552734375]]))
#dominant_colors_monster = np.float32(np.array([[20.952489852905273, 20.191192626953125, 22.156431198120117],
#[65.02598571777344, 83.71105194091797, 86.01863861083984],
#[85.77362823486328, 108.91241455078125, 135.65928649902344],
#[154.9387664794922, 157.45663452148438, 157.8596954345703],
#[212.35440063476562, 214.16761779785156, 214.51052856445312]]))
dominant_colors_unicorn = np.float32(np.array([[14.512475967407227, 10.637235641479492, 12.238003730773926],
[45.42377853393555, 35.51829147338867, 39.001522064208984], [73.22736358642578, 56.2325553894043, 64.88287353515625],
[93.06961822509766, 62.49015808105469, 91.50210571289062],
[101.87042999267578, 79.54219055175781, 140.76304626464844],
[133.23292541503906, 84.64583587646484, 146.19268798828125],
[146.9541778564453, 121.57429504394531, 183.50267028808594],
[175.8449249267578, 130.68478393554688, 196.73333740234375],
[189.53985595703125, 174.63101196289062, 214.19927978515625],
[217.00360107421875, 216.41111755371094, 217.29551696777344]]))
dominant_colors_platypus = np.float32(np.array([[5.150073051452637, 5.405563831329346, 8.959736824035645],
[19.21808433532715, 22.307445526123047, 29.933433532714844],
[37.59239959716797, 38.90922546386719, 33.54680633544922],
[55.65052795410156, 53.83592224121094, 56.52360534667969],
[65.02174377441406, 68.0908432006836, 76.18840789794922],
[87.24678039550781, 87.39642333984375, 78.73574829101562],
[100.889404296875, 89.90760803222656, 118.34420776367188],
[107.12446594238281, 131.53529357910156, 157.36705017089844],
[132.6621551513672, 161.353271484375, 183.85906982421875],
[210.23611450195312, 211.0833282470703, 211.4166717529297]]))
dominant_colors_monster = np.float32(np.array([[8.104718208312988, 5.26467227935791, 5.033371925354004],
[32.14719772338867, 23.15839958190918, 19.88159942626953], [54.60653305053711, 42.97511672973633, 38.53810501098633],
[74.2969741821289, 60.45454788208008, 51.90129852294922],
[88.74835205078125, 71.97657775878906, 63.67147445678711],
[107.48922729492188, 87.00760650634766, 76.51457214355469],
[138.4444580078125, 121.26786041259766, 113.48809814453125],
[151.0196075439453, 150.47964477539062, 150.7978973388672],
[188.70578002929688, 186.58016967773438, 182.24627685546875],
[216.57745361328125, 216.20333862304688, 215.73629760742188]]))
dominant_colors_active = 0
area_detected_platypus = 0.1
area_detected_unicorn = 0.08
area_detected_monster = 0.1
area_detected = 0 # area_detected_monster
detection_offset = 0
detection_relative_area = 0
validated_relative_area = 0
detected = 0
update = False
detection_model = None
class_to_detect = ""
path_to_labels = ""
category_index = None
detection_threshold = 0.6
validating = 0
detection_validated = 0
pub_image = rospy.Publisher('/image_with_box', Image, queue_size=10)
### FUNCTIONS
## Returns the tensorflow model loaded
def load_model(model_name):
model_dir = model_name
model_dir = Path(model_dir)/"saved_model"
model = tf.saved_model.load(str(model_dir), None)
model = model.signatures["serving_default"]
return model
## Return True if the color pattern matches the toy wanted
def is_my_toy(dominants):
global dominant_colors_platypus, dominant_colors_unicorn, dominant_colors_monster
dominants = np.sort(dominants, axis=0)
diff_platypus = np.average(np.absolute(dominants - dominant_colors_platypus))
diff_unicorn = np.average(np.absolute(dominants - dominant_colors_unicorn))
diff_monster = np.average(np.absolute(dominants - dominant_colors_monster))
print("Avg error platypus:\t", diff_platypus)
print("Avg error unicorn:\t", diff_unicorn)
print("Avg error monster:\t", diff_monster)
if model == "platypus":
return diff_platypus < diff_monster and diff_platypus < diff_unicorn
elif model == "unicorn":
return diff_unicorn < diff_monster and diff_unicorn < diff_platypus
elif model == "teddy_monster":
return diff_monster < diff_platypus and diff_monster < diff_unicorn
## Returns the palette of dominant colors from an image
def get_dominant_colors(image, box):
top, left, bottom, right = int(box[0]), int(box[1]), int(box[2]), int(box[3])
box_img = image[top:bottom,left:right]
pixels = np.float32(box_img.reshape(-1, 3))
n_colors = 10
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)
flags = cv2.KMEANS_RANDOM_CENTERS
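    # cv2.kmeans returns (compactness, labels, centers); only the centers, i.e. the
    # dominant BGR colors with shape (n_colors, 3), are returned by this function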
_, labels, palette = cv2.kmeans(pixels, n_colors, None, criteria, 10, flags)
return palette
## Runs one inference pass for the image
def run_inference_for_single_image(model, image):
# Convert it to numpy array
image = np.asarray(image)
# The input needs to be a tensor
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis
input_tensor = input_tensor[tf.newaxis,...]
# Run inference
output_dict = model(input_tensor)
    # All outputs are batched tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key:value[0, :num_detections].numpy()
for key,value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
# Handle models with masks:
if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(output_dict['detection_masks'], output_dict['detection_boxes'], image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
tf.uint8)
output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
return output_dict
## Returns detected box width, height, horizontal center and area
def calc_box_values(box, shape):
box[0] *= shape[0]
box[1] *= shape[1]
box[2] *= shape[0]
box[3] *= shape[1]
detected_box_width = box[3] - box[1]
detected_box_height = box[2] - box[0]
detected_box_centerX = box[1] + (detected_box_width/2.0)
detected_box_area = detected_box_width * detected_box_height
return detected_box_width, detected_box_height, detected_box_centerX, detected_box_area
## Adjusts gamma and brightness for the given image
def adjust(image, gamma=1.0, brightness=1):
hsvImg = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
# decreasing the V channel by a factor from the original
hsvImg[...,2] = hsvImg[...,2]*brightness
new_image = cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(new_image, table)
## Callback function called every time the listener gets a detection message from the lower camera
def detect_callback(data):
print("Validating:\t", validating)
global detection_offset, detection_relative_area, detected
image = adjust(bridge.imgmsg_to_cv2(data, "bgr8"), gamma=0.7, brightness=0.9)
image_centerX = image.shape[1] / 2.0
image_area = image.shape[0] * image.shape[1]
image_to_publish = image.copy()
output_dict = run_inference_for_single_image(detection_model, image)
## Box detection_boxes: [top, left, bottom, right]
## Detection_scores: value between 0 and 1
## Detection_classes: The name of the class we are detecting
detected_boxes = []
detected_scores = []
# First, filter the predictions that are not the class we are interested in
for i, entry in enumerate(output_dict['detection_classes']):
# print(category_index[entry])
if category_index[entry]["name"] == class_to_detect:
detected_boxes.append(output_dict['detection_boxes'][i])
detected_scores.append(output_dict['detection_scores'][i])
if detected_scores:
# Second, check which one of those detections has the higher score
max_index = detected_scores.index(max(detected_scores))
print("Score:\t\t", detected_scores[max_index], "\r")
print("--------------------------------------------\t")
# print(detected_scores[max_index])
# Third, if that score is higher than a threshold, we compute the values to send to the controller
if detected_scores[max_index] >= detection_threshold:
detected_box = detected_boxes[max_index]
detected_box_width, detected_box_height, detected_box_centerX, detected_box_area = calc_box_values(detected_box, image.shape)
# Update values that we need to send
detection_offset = detected_box_centerX - image_centerX # If positive, the box is on the right side of the image. If negative, the box is on the left side of the image
detection_relative_area = detected_box_area / image_area # Value between 0 and 1 to check if we are close or far away from the object. The closer we are, the bigger the box will be
print("area:\t\t", detection_relative_area)
dominant_colors = get_dominant_colors(image, detected_box)
#print("Dominant colors: \n: ", dominant_colors)
#print("Dominant colors: \n: ", np.sort(dominant_colors, axis=0).tolist())
ismytoy = is_my_toy(dominant_colors)
if (ismytoy):
detected = 1
else:
detected = 0
            image_to_publish = cv2.rectangle(image_to_publish, (int(detected_box[1]), int(detected_box[0])), (int(detected_box[3]), int(detected_box[2])), (0, 255, 0) if detected else (0, 0, 255), 2)
else:
detected = 0
else:
detected = 0
    # publish the annotated image
if not validating:
pub_image.publish(bridge.cv2_to_imgmsg(image_to_publish, "bgr8"))
## Callback function called every time the listener gets a detection message from the upper camera
def validate_detection_callback(data):
global detection_offset, detection_relative_area, validated_relative_area, detected, validating, detection_validated, detected_top_camera
if (detected == 1 and detection_relative_area > area_detected) or validated_relative_area > 0.3:
validating = 1
image = bridge.imgmsg_to_cv2(data, "bgr8")
image_area = image.shape[0] * image.shape[1]
image_to_publish = image.copy()
image_centerX = image.shape[1] / 2.0
output_dict = run_inference_for_single_image(detection_model, image)
## Box detection_boxes: [top, left, bottom, right]
## Detection_scores: value between 0 and 1
## Detection_classes: The name of the class we are detecting
detected_boxes = []
detected_scores = []
# First, filter the predictions that are not the class we are interested in
for i, entry in enumerate(output_dict['detection_classes']):
# print(category_index[entry])
if category_index[entry]["name"] == class_to_detect:
detected_boxes.append(output_dict['detection_boxes'][i])
detected_scores.append(output_dict['detection_scores'][i])
if detected_scores:
# Second, check which one of those detections has the higher score
max_index = detected_scores.index(max(detected_scores))
print("Confirmation Score:\t", detected_scores[max_index], "\r")
print(".......\r")
# Third, if that score is higher than a threshold, we compute the values to send to the controller
if detected_scores[max_index] >= detection_threshold:
detected_top_camera = 1
detected_box = detected_boxes[max_index]
detected_box_width, detected_box_height, detected_box_centerX, detected_box_area = calc_box_values(detected_box, image.shape)
detection_offset = detected_box_centerX - image_centerX # If positive, the box is on the right side of the image. If negative, the box is on the left side of the image
validated_relative_area = detected_box_area / image_area # Value between 0 and 1 to check if we are close or far away from the object. The closer we are, the bigger the box will be
print("Relative area:\t", validated_relative_area)
if validated_relative_area > 0.3:
detection_validated = 1
else:
detection_validated = 0
                image_to_publish = cv2.rectangle(image_to_publish, (int(detected_box[1]), int(detected_box[0])), (int(detected_box[3]), int(detected_box[2])), (0, 255, 0), 2)
else:
detected_top_camera = 0
else:
detected_top_camera = 0
pub_image.publish(bridge.cv2_to_imgmsg(image_to_publish, "bgr8"))
else:
validating = 0
## This declares the listener part of the node
def listener():
rospy.Subscriber('camera/rgb/image_raw', Image, detect_callback)
rospy.Subscriber('/camera_sr300/color/image_raw', Image, validate_detection_callback)
## This declares the publisher part of the node
def publisher():
global detection_offset, detection_relative_area, validated_relative_area
pub = rospy.Publisher('/control_robot/camera_detection', FloatList, queue_size=10)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
msg = FloatList()
msg.area = validated_relative_area
msg.xOffset = detection_offset
msg.detected = detected if not validating else detected_top_camera
pub.publish(msg)
rate.sleep()
#model = "platypus"
model = "teddy_monster"
#model= "unicorn"
if __name__ == '__main__':
#global detection_model, path_to_labels, model_name, class_to_detect, category_index
if model == "platypus":
path_to_labels = path_to_labels_platypus
model_name = model_name_platypus
class_to_detect = class_to_detect_platypus
area_detected = area_detected_platypus
elif model == "unicorn":
path_to_labels = path_to_labels_unicorn
model_name = model_name_unicorn
class_to_detect = class_to_detect_unicorn
area_detected = area_detected_unicorn
elif model == "teddy_monster":
path_to_labels = path_to_labels_teddy_monster
model_name = model_name_teddy_monster
class_to_detect = class_to_detect_teddy_monster
area_detected = area_detected_monster
else:
print("ERROR: Specified model doesn't exist!")
# List of the strings that is used to add correct label for each box.
category_index = label_map_util.create_category_index_from_labelmap(path_to_labels, use_display_name=True)
detection_model = load_model(model_name)
rospy.init_node('tf_node', anonymous=True)
listener()
try:
publisher()
except rospy.ROSInterruptException:
pass
|
[
"numpy.absolute",
"rospy.Subscriber",
"pathlib.Path",
"numpy.arange",
"cv2.rectangle",
"cv2.cvtColor",
"rospy.Rate",
"tensorflow.cast",
"rospy.is_shutdown",
"cv2.LUT",
"rospy.init_node",
"numpy.asarray",
"numpy.sort",
"cv_bridge.CvBridge",
"tensorflow.convert_to_tensor",
"rospy.Publisher",
"object_detection.utils.label_map_util.create_category_index_from_labelmap",
"object_detection.utils.ops.reframe_box_masks_to_image_masks",
"cv2.kmeans",
"numpy.array",
"hrl_object_detection.msg.FloatList"
] |
[((573, 583), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (581, 583), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((6669, 6725), 'rospy.Publisher', 'rospy.Publisher', (['"""/image_with_box"""', 'Image'], {'queue_size': '(10)'}), "('/image_with_box', Image, queue_size=10)\n", (6684, 6725), False, 'import rospy\n'), ((3137, 3799), 'numpy.array', 'np.array', (['[[14.512475967407227, 10.637235641479492, 12.238003730773926], [\n 45.42377853393555, 35.51829147338867, 39.001522064208984], [\n 73.22736358642578, 56.2325553894043, 64.88287353515625], [\n 93.06961822509766, 62.49015808105469, 91.50210571289062], [\n 101.87042999267578, 79.54219055175781, 140.76304626464844], [\n 133.23292541503906, 84.64583587646484, 146.19268798828125], [\n 146.9541778564453, 121.57429504394531, 183.50267028808594], [\n 175.8449249267578, 130.68478393554688, 196.73333740234375], [\n 189.53985595703125, 174.63101196289062, 214.19927978515625], [\n 217.00360107421875, 216.41111755371094, 217.29551696777344]]'], {}), '([[14.512475967407227, 10.637235641479492, 12.238003730773926], [\n 45.42377853393555, 35.51829147338867, 39.001522064208984], [\n 73.22736358642578, 56.2325553894043, 64.88287353515625], [\n 93.06961822509766, 62.49015808105469, 91.50210571289062], [\n 101.87042999267578, 79.54219055175781, 140.76304626464844], [\n 133.23292541503906, 84.64583587646484, 146.19268798828125], [\n 146.9541778564453, 121.57429504394531, 183.50267028808594], [\n 175.8449249267578, 130.68478393554688, 196.73333740234375], [\n 189.53985595703125, 174.63101196289062, 214.19927978515625], [\n 217.00360107421875, 216.41111755371094, 217.29551696777344]])\n', (3145, 3799), True, 'import numpy as np\n'), ((4172, 4822), 'numpy.array', 'np.array', (['[[5.150073051452637, 5.405563831329346, 8.959736824035645], [\n 19.21808433532715, 22.307445526123047, 29.933433532714844], [\n 37.59239959716797, 38.90922546386719, 33.54680633544922], [\n 55.65052795410156, 53.83592224121094, 56.52360534667969], [\n 65.02174377441406, 68.0908432006836, 76.18840789794922], [\n 87.24678039550781, 87.39642333984375, 78.73574829101562], [\n 100.889404296875, 89.90760803222656, 118.34420776367188], [\n 107.12446594238281, 131.53529357910156, 157.36705017089844], [\n 132.6621551513672, 161.353271484375, 183.85906982421875], [\n 210.23611450195312, 211.0833282470703, 211.4166717529297]]'], {}), '([[5.150073051452637, 5.405563831329346, 8.959736824035645], [\n 19.21808433532715, 22.307445526123047, 29.933433532714844], [\n 37.59239959716797, 38.90922546386719, 33.54680633544922], [\n 55.65052795410156, 53.83592224121094, 56.52360534667969], [\n 65.02174377441406, 68.0908432006836, 76.18840789794922], [\n 87.24678039550781, 87.39642333984375, 78.73574829101562], [\n 100.889404296875, 89.90760803222656, 118.34420776367188], [\n 107.12446594238281, 131.53529357910156, 157.36705017089844], [\n 132.6621551513672, 161.353271484375, 183.85906982421875], [\n 210.23611450195312, 211.0833282470703, 211.4166717529297]])\n', (4180, 4822), True, 'import numpy as np\n'), ((5250, 5903), 'numpy.array', 'np.array', (['[[8.104718208312988, 5.26467227935791, 5.033371925354004], [\n 32.14719772338867, 23.15839958190918, 19.88159942626953], [\n 54.60653305053711, 42.97511672973633, 38.53810501098633], [\n 74.2969741821289, 60.45454788208008, 51.90129852294922], [\n 88.74835205078125, 71.97657775878906, 63.67147445678711], [\n 107.48922729492188, 87.00760650634766, 76.51457214355469], [\n 138.4444580078125, 121.26786041259766, 113.48809814453125], [\n 
151.0196075439453, 150.47964477539062, 150.7978973388672], [\n 188.70578002929688, 186.58016967773438, 182.24627685546875], [\n 216.57745361328125, 216.20333862304688, 215.73629760742188]]'], {}), '([[8.104718208312988, 5.26467227935791, 5.033371925354004], [\n 32.14719772338867, 23.15839958190918, 19.88159942626953], [\n 54.60653305053711, 42.97511672973633, 38.53810501098633], [\n 74.2969741821289, 60.45454788208008, 51.90129852294922], [\n 88.74835205078125, 71.97657775878906, 63.67147445678711], [\n 107.48922729492188, 87.00760650634766, 76.51457214355469], [\n 138.4444580078125, 121.26786041259766, 113.48809814453125], [\n 151.0196075439453, 150.47964477539062, 150.7978973388672], [\n 188.70578002929688, 186.58016967773438, 182.24627685546875], [\n 216.57745361328125, 216.20333862304688, 215.73629760742188]])\n', (5258, 5903), True, 'import numpy as np\n'), ((7197, 7223), 'numpy.sort', 'np.sort', (['dominants'], {'axis': '(0)'}), '(dominants, axis=0)\n', (7204, 7223), True, 'import numpy as np\n'), ((8370, 8425), 'cv2.kmeans', 'cv2.kmeans', (['pixels', 'n_colors', 'None', 'criteria', '(10)', 'flags'], {}), '(pixels, n_colors, None, criteria, 10, flags)\n', (8380, 8425), False, 'import cv2\n'), ((8590, 8607), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (8600, 8607), True, 'import numpy as np\n'), ((8669, 8696), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (8689, 8696), True, 'import tensorflow as tf\n'), ((10571, 10609), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (10583, 10609), False, 'import cv2\n'), ((10737, 10776), 'cv2.cvtColor', 'cv2.cvtColor', (['hsvImg', 'cv2.COLOR_HSV2BGR'], {}), '(hsvImg, cv2.COLOR_HSV2BGR)\n', (10749, 10776), False, 'import cv2\n'), ((11064, 11089), 'cv2.LUT', 'cv2.LUT', (['new_image', 'table'], {}), '(new_image, table)\n', (11071, 11089), False, 'import cv2\n'), ((17312, 17376), 'rospy.Subscriber', 'rospy.Subscriber', (['"""camera/rgb/image_raw"""', 'Image', 'detect_callback'], {}), "('camera/rgb/image_raw', Image, detect_callback)\n", (17328, 17376), False, 'import rospy\n'), ((17381, 17470), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/camera_sr300/color/image_raw"""', 'Image', 'validate_detection_callback'], {}), "('/camera_sr300/color/image_raw', Image,\n validate_detection_callback)\n", (17397, 17470), False, 'import rospy\n'), ((17623, 17699), 'rospy.Publisher', 'rospy.Publisher', (['"""/control_robot/camera_detection"""', 'FloatList'], {'queue_size': '(10)'}), "('/control_robot/camera_detection', FloatList, queue_size=10)\n", (17638, 17699), False, 'import rospy\n'), ((17711, 17725), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (17721, 17725), False, 'import rospy\n'), ((19001, 19094), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['path_to_labels'], {'use_display_name': '(True)'}), '(path_to_labels,\n use_display_name=True)\n', (19051, 19094), False, 'from object_detection.utils import label_map_util\n'), ((19142, 19184), 'rospy.init_node', 'rospy.init_node', (['"""tf_node"""'], {'anonymous': '(True)'}), "('tf_node', anonymous=True)\n", (19157, 19184), False, 'import rospy\n'), ((6852, 6867), 'pathlib.Path', 'Path', (['model_dir'], {}), '(model_dir)\n', (6856, 6867), False, 'from pathlib import Path\n'), ((7260, 7309), 'numpy.absolute', 'np.absolute', (['(dominants - dominant_colors_platypus)'], {}), '(dominants - 
dominant_colors_platypus)\n', (7271, 7309), True, 'import numpy as np\n'), ((7341, 7389), 'numpy.absolute', 'np.absolute', (['(dominants - dominant_colors_unicorn)'], {}), '(dominants - dominant_colors_unicorn)\n', (7352, 7389), True, 'import numpy as np\n'), ((7421, 7469), 'numpy.absolute', 'np.absolute', (['(dominants - dominant_colors_monster)'], {}), '(dominants - dominant_colors_monster)\n', (7432, 7469), True, 'import numpy as np\n'), ((9582, 9724), 'object_detection.utils.ops.reframe_box_masks_to_image_masks', 'utils_ops.reframe_box_masks_to_image_masks', (["output_dict['detection_masks']", "output_dict['detection_boxes']", 'image.shape[0]', 'image.shape[1]'], {}), "(output_dict['detection_masks'],\n output_dict['detection_boxes'], image.shape[0], image.shape[1])\n", (9624, 9724), True, 'from object_detection.utils import ops as utils_ops\n'), ((9762, 9811), 'tensorflow.cast', 'tf.cast', (['(detection_masks_reframed > 0.5)', 'tf.uint8'], {}), '(detection_masks_reframed > 0.5, tf.uint8)\n', (9769, 9811), True, 'import tensorflow as tf\n'), ((17741, 17760), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (17758, 17760), False, 'import rospy\n'), ((17776, 17787), 'hrl_object_detection.msg.FloatList', 'FloatList', ([], {}), '()\n', (17785, 17787), False, 'from hrl_object_detection.msg import FloatList\n'), ((13840, 13998), 'cv2.rectangle', 'cv2.rectangle', (['image_to_publish', '(detected_box[1], detected_box[0])', '(detected_box[3], detected_box[2])', '((0, 255, 0) if detected else (0, 0, 255))', '(2)'], {}), '(image_to_publish, (detected_box[1], detected_box[0]), (\n detected_box[3], detected_box[2]), (0, 255, 0) if detected else (0, 0, \n 255), 2)\n', (13853, 13998), False, 'import cv2\n'), ((16891, 17015), 'cv2.rectangle', 'cv2.rectangle', (['image_to_publish', '(detected_box[1], detected_box[0])', '(detected_box[3], detected_box[2])', '(0, 255, 0)', '(2)'], {}), '(image_to_publish, (detected_box[1], detected_box[0]), (\n detected_box[3], detected_box[2]), (0, 255, 0), 2)\n', (16904, 17015), False, 'import cv2\n'), ((10965, 10982), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (10974, 10982), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from verstack.categoric_encoders.args_validators import is_not_bool_na_sentinel
class Factorizer():
'''
    Assign numeric labels to a categoric column (binary/multiclass).
    Can transform a column while leaving the original NaN values untouched, or assign any user defined \
    string/number to replace the original NaN values.
'''
__version__ = '0.1.1'
def __init__(self, na_sentinel = -1):
'''
Initialize Factorizer instance.
Parameters
----------
na_sentinel : int/str/np.nan, optional
Will replace NaN by a passed value. Pass np.nan if need to keep NaN untouched.
The default is -1.
Returns
-------
None.
'''
self._pattern = None
self._colname = None
self._na_sentinel = is_not_bool_na_sentinel(na_sentinel)
self._transformed_col_dtype = None # save for transform()
def __repr__(self): # what an object will say when you call/print it
return 'verstack.categoric_encoders.Factorizer'
# ----------------------------------------------------------------------------------------
# define getters
@property
def pattern(self):
return self._pattern
@property
def colname(self):
return self._colname
@property
def na_sentinel(self):
return self._na_sentinel
# ----------------------------------------------------------------------------------------
# no setters: only configurable attribute na_sentinel is defined at init...
def fit_transform(self, df, colname):
'''
        Fit encoder, transform column in df, save attributes for transform()/inverse_transform().
Parameters
----------
df : pd.DataFrame
Data containing the colname to transform.
colname : str
Column name in df to be transformed.
Returns
-------
transformed_df : pd.DataFrame
Data with the column transformed.
'''
self._colname = colname
# try:
# data = pd.Series(df[colname])
# except TypeError:
# print('Acceptable arguments for fit_transform(pd.DataFrame, str(colname))')
# return
data = pd.Series(df[colname])
pattern = {}
# use pandas default na_sentinel == -1
labels, uniques = pd.factorize(data)
        # pd.factorize assigns labels 0..n-1 in the order of `uniques`
        pattern = dict(zip(uniques, range(len(uniques))))
# change na_sentinel to Factorizer options (including np.nan that is not allowed in pandas)
if -1 in labels:
labels = [x if x!= -1 else self._na_sentinel for x in labels]
nan_dict = {np.nan : self._na_sentinel}
pattern = {**pattern, **nan_dict}
self._pattern = pattern
transformed_df = df.copy()
transformed_df[colname] = labels
self._transformed_col_dtype = transformed_df[colname].dtype
return transformed_df
def transform(self, df):
'''
Factorize data column saved in self._colname using saved patterns
Unseen categories will be represented as NaN.
Parameters
----------
df : pd.DataFrame
Data containing the column which had been passed at fit_transform().
Returns
-------
transformed_df : pd.DataFrame
Data containing the transformed column.
'''
data = pd.Series(df[self._colname])
result = data.map(self._pattern).tolist()
# convert back to int because mapping will make all float if nan was present
result = [int(x) if x==x else x for x in result]
transformed_df = df.copy()
transformed_df[self._colname] = result
# align column type to that of fit_transform(). May be necessary if train had NaNs and test does not.
try: # try because unseen categories (if any) will appear as NaN
transformed_df[self._colname] = transformed_df[self._colname].astype(self._transformed_col_dtype)
        except (ValueError, TypeError):
pass
return transformed_df
def inverse_transform(self, df):
'''
Return transformed column in df to original values.
Parameters
----------
df : pd.DataFrame
Data containing the column which had been passed at fit_transform().
Returns
-------
transformed_df : pd.DataFrame
Data containing the transformed column.
'''
inverse_pattern = {val: key for key, val in self._pattern.items()}
data = pd.Series(df[self._colname])
try:
result = np.vectorize(inverse_pattern.get)(data)
except ValueError:
result = np.vectorize(inverse_pattern.get, otypes='O')(data)
result = [x if x not in ['nan', 'None'] else np.nan for x in result]
transformed_df = df.copy()
transformed_df[self._colname] = result
return transformed_df
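# A minimal usage sketch (illustrative only, not part of the original module);
# the column name and values below are hypothetical.
if __name__ == '__main__':
    _demo = pd.DataFrame({'color': ['red', 'blue', np.nan, 'red']})
    _enc = Factorizer(na_sentinel=-1)
    _encoded = _enc.fit_transform(_demo, 'color')   # e.g. red -> 0, blue -> 1, NaN -> -1
    _restored = _enc.inverse_transform(_encoded)    # labels mapped back to the original strings
    print(_encoded['color'].tolist(), _restored['color'].tolist())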
|
[
"pandas.factorize",
"verstack.categoric_encoders.args_validators.is_not_bool_na_sentinel",
"numpy.vectorize",
"pandas.Series"
] |
[((877, 913), 'verstack.categoric_encoders.args_validators.is_not_bool_na_sentinel', 'is_not_bool_na_sentinel', (['na_sentinel'], {}), '(na_sentinel)\n', (900, 913), False, 'from verstack.categoric_encoders.args_validators import is_not_bool_na_sentinel\n'), ((2341, 2363), 'pandas.Series', 'pd.Series', (['df[colname]'], {}), '(df[colname])\n', (2350, 2363), True, 'import pandas as pd\n'), ((2458, 2476), 'pandas.factorize', 'pd.factorize', (['data'], {}), '(data)\n', (2470, 2476), True, 'import pandas as pd\n'), ((3498, 3526), 'pandas.Series', 'pd.Series', (['df[self._colname]'], {}), '(df[self._colname])\n', (3507, 3526), True, 'import pandas as pd\n'), ((4651, 4679), 'pandas.Series', 'pd.Series', (['df[self._colname]'], {}), '(df[self._colname])\n', (4660, 4679), True, 'import pandas as pd\n'), ((4714, 4747), 'numpy.vectorize', 'np.vectorize', (['inverse_pattern.get'], {}), '(inverse_pattern.get)\n', (4726, 4747), True, 'import numpy as np\n'), ((4802, 4847), 'numpy.vectorize', 'np.vectorize', (['inverse_pattern.get'], {'otypes': '"""O"""'}), "(inverse_pattern.get, otypes='O')\n", (4814, 4847), True, 'import numpy as np\n')]
|
"""
molpy.py
A nice molecule manipulation package.
Handles the primary functions
"""
import numpy as np
from .util import distance
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from <NAME>"
return quote
class Molecule:
def __init__(self, symbols, geometry):
self.symbols = np.asarray(symbols, dtype=str)
self.geometry = np.asarray(geometry, dtype=float)
if len(self.geometry.shape) !=2:
self.geometry = self.geometry.reshape(-1,3)
if self.symbols.shape[0] != self.geometry.shape[0]:
raise ValueError("Symbol and geometry length does not match!")
def distance(self, index1, index2):
"""
Add
"""
return distance(self.geometry[index1], self.geometry[index2])
class NamedMolecule(Molecule):
"""
    A Molecule that also carries a name.
    Parameters
    ----------
    name: str
        Name of the molecule.
    symbols, geometry:
        Passed unchanged to the Molecule base class.
"""
def __init__(self, name, symbols, geometry):
self.name = name
super().__init__(symbols, geometry)
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
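    # A hedged usage sketch (coordinates are illustrative values, not from the project);
    # note the relative import above means this module normally runs inside its package.
    water = Molecule(["O", "H", "H"], [0.0, 0.0, 0.0, 0.0, 0.757, 0.587, 0.0, -0.757, 0.587])
    print(water.distance(0, 1))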
|
[
"numpy.asarray"
] |
[((796, 826), 'numpy.asarray', 'np.asarray', (['symbols'], {'dtype': 'str'}), '(symbols, dtype=str)\n', (806, 826), True, 'import numpy as np\n'), ((851, 884), 'numpy.asarray', 'np.asarray', (['geometry'], {'dtype': 'float'}), '(geometry, dtype=float)\n', (861, 884), True, 'import numpy as np\n')]
|
import json
from os.path import dirname, join
import numpy as np
import pandas as pd
from sklearn.preprocessing import KBinsDiscretizer
def _discretize(vector, **kwargs):
"""Discretizes vector with sklearn.preprocessing.KBinsDiscretizer.
Parameters
----------
vector : np.array
kwargs
Arguments passed to sklearn.preprocessing.KBinsDiscretizer constructor.
Returns
-------
discretized_vector: np.array
        The input vector discretized according to the **kwargs configuration.
"""
discretizer = KBinsDiscretizer(encode='ordinal', **kwargs)
discretized_vector = discretizer.fit_transform(vector.reshape(-1, 1)).reshape(-1)
return discretized_vector
def load_sample(as_frame=True):
"""Load and return the sample artificial dataset.
================= ==============
Samples total 10000
Dimensionality 35
Target variables 1
================= ==============
Parameters
----------
as_frame : bool, default=True
If True, the data is a pandas DataFrame including columns with
        appropriate names. The target is returned as a pandas Series.
Returns
-------
data : {np.ndarray, pd.DataFrame} of shape (10000, 35)
The data matrix. If `as_frame=True`, `data` will be a pd.DataFrame.
    target: {np.ndarray, pd.Series} of shape (10000,)
        The binary classification target variable. If `as_frame=True`, `target` will be a pd.Series.
    costs: {dict, list}
        Cost of every feature in data. If `as_frame=True`, `costs` will be a dict.
Examples
--------
>>> from bcselector.dataset import load_sample
>>> data, target, costs = load_sample()
"""
module_path = dirname(__file__)
# Load data
data = pd.read_csv(join(module_path, 'data', 'sample_data', 'sample_data.csv'))
targets = pd.read_csv(join(module_path, 'data', 'sample_data', 'sample_target.csv'))
with open(join(module_path, 'data', 'sample_data', 'sample_costs.json'), 'r') as j:
costs = json.load(j)
if as_frame:
return data, targets['Class'], costs
else:
return data.values, targets.values, list(costs.values())
def load_hepatitis(as_frame=True, discretize_data=True, **kwargs):
"""Load and return the hepatitis dataset provided.
    The hepatitis dataset is a small medical dataset with a single target variable.
Dataset is collected from UCI repository [3]_.
================= ==============
Samples total 155
Dimensionality 19
Target variables 1
================= ==============
Parameters
----------
as_frame : bool, default=True
If True, the data is a pandas DataFrame including columns with
        appropriate names. The target is returned as a pandas Series.
discretize_data: bool, default=True
If True, the returned data is discretized with sklearn.preprocessing.KBinsDiscretizer.
kwargs
Arguments passed to sklearn.preprocessing.KBinsDiscretizer constructor.
Returns
-------
    data : {np.ndarray, pd.DataFrame} of shape (155, 19)
        The data matrix. If `as_frame=True`, `data` will be a pd.DataFrame.
    target: {np.ndarray, pd.Series} of shape (155,)
        The binary classification target variable. If `as_frame=True`, `target` will be a pd.Series.
    costs: {dict, list}
        Cost of every feature in data. If `as_frame=True`, `costs` will be a dict.
References
----------
.. [3] <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
Examples
--------
>>> from bcselector.dataset import load_hepatitis
>>> data, target, costs = load_hepatitis()
"""
module_path = dirname(__file__)
# Load data
data = pd.read_csv(join(module_path, 'data', 'hepatitis', 'hepatitis.csv'))
targets = pd.read_csv(join(module_path, 'data', 'hepatitis', 'hepatitis_target.csv'))
with open(join(module_path, 'data', 'hepatitis', 'hepatitis_costs.json'), 'r') as j:
costs = json.load(j)
if discretize_data:
data_colnames = data.columns
n_bins = kwargs.get('n_bins', 10)
col_to_discretize = data.nunique()[data.nunique() > n_bins].index
col_not_changing = data.nunique()[data.nunique() <= n_bins].index
data_discretized = np.apply_along_axis(func1d=_discretize, axis=0, arr=data[col_to_discretize].values, **kwargs)
data = pd.concat([pd.DataFrame(data_discretized, columns=col_to_discretize), data[col_not_changing]], axis=1)
data = data[data_colnames]
if as_frame:
return data, targets['Class'], costs
else:
return data.values, targets.values, list(costs.values())
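# A minimal usage sketch (assumption, not part of the module); n_bins=5 is an
# arbitrary illustrative value forwarded to KBinsDiscretizer.
if __name__ == '__main__':
    data, target, costs = load_hepatitis(as_frame=True, n_bins=5)
    print(data.shape, target.shape, len(costs))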
|
[
"pandas.DataFrame",
"json.load",
"os.path.dirname",
"numpy.apply_along_axis",
"sklearn.preprocessing.KBinsDiscretizer",
"os.path.join"
] |
[((530, 574), 'sklearn.preprocessing.KBinsDiscretizer', 'KBinsDiscretizer', ([], {'encode': '"""ordinal"""'}), "(encode='ordinal', **kwargs)\n", (546, 574), False, 'from sklearn.preprocessing import KBinsDiscretizer\n'), ((1781, 1798), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1788, 1798), False, 'from os.path import dirname, join\n'), ((3947, 3964), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (3954, 3964), False, 'from os.path import dirname, join\n'), ((1838, 1897), 'os.path.join', 'join', (['module_path', '"""data"""', '"""sample_data"""', '"""sample_data.csv"""'], {}), "(module_path, 'data', 'sample_data', 'sample_data.csv')\n", (1842, 1897), False, 'from os.path import dirname, join\n'), ((1925, 1986), 'os.path.join', 'join', (['module_path', '"""data"""', '"""sample_data"""', '"""sample_target.csv"""'], {}), "(module_path, 'data', 'sample_data', 'sample_target.csv')\n", (1929, 1986), False, 'from os.path import dirname, join\n'), ((2093, 2105), 'json.load', 'json.load', (['j'], {}), '(j)\n', (2102, 2105), False, 'import json\n'), ((4005, 4060), 'os.path.join', 'join', (['module_path', '"""data"""', '"""hepatitis"""', '"""hepatitis.csv"""'], {}), "(module_path, 'data', 'hepatitis', 'hepatitis.csv')\n", (4009, 4060), False, 'from os.path import dirname, join\n'), ((4088, 4150), 'os.path.join', 'join', (['module_path', '"""data"""', '"""hepatitis"""', '"""hepatitis_target.csv"""'], {}), "(module_path, 'data', 'hepatitis', 'hepatitis_target.csv')\n", (4092, 4150), False, 'from os.path import dirname, join\n'), ((4258, 4270), 'json.load', 'json.load', (['j'], {}), '(j)\n', (4267, 4270), False, 'import json\n'), ((4551, 4649), 'numpy.apply_along_axis', 'np.apply_along_axis', ([], {'func1d': '_discretize', 'axis': '(0)', 'arr': 'data[col_to_discretize].values'}), '(func1d=_discretize, axis=0, arr=data[col_to_discretize]\n .values, **kwargs)\n', (4570, 4649), True, 'import numpy as np\n'), ((2003, 2064), 'os.path.join', 'join', (['module_path', '"""data"""', '"""sample_data"""', '"""sample_costs.json"""'], {}), "(module_path, 'data', 'sample_data', 'sample_costs.json')\n", (2007, 2064), False, 'from os.path import dirname, join\n'), ((4167, 4229), 'os.path.join', 'join', (['module_path', '"""data"""', '"""hepatitis"""', '"""hepatitis_costs.json"""'], {}), "(module_path, 'data', 'hepatitis', 'hepatitis_costs.json')\n", (4171, 4229), False, 'from os.path import dirname, join\n'), ((4671, 4728), 'pandas.DataFrame', 'pd.DataFrame', (['data_discretized'], {'columns': 'col_to_discretize'}), '(data_discretized, columns=col_to_discretize)\n', (4683, 4728), True, 'import pandas as pd\n')]
|
import datetime
import json
import logging
import os
import sys
import time
from urllib.parse import urlparse
import boto3
import click
import numpy as np
import pandas as pd
logging.basicConfig(
format="[%(asctime)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
)
timeout_seconds = 10800
SEED = 1234
def read_file(input_file):
file_contents = []
with open(input_file) as f:
for line in f:
file_contents.append(line.strip())
return file_contents
def read_json(json_file):
""" (file) -> dict
Read in json_file, which is in json format, and
output a dict with its contents
"""
with open(json_file) as data_file:
data = json.load(data_file)
return data
def get_envvars(environment_variable_list):
if environment_variable_list:
additional_vars = []
for envvar in environment_variable_list:
split_point = envvar.index("=")
k = envvar[:split_point]
v = envvar[split_point + 1 :]
d = {"name": k, "value": v}
additional_vars.append(d)
else:
additional_vars = []
return additional_vars
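# get_envvars example (hypothetical values):
#   ["FOO=bar", "N=2"] -> [{"name": "FOO", "value": "bar"}, {"name": "N", "value": "2"}]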
def get_images_from_s3_dataset(input_bucket, upload_metadata, bids):
# setup boto stuff so it only needs to be called once
if bids:
s3_client = boto3.client("s3")
bucket, base_key = split_s3_path(input_bucket)
if upload_metadata:
s3_resource = boto3.resource("s3")
# Using the CMeDS-style demographics.tsv to ensure all required fields are available
# future release may use participants.tsv directly with the bids option
demographics_file = os.path.join(input_bucket, "demographics.tsv")
demographics = pd.read_csv(demographics_file, sep="\t")
images = []
for _, row in demographics.iterrows():
# a `file_type` column is required, which contains either dicom or nifti
file_type = row["file_type"]
if bids:
subject_id = f"sub-{row['subject_id']}"
key = os.path.join(base_key, subject_id)
objs = s3_client.list_objects(Bucket=bucket, Prefix=key)
# NOTE: this block gets around a missing image in bids, may be hacky for now
try:
t1w_images = [
obj["Key"] for obj in objs["Contents"] if "T1w.nii.gz" in obj["Key"]
]
# #@# NOTE: choosing random image to use for now
image = f"s3://{bucket}/{select_random_image(t1w_images)}"
except KeyError:
logging.log(logging.INFO, f"#@# {key}")
continue
else:
subject_id = row["subject_id"]
if file_type == "nifti":
image = os.path.join(input_bucket, subject_id, f"{subject_id}.nii.gz")
elif file_type == "dicom":
image = os.path.join(input_bucket, subject_id, "dicom")
else:
raise ValueError("file_type must be either dicom or nifti")
images.append(image)
if upload_metadata:
if (
file_type == "dicom"
): # we want the metadata to be named after `subject_id`, not 'dicom'
image = os.path.join(os.path.dirname(image), subject_id)
upload_metadata_json(image, row, s3_resource, bids, false_dates=True)
return images
def select_random_image(image_list):
np.random.seed(SEED)
return np.random.choice(image_list)
def upload_metadata_json(image, row, s3_resource, bids, false_dates):
metadata = create_metadata_json(row, false_dates)
bucket, key = split_s3_path(image)
content = json.dumps(metadata, indent=2, sort_keys=True)
if bids:
path, image = os.path.split(key)
        # subject name is the first part of the filename in BIDS; slice off the 'sub-'
        # prefix (lstrip("sub-") strips characters, not the prefix, and can eat ID letters)
        subname = image.split("_")[0]
        subname = subname[len("sub-"):] if subname.startswith("sub-") else subname
metadata_path = os.path.join(path, f"{subname}.json")
else:
metadata_path = f"{key.strip('nii.gz')}.json"
logging.log(
logging.INFO,
f"#@# Uploading metadata to s3://{os.path.join(bucket, metadata_path)}:\n{metadata}",
)
return s3_resource.Object(bucket, metadata_path).put(Body=content)
def split_s3_path(image):
parsed = urlparse(image)
bucket = parsed.netloc
key = parsed.path.lstrip("/")
return bucket, key
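# split_s3_path example (hypothetical path):
#   "s3://my-bucket/group1/subj01/subj01.nii.gz" -> ("my-bucket", "group1/subj01/subj01.nii.gz")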
def create_metadata_json(row, false_dates=True):
# #@# note-- false dates are used for now, if not specified
# in demographics, dates should be in %Y%m%d format (or standardized in someway)
# will use a made up date or put Unknown in for now, if not present. doesn't force format
fake_date = datetime.date(1970, 1, 1)
fake_scandate = False
try:
scan_date = (
f"{datetime.datetime.strptime(str(row['scan_date']), '%Y%m%d'):%Y%m%d}"
)
except KeyError:
if false_dates:
scan_date = f"{fake_date:%Y%m%d}"
else:
scan_date = "Unknown"
fake_scandate = True
try:
birth_date = f"{datetime.datetime.strptime(str(row['dob']), '%Y%m%d'):%Y%m%d}"
except KeyError:
if fake_scandate:
if false_dates:
birth_date = (
f"{fake_date - datetime.timedelta(days=365.25*row['age']):%Y%m%d}"
)
else:
                birth_date = "Unknown"
else:
birth_date = f"{datetime.datetime.strptime(scan_date, '%Y%m%d') - datetime.timedelta(days=365.25*row['age']):%Y%m%d}"
scan_details = {
"Patient's Name": row["subject_id"],
"Patient ID": row["source"],
"Patient's Sex": row["sex"],
"Patient's Birth Date": str(birth_date),
"Patient's Age": int(row["age"]),
"Acquisition Date": str(scan_date),
"Manufacturer": row["manufacturer"],
"Magnetic Field Strength": float(row["field_strength"]),
"Diagnosis": row["diagnosis"],
}
subject_metadata = {"metadata": {"scan_details": {}}}
subject_metadata["metadata"]["scan_details"] = scan_details
return subject_metadata
def create_submission_details(
image, output_bucket, license, timeout, additional_vars, bids
):
"""
Subject image is found from image name, using schema described in the study_dir readme
"""
submission_details = get_parsed_s3_image(image, bids)
submission_details["name"] = "--".join(
[submission_details["group"], submission_details["subject"]]
)
submission_details["output_path"] = os.path.join(
output_bucket, submission_details["group"], submission_details["subject"]
)
# #@# for now, hardcoding product_id/license_key within the submission, as opposed to the job_definition previously
# #@# eventually, this needs to be changed to real values (and passed without exposing values)
submission_details["environment_overrides"] = {
"environment": [
{"name": "INPUT_IMAGE", "value": submission_details["image"]},
{"name": "CMET_TIMEOUT", "value": str(timeout)},
{"name": "OUTPUT_PATH", "value": submission_details["output_path"]},
*additional_vars,
*license,
]
}
if submission_details["mode"] == "nifti":
# makes assumption that sidecar json exists, which is required for CMet dataset structure
if bids:
json_path, _ = os.path.split(submission_details["image"])
subject_json = os.path.join(
json_path, f"{submission_details['subject']}.json"
)
submission_details["json"] = subject_json
else:
submission_details["json"] = f"{image.strip('nii.gz')}.json"
submission_details["environment_overrides"]["environment"].append(
{"name": "SUBJECT_METADATA", "value": submission_details["json"]}
)
return submission_details
def get_parsed_s3_image(s3_image_path, bids):
bucket, path = split_s3_path(s3_image_path)
split_path = path.split("/")
parsed_dict = dict()
if len(split_path) < 3:
# this happens if the s3_image_path is not s3://bucket/dataset/subject/subject_image (or longer)
message = "The provided s3_image_path is too short! Doesn't contain enough parts to determine subject's name/group."
raise TypeError(message)
if path.endswith("nii.gz"):
parsed_dict["mode"] = "nifti"
else: # assuming if it is not a .nii.gz file, it is a directory of dicoms
parsed_dict["mode"] = "dicom"
parsed_dict["image"] = s3_image_path
parsed_dict["bucket"] = bucket
parsed_dict["key"] = path
parsed_dict["image_basename"] = split_path[
-1
] # the last part of the s3_image path (the *.nii.gz file or dicom dir)
if bids:
        # in BIDS, the subject name is the first part of the image_basename
        split_image_basename = parsed_dict["image_basename"].split("_")[0]
        # remove the 'sub-' prefix (part of the BIDS spec); str.strip("sub-") would
        # strip characters rather than the prefix and can eat letters of the ID
        parsed_dict["subject"] = (
            split_image_basename[len("sub-"):]
            if split_image_basename.startswith("sub-")
            else split_image_basename
        )
parsed_dict["group"] = find_bids_group(split_path)
else:
parsed_dict["subject"] = split_path[
-2
] # 2nd-to-last part of s3_image_path (which is the subject's name in the CMet dataset structure)
parsed_dict["group"] = split_path[
-3
] # the 3rd-to-last part of s3_image_path (which is the directory containing subjects in the CMet dataset structure)
return parsed_dict
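# get_parsed_s3_image example for a hypothetical CMeDS path (bids=False):
#   "s3://bucket/study/subj01/subj01.nii.gz" -> mode "nifti", subject "subj01", group "study"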
def find_bids_group(split_path):
group = ""
for part in split_path:
if (
"sub-" in part
): # this will first occur at 1 level deeper than the group, according to the BIDS spec
return group
else:
group = part # make this part the candidate for group name
raise ValueError("Cannot determine group name!")
def submit_job(job, job_definition, queue, client, log=True):
response = client.submit_job(
jobName=job["name"],
jobQueue=queue,
jobDefinition=job_definition,
containerOverrides=job["environment_overrides"],
)
if log:
logging.log(logging.INFO, "#@# jobName: {}".format(job["name"]))
logging.log(logging.INFO, response)
return response
def save_submission_details(submitted_jobs, output_file, queue, job_definition):
""" (dict(dict), str, str, str) -> None
Write out a JSON file of information about Batch submissions to output_file,
with details taken from submitted_jobs
Includes the following items:
jobId- the AWS generated ID: response['jobId']. also, this is the Key for each submitted
job's JSON object
jobName- the user-generated job name: response['jobName']
date- time of submission: response['ResponseMetadata']['HTTPHeaders']['date']
date is in the format of: '%a,%d %b %Y %H:%M:%S %Z'
output_path- the location where processing output is saved
subject_id- name of subject being processed
image- image being processed
group- group that the subject belongs to
queue- queue ARN the job was submitted to
jobDefinition- job defintion that was used for processing
"""
submission_details = dict()
for job_id, job in submitted_jobs.items():
output_dict = dict()
output_dict["jobId"] = job["jobId"]
output_dict["jobName"] = job["jobName"]
output_dict["date"] = job["ResponseMetadata"]["HTTPHeaders"]["date"]
output_dict["output_path"] = job["output_path"]
output_dict["subject_id"] = job["subject"]
output_dict["image"] = job["image"]
output_dict["group"] = job["group"]
output_dict["queue"] = queue
output_dict["jobDefinition"] = job_definition
submission_details[job_id] = output_dict
with open(output_file, "w") as f:
json.dump(submission_details, f, sort_keys=True, indent=2)
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"], "max_content_width": 160}
@click.command(context_settings=CONTEXT_SETTINGS)
@click.version_option()
@click.option(
"--queue", "-q", help="AWS Batch job queue to submit to. Must be a valid ARN."
)
# @click.option(
# "--container_tag",
# "-t",
# help=(
# "Tag and URL of the container to be used within the batch job. "
# "This builds a job definition on the fly (if one doesn't exist). "
# "If the images to be processed are a mix of dicom and nifti, "
# "then this MUST be used. "
# "An example would be:\n"
# " - nifti: 123456789012.abc.ecr.us-east-1.amazonaws.com/my-company/rethinq/rethinq-test:1.0.0-rc.14\n"
# "Cannot be used with: --job_definition/-j"
# ),
# )
@click.option(
"--job_definition",
"-j",
help=(
"Job definition to run on AWS Batch. Must be a valid ARN. "
# "Cannot be used with --container_tag/-t"
),
)
@click.option(
"--image_list",
"-f",
default=None,
help=(
"File containing images to process, one AWS S3 path per line. "
"Cannot use with --image/-i or --input_bucket/-I"
),
)
@click.option(
"--input_bucket",
"-I",
default=None,
help=(
"Dataset to process, stored on S3 in CMet dataset structure format (CMeDS). "
"This is an S3 directory, with a top-level demographics.tsv file "
"containing all subjects that should be processed. "
"Cannot use with --image_list/-f or --image/-i"
),
)
@click.option(
"--image",
"-i",
multiple=True,
help=(
"AWS S3 path for image to process "
"Can use multiple times "
"(such as `-i s3://bucket/image -i s3://bucket/image2). "
"Cannot use with --image_list/-f or --input_bucket/-I"
),
)
@click.option("--output_bucket", "-o", help=("AWS S3 bucket pathto write to. "))
@click.option(
"--timeout",
type=int,
default=timeout_seconds,
help=f"CMet timeout time in seconds. Default is {timeout_seconds} ({timeout_seconds/3600:.1f} hours).",
)
@click.option(
"--save_details/--no_save_details",
default=True,
help="Save CSV of job submission details. Default will save details.",
)
@click.option(
"--submission_log",
"-l",
type=click.Path(),
default=os.path.join(os.getcwd(), "submission.json"),
help=(
"File path of where to save CSV of job submission details "
"Default is $PWD/submission.json."
),
)
@click.option(
"--upload_metadata",
is_flag=True,
default=False,
help=(
"Upload metadata_json before submission. "
"Can only be used with --input_bucket/-I. "
"Default will not upload metadata"
),
)
@click.option(
"--upload_only",
is_flag=True,
default=False,
help=(
"Only upload subject_metadata, and do not process any data. "
"Can only be used with --upload_metadata. "
"Default will also submit data for processing"
),
)
@click.option(
"--check/--no_check",
default=True,
help="Checks whether image exists before submitting. Default will check image exists.",
)
@click.option(
"--skip",
is_flag=True,
default=False,
help=(
"Skip images that don't exist, and submit remaining existing images. "
"Can only be used with --check. "
"Default will cause an exit if images don't exist"
),
)
@click.option(
"--bids",
is_flag=True,
default=False,
help=(
"Input data is in BIDS format, as opposed to CMet dataset structure (CMeDS). "
"Can only be used with --input_bucket/-I. Default is CMeDS"
),
)
@click.option(
"--stagger",
is_flag=True,
default=False,
help=(
"Staggers submission of images so that AWS `toomanyrequests` errors don't occur. "
"This submits 50 images, waits 5 minutes for them to begin processing, "
"then submits additional images with a 2 second pause between each. "
"Only does something if there are greater than 200 images to process. "
"Default will not stagger (and submit all images with no waiting)"
),
)
# @click.option(
# "--license_strict",
# is_flag=True,
# default=False,
# help=(
# "Uses a normal user license instead of a dev license. "
# "This will cause submitted images to fail if certain metadata is not provided. "
# "Note that most nifti input images will fail if this is used."
# "Default will use a dev license (not failing out on thse checks)."
# ),
# )
@click.option(
"--license_file",
"-L",
help=(
"Use a user-provided license. "
"This must be a JSON file in the format of:\n"
'[{"name": "CMET_RETHINQ_PRODUCT_ID","value":\n"cmet_rethinq_product_id_value"},\n"name": "CMET_RETHINQ_LICENSE_KEY","value":\n"cmet_rethinq_license_key_value"}]\n'
),
)
# @click.option(
# "--product_type",
# type=click.Choice(["rethinq", "thinq"]),
# default="rethinq",
# help=(
# "Product type to use (either thinq or rethinq. "
# "Note that thinq cannot except nifti inputs. "
# "Default is rethinq"
# ),
# )
@click.option(
"--environment_variable",
"-e",
multiple=True,
help=(
"Additional environment variable(s) to pass to the container. "
"NOTE: must use the syntax `KEY=VALUE` (no space in between)."
),
)
def submit_subjects(
queue,
# container_tag, #@# setting up job definition by container is a bit of a hassle
job_definition,
image_list,
input_bucket,
image,
output_bucket,
timeout,
save_details,
submission_log,
upload_metadata,
upload_only,
check,
skip,
bids,
stagger,
# license_strict,
license_file,
# product_type, #@# this will only work with rethinq
environment_variable,
):
# make sure only one way to specify input images is used
if not image and not image_list and not input_bucket:
message = (
"Must specify input, with one of:\n"
" --image_list/-f\n"
" --image/-i\n"
" --input_bucket/-I\n"
)
sys.exit(message)
if (
(image_list and image)
or (image_list and input_bucket)
or (input_bucket and image)
):
message = (
"Cannot specify multiple input opions! Use one of:\n"
" --image_list/-f\n"
" --image/-i\n"
" --input_bucket/-I\n"
)
sys.exit(message)
if not output_bucket:
message = "Output bucket required!"
sys.exit(message)
if upload_metadata and not input_bucket:
message = (
"Can only specify --upload_metadata when giving a s3 dataset to process!"
)
sys.exit(message)
if bids and not input_bucket:
message = "Can only specify --bids when giving a s3 dataset to process!"
sys.exit(message)
# `upload_only` can only be done if `upload_metadata` is also selected
if upload_only and not upload_metadata:
message = "Can only do --upload_only when --upload_metadata is also selected!"
sys.exit(message)
# find images to process, and exit if there aren't any
images_to_process = []
if image_list:
images_to_process.extend(read_file(image_list))
if image:
images_to_process.extend(image)
if input_bucket:
images_to_process.extend(
get_images_from_s3_dataset(input_bucket, upload_metadata, bids)
)
if not images_to_process:
message = "No images to process!"
sys.exit(message)
# only do checks and submissions if not upload_only
if not upload_only:
# license info
if not license_file:
message = "License is required!"
sys.exit(message)
license = read_json(license_file)
if not job_definition:
message = "job definition must be given! Use --job_definition/-j to specify"
sys.exit(message)
# make sure we check for image existence if we use skip
if not check and skip:
message = "Must check for image existence to be able to skip those that don't exist"
sys.exit(message)
# check that images exist
nonexistent = []
if check:
s3 = boto3.client("s3")
for img in images_to_process:
bucket, key = split_s3_path(img)
objs = s3.list_objects(Bucket=bucket, Prefix=key)
if "Contents" not in objs:
nonexistent.append(img)
if nonexistent:
imgs = "\n".join(nonexistent)
message = f"The following images do not exist:\n{imgs}"
logging.log(logging.INFO, message)
if skip:
logging.log(
logging.INFO,
"Nonexistent images were skipped, continuing to submit existing images...",
)
if not skip:
message = (
"Exiting without submitting images.\n"
"Confirm the correct images are given, or run with --skip to submit all existing images.\n"
"Alternatively, run with the --no-check option to attempt to submit all images without checking for existence."
)
sys.exit(message)
# start a boto3 session, check queue exists, submit the jobs, and optionally save the details
batch_client = boto3.client("batch")
queue_response = batch_client.describe_job_queues()
all_queues = [queue["jobQueueArn"] for queue in queue_response["jobQueues"]]
if queue not in all_queues:
message = f"Queue '{queue}' does not exist!)"
sys.exit(message)
submitted_jobs = dict()
count = 0
for img in images_to_process:
if (img in nonexistent) and skip:
continue
additional_vars = get_envvars(environment_variable)
job = create_submission_details(
img, output_bucket, license, timeout, additional_vars, bids
)
if stagger and len(images_to_process) > 200:
if count == 50:
message = f"#@# Waiting 5 minutes for AWS to catch up..."
logging.log(logging.INFO, message)
time.sleep(300)
if count > 50:
time.sleep(2)
job_submission_response = submit_job(
job, job_definition, queue, batch_client
)
job_key = job_submission_response["jobId"]
submitted_jobs[job_key] = {**job, **job_submission_response}
count += 1
message = f"#@# Submitted job {count} out of {len(images_to_process)}"
logging.log(logging.INFO, message)
if save_details:
save_submission_details(
submitted_jobs, submission_log, queue, job_definition
)
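# --- Hedged usage sketch (not part of the original script) --------------------
# A minimal example of how this command might be invoked; the entry-point name,
# ARNs, bucket names, and license path below are placeholders, not values taken
# from this repository.
#
#   python submit_subjects.py \
#       --queue arn:aws:batch:us-east-1:111111111111:job-queue/example-queue \
#       --job_definition arn:aws:batch:us-east-1:111111111111:job-definition/example:1 \
#       --input_bucket s3://example-input-bucket/dataset \
#       --output_bucket s3://example-output-bucket/results \
#       --license_file ./license.json \
#       --stagger
#
# where ./license.json follows the format described in the --license_file help:
#
#   [{"name": "CMET_RETHINQ_PRODUCT_ID", "value": "cmet_rethinq_product_id_value"},
#    {"name": "CMET_RETHINQ_LICENSE_KEY", "value": "cmet_rethinq_license_key_value"}]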
|
[
"click.version_option",
"numpy.random.seed",
"boto3.client",
"pandas.read_csv",
"click.option",
"json.dumps",
"boto3.resource",
"click.Path",
"os.path.join",
"urllib.parse.urlparse",
"os.path.dirname",
"logging.log",
"click.command",
"datetime.timedelta",
"numpy.random.choice",
"json.dump",
"datetime.date",
"time.sleep",
"datetime.datetime.strptime",
"sys.exit",
"json.load",
"logging.basicConfig",
"os.getcwd",
"os.path.split"
] |
[((177, 285), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s] %(message)s"""', 'level': 'logging.INFO', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='[%(asctime)s] %(message)s', level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\n", (196, 285), False, 'import logging\n'), ((12099, 12147), 'click.command', 'click.command', ([], {'context_settings': 'CONTEXT_SETTINGS'}), '(context_settings=CONTEXT_SETTINGS)\n', (12112, 12147), False, 'import click\n'), ((12149, 12171), 'click.version_option', 'click.version_option', ([], {}), '()\n', (12169, 12171), False, 'import click\n'), ((12173, 12270), 'click.option', 'click.option', (['"""--queue"""', '"""-q"""'], {'help': '"""AWS Batch job queue to submit to. Must be a valid ARN."""'}), "('--queue', '-q', help=\n 'AWS Batch job queue to submit to. Must be a valid ARN.')\n", (12185, 12270), False, 'import click\n'), ((12817, 12926), 'click.option', 'click.option', (['"""--job_definition"""', '"""-j"""'], {'help': '"""Job definition to run on AWS Batch. Must be a valid ARN. """'}), "('--job_definition', '-j', help=\n 'Job definition to run on AWS Batch. Must be a valid ARN. ')\n", (12829, 12926), False, 'import click\n'), ((13005, 13180), 'click.option', 'click.option', (['"""--image_list"""', '"""-f"""'], {'default': 'None', 'help': '"""File containing images to process, one AWS S3 path per line. Cannot use with --image/-i or --input_bucket/-I"""'}), "('--image_list', '-f', default=None, help=\n 'File containing images to process, one AWS S3 path per line. Cannot use with --image/-i or --input_bucket/-I'\n )\n", (13017, 13180), False, 'import click\n'), ((13218, 13521), 'click.option', 'click.option', (['"""--input_bucket"""', '"""-I"""'], {'default': 'None', 'help': '"""Dataset to process, stored on S3 in CMet dataset structure format (CMeDS). This is an S3 directory, with a top-level demographics.tsv file containing all subjects that should be processed. Cannot use with --image_list/-f or --image/-i"""'}), "('--input_bucket', '-I', default=None, help=\n 'Dataset to process, stored on S3 in CMet dataset structure format (CMeDS). This is an S3 directory, with a top-level demographics.tsv file containing all subjects that should be processed. Cannot use with --image_list/-f or --image/-i'\n )\n", (13230, 13521), False, 'import click\n'), ((13581, 13807), 'click.option', 'click.option', (['"""--image"""', '"""-i"""'], {'multiple': '(True)', 'help': '"""AWS S3 path for image to process Can use multiple times (such as `-i s3://bucket/image -i s3://bucket/image2). Cannot use with --image_list/-f or --input_bucket/-I"""'}), "('--image', '-i', multiple=True, help=\n 'AWS S3 path for image to process Can use multiple times (such as `-i s3://bucket/image -i s3://bucket/image2). Cannot use with --image_list/-f or --input_bucket/-I'\n )\n", (13593, 13807), False, 'import click\n'), ((13867, 13944), 'click.option', 'click.option', (['"""--output_bucket"""', '"""-o"""'], {'help': '"""AWS S3 bucket pathto write to. """'}), "('--output_bucket', '-o', help='AWS S3 bucket pathto write to. ')\n", (13879, 13944), False, 'import click\n'), ((13948, 14124), 'click.option', 'click.option', (['"""--timeout"""'], {'type': 'int', 'default': 'timeout_seconds', 'help': 'f"""CMet timeout time in seconds. Default is {timeout_seconds} ({timeout_seconds / 3600:.1f} hours)."""'}), "('--timeout', type=int, default=timeout_seconds, help=\n f'CMet timeout time in seconds. 
Default is {timeout_seconds} ({timeout_seconds / 3600:.1f} hours).'\n )\n", (13960, 14124), False, 'import click\n'), ((14133, 14271), 'click.option', 'click.option', (['"""--save_details/--no_save_details"""'], {'default': '(True)', 'help': '"""Save CSV of job submission details. Default will save details."""'}), "('--save_details/--no_save_details', default=True, help=\n 'Save CSV of job submission details. Default will save details.')\n", (14145, 14271), False, 'import click\n'), ((14544, 14738), 'click.option', 'click.option', (['"""--upload_metadata"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Upload metadata_json before submission. Can only be used with --input_bucket/-I. Default will not upload metadata"""'}), "('--upload_metadata', is_flag=True, default=False, help=\n 'Upload metadata_json before submission. Can only be used with --input_bucket/-I. Default will not upload metadata'\n )\n", (14556, 14738), False, 'import click\n'), ((14787, 15008), 'click.option', 'click.option', (['"""--upload_only"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Only upload subject_metadata, and do not process any data. Can only be used with --upload_metadata. Default will also submit data for processing"""'}), "('--upload_only', is_flag=True, default=False, help=\n 'Only upload subject_metadata, and do not process any data. Can only be used with --upload_metadata. Default will also submit data for processing'\n )\n", (14799, 15008), False, 'import click\n'), ((15057, 15203), 'click.option', 'click.option', (['"""--check/--no_check"""'], {'default': '(True)', 'help': '"""Checks whether image exists before submitting. Default will check image exists."""'}), "('--check/--no_check', default=True, help=\n 'Checks whether image exists before submitting. Default will check image exists.'\n )\n", (15069, 15203), False, 'import click\n'), ((15210, 15427), 'click.option', 'click.option', (['"""--skip"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Skip images that don\'t exist, and submit remaining existing images. Can only be used with --check. Default will cause an exit if images don\'t exist"""'}), '(\'--skip\', is_flag=True, default=False, help=\n "Skip images that don\'t exist, and submit remaining existing images. Can only be used with --check. Default will cause an exit if images don\'t exist"\n )\n', (15222, 15427), False, 'import click\n'), ((15476, 15679), 'click.option', 'click.option', (['"""--bids"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Input data is in BIDS format, as opposed to CMet dataset structure (CMeDS). Can only be used with --input_bucket/-I. Default is CMeDS"""'}), "('--bids', is_flag=True, default=False, help=\n 'Input data is in BIDS format, as opposed to CMet dataset structure (CMeDS). Can only be used with --input_bucket/-I. Default is CMeDS'\n )\n", (15488, 15679), False, 'import click\n'), ((15717, 16140), 'click.option', 'click.option', (['"""--stagger"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Staggers submission of images so that AWS `toomanyrequests` errors don\'t occur. This submits 50 images, waits 5 minutes for them to begin processing, then submits additional images with a 2 second pause between each. Only does something if there are greater than 200 images to process. Default will not stagger (and submit all images with no waiting)"""'}), '(\'--stagger\', is_flag=True, default=False, help=\n "Staggers submission of images so that AWS `toomanyrequests` errors don\'t occur. 
This submits 50 images, waits 5 minutes for them to begin processing, then submits additional images with a 2 second pause between each. Only does something if there are greater than 200 images to process. Default will not stagger (and submit all images with no waiting)"\n )\n', (15729, 16140), False, 'import click\n'), ((16628, 16917), 'click.option', 'click.option', (['"""--license_file"""', '"""-L"""'], {'help': '"""Use a user-provided license. This must be a JSON file in the format of:\n[{"name": "CMET_RETHINQ_PRODUCT_ID","value":\n"cmet_rethinq_product_id_value"},\n"name": "CMET_RETHINQ_LICENSE_KEY","value":\n"cmet_rethinq_license_key_value"}]\n"""'}), '(\'--license_file\', \'-L\', help=\n """Use a user-provided license. This must be a JSON file in the format of:\n[{"name": "CMET_RETHINQ_PRODUCT_ID","value":\n"cmet_rethinq_product_id_value"},\n"name": "CMET_RETHINQ_LICENSE_KEY","value":\n"cmet_rethinq_license_key_value"}]\n"""\n )\n', (16640, 16917), False, 'import click\n'), ((17249, 17448), 'click.option', 'click.option', (['"""--environment_variable"""', '"""-e"""'], {'multiple': '(True)', 'help': '"""Additional environment variable(s) to pass to the container. NOTE: must use the syntax `KEY=VALUE` (no space in between)."""'}), "('--environment_variable', '-e', multiple=True, help=\n 'Additional environment variable(s) to pass to the container. NOTE: must use the syntax `KEY=VALUE` (no space in between).'\n )\n", (17261, 17448), False, 'import click\n'), ((1661, 1707), 'os.path.join', 'os.path.join', (['input_bucket', '"""demographics.tsv"""'], {}), "(input_bucket, 'demographics.tsv')\n", (1673, 1707), False, 'import os\n'), ((1727, 1767), 'pandas.read_csv', 'pd.read_csv', (['demographics_file'], {'sep': '"""\t"""'}), "(demographics_file, sep='\\t')\n", (1738, 1767), True, 'import pandas as pd\n'), ((3429, 3449), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (3443, 3449), True, 'import numpy as np\n'), ((3461, 3489), 'numpy.random.choice', 'np.random.choice', (['image_list'], {}), '(image_list)\n', (3477, 3489), True, 'import numpy as np\n'), ((3669, 3715), 'json.dumps', 'json.dumps', (['metadata'], {'indent': '(2)', 'sort_keys': '(True)'}), '(metadata, indent=2, sort_keys=True)\n', (3679, 3715), False, 'import json\n'), ((4293, 4308), 'urllib.parse.urlparse', 'urlparse', (['image'], {}), '(image)\n', (4301, 4308), False, 'from urllib.parse import urlparse\n'), ((4703, 4728), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (4716, 4728), False, 'import datetime\n'), ((6567, 6659), 'os.path.join', 'os.path.join', (['output_bucket', "submission_details['group']", "submission_details['subject']"], {}), "(output_bucket, submission_details['group'], submission_details\n ['subject'])\n", (6579, 6659), False, 'import os\n'), ((705, 725), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (714, 725), False, 'import json\n'), ((1330, 1348), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (1342, 1348), False, 'import boto3\n'), ((1450, 1470), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (1464, 1470), False, 'import boto3\n'), ((3752, 3770), 'os.path.split', 'os.path.split', (['key'], {}), '(key)\n', (3765, 3770), False, 'import os\n'), ((3939, 3976), 'os.path.join', 'os.path.join', (['path', 'f"""{subname}.json"""'], {}), "(path, f'{subname}.json')\n", (3951, 3976), False, 'import os\n'), ((10314, 10349), 'logging.log', 'logging.log', (['logging.INFO', 'response'], {}), '(logging.INFO, 
response)\n', (10325, 10349), False, 'import logging\n'), ((11950, 12008), 'json.dump', 'json.dump', (['submission_details', 'f'], {'sort_keys': '(True)', 'indent': '(2)'}), '(submission_details, f, sort_keys=True, indent=2)\n', (11959, 12008), False, 'import json\n'), ((18257, 18274), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (18265, 18274), False, 'import sys\n'), ((18608, 18625), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (18616, 18625), False, 'import sys\n'), ((18704, 18721), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (18712, 18721), False, 'import sys\n'), ((18891, 18908), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (18899, 18908), False, 'import sys\n'), ((19032, 19049), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (19040, 19049), False, 'import sys\n'), ((19265, 19282), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (19273, 19282), False, 'import sys\n'), ((19721, 19738), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (19729, 19738), False, 'import sys\n'), ((21696, 21717), 'boto3.client', 'boto3.client', (['"""batch"""'], {}), "('batch')\n", (21708, 21717), False, 'import boto3\n'), ((14340, 14352), 'click.Path', 'click.Path', ([], {}), '()\n', (14350, 14352), False, 'import click\n'), ((2033, 2067), 'os.path.join', 'os.path.join', (['base_key', 'subject_id'], {}), '(base_key, subject_id)\n', (2045, 2067), False, 'import os\n'), ((7439, 7481), 'os.path.split', 'os.path.split', (["submission_details['image']"], {}), "(submission_details['image'])\n", (7452, 7481), False, 'import os\n'), ((7509, 7573), 'os.path.join', 'os.path.join', (['json_path', 'f"""{submission_details[\'subject\']}.json"""'], {}), '(json_path, f"{submission_details[\'subject\']}.json")\n', (7521, 7573), False, 'import os\n'), ((19929, 19946), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (19937, 19946), False, 'import sys\n'), ((20122, 20139), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (20130, 20139), False, 'import sys\n'), ((20344, 20361), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (20352, 20361), False, 'import sys\n'), ((20456, 20474), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (20468, 20474), False, 'import boto3\n'), ((21969, 21986), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (21977, 21986), False, 'import sys\n'), ((23041, 23075), 'logging.log', 'logging.log', (['logging.INFO', 'message'], {}), '(logging.INFO, message)\n', (23052, 23075), False, 'import logging\n'), ((14379, 14390), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14388, 14390), False, 'import os\n'), ((2749, 2811), 'os.path.join', 'os.path.join', (['input_bucket', 'subject_id', 'f"""{subject_id}.nii.gz"""'], {}), "(input_bucket, subject_id, f'{subject_id}.nii.gz')\n", (2761, 2811), False, 'import os\n'), ((4123, 4158), 'os.path.join', 'os.path.join', (['bucket', 'metadata_path'], {}), '(bucket, metadata_path)\n', (4135, 4158), False, 'import os\n'), ((20881, 20915), 'logging.log', 'logging.log', (['logging.INFO', 'message'], {}), '(logging.INFO, message)\n', (20892, 20915), False, 'import logging\n'), ((2566, 2605), 'logging.log', 'logging.log', (['logging.INFO', 'f"""#@# {key}"""'], {}), "(logging.INFO, f'#@# {key}')\n", (2577, 2605), False, 'import logging\n'), ((2875, 2922), 'os.path.join', 'os.path.join', (['input_bucket', 'subject_id', '"""dicom"""'], {}), "(input_bucket, subject_id, 'dicom')\n", (2887, 2922), False, 'import os\n'), ((3249, 3271), 'os.path.dirname', 
'os.path.dirname', (['image'], {}), '(image)\n', (3264, 3271), False, 'import os\n'), ((20961, 21066), 'logging.log', 'logging.log', (['logging.INFO', '"""Nonexistent images were skipped, continuing to submit existing images..."""'], {}), "(logging.INFO,\n 'Nonexistent images were skipped, continuing to submit existing images...')\n", (20972, 21066), False, 'import logging\n'), ((21552, 21569), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (21560, 21569), False, 'import sys\n'), ((22535, 22569), 'logging.log', 'logging.log', (['logging.INFO', 'message'], {}), '(logging.INFO, message)\n', (22546, 22569), False, 'import logging\n'), ((22590, 22605), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (22600, 22605), False, 'import time\n'), ((22658, 22671), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (22668, 22671), False, 'import time\n'), ((5456, 5503), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['scan_date', '"""%Y%m%d"""'], {}), "(scan_date, '%Y%m%d')\n", (5482, 5503), False, 'import datetime\n'), ((5506, 5550), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': "(365.25 * row['age'])"}), "(days=365.25 * row['age'])\n", (5524, 5550), False, 'import datetime\n'), ((5286, 5330), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': "(365.25 * row['age'])"}), "(days=365.25 * row['age'])\n", (5304, 5330), False, 'import datetime\n')]
|
import itertools
import os
import h5py
import numpy as np
from skimage import io, img_as_float32, img_as_uint, img_as_ubyte
from skimage.transform import rescale
class ims:
def __init__(self, file, ResolutionLevelLock=0, write=False, cache_location=None, mem_size=None, disk_size=2000, squeeze_output=True):
## mem_size = in gigabytes that remain FREE as cache fills
## disk_size = in gigabytes that remain FREE as cache fills
## NOTE: Caching is currently not implemented.
self.filePathComplete = file
self.write = write
self.open()
self.filePathBase = os.path.split(file)[0]
self.fileName = os.path.split(file)[1]
self.fileExtension = os.path.splitext(self.fileName)[1]
if cache_location is None and mem_size is None:
self.cache = None
else:
self.cache = True
self.cache_location = cache_location
self.disk_size = disk_size * 1e9 if disk_size is not None else None
self.mem_size = mem_size * 1e9 if mem_size is not None else None
self.memCache = {}
self.squeeze_output = squeeze_output
self.cacheFiles = []
self.metaData = {}
self.ResolutionLevelLock = ResolutionLevelLock
resolution_0 = self.dataset['ResolutionLevel 0']
time_point_0 = resolution_0['TimePoint 0']
channel_0 = time_point_0['Channel 0']
data = channel_0['Data']
self.ResolutionLevels = len(self.dataset)
self.TimePoints = len(resolution_0)
self.Channels = len(time_point_0)
self.resolution = (
round(
(self.read_numerical_dataset_attr('ExtMax2') - self.read_numerical_dataset_attr('ExtMin2'))
/ self.read_numerical_dataset_attr('Z'),
3),
round(
(self.read_numerical_dataset_attr('ExtMax1') - self.read_numerical_dataset_attr('ExtMin1'))
/ self.read_numerical_dataset_attr('Y'),
3),
round(
(self.read_numerical_dataset_attr('ExtMax0') - self.read_numerical_dataset_attr('ExtMin0'))
/ self.read_numerical_dataset_attr('X'),
3)
)
self.shape = (
self.TimePoints,
self.Channels,
int(self.read_attribute('DataSetInfo/Image', 'Z')),
int(self.read_attribute('DataSetInfo/Image', 'Y')),
int(self.read_attribute('DataSetInfo/Image', 'X'))
)
self.chunks = (1, 1, data.chunks[0], data.chunks[1], data.chunks[2])
self.dtype = data.dtype
self.shapeH5Array = data.shape
for r, t, c in itertools.product(range(self.ResolutionLevels), range(self.TimePoints),
range(self.Channels)):
location_attr = self.location_generator(r, t, c, data='attrib')
location_data = self.location_generator(r, t, c, data='data')
# Collect attribute info
self.metaData[r, t, c, 'shape'] = (
1,
1,
int(self.read_attribute(location_attr, 'ImageSizeZ')),
int(self.read_attribute(location_attr, 'ImageSizeY')),
int(self.read_attribute(location_attr, 'ImageSizeX'))
)
self.metaData[r, t, c, 'resolution'] = tuple(
[round(float((origShape / newShape) * origRes), 3) for origRes, origShape, newShape in
zip(self.resolution, self.shape[-3:], self.metaData[r, t, c, 'shape'][-3:])]
)
self.metaData[r, t, c, 'HistogramMax'] = int(float(self.read_attribute(location_attr, 'HistogramMax')))
self.metaData[r, t, c, 'HistogramMin'] = int(float(self.read_attribute(location_attr, 'HistogramMin')))
# Collect dataset info
self.metaData[r, t, c, 'chunks'] = (
1, 1, self.hf[location_data].chunks[0], self.hf[location_data].chunks[1], self.hf[location_data].chunks[2])
self.metaData[r, t, c, 'shapeH5Array'] = self.hf[location_data].shape
self.metaData[r, t, c, 'dtype'] = self.hf[location_data].dtype
if isinstance(self.ResolutionLevelLock, int):
self.change_resolution_lock(self.ResolutionLevelLock)
def change_resolution_lock(self,ResolutionLevelLock):
## Pull information from the only required dataset at each resolution
## which is time_point=0, channel=0
self.ResolutionLevelLock = ResolutionLevelLock
self.shape = (
self.TimePoints,
self.Channels,
self.metaData[self.ResolutionLevelLock, 0, 0, 'shape'][-3],
self.metaData[self.ResolutionLevelLock, 0, 0, 'shape'][-2],
self.metaData[self.ResolutionLevelLock, 0, 0, 'shape'][-1]
)
self.ndim = len(self.shape)
self.chunks = self.metaData[self.ResolutionLevelLock, 0, 0, 'chunks']
self.shapeH5Array = self.metaData[self.ResolutionLevelLock, 0, 0, 'shapeH5Array']
self.resolution = self.metaData[self.ResolutionLevelLock, 0, 0, 'resolution']
self.dtype = self.metaData[self.ResolutionLevelLock, 0, 0, 'dtype']
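    # For example (illustrative): vol.change_resolution_lock(2) re-points shape,
    # chunks, resolution and dtype at resolution level 2, so subsequent 5D slices
    # read from that level.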
# def __enter__(self):
# print('Opening file: {}'.format(self.filePathComplete))
# self.hf = h5py.File(self.filePathComplete, 'r')
# self.dataset = self.hf['DataSet']
# def __exit__(self, type, value, traceback):
# ## Implement flush?
# self.hf.close()
# self.hf = None
def open(self):
if self.write == False:
print('Opening readonly file: {} \n'.format(self.filePathComplete))
self.hf = h5py.File(self.filePathComplete, 'r', swmr=True)
self.dataset = self.hf['DataSet']
# print('OPENED file: {} \n'.format(self.filePathComplete))
elif self.write == True:
print('Opening writeable file: {} \n'.format(self.filePathComplete))
self.hf = h5py.File(self.filePathComplete, 'a', swmr=True)
self.dataset = self.hf['DataSet']
# print('OPENED file: {} \n'.format(self.filePathComplete))
def __del__(self):
self.close()
    def close(self):
        ## Implement flush?
        if self.hf is not None:
            if self.write == True:
                print('Flushing Buffers to Disk')
                self.hf.flush()
            print('Closing file: {} \n'.format(self.filePathComplete))
            self.hf.close()
        self.hf = None
        self.dataset = None
# print('CLOSED file: {} \n'.format(self.filePathComplete))
def __getitem__(self, key):
"""
All ims class objects are represented as shape (TCZYX)
An integer only slice will return the entire timepoint (T) data as a numpy array
Any other variation on slice will be coerced to 5 dimensions and
extract that array
        If a 6th dimension is present in the slice, dim[0] is assumed to be the resolutionLevel;
        this will be used when choosing which array to extract. Otherwise ResolutionLevelLock
        will be obeyed. If ResolutionLevelLock is None, the default resolution is 0 (full-res)
        and a slice of 5 or fewer dimensions will extract information from resolutionLevel 0.
        ResolutionLevelLock is used to focus on a specific resolution level.
        This option enables 5D slicing to lock on to a specified resolution level.
"""
res, key = self.transform_key(key)
slice_returned = self.get_slice(
r=res if res is not None else 0, # Force ResolutionLock of None to be 0 when slicing
t=self.slice_fixer(key[0], 't', res=res),
c=self.slice_fixer(key[1], 'c', res=res),
z=self.slice_fixer(key[2], 'z', res=res),
y=self.slice_fixer(key[3], 'y', res=res),
x=self.slice_fixer(key[4], 'x', res=res)
)
return slice_returned
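    # --- Hedged usage sketch (illustrative only; the file name is a placeholder) ---
    # Slicing follows the (T, C, Z, Y, X) convention documented above; a 6th
    # leading index selects the resolution level explicitly.
    #
    #   vol = ims('example.ims')
    #   plane = vol[0, 0, 10, :, :]              # T=0, C=0, Z=10 at the locked/default resolution
    #   crop  = vol[2, 0, 0, :, 0:512, 0:512]    # resolution level 2, T=0, C=0, all Z, 512x512 YX crop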
def __setitem__(self,key,newValue):
if self.write == False:
print("""
IMS File can not be written to.
imsClass.write = False.
""")
            return  # refuse to write instead of falling through to set_slice
res, key = self.transform_key(key)
self.set_slice(
r=res if res is not None else 0, # Force ResolutionLock of None to be 0 when slicing
t=self.slice_fixer(key[0], 't', res=res),
c=self.slice_fixer(key[1], 'c', res=res),
z=self.slice_fixer(key[2], 'z', res=res),
y=self.slice_fixer(key[3], 'y', res=res),
x=self.slice_fixer(key[4], 'x', res=res),
newData=newValue
)
def transform_key(self,key):
res = self.ResolutionLevelLock
if not isinstance(key, slice) and not isinstance(key, int) and len(key) == 6:
res = key[0]
if res >= self.ResolutionLevels:
raise ValueError('Layer is larger than the number of ResolutionLevels')
key = tuple((x for x in key[1::]))
# All slices will be converted to 5 dims and placed into a tuple
if isinstance(key, slice):
key = [key]
if isinstance(key, int):
key = [slice(key)]
# Convert int/slice mix to a tuple of slices
elif isinstance(key, tuple):
key = tuple((slice(x) if isinstance(x, int) else x for x in key))
key = list(key)
while len(key) < 5:
key.append(slice(None))
key = tuple(key)
return res,key
def read_numerical_dataset_attr(self, attrib):
return float(self.read_attribute('DataSetInfo/Image', attrib))
def slice_fixer(self, slice_object, dim, res):
"""
        Converts slice.stop == None to the original image dims
dim = dimension. should be str: r,t,c,z,y,x
Always returns a fully filled slice object (ie NO None)
Negative slice values are not implemented yet self[:-5]
Slicing with lists (fancy) is not implemented yet self[[1,2,3]]
"""
if res is None:
res = 0
dims = {'r': self.ResolutionLevels,
't': self.TimePoints,
'c': self.Channels,
'z': self.metaData[(res, 0, 0, 'shape')][-3],
'y': self.metaData[(res, 0, 0, 'shape')][-2],
'x': self.metaData[(res, 0, 0, 'shape')][-1]
}
if (slice_object.stop is not None) and (slice_object.stop > dims[dim]):
            raise ValueError('The specified stop dimension "{}" is larger than the dimensions of the \
            original image'.format(dim))
        if (slice_object.start is not None) and (slice_object.start > dims[dim]):
            raise ValueError('The specified start dimension "{}" is larger than the dimensions of the \
            original image'.format(dim))
if isinstance(slice_object.stop, int) and slice_object.start == None and slice_object.step == None:
return slice(
slice_object.stop,
slice_object.stop + 1,
1 if slice_object.step is None else slice_object.step
)
if slice_object == slice(None):
return slice(0, dims[dim], 1)
if slice_object.step is None:
slice_object = slice(slice_object.start, slice_object.stop, 1)
if slice_object.stop is None:
slice_object = slice(
slice_object.start,
dims[dim],
slice_object.step
)
# TODO: Need to reevaluate if this last statement is still required
if isinstance(slice_object.stop, int) and slice_object.start is None:
slice_object = slice(
max(0, slice_object.stop - 1),
slice_object.stop,
slice_object.step
)
return slice_object
@staticmethod
def location_generator(r, t, c, data='data'):
"""
Given R, T, C, this function will generate a path to data in an imaris file
default data == 'data' the path will reference with array of data
        if data == 'attrib' the path will reference the channel location where attributes are stored
"""
location = 'DataSet/ResolutionLevel {}/TimePoint {}/Channel {}'.format(r, t, c)
if data == 'data':
location = location + '/Data'
return location
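    # For example (derived from the format string above):
    #   location_generator(1, 0, 2)                  -> 'DataSet/ResolutionLevel 1/TimePoint 0/Channel 2/Data'
    #   location_generator(1, 0, 2, data='attrib')   -> 'DataSet/ResolutionLevel 1/TimePoint 0/Channel 2'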
def read_attribute(self, location, attrib):
"""
Location should be specified as a path: for example
'DataSet/ResolutionLevel 0/TimePoint 0/Channel 1'
attrib is a string that defines the attribute to extract: for example
'ImageSizeX'
"""
return str(self.hf[location].attrs[attrib], encoding='ascii')
def get_slice(self, r, t, c, z, y, x):
"""
        IMS stores 3D datasets ONLY with Resolution, Time, and Color as a 'directory'
        structure within the HDF5 file. Thus, data access can only happen across dims XYZ
        for a specific RTC.
"""
incomingSlices = (r,t,c,z,y,x)
t_size = list(range(self.TimePoints)[t])
c_size = list(range(self.Channels)[c])
z_size = len(range(self.metaData[(r, 0, 0, 'shape')][-3])[z])
y_size = len(range(self.metaData[(r, 0, 0, 'shape')][-2])[y])
x_size = len(range(self.metaData[(r, 0, 0, 'shape')][-1])[x])
output_array = np.zeros((len(t_size), len(c_size), z_size, y_size, x_size), dtype=self.dtype)
for idxt, t in enumerate(t_size):
for idxc, c in enumerate(c_size):
## Below method is faster than all others tried
d_set_string = self.location_generator(r, t, c, data='data')
self.hf[d_set_string].read_direct(output_array, np.s_[z, y, x], np.s_[idxt, idxc, :, :, :])
# with h5py.File(self.filePathComplete, 'r') as hf:
# for idxt, t in enumerate(t_size):
# for idxc, c in enumerate(c_size):
# # Old method below
# d_set_string = self.location_generator(r, t, c, data='data')
# output_array[idxt, idxc, :, :, :] = hf[d_set_string][z, y, x]
"""
The return statements can provide some specific use cases for when the
class is providing data to Napari.
        Currently, a custom print statement provides visual feedback that
data are loading and what specific data is requested / returned
The napari_imaris_loader currently hard codes os.environ["NAPARI_ASYNC"] == '1'
"""
# if "NAPARI_ASYNC" in os.environ and os.environ["NAPARI_ASYNC"] == '1':
# # output_array = np.squeeze(output_array)
# print('Slices Requested: {} / Shape returned: {} \n'.format(incomingSlices,output_array.shape))
# return output_array
# # elif "NAPARI_OCTREE" in os.environ and os.environ["NAPARI_OCTREE"] == '1':
# # return output_array
# else:
if self.squeeze_output:
return np.squeeze(output_array)
else:
return output_array
def set_slice(self, r, t, c, z, y, x, newData):
"""
        IMS stores 3D datasets ONLY with Resolution, Time, and Color as a 'directory'
        structure within the HDF5 file. Thus, data access can only happen across dims XYZ
        for a specific RTC.
"""
# incomingSlices = (r,t,c,z,y,x)
t_size = list(range(self.TimePoints)[t])
c_size = list(range(self.Channels)[c])
z_size = len(range(self.metaData[(r, 0, 0, 'shape')][-3])[z])
y_size = len(range(self.metaData[(r, 0, 0, 'shape')][-2])[y])
x_size = len(range(self.metaData[(r, 0, 0, 'shape')][-1])[x])
# if isinstance(newData,int):
toWrite = np.zeros((len(t_size), len(c_size), z_size, y_size, x_size), dtype=self.dtype)
toWrite[:] = newData
        # Debug output left over from development (duplicate print removed)
        print(toWrite.shape)
        print(toWrite)
        print(t_size)
for idxt, t in enumerate(t_size):
for idxc, c in enumerate(c_size):
## Below method is faster than all others tried
d_set_string = self.location_generator(r, t, c, data='data')
self.hf[d_set_string].write_direct(toWrite, np.s_[idxt, idxc, :, :, :], np.s_[z, y, x])
def dtypeImgConvert(self, image):
"""
Convert any numpy image to the dtype of the original ims file
"""
if self.dtype == float or self.dtype == np.float32:
return img_as_float32(image)
elif self.dtype == np.uint16:
return img_as_uint(image)
elif self.dtype == np.uint8:
return img_as_ubyte(image)
def projection(self, projection_type,
time_point=None, channel=None, z=None, y=None, x=None, resolution_level=0):
""" Create a min or max projection across a specified (time_point,channel,z,y,x) space.
projection_type = STR: 'min', 'max', 'mean',
time_point = INT,
channel = INT,
        z = None or (zStart, zStop),
        y = None or (yStart, yStop),
        x = None or (xStart, xStop)
resolution_level = INT >=0 : 0 is the highest resolution
"""
assert projection_type == 'max' or projection_type == 'min' or projection_type == 'mean'
# Set defaults
        resolution_level = 0 if resolution_level is None else resolution_level
        time_point = 0 if time_point is None else time_point
        channel = 0 if channel is None else channel
if z is None:
z = range(self.metaData[(resolution_level,time_point,channel,'shape')][-3])
elif isinstance(z, tuple):
z = range(z[0], z[1], 1)
        if y is None:
            y = slice(0, self.metaData[(resolution_level,time_point,channel,'shape')][-2], 1)
        elif isinstance(y, tuple):
            y = slice(y[0], y[1], 1)
        if x is None:
            x = slice(0, self.metaData[(resolution_level,time_point,channel,'shape')][-1], 1)
        elif isinstance(x, tuple):
            x = slice(x[0], x[1], 1)
image = None
for num, z_layer in enumerate(z):
print('Reading layer ' + str(num) + ' of ' + str(z))
if image is None:
image = self[resolution_level, time_point, channel, z_layer, y, x]
print(image.dtype)
if projection_type == 'mean':
image = img_as_float32(image)
else:
imageNew = self[resolution_level, time_point, channel, z_layer, y, x]
                print('Incorporating layer ' + str(num) + ' of ' + str(z))
if projection_type == 'max':
image[:] = np.maximum(image,imageNew)
elif projection_type == 'min':
image[:] = np.minimum(image,imageNew)
elif projection_type == 'mean':
image[:] = image + img_as_float32(imageNew)
if projection_type == 'mean':
image = image / len(z)
image = np.clip(image, 0, 1)
image = self.dtypeImgConvert(image)
return image.squeeze()
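    # --- Hedged usage sketch (illustrative only; the file name is a placeholder) ---
    #   vol = ims('example.ims')
    #   mip = vol.projection('max', time_point=0, channel=0, z=(0, 50))  # max projection over 50 z-layers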
def get_Volume_At_Specific_Resolution(
self, output_resolution=(100, 100, 100), time_point=0, channel=0, anti_aliasing=True
):
"""
This function extracts a time_point and channel at a specific resolution.
The function extracts the whole volume at the highest resolution_level without
going below the designated output_resolution. It then resizes to the volume
to the specified resolution by using the skimage rescale function.
The option to turn off anti_aliasing during skimage.rescale (anti_aliasing=False) is provided.
anti_aliasing can be very time consuming when extracting large resolutions.
Everything is completed in RAM, very high resolutions may cause a crash.
"""
# Find ResolutionLevel that is closest in size but larger
resolutionLevelToExtract = 0
for res in range(self.ResolutionLevels):
currentResolution = self.metaData[res,time_point,channel,'resolution']
resCompare = [x <= y for x,y in zip(currentResolution,output_resolution)]
resEqual = [x == y for x,y in zip(currentResolution,self.resolution)]
if all(resCompare) == True or (all(resCompare) == False and any(resEqual) == True):
resolutionLevelToExtract = res
workingVolumeResolution = self.metaData[resolutionLevelToExtract,time_point,channel,'resolution']
print('Reading ResolutionLevel {}'.format(resolutionLevelToExtract))
workingVolume = self.get_Resolution_Level(resolutionLevelToExtract,time_point=time_point,channel=channel)
print('Resizing volume from resolution in microns {} to {}'.format(str(workingVolumeResolution), str(output_resolution)))
rescaleFactor = tuple([round(x/y,5) for x,y in zip(workingVolumeResolution,output_resolution)])
print('Rescale Factor = {}'.format(rescaleFactor))
workingVolume = img_as_float32(workingVolume)
workingVolume = rescale(workingVolume, rescaleFactor, anti_aliasing=anti_aliasing)
return self.dtypeImgConvert(workingVolume)
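    # --- Hedged usage sketch (illustrative only; file name and voxel size are placeholders) ---
    #   vol = ims('example.ims')
    #   iso = vol.get_Volume_At_Specific_Resolution(output_resolution=(10, 10, 10),
    #                                                time_point=0, channel=0)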
def get_Resolution_Level(self, resolution_level, time_point=0, channel=0):
return self[resolution_level, time_point, channel, :, :, :]
@staticmethod
def image_file_namer(resolution, time_point, channel, z_layer, prefix='', ext='.tif'):
if ext[0] != '.':
ext = '.' + ext
if prefix == '':
form = '{}r{}_t{}_c{}_z{}{}'
else:
form = '{}_r{}_t{}_c{}_z{}{}'
return form.format(
prefix,
str(resolution).zfill(2),
str(time_point).zfill(2),
str(channel).zfill(2),
str(z_layer).zfill(4),
ext
)
def save_Tiff_Series(
self, location=None, time_points=(), channels=(), resolutionLevel=0, cropYX=(), overwrite=False
):
assert isinstance(channels,tuple)
assert isinstance(resolutionLevel,int)
assert isinstance(cropYX,tuple)
assert isinstance(overwrite,bool)
assert (location is None) or isinstance(location,str)
if location is None:
location = os.path.join(self.filePathBase,'{}_tiffSeries'.format(self.fileName))
if os.path.exists(location) == False:
os.makedirs(location, exist_ok=False)
elif os.path.exists(location) == True and overwrite == True:
os.makedirs(location, exist_ok=True)
elif os.path.exists(location) == True and overwrite == False:
raise Exception("tiffSeries path already exists: If you want to overwite the existing data, designate overwrite=True")
if time_points == ():
time_points = tuple(range(self.TimePoints))
if channels == ():
channels = tuple(range(self.Channels))
if cropYX == ():
cropYX = (
0, self.metaData[(resolutionLevel, 0, 0, 'shape')][-2],
0,self.metaData[(resolutionLevel, 0, 0, 'shape')][-1]
)
failed = []
for time in time_points:
for color in channels:
for layer in range(self.metaData[(resolutionLevel,0,0,'shape')][-3]):
fileName = os.path.join(location,self.image_file_namer(resolutionLevel,time,color,layer,prefix='', ext='.tif'))
if os.path.exists(fileName):
                        print('Skipping {} because it already exists'.format(fileName))
continue
try:
array = self[resolutionLevel,time,color,layer,cropYX[0]:cropYX[1],cropYX[2]:cropYX[3]]
                    except Exception:
failed.append((resolutionLevel,time,color,layer,cropYX[0],cropYX[1],cropYX[2],cropYX[3]))
continue
print('Saving: {}'.format(fileName))
io.imsave(fileName, array, check_contrast=False)
if len(failed) > 0:
print('Failed to extract the following layers:')
print(failed)
else:
print('All layers have been extracted')
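    # --- Hedged usage sketch (illustrative only) ---
    #   ims('example.ims').save_Tiff_Series(channels=(0,), resolutionLevel=1, overwrite=True)
    #   # writes one tiff per z-layer into <ims file>_tiffSeries next to the source file by default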
def save_multilayer_tiff_stack(self, location=None, time_point=0, channel=0, resolution_level=0):
"""
Extract 1 time point, 1 channel, all (z,y,x) for a specified resolution level.
location - path to the output tiff file. If not provided, file is saved next to the .ims file
Output: tiff stack (all z layers in one file)
"""
if not location:
location = os.path.join(self.filePathBase, '{}_multilayer_stack.tif'.format(self.fileName))
        elif not isinstance(location, str) or location == '':
raise TypeError('location must be a nonempty string')
if os.path.exists(location):
raise OSError('file at specified location already exists')
else:
            os.makedirs(os.path.dirname(location) or '.', exist_ok=True)  # ensure the output file's parent directory exists
try:
time_point = int(time_point)
channel = int(channel)
resolution_level = int(resolution_level)
except TypeError:
raise TypeError('time_point, channel and resolution_level must be convertable to int')
try:
output_array = self[resolution_level, time_point, channel, :, :, :]
        except Exception: # try extracting z layers one-by-one
output_array_shape = self.metaData[(resolution_level, time_point, channel, 'shape')][-3:]
output_array = np.zeros([*output_array_shape], dtype=self.dtype) # layers with errors will be left as 0
for layer in range(output_array_shape[0]):
try:
z_plane = self[resolution_level, time_point, channel, layer, :, :]
output_array[layer, :, :] = z_plane
except Exception as e:
print(f'Failed to extract layer {layer}: {e}')
io.imsave(location, output_array, check_contrast=False)
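# --- Hedged end-to-end sketch (illustrative only; 'example.ims' is a placeholder path) ---
#   vol = ims('example.ims')                     # read-only handle, resolution level locked to 0
#   print(vol.shape, vol.dtype, vol.resolution)  # (T, C, Z, Y, X), numpy dtype, (z, y, x) voxel size in microns
#   stack = vol[0, 0, :, :, :]                   # full volume for time point 0, channel 0
#   vol.close()
#
#   writable = ims('example.ims', write=True)    # append mode enables __setitem__
#   writable[0, 0, 0, :, :] = 0                  # zero out a single z-plane in the HDF5 dataset
#   writable.close()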
|
[
"h5py.File",
"numpy.minimum",
"skimage.img_as_float32",
"os.makedirs",
"skimage.transform.rescale",
"os.path.basename",
"skimage.img_as_uint",
"numpy.maximum",
"os.path.exists",
"numpy.zeros",
"numpy.clip",
"skimage.img_as_ubyte",
"os.path.splitext",
"numpy.squeeze",
"os.path.split",
"skimage.io.imsave"
] |
[((21471, 21500), 'skimage.img_as_float32', 'img_as_float32', (['workingVolume'], {}), '(workingVolume)\n', (21485, 21500), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((21525, 21591), 'skimage.transform.rescale', 'rescale', (['workingVolume', 'rescaleFactor'], {'anti_aliasing': 'anti_aliasing'}), '(workingVolume, rescaleFactor, anti_aliasing=anti_aliasing)\n', (21532, 21591), False, 'from skimage.transform import rescale\n'), ((25334, 25358), 'os.path.exists', 'os.path.exists', (['location'], {}), '(location)\n', (25348, 25358), False, 'import os\n'), ((26481, 26536), 'skimage.io.imsave', 'io.imsave', (['location', 'output_array'], {'check_contrast': '(False)'}), '(location, output_array, check_contrast=False)\n', (26490, 26536), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((639, 658), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (652, 658), False, 'import os\n'), ((686, 705), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (699, 705), False, 'import os\n'), ((738, 769), 'os.path.splitext', 'os.path.splitext', (['self.fileName'], {}), '(self.fileName)\n', (754, 769), False, 'import os\n'), ((5778, 5826), 'h5py.File', 'h5py.File', (['self.filePathComplete', '"""r"""'], {'swmr': '(True)'}), "(self.filePathComplete, 'r', swmr=True)\n", (5787, 5826), False, 'import h5py\n'), ((15314, 15338), 'numpy.squeeze', 'np.squeeze', (['output_array'], {}), '(output_array)\n', (15324, 15338), True, 'import numpy as np\n'), ((16823, 16844), 'skimage.img_as_float32', 'img_as_float32', (['image'], {}), '(image)\n', (16837, 16844), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((19416, 19436), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (19423, 19436), True, 'import numpy as np\n'), ((22814, 22838), 'os.path.exists', 'os.path.exists', (['location'], {}), '(location)\n', (22828, 22838), False, 'import os\n'), ((22861, 22898), 'os.makedirs', 'os.makedirs', (['location'], {'exist_ok': '(False)'}), '(location, exist_ok=False)\n', (22872, 22898), False, 'import os\n'), ((6081, 6129), 'h5py.File', 'h5py.File', (['self.filePathComplete', '"""a"""'], {'swmr': '(True)'}), "(self.filePathComplete, 'a', swmr=True)\n", (6090, 6129), False, 'import h5py\n'), ((16902, 16920), 'skimage.img_as_uint', 'img_as_uint', (['image'], {}), '(image)\n', (16913, 16920), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((22980, 23016), 'os.makedirs', 'os.makedirs', (['location'], {'exist_ok': '(True)'}), '(location, exist_ok=True)\n', (22991, 23016), False, 'import os\n'), ((25469, 25495), 'os.path.basename', 'os.path.basename', (['location'], {}), '(location)\n', (25485, 25495), False, 'import os\n'), ((26057, 26106), 'numpy.zeros', 'np.zeros', (['[*output_array_shape]'], {'dtype': 'self.dtype'}), '([*output_array_shape], dtype=self.dtype)\n', (26065, 26106), True, 'import numpy as np\n'), ((16977, 16996), 'skimage.img_as_ubyte', 'img_as_ubyte', (['image'], {}), '(image)\n', (16989, 16996), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((18799, 18820), 'skimage.img_as_float32', 'img_as_float32', (['image'], {}), '(image)\n', (18813, 18820), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((19078, 19105), 'numpy.maximum', 'np.maximum', (['image', 'imageNew'], {}), '(image, imageNew)\n', (19088, 19105), True, 'import numpy as np\n'), ((22912, 22936), 'os.path.exists', 
'os.path.exists', (['location'], {}), '(location)\n', (22926, 22936), False, 'import os\n'), ((23928, 23952), 'os.path.exists', 'os.path.exists', (['fileName'], {}), '(fileName)\n', (23942, 23952), False, 'import os\n'), ((24463, 24511), 'skimage.io.imsave', 'io.imsave', (['fileName', 'array'], {'check_contrast': '(False)'}), '(fileName, array, check_contrast=False)\n', (24472, 24511), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n'), ((19183, 19210), 'numpy.minimum', 'np.minimum', (['image', 'imageNew'], {}), '(image, imageNew)\n', (19193, 19210), True, 'import numpy as np\n'), ((23030, 23054), 'os.path.exists', 'os.path.exists', (['location'], {}), '(location)\n', (23044, 23054), False, 'import os\n'), ((19297, 19321), 'skimage.img_as_float32', 'img_as_float32', (['imageNew'], {}), '(imageNew)\n', (19311, 19321), False, 'from skimage import io, img_as_float32, img_as_uint, img_as_ubyte\n')]
|
import numpy as np
import math
from icrestimator import ICREstimator
global tolerance
tolerance = 0.05
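# Helper that builds an ICREstimator for a chassis with one steerable module per
# list entry: presumably `alphas` are the modules' angular positions, `ls` their
# distances from the chassis centre, and `bs` the wheel offsets (inferred from
# how the tests below use them; see ICREstimator for the authoritative meaning).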
def init_icre(alphas, ls, bs):
alphas = np.array(alphas)
ls = np.array(ls)
bs = np.array(bs)
epsilon = np.zeros(shape=(3, 1))
icre = ICREstimator(epsilon, alphas, ls, bs)
return icre
def test_estimate_lambda():
icre = init_icre([0, math.pi / 2, math.pi], [1, 1, 1], [0, 0, 0])
q = np.zeros(shape=(3,)) # ICR on the robot's origin
desired_lmda = np.array([0, 0, 1])
lmda_e = icre.estimate_lmda(q)
assert np.allclose(desired_lmda, lmda_e.T)
q = np.array([math.pi / 4, 0, -math.pi / 4])
icr = np.array([0, -1, 1])
desired_lmda = icr * 1/np.linalg.norm(icr)
lmda_e = icre.estimate_lmda(q)
assert np.allclose(desired_lmda, lmda_e.T, atol=tolerance)
# driving along the y axis
q = np.array([0, math.pi/2, 0])
# so the ICR should be on the U axis
desired_lmda = np.array([1, 0, 0])
lmda_e = icre.estimate_lmda(q)
assert np.allclose(desired_lmda, lmda_e.T, atol=tolerance)
# A square robot with 4 wheels, one at each corner, the difference between each
# alpha value is pi/4 and the distance from the centre of the robot to each module
# is the same
alpha = math.pi / 4
alphas = [alpha, math.pi - alpha, -math.pi + alpha, -alpha]
icre = init_icre(alphas, [1] * 4, [0, 0, 0, 0])
# test case from the simulator
q = np.array([6.429e-04, -6.429e-04, 3.1422, 3.1409])
desired_lmda = np.array([0, 0, 1])
lmda_e = icre.estimate_lmda(q)
assert np.allclose(desired_lmda, lmda_e.T, atol=tolerance)
# ICR on a wheel, should be a singularity
q = np.array([-math.pi / 4, 0, math.pi / 4, 0])
desired_lmda = np.array([-0.5, 0.5, 1 / math.sqrt(2)])
lmda_e = icre.estimate_lmda(q)
assert np.allclose(desired_lmda, lmda_e.T, atol=tolerance)
assert icre.handle_singularities(lmda_e)
# Another square robot with side length of 2 to make calculations simpler
alpha = math.pi / 4
alphas = [alpha, math.pi - alpha, -math.pi + alpha, -alpha]
icre = init_icre(alphas, [math.sqrt(2)] * 4, [0] * 4)
# ICR on one side of the robot frame
q = np.array(
[
math.acos(6 / (2 * math.sqrt(10))),
math.pi / 4,
-math.pi / 4,
-math.acos(6 / (2 * math.sqrt(10))),
]
)
icr = np.array([0, -1, 1])
desired_lmda = icr * 1 / np.linalg.norm(icr)
lmda_e = icre.estimate_lmda(q)
print(f"estimated ICR = {lmda_e.T}")
assert np.allclose(desired_lmda, lmda_e.T, atol=tolerance)
# # afaik this is the worst case scenario, 2 wheel singularities and a co-linear singularity
# q = np.array([-math.pi/4, math.pi/4, math.pi/4, -math.pi/4])
# icr = np.array([0.5, 0.5, 1/math.sqrt(2)])
# desired_lmda = icr * 1 / np.linalg.norm(icr)
# lmda_e = icre.estimate_lmda(q)
# # print(f"norm of estimate - {np.linalg.norm(lmda_e)}")
# assert np.allclose(desired_lmda, lmda_e.T, atol=tolerance)
def test_joint_space_conversion():
icre = init_icre([math.pi/4], [1], [0])
lmda = np.array([0, 0, -1]).reshape(-1, 1)
beta_target = np.array([0])
assert np.allclose(beta_target, icre.S(lmda))
lmda = np.array([0, -1, 0]).reshape(-1, 1)
beta_target = np.array([math.pi/4])
assert np.allclose(beta_target, icre.S(lmda))
# square robot with side length of 2 to make calculations simpler
alpha = math.pi / 4
alphas = [alpha, math.pi - alpha, -math.pi + alpha, -alpha]
icre = init_icre(alphas, [math.sqrt(2)] * 4, [0] * 4)
icr = np.array([-1, 0, 1])
lmda = icr * 1 / np.linalg.norm(icr)
beta_target = np.array(
[
-math.acos(6 / (2 * math.sqrt(10))),
-math.pi / 4,
math.pi / 4,
math.acos(6 / (2 * math.sqrt(10))),
]
)
assert np.allclose(beta_target, icre.S(lmda), atol=tolerance)
def test_solve():
# for now, check only for runtime errors until compute_derivatives works
icre = init_icre([math.pi/4, -math.pi/4, math.pi], [1, 1, 1], [0, 0, 0])
lmda = np.array([0, -1, 0]).reshape(-1, 1)
S_u = np.array([1/math.sqrt(2), 1/math.sqrt(2), 0])
S_v = np.array([0, 0, 1])
q = np.array([0, 0, 0])
icre.solve(S_u, S_v, q, lmda)
def test_compute_derivatives():
# for now, check only for runtime errors
icre = init_icre([math.pi/4, -math.pi/4, math.pi], [1, 1, 1], [0, 0, 0])
lmda = np.array([0, 0, -1]).reshape(-1, 1)
S_u, S_v = icre.compute_derivatives(lmda)
def test_handle_singularities():
icre = init_icre([0, math.pi/2, math.pi], [1, 1, 1], [0, 0, 0])
# icr on wheel 0 on the R^2 plane
icr = np.array([1, 0, 1]).reshape(-1, 1)
lmda = icr * 1/np.linalg.norm(icr)
singularity, wheel_number = icre.handle_singularities(lmda)
assert singularity
    assert wheel_number == 0
icr = np.array([100, 0, 1]).reshape(-1, 1)
lmda = icr * 1/np.linalg.norm(icr)
singularity, wheel_number = icre.handle_singularities(lmda)
assert not singularity
assert wheel_number is None
def test_update_parameters():
icre = init_icre([0, math.pi/2, math.pi], [1, 1, 1], [0, 0, 0])
q = np.zeros(shape=(3,)) # ICR on the robot's origin
desired_lmda = np.array([0, 0, 1])
u, v = -0.1, -0.1 # ICR estimate too negative
lmda_estimate = np.array([u, v,
math.sqrt(1-np.linalg.norm([u, v]))]).reshape(-1, 1)
delta_u, delta_v = 0.1, 0.1
lmda_t, worse = icre.update_parameters(lmda_estimate, delta_u, delta_v,
q)
assert np.allclose(lmda_t.T, desired_lmda)
assert not worse
delta_u, delta_v = -0.1, -0.1
lmda_t, worse = icre.update_parameters(lmda_estimate, delta_u, delta_v,
q)
assert worse
def test_select_starting_points():
icre = init_icre([0, math.pi/2, math.pi], [1, 1, 1], [0, 0, 0])
q = np.zeros(shape=(3,)) # ICR on the robot's origin
desired_lmda = np.array([0, 0, -1])
starting_points = icre.select_starting_points(q)
for sp in starting_points:
assert np.allclose(desired_lmda[:2], sp[:2])
q = np.array([math.pi / 4, 0, -math.pi / 4])
icr = np.array([0, -1, 1]).reshape(-1, 1)
desired_lmda = icr * 1/np.linalg.norm(icr)
starting_points = icre.select_starting_points(q)
assert np.allclose(desired_lmda[:2], starting_points[0][:2])
# should have unit norm
for sp in starting_points:
assert np.isclose(np.linalg.norm(sp), 1)
# driving along the y axis
q = np.array([0, math.pi/2, 0])
# so the ICR should be on the U axis
desired_lmda = np.array([1, 0, 0]).reshape(-1, 1)
starting_points = icre.select_starting_points(q)
assert np.allclose(desired_lmda[:2], (starting_points[0][:2]))
for sp in starting_points:
assert np.isclose(np.linalg.norm(sp), 1)
# A square robot with 4 wheels, one at each corner, the difference between each
# alpha value is pi/4 and the distance from the centre of the robot to each module
# is the same
# test case from the simulator
alpha = math.pi / 4
alphas = [alpha, math.pi - alpha, -math.pi + alpha, -alpha]
icre = init_icre(alphas, [1] * 4, [0] * 4)
q = np.array([6.429e-04, -6.429e-04, 3.1422, 3.1409])
desired_lmda = np.array([0, 0, 1])
sp = icre.select_starting_points(q)
close=[]
for p in sp:
close.append(np.allclose(desired_lmda, p.T, atol=tolerance))
assert any(close)
print(f"Close {close}")
# Another square robot with side length of 2 to make calculations simpler
alpha = math.pi / 4
alphas = [alpha, math.pi - alpha, -math.pi + alpha, -alpha]
icre = init_icre(alphas, [math.sqrt(2)] * 4, [0] * 4)
# Two wheels are pointing in the same direction (the lines between them are co-linear), the other
    # two perpendiculars meet at a point halfway between the first two wheels. - currently failing
q = np.array(
[
-math.acos(6 / (2 * math.sqrt(10))),
-math.pi / 4,
math.pi / 4,
math.acos(6 / (2 * math.sqrt(10))),
]
)
icr = np.array([-1, 0, 1])
desired_lmda = icr * 1 / np.linalg.norm(icr)
print(f"desired lmda = {desired_lmda}")
sp = icre.select_starting_points(q)
close = []
for p in sp:
close.append(np.allclose(desired_lmda, p.T, atol=tolerance))
assert any(close)
print(f"Close {close}")
def test_flip_wheel():
# S_lmda on robot origin
alpha = math.pi / 4 # 45 degrees
alphas = [alpha, math.pi - alpha, -math.pi + alpha, -alpha]
q = np.array([2 * math.pi, 7 * math.pi, math.pi / 2, math.pi])
icre = init_icre(alphas, [1] * 4, q)
S_lmda = np.array([0] * 4)
assert (
np.linalg.norm(icre.flip_wheel(q, S_lmda) - np.array([0, 0, math.pi / 2, 0]))
< tolerance
)
assert all(icre.flipped == [False, True, False, True])
q = np.array([-2 * math.pi, -7 * math.pi, -math.pi / 2, -math.pi])
S_lmda = np.array([0] * 4)
assert (
np.linalg.norm(icre.flip_wheel(q, S_lmda) - np.array([0, 0, -math.pi / 2, 0]))
< tolerance
)
assert all(icre.flipped == [False, True, False, True])
|
[
"icrestimator.ICREstimator",
"math.sqrt",
"numpy.allclose",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.array"
] |
[((150, 166), 'numpy.array', 'np.array', (['alphas'], {}), '(alphas)\n', (158, 166), True, 'import numpy as np\n'), ((176, 188), 'numpy.array', 'np.array', (['ls'], {}), '(ls)\n', (184, 188), True, 'import numpy as np\n'), ((198, 210), 'numpy.array', 'np.array', (['bs'], {}), '(bs)\n', (206, 210), True, 'import numpy as np\n'), ((225, 247), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, 1)'}), '(shape=(3, 1))\n', (233, 247), True, 'import numpy as np\n'), ((259, 296), 'icrestimator.ICREstimator', 'ICREstimator', (['epsilon', 'alphas', 'ls', 'bs'], {}), '(epsilon, alphas, ls, bs)\n', (271, 296), False, 'from icrestimator import ICREstimator\n'), ((421, 441), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (429, 441), True, 'import numpy as np\n'), ((490, 509), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (498, 509), True, 'import numpy as np\n'), ((556, 591), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'lmda_e.T'], {}), '(desired_lmda, lmda_e.T)\n', (567, 591), True, 'import numpy as np\n'), ((601, 641), 'numpy.array', 'np.array', (['[math.pi / 4, 0, -math.pi / 4]'], {}), '([math.pi / 4, 0, -math.pi / 4])\n', (609, 641), True, 'import numpy as np\n'), ((652, 672), 'numpy.array', 'np.array', (['[0, -1, 1]'], {}), '([0, -1, 1])\n', (660, 672), True, 'import numpy as np\n'), ((766, 817), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'lmda_e.T'], {'atol': 'tolerance'}), '(desired_lmda, lmda_e.T, atol=tolerance)\n', (777, 817), True, 'import numpy as np\n'), ((858, 887), 'numpy.array', 'np.array', (['[0, math.pi / 2, 0]'], {}), '([0, math.pi / 2, 0])\n', (866, 887), True, 'import numpy as np\n'), ((946, 965), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (954, 965), True, 'import numpy as np\n'), ((1012, 1063), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'lmda_e.T'], {'atol': 'tolerance'}), '(desired_lmda, lmda_e.T, atol=tolerance)\n', (1023, 1063), True, 'import numpy as np\n'), ((1438, 1487), 'numpy.array', 'np.array', (['[0.0006429, -0.0006429, 3.1422, 3.1409]'], {}), '([0.0006429, -0.0006429, 3.1422, 3.1409])\n', (1446, 1487), True, 'import numpy as np\n'), ((1507, 1526), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1515, 1526), True, 'import numpy as np\n'), ((1573, 1624), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'lmda_e.T'], {'atol': 'tolerance'}), '(desired_lmda, lmda_e.T, atol=tolerance)\n', (1584, 1624), True, 'import numpy as np\n'), ((1680, 1723), 'numpy.array', 'np.array', (['[-math.pi / 4, 0, math.pi / 4, 0]'], {}), '([-math.pi / 4, 0, math.pi / 4, 0])\n', (1688, 1723), True, 'import numpy as np\n'), ((1829, 1880), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'lmda_e.T'], {'atol': 'tolerance'}), '(desired_lmda, lmda_e.T, atol=tolerance)\n', (1840, 1880), True, 'import numpy as np\n'), ((2394, 2414), 'numpy.array', 'np.array', (['[0, -1, 1]'], {}), '([0, -1, 1])\n', (2402, 2414), True, 'import numpy as np\n'), ((2551, 2602), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'lmda_e.T'], {'atol': 'tolerance'}), '(desired_lmda, lmda_e.T, atol=tolerance)\n', (2562, 2602), True, 'import numpy as np\n'), ((3178, 3191), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3186, 3191), True, 'import numpy as np\n'), ((3307, 3330), 'numpy.array', 'np.array', (['[math.pi / 4]'], {}), '([math.pi / 4])\n', (3315, 3330), True, 'import numpy as np\n'), ((3607, 3627), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (3615, 3627), True, 'import numpy as np\n'), 
((4224, 4243), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4232, 4243), True, 'import numpy as np\n'), ((4252, 4271), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4260, 4271), True, 'import numpy as np\n'), ((5210, 5230), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (5218, 5230), True, 'import numpy as np\n'), ((5278, 5297), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (5286, 5297), True, 'import numpy as np\n'), ((5632, 5667), 'numpy.allclose', 'np.allclose', (['lmda_t.T', 'desired_lmda'], {}), '(lmda_t.T, desired_lmda)\n', (5643, 5667), True, 'import numpy as np\n'), ((5974, 5994), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (5982, 5994), True, 'import numpy as np\n'), ((6042, 6062), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (6050, 6062), True, 'import numpy as np\n'), ((6209, 6249), 'numpy.array', 'np.array', (['[math.pi / 4, 0, -math.pi / 4]'], {}), '([math.pi / 4, 0, -math.pi / 4])\n', (6217, 6249), True, 'import numpy as np\n'), ((6407, 6460), 'numpy.allclose', 'np.allclose', (['desired_lmda[:2]', 'starting_points[0][:2]'], {}), '(desired_lmda[:2], starting_points[0][:2])\n', (6418, 6460), True, 'import numpy as np\n'), ((6609, 6638), 'numpy.array', 'np.array', (['[0, math.pi / 2, 0]'], {}), '([0, math.pi / 2, 0])\n', (6617, 6638), True, 'import numpy as np\n'), ((6796, 6849), 'numpy.allclose', 'np.allclose', (['desired_lmda[:2]', 'starting_points[0][:2]'], {}), '(desired_lmda[:2], starting_points[0][:2])\n', (6807, 6849), True, 'import numpy as np\n'), ((7301, 7350), 'numpy.array', 'np.array', (['[0.0006429, -0.0006429, 3.1422, 3.1409]'], {}), '([0.0006429, -0.0006429, 3.1422, 3.1409])\n', (7309, 7350), True, 'import numpy as np\n'), ((7370, 7389), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (7378, 7389), True, 'import numpy as np\n'), ((8206, 8226), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (8214, 8226), True, 'import numpy as np\n'), ((8675, 8733), 'numpy.array', 'np.array', (['[2 * math.pi, 7 * math.pi, math.pi / 2, math.pi]'], {}), '([2 * math.pi, 7 * math.pi, math.pi / 2, math.pi])\n', (8683, 8733), True, 'import numpy as np\n'), ((8788, 8805), 'numpy.array', 'np.array', (['([0] * 4)'], {}), '([0] * 4)\n', (8796, 8805), True, 'import numpy as np\n'), ((8999, 9061), 'numpy.array', 'np.array', (['[-2 * math.pi, -7 * math.pi, -math.pi / 2, -math.pi]'], {}), '([-2 * math.pi, -7 * math.pi, -math.pi / 2, -math.pi])\n', (9007, 9061), True, 'import numpy as np\n'), ((9075, 9092), 'numpy.array', 'np.array', (['([0] * 4)'], {}), '([0] * 4)\n', (9083, 9092), True, 'import numpy as np\n'), ((700, 719), 'numpy.linalg.norm', 'np.linalg.norm', (['icr'], {}), '(icr)\n', (714, 719), True, 'import numpy as np\n'), ((2444, 2463), 'numpy.linalg.norm', 'np.linalg.norm', (['icr'], {}), '(icr)\n', (2458, 2463), True, 'import numpy as np\n'), ((3649, 3668), 'numpy.linalg.norm', 'np.linalg.norm', (['icr'], {}), '(icr)\n', (3663, 3668), True, 'import numpy as np\n'), ((4758, 4777), 'numpy.linalg.norm', 'np.linalg.norm', (['icr'], {}), '(icr)\n', (4772, 4777), True, 'import numpy as np\n'), ((4960, 4979), 'numpy.linalg.norm', 'np.linalg.norm', (['icr'], {}), '(icr)\n', (4974, 4979), True, 'import numpy as np\n'), ((6162, 6199), 'numpy.allclose', 'np.allclose', (['desired_lmda[:2]', 'sp[:2]'], {}), '(desired_lmda[:2], sp[:2])\n', (6173, 6199), True, 'import numpy as np\n'), ((6323, 6342), 'numpy.linalg.norm', 
'np.linalg.norm', (['icr'], {}), '(icr)\n', (6337, 6342), True, 'import numpy as np\n'), ((8256, 8275), 'numpy.linalg.norm', 'np.linalg.norm', (['icr'], {}), '(icr)\n', (8270, 8275), True, 'import numpy as np\n'), ((3124, 3144), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (3132, 3144), True, 'import numpy as np\n'), ((3253, 3273), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (3261, 3273), True, 'import numpy as np\n'), ((4122, 4142), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (4130, 4142), True, 'import numpy as np\n'), ((4472, 4492), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (4480, 4492), True, 'import numpy as np\n'), ((4704, 4723), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (4712, 4723), True, 'import numpy as np\n'), ((4904, 4925), 'numpy.array', 'np.array', (['[100, 0, 1]'], {}), '([100, 0, 1])\n', (4912, 4925), True, 'import numpy as np\n'), ((6260, 6280), 'numpy.array', 'np.array', (['[0, -1, 1]'], {}), '([0, -1, 1])\n', (6268, 6280), True, 'import numpy as np\n'), ((6546, 6564), 'numpy.linalg.norm', 'np.linalg.norm', (['sp'], {}), '(sp)\n', (6560, 6564), True, 'import numpy as np\n'), ((6697, 6716), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (6705, 6716), True, 'import numpy as np\n'), ((6909, 6927), 'numpy.linalg.norm', 'np.linalg.norm', (['sp'], {}), '(sp)\n', (6923, 6927), True, 'import numpy as np\n'), ((7481, 7527), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'p.T'], {'atol': 'tolerance'}), '(desired_lmda, p.T, atol=tolerance)\n', (7492, 7527), True, 'import numpy as np\n'), ((8413, 8459), 'numpy.allclose', 'np.allclose', (['desired_lmda', 'p.T'], {'atol': 'tolerance'}), '(desired_lmda, p.T, atol=tolerance)\n', (8424, 8459), True, 'import numpy as np\n'), ((1768, 1780), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1777, 1780), False, 'import math\n'), ((2123, 2135), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (2132, 2135), False, 'import math\n'), ((3568, 3580), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (3577, 3580), False, 'import math\n'), ((4180, 4192), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4189, 4192), False, 'import math\n'), ((4196, 4208), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4205, 4208), False, 'import math\n'), ((7776, 7788), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (7785, 7788), False, 'import math\n'), ((8871, 8903), 'numpy.array', 'np.array', (['[0, 0, math.pi / 2, 0]'], {}), '([0, 0, math.pi / 2, 0])\n', (8879, 8903), True, 'import numpy as np\n'), ((9158, 9191), 'numpy.array', 'np.array', (['[0, 0, -math.pi / 2, 0]'], {}), '([0, 0, -math.pi / 2, 0])\n', (9166, 9191), True, 'import numpy as np\n'), ((2251, 2264), 'math.sqrt', 'math.sqrt', (['(10)'], {}), '(10)\n', (2260, 2264), False, 'import math\n'), ((3838, 3851), 'math.sqrt', 'math.sqrt', (['(10)'], {}), '(10)\n', (3847, 3851), False, 'import math\n'), ((8163, 8176), 'math.sqrt', 'math.sqrt', (['(10)'], {}), '(10)\n', (8172, 8176), False, 'import math\n'), ((2351, 2364), 'math.sqrt', 'math.sqrt', (['(10)'], {}), '(10)\n', (2360, 2364), False, 'import math\n'), ((3739, 3752), 'math.sqrt', 'math.sqrt', (['(10)'], {}), '(10)\n', (3748, 3752), False, 'import math\n'), ((5426, 5448), 'numpy.linalg.norm', 'np.linalg.norm', (['[u, v]'], {}), '([u, v])\n', (5440, 5448), True, 'import numpy as np\n'), ((8064, 8077), 'math.sqrt', 'math.sqrt', (['(10)'], {}), '(10)\n', (8073, 8077), False, 'import math\n')]
|
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib.pyplot as plt
import numpy as np
import argparse
import re
import os
# Describe arguments and options.
parser = argparse.ArgumentParser(description='Create ROC curves based on StegExpose reports.')
parser.add_argument("reports_dir", help="Path to the StegExpose reports.")
# Parse arguments.
args = parser.parse_args()
def get_instances_from_report(report_file_path):
"""
Parse StegExpose report and return a list of (class, score).
E.g. [('p', 0.10), ('n', 0.05)]
"""
instances = []
with open(report_file_path, 'r') as report_file:
for line in report_file:
# Filter the lines without images.
if re.match(r'.*\.(png|jpg),', line):
# Get the important data.
pieces = line.split(sep=',')
image_name = pieces[0]
real_class = 'p' if re.match(r'.*_\d+p\.(png|jpg),', line) else 'n'
fusion_score = float(pieces[-1])
# print(real_class, fusion_score, image_name)
instances.append((real_class, fusion_score))
return instances
def calculate_roc_points(instances):
"""From a sorted list of instances, calculate the points that draw the ROC curve."""
# Calculate the number of positives and negatives (the real ones).
P = N = 0
for label, score in instances:
if label == 'p':
P += 1
else:
N += 1
# Calculate each point.
TP = FP = 0
points = []
for label, score in instances:
if label == 'p':
TP += 1
else:
            FP += 1
point = (FP/N, TP/P)
points.append(point)
return points
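# Worked example (added for illustration, not part of the original script):
# for instances already sorted by descending score, e.g.
#   [('p', 0.9), ('n', 0.6), ('p', 0.4), ('n', 0.1)]
# calculate_roc_points returns one (FP-rate, TP-rate) point per instance:
#   [(0.0, 0.5), (0.5, 0.5), (0.5, 1.0), (1.0, 1.0)]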
def calculate_discrete_classifier_point(instances, threshold):
"""
From a list of instances, calculate the coordinates for a discrete classifier
that uses the given threshold.
"""
TP = 0 # True positives
FP = 0 # False positives
P = 0 # Total positives
N = 0 # Total negatives
for label, score in instances:
if label == 'p':
P += 1
# Is it classified as positive?
if score >= threshold:
TP += 1
else:
N += 1
# Is it classified as positive? Even though it is not!
if score >= threshold:
FP += 1
tp_rate = TP / P
fp_rate = FP / N
return (fp_rate, tp_rate)
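# Worked example (added for illustration): with the same toy instances as above
# and threshold 0.7, only the 0.9-scored positive is classified as positive, so
# calculate_discrete_classifier_point returns (fp_rate, tp_rate) = (0.0, 0.5).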
# Parse instance data out of StegExpose reports.
clean_png_instances = get_instances_from_report(os.path.join(args.reports_dir, 'clean-png.csv'))
clean_jpg_instances = get_instances_from_report(os.path.join(args.reports_dir, 'clean-jpeg.csv'))
steghide_instances = get_instances_from_report(os.path.join(args.reports_dir, 'steghide.csv'))
outguess_instances = get_instances_from_report(os.path.join(args.reports_dir, 'outguess.csv'))
f5_instances = get_instances_from_report(os.path.join(args.reports_dir, 'f5.csv'))
stepic_instances = get_instances_from_report(os.path.join(args.reports_dir, 'stepic.csv'))
lsbsteg_instances = get_instances_from_report(os.path.join(args.reports_dir, 'lsbsteg.csv'))
# for i in clean_png_instances: print(i)
# for i in clean_jpg_instances: print(i)
# for i in steghide_instances: print(i)
# for i in outguess_instances: print(i)
# for i in f5_instances: print(i)
# for i in stepic_instances: print(i)
# for i in lsbsteg_instances: print(i)
# Merge the dirty instances with their respective clean instances.
# Programs that use JPEG:
all_steghide_instances = steghide_instances + clean_jpg_instances
all_outguess_instances = outguess_instances + clean_jpg_instances
all_f5_instances = f5_instances + clean_jpg_instances
# Programs that use PNG:
all_stepic_instances = stepic_instances + clean_png_instances
all_lsbsteg_instances = lsbsteg_instances + clean_png_instances
# Sort the instances by their score.
all_steghide_instances.sort(key=lambda i: i[1], reverse=True)
all_outguess_instances.sort(key=lambda i: i[1], reverse=True)
all_f5_instances .sort(key=lambda i: i[1], reverse=True)
all_stepic_instances .sort(key=lambda i: i[1], reverse=True)
all_lsbsteg_instances .sort(key=lambda i: i[1], reverse=True)
# for i in all_steghide_instances: print(i)
# for i in all_outguess_instances: print(i)
# for i in all_f5_instances: print(i)
# for i in all_stepic_instances: print(i)
# for i in all_lsbsteg_instances: print(i)
# Calculate points to plot.
steghide_points = calculate_roc_points(all_steghide_instances)
outguess_points = calculate_roc_points(all_outguess_instances)
f5_points = calculate_roc_points(all_f5_instances)
stepic_points = calculate_roc_points(all_stepic_instances)
lsbsteg_points = calculate_roc_points(all_lsbsteg_instances)
# Calculate the operating points for discrete classifiers at the chosen threshold.
threshold = 0.005
steghide_discrete_point = calculate_discrete_classifier_point(all_steghide_instances, threshold)
outguess_discrete_point = calculate_discrete_classifier_point(all_outguess_instances, threshold)
f5_discrete_point = calculate_discrete_classifier_point(all_f5_instances, threshold)
stepic_discrete_point = calculate_discrete_classifier_point(all_stepic_instances, threshold)
lsbsteg_discrete_point = calculate_discrete_classifier_point(all_lsbsteg_instances, threshold)
# print("steghide_discrete_point:", steghide_discrete_point)
# print("outguess_discrete_point:", outguess_discrete_point)
# print("f5_discrete_point:", f5_discrete_point)
# print("stepic_discrete_point:", stepic_discrete_point)
# print("lsbsteg_discrete_point:", lsbsteg_discrete_point)
# Plot all of them on a single graph.
# Create lists with x and y coordinates.
lsbsteg_xs = list(map(lambda p: p[0], lsbsteg_points))
lsbsteg_ys = list(map(lambda p: p[1], lsbsteg_points))
stepic_xs = list(map(lambda p: p[0], stepic_points))
stepic_ys = list(map(lambda p: p[1], stepic_points))
steghide_xs = list(map(lambda p: p[0], steghide_points))
steghide_ys = list(map(lambda p: p[1], steghide_points))
outguess_xs = list(map(lambda p: p[0], outguess_points))
outguess_ys = list(map(lambda p: p[1], outguess_points))
f5_xs = list(map(lambda p: p[0], f5_points))
f5_ys = list(map(lambda p: p[1], f5_points))
# These are the AUCs
f5_auc = np.trapz(f5_ys, f5_xs)
steghide_auc = np.trapz(steghide_ys, steghide_xs)
outguess_auc = np.trapz(outguess_ys, outguess_xs)
stepic_auc = np.trapz(stepic_ys, stepic_xs)
lsbsteg_auc = np.trapz(lsbsteg_ys, lsbsteg_xs)
# Plot the ROC curves.
plt.plot(steghide_xs, steghide_ys, lw=2, color='Red', label='Steghide (AUC = %0.2f)' % steghide_auc)
plt.plot(outguess_xs, outguess_ys, lw=2, color='Yellow', label='Outguess (AUC = %0.2f)' % outguess_auc)
plt.plot(f5_xs, f5_ys, lw=2, color='Brown', label='F5 (AUC = %0.2f)' % f5_auc)
plt.plot(stepic_xs, stepic_ys, lw=2, color='Green', label='Stepic (AUC = %0.2f)' % stepic_auc)
plt.plot(lsbsteg_xs, lsbsteg_ys, lw=2, color='Blue', label='LSBSteg (AUC = %0.2f)' % lsbsteg_auc)
# Plot the discrete classifiers.
plt.plot(steghide_discrete_point[0], steghide_discrete_point[1], 'o', markersize=10, color='Red')
plt.plot(outguess_discrete_point[0], outguess_discrete_point[1], 'o', markersize=10, color='Yellow')
plt.plot(f5_discrete_point[0], f5_discrete_point[1], 'o', markersize=10, color='Brown')
plt.plot(stepic_discrete_point[0], stepic_discrete_point[1], 'o', markersize=10, color='Green')
plt.plot(lsbsteg_discrete_point[0], lsbsteg_discrete_point[1], 'o', markersize=10, color='Blue')
# Plot the diagonal.
plt.plot([0,1], [0,1], color="black", ls='--', lw=0.5)
# Write title, labels, legends and all to the figure.
# plt.title(title)
plt.xlabel("Taxa de Positivos Falsos")
plt.ylabel("Taxa de Positivos Verdadeiros")
plt.legend(loc="lower right")
plt.axis([0, 1, 0, 1])
plt.grid(True)
# Save or show figure.
plt.savefig('rocs.png', bbox_inches='tight')
# plt.show()
|
[
"numpy.trapz",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"re.match",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((238, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create ROC curves based on StegExpose reports."""'}), "(description=\n 'Create ROC curves based on StegExpose reports.')\n", (261, 328), False, 'import argparse\n'), ((6298, 6320), 'numpy.trapz', 'np.trapz', (['f5_ys', 'f5_xs'], {}), '(f5_ys, f5_xs)\n', (6306, 6320), True, 'import numpy as np\n'), ((6335, 6369), 'numpy.trapz', 'np.trapz', (['steghide_ys', 'steghide_xs'], {}), '(steghide_ys, steghide_xs)\n', (6343, 6369), True, 'import numpy as np\n'), ((6384, 6418), 'numpy.trapz', 'np.trapz', (['outguess_ys', 'outguess_xs'], {}), '(outguess_ys, outguess_xs)\n', (6392, 6418), True, 'import numpy as np\n'), ((6431, 6461), 'numpy.trapz', 'np.trapz', (['stepic_ys', 'stepic_xs'], {}), '(stepic_ys, stepic_xs)\n', (6439, 6461), True, 'import numpy as np\n'), ((6475, 6507), 'numpy.trapz', 'np.trapz', (['lsbsteg_ys', 'lsbsteg_xs'], {}), '(lsbsteg_ys, lsbsteg_xs)\n', (6483, 6507), True, 'import numpy as np\n'), ((6531, 6636), 'matplotlib.pyplot.plot', 'plt.plot', (['steghide_xs', 'steghide_ys'], {'lw': '(2)', 'color': '"""Red"""', 'label': "('Steghide (AUC = %0.2f)' % steghide_auc)"}), "(steghide_xs, steghide_ys, lw=2, color='Red', label=\n 'Steghide (AUC = %0.2f)' % steghide_auc)\n", (6539, 6636), True, 'import matplotlib.pyplot as plt\n'), ((6632, 6740), 'matplotlib.pyplot.plot', 'plt.plot', (['outguess_xs', 'outguess_ys'], {'lw': '(2)', 'color': '"""Yellow"""', 'label': "('Outguess (AUC = %0.2f)' % outguess_auc)"}), "(outguess_xs, outguess_ys, lw=2, color='Yellow', label=\n 'Outguess (AUC = %0.2f)' % outguess_auc)\n", (6640, 6740), True, 'import matplotlib.pyplot as plt\n'), ((6736, 6814), 'matplotlib.pyplot.plot', 'plt.plot', (['f5_xs', 'f5_ys'], {'lw': '(2)', 'color': '"""Brown"""', 'label': "('F5 (AUC = %0.2f)' % f5_auc)"}), "(f5_xs, f5_ys, lw=2, color='Brown', label='F5 (AUC = %0.2f)' % f5_auc)\n", (6744, 6814), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6926), 'matplotlib.pyplot.plot', 'plt.plot', (['stepic_xs', 'stepic_ys'], {'lw': '(2)', 'color': '"""Green"""', 'label': "('Stepic (AUC = %0.2f)' % stepic_auc)"}), "(stepic_xs, stepic_ys, lw=2, color='Green', label=\n 'Stepic (AUC = %0.2f)' % stepic_auc)\n", (6835, 6926), True, 'import matplotlib.pyplot as plt\n'), ((6926, 7028), 'matplotlib.pyplot.plot', 'plt.plot', (['lsbsteg_xs', 'lsbsteg_ys'], {'lw': '(2)', 'color': '"""Blue"""', 'label': "('LSBSteg (AUC = %0.2f)' % lsbsteg_auc)"}), "(lsbsteg_xs, lsbsteg_ys, lw=2, color='Blue', label=\n 'LSBSteg (AUC = %0.2f)' % lsbsteg_auc)\n", (6934, 7028), True, 'import matplotlib.pyplot as plt\n'), ((7060, 7161), 'matplotlib.pyplot.plot', 'plt.plot', (['steghide_discrete_point[0]', 'steghide_discrete_point[1]', '"""o"""'], {'markersize': '(10)', 'color': '"""Red"""'}), "(steghide_discrete_point[0], steghide_discrete_point[1], 'o',\n markersize=10, color='Red')\n", (7068, 7161), True, 'import matplotlib.pyplot as plt\n'), ((7158, 7262), 'matplotlib.pyplot.plot', 'plt.plot', (['outguess_discrete_point[0]', 'outguess_discrete_point[1]', '"""o"""'], {'markersize': '(10)', 'color': '"""Yellow"""'}), "(outguess_discrete_point[0], outguess_discrete_point[1], 'o',\n markersize=10, color='Yellow')\n", (7166, 7262), True, 'import matplotlib.pyplot as plt\n'), ((7259, 7350), 'matplotlib.pyplot.plot', 'plt.plot', (['f5_discrete_point[0]', 'f5_discrete_point[1]', '"""o"""'], {'markersize': '(10)', 'color': '"""Brown"""'}), "(f5_discrete_point[0], f5_discrete_point[1], 'o', markersize=10,\n color='Brown')\n", 
(7267, 7350), True, 'import matplotlib.pyplot as plt\n'), ((7359, 7458), 'matplotlib.pyplot.plot', 'plt.plot', (['stepic_discrete_point[0]', 'stepic_discrete_point[1]', '"""o"""'], {'markersize': '(10)', 'color': '"""Green"""'}), "(stepic_discrete_point[0], stepic_discrete_point[1], 'o',\n markersize=10, color='Green')\n", (7367, 7458), True, 'import matplotlib.pyplot as plt\n'), ((7459, 7559), 'matplotlib.pyplot.plot', 'plt.plot', (['lsbsteg_discrete_point[0]', 'lsbsteg_discrete_point[1]', '"""o"""'], {'markersize': '(10)', 'color': '"""Blue"""'}), "(lsbsteg_discrete_point[0], lsbsteg_discrete_point[1], 'o',\n markersize=10, color='Blue')\n", (7467, 7559), True, 'import matplotlib.pyplot as plt\n'), ((7580, 7636), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""black"""', 'ls': '"""--"""', 'lw': '(0.5)'}), "([0, 1], [0, 1], color='black', ls='--', lw=0.5)\n", (7588, 7636), True, 'import matplotlib.pyplot as plt\n'), ((7709, 7747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Taxa de Positivos Falsos"""'], {}), "('Taxa de Positivos Falsos')\n", (7719, 7747), True, 'import matplotlib.pyplot as plt\n'), ((7748, 7791), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Taxa de Positivos Verdadeiros"""'], {}), "('Taxa de Positivos Verdadeiros')\n", (7758, 7791), True, 'import matplotlib.pyplot as plt\n'), ((7792, 7821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (7802, 7821), True, 'import matplotlib.pyplot as plt\n'), ((7822, 7844), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (7830, 7844), True, 'import matplotlib.pyplot as plt\n'), ((7845, 7859), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7853, 7859), True, 'import matplotlib.pyplot as plt\n'), ((7884, 7928), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rocs.png"""'], {'bbox_inches': '"""tight"""'}), "('rocs.png', bbox_inches='tight')\n", (7895, 7928), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2666), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""clean-png.csv"""'], {}), "(args.reports_dir, 'clean-png.csv')\n", (2631, 2666), False, 'import os\n'), ((2716, 2764), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""clean-jpeg.csv"""'], {}), "(args.reports_dir, 'clean-jpeg.csv')\n", (2728, 2764), False, 'import os\n'), ((2813, 2859), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""steghide.csv"""'], {}), "(args.reports_dir, 'steghide.csv')\n", (2825, 2859), False, 'import os\n'), ((2908, 2954), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""outguess.csv"""'], {}), "(args.reports_dir, 'outguess.csv')\n", (2920, 2954), False, 'import os\n'), ((2997, 3037), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""f5.csv"""'], {}), "(args.reports_dir, 'f5.csv')\n", (3009, 3037), False, 'import os\n'), ((3084, 3128), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""stepic.csv"""'], {}), "(args.reports_dir, 'stepic.csv')\n", (3096, 3128), False, 'import os\n'), ((3176, 3221), 'os.path.join', 'os.path.join', (['args.reports_dir', '"""lsbsteg.csv"""'], {}), "(args.reports_dir, 'lsbsteg.csv')\n", (3188, 3221), False, 'import os\n'), ((780, 813), 're.match', 're.match', (['""".*\\\\.(png|jpg),"""', 'line'], {}), "('.*\\\\.(png|jpg),', line)\n", (788, 813), False, 'import re\n'), ((977, 1016), 're.match', 're.match', (['""".*_\\\\d+p\\\\.(png|jpg),"""', 'line'], {}), "('.*_\\\\d+p\\\\.(png|jpg),', line)\n", (985, 1016), False, 'import re\n')]
|
import caffe
import numpy as np
import pickle
TEST_LAYER_NAME = 'bn205'
# TEST_LAYER_NAME = None
net = caffe.Net('proto.prototxt', caffe.TEST)
params = pickle.load(open('params.pkl', 'rb'))
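# Copy each entry of the pickled parameter dict into the matching Caffe layer
# blobs. The commented-out transposes below suggest the source weights may
# originally have used a different memory layout; here they are assumed to
# already match Caffe's ordering.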
for key in params:
    if key not in net.params:
        # Skip entries that have no matching layer in the prototxt.
        continue
if 'kernel' in params[key]:
# net.params[key][0].data[:] = np.transpose(params[key]['kernel'], [3,2,1,0])
net.params[key][0].data[:] = params[key]['kernel']
elif 'dwkernel' in params[key]:
# net.params[key][0].data[:] = np.transpose(params[key]['dwkernel'], [2,3,1,0])
net.params[key][0].data[:] = params[key]['dwkernel']
elif 'fckernel' in params[key]:
# net.params[key][0].data[:] = np.transpose(params[key]['fckernel'], [1,0])
net.params[key][0].data[:] = params[key]['fckernel']
elif 'mean' in params[key]:
net.params[key][0].data[:] = params[key]['mean']
net.params[key][1].data[:] = params[key]['var']
if 'scale' in params[key]:
net.params[key][2].data[:] = params[key]['scale']
elif 'scale' in params[key]:
net.params[key][0].data[:] = params[key]['scale']
if 'bias' in params[key]:
net.params[key][1].data[:] = params[key]['bias']
if 'gamma' in params[key]: # used for prelu, not sure if other layers use this too
net.params[key][0].data[:] = params[key]['gamma']
print(net.blobs['inputdata'].data.shape)
net.blobs['inputdata'].data[:] = np.ones([1,3,112,112]).astype(np.float32)
# print(net.blobs.keys())
net.forward()
def tstlayer(name):
if name is None:
return
a = net.blobs[name].data
# a = np.transpose(a, [0,2,3,1])
print(a)
# print(a)
print(a.shape)
tstlayer(TEST_LAYER_NAME)
net.save('abc.caffemodel')
|
[
"caffe.Net",
"numpy.ones"
] |
[((108, 147), 'caffe.Net', 'caffe.Net', (['"""proto.prototxt"""', 'caffe.TEST'], {}), "('proto.prototxt', caffe.TEST)\n", (117, 147), False, 'import caffe\n'), ((1332, 1357), 'numpy.ones', 'np.ones', (['[1, 3, 112, 112]'], {}), '([1, 3, 112, 112])\n', (1339, 1357), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from lxml import etree
from tqdm import tqdm
from pytorch_extras import RAdam, SingleCycleScheduler
from pytorch_transformers import GPT2Model, GPT2Tokenizer
from deps.torch_train_test_loop.torch_train_test_loop import LoopComponent, TrainTestLoop
from models import SSTClassifier
from lm.inference import ModelWrapper
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="2"
DEVICE = 'cuda'
model_path = '/Home/ode/pimenov/Documents/sirprey/Streets/nlp/de345-root'
mw = ModelWrapper.load(Path(model_path))
MAX_LEN = 0  # 395; 0 for routing
TOT_MAX_LEN = 512
_stoi = { s: i for i, s in enumerate(['negative', 'neutral', 'positive'])}
create_batch = lambda : type('', (), {'text':([], []), 'label':[], '__len__': lambda self: len(self.label)})()
def load_xml(fname, pad_token='', resample = None):
if not pad_token:
pad_token = mw.END_OF_TEXT
r = etree.parse(fname).getroot()
clssizes = [0 for i in range(len(_stoi))]
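    # Each XML entry is tokenized, truncated to the configured length limit
    # (minus one token) and terminated with the pad/end-of-text token; the
    # class label is read from the preceding field of the same entry.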
texts = [([*mw.tokenize(d.getchildren()[-1].text)[:(min(MAX_LEN, TOT_MAX_LEN) if MAX_LEN else TOT_MAX_LEN) - 1], pad_token], _stoi[d.getchildren()[-2].text]) for d in r.getchildren()]
print('Too long texts:%d out of %d'%( sum(1 for tokens,_ in texts if len(tokens)>1024), len(texts)))
maxlen = MAX_LEN if MAX_LEN else max(len(tokens) for tokens,_ in texts)
print('Maximal length of text:%d'%(maxlen))
inputs = [([mw.token_to_id(token) for token in tokens + [pad_token] * (maxlen - len(tokens))], [1.0] * len(tokens) + [0.0] * (maxlen - len(tokens)), label) for tokens, label in texts]
outputs = []
for tokens, mask, label in inputs:
if resample:
for i in range(resample[label]):
outputs.append((tokens, mask, label))
clssizes[label] += 1
print('Class sizes:' + str(clssizes))
return outputs if resample else inputs
#load_xml('test_TIMESTAMP1.xml')
class LoopMain(LoopComponent):
def __init__(self, n_classes, device, pct_warmup=0.1, mixup=(0.2, 0.2)):
self.n_classes, self.device, self.pct_warmup = (n_classes, device, pct_warmup)
self.mixup_dist = torch.distributions.Beta(torch.tensor(mixup[0]), torch.tensor(mixup[1]))
self.onehot = torch.eye(self.n_classes, device=self.device)
self.saved_data = []
def on_train_begin(self, loop):
n_iters = len(loop.train_data) * loop.n_epochs
        loop.optimizer = RAdam(loop.model.parameters(), lr=1e-3)  # 5e-4
loop.scheduler = SingleCycleScheduler(
loop.optimizer, loop.n_optim_steps, frac=self.pct_warmup, min_lr=1e-5)
def on_grads_reset(self, loop):
loop.model.zero_grad()
def on_batch_begin(self, loop):
if loop.is_training:
for i in range(len(_stoi)):
loop.tp[i] //= 2
loop.fp[i] //= 2
loop.fn[i] //= 2
def on_epoch_begin(self, loop):
loop.tp = [0] * len(_stoi)
loop.fp = [0] * len(_stoi)
loop.fn = [0] * len(_stoi)
#loop.precision = 0
#loop.recall = 0
#loop.f1 = 0
loop.p1s = [0] * len(_stoi)
loop.r1s = [0] * len(_stoi)
loop.f1s = [0] * len(_stoi)
def on_forward_pass(self, loop):
model, batch = (loop.model, loop.batch)
mask, embs = batch.text
target_probs = self.onehot[batch.label]
if loop.is_training:
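            # Mixup regularization: blend masks, hidden-state embeddings and
            # one-hot targets with a randomly permuted partner batch, using
            # Beta-distributed mixing weights r.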
r = self.mixup_dist.sample([len(mask)]).to(device=mask.device)
idx = torch.randperm(len(mask))
mask = mask.lerp(mask[idx], r[:, None])
embs = embs.lerp(embs[idx], r[:, None, None, None])
target_probs = target_probs.lerp(target_probs[idx], r[:, None])
pred_scores, _, _ = model(mask, embs)
_, pred_ids = pred_scores.max(-1)
for i in range(len(_stoi)):
loop.tp[i] += torch.sum((pred_ids == i)*(batch.label == i))
loop.fp[i] += torch.sum((pred_ids == i)*(batch.label != i))
loop.fn[i] += torch.sum((pred_ids != i)*(batch.label == i))
#loop.precision = [loop.tp.float().item() /(loop.fp.item()+loop.tp.item()+1e-3)
#loop.recall = loop.tp.float().item()/(loop.tp.item() + loop.fn.item()+1e-3)
#loop.f1 = 2 * loop.precision * loop.recall / (loop.precision + loop.recall + 1e-3)
loop.p1s = [loop.tp[i].float().item() /(loop.fp[i].item()+loop.tp[i].item()+1e-3) for i in range(len(_stoi))]
loop.r1s = [loop.tp[i].float().item()/(loop.tp[i].item() + loop.fn[i].item()+1e-3) for i in range(len(_stoi))]
loop.f1s = [2 * loop.p1s[i] * loop.r1s[i] / (loop.p1s[i] + loop.r1s[i] + 1e-3) for i in range(len(_stoi))]
accuracy = (pred_ids == batch.label).float().mean()
loop.pred_scores, loop.target_probs, loop.accuracy = (pred_scores, target_probs, accuracy)
def on_loss_compute(self, loop):
losses = -loop.target_probs * F.log_softmax(loop.pred_scores, dim=-1) # CE
loop.loss = losses.sum(dim=-1).mean() # sum of classes, mean of batch
def on_backward_pass(self, loop):
loop.loss.backward()
def on_optim_step(self, loop):
loop.optimizer.step()
loop.scheduler.step()
def on_batch_end(self, loop):
self.saved_data.append({
'n_samples': len(loop.batch),
'epoch_desc': loop.epoch_desc,
'epoch_num': loop.epoch_num,
'epoch_frac': loop.epoch_num + loop.batch_num / loop.n_batches,
'batch_num' : loop.batch_num,
'accuracy': loop.accuracy.item(),
'loss': loop.loss.item(),
'lr': loop.optimizer.param_groups[0]['lr'],
'momentum': loop.optimizer.param_groups[0]['betas'][0],
})
class LoopProgressBar(LoopComponent):
def __init__(self, item_names=['loss', 'accuracy']):
self.item_names = item_names
def on_epoch_begin(self, loop):
self.fp = 0
self.fn = 0
self.total, self.count = ({ name: 0.0 for name in self.item_names }, 0)
self.pbar = tqdm(total=loop.n_batches, desc=f"{loop.epoch_desc} epoch {loop.epoch_num}")
def on_batch_end(self, loop):
n = len(loop.batch)
self.count += n
for name in self.item_names:
self.total[name] += getattr(loop, name).item() * n
self.pbar.update(1)
if (not loop.is_training):
means = { f'mean_{name}': self.total[name] / self.count for name in self.item_names }
#means['P1'] = loop.precision
#means['R1'] = loop.recall
for i in range(len(_stoi)):
means[f'F1_{i}'] = loop.f1s[i]
self.pbar.set_postfix(means)
else:
means = { f'mean_{name}': self.total[name] / self.count for name in self.item_names }
for i in range(len(_stoi)):
means[f'F1_{i}'] = loop.f1s[i]
self.pbar.set_postfix(means)
def on_epoch_end(self, loop):
self.pbar.close()
# Initialize model.
n_classes = len(_stoi)
print("Num classes: " + str(n_classes))
model = SSTClassifier(
d_depth=mw.model.hparams.n_layer + 1,
d_emb=mw.model.hparams.n_hidden,
d_inp=64,
d_cap=2,
n_parts=64,
n_classes=n_classes,
n = MAX_LEN,
)
model = model.cuda(device=DEVICE)
print('Total number of parameters: {:,}'.format(sum(np.prod(p.shape) for p in model.parameters())))
trn_batches = load_xml('train_v1.4.xml', resample=[1,1,2])#, resample=[2,1,5])#,resample = [2, 1, 7])
val_batches = load_xml('dev_v1.4.xml')
class IterB:
def __init__(self, inputs, batch_size = 32):
self.inputs = inputs
self.batch_size = batch_size
self.indcs = np.arange(len(inputs))
self.ni = -1
self.ln = len(self.inputs)
self.ln = self.ln // self.batch_size + (0 if self.ln % self.batch_size == 0 else 1)
def __len__(self):
return self.ln
def __iter__(self):
np.random.shuffle(self.indcs)
self.ni = 0
return self
def __next__(self):
if self.ni >= len(self.inputs):
raise StopIteration
batch = create_batch()
for i in range(self.batch_size):
tokens, mask, label = self.inputs[self.indcs[self.ni]]
batch.text[0].append(mask)
batch.text[1].append(tokens)
batch.label.append(label)
self.ni += 1
if self.ni >= len(self.inputs):
break
batch.text = (torch.tensor(batch.text[0]).to(device=DEVICE), mw.get_hidden(torch.tensor(batch.text[1]).to(device=DEVICE)))
batch.label = torch.tensor(batch.label).to(device=DEVICE)
return batch
loop = TrainTestLoop(model, [LoopMain(n_classes, DEVICE), LoopProgressBar()], IterB(trn_batches), IterB(val_batches))
#model.load_state_dict(torch.load('result3.pt'))
#model.eval()
# Train model
loop.train(n_epochs=5)
torch.save(model.state_dict(), 'model64_.pt')
#model.load_state_dict(torch.load('result.pt'))
#model.eval()
tst_batches = load_xml('test_TIMESTAMP1.xml')
loop.test(IterB(tst_batches, batch_size = 16))
tst_batches = load_xml('test_TIMESTAMP2.xml')
loop.test(IterB(tst_batches, batch_size = 16))
|
[
"tqdm.tqdm",
"torch.tensor",
"torch.eye",
"pytorch_extras.SingleCycleScheduler",
"numpy.prod",
"pathlib.Path",
"torch.nn.functional.log_softmax",
"lxml.etree.parse",
"models.SSTClassifier",
"torch.sum",
"numpy.random.shuffle"
] |
[((7210, 7362), 'models.SSTClassifier', 'SSTClassifier', ([], {'d_depth': '(mw.model.hparams.n_layer + 1)', 'd_emb': 'mw.model.hparams.n_hidden', 'd_inp': '(64)', 'd_cap': '(2)', 'n_parts': '(64)', 'n_classes': 'n_classes', 'n': 'MAX_LEN'}), '(d_depth=mw.model.hparams.n_layer + 1, d_emb=mw.model.hparams.\n n_hidden, d_inp=64, d_cap=2, n_parts=64, n_classes=n_classes, n=MAX_LEN)\n', (7223, 7362), False, 'from models import SSTClassifier\n'), ((690, 706), 'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (694, 706), False, 'from pathlib import Path\n'), ((2382, 2427), 'torch.eye', 'torch.eye', (['self.n_classes'], {'device': 'self.device'}), '(self.n_classes, device=self.device)\n', (2391, 2427), False, 'import torch\n'), ((2645, 2742), 'pytorch_extras.SingleCycleScheduler', 'SingleCycleScheduler', (['loop.optimizer', 'loop.n_optim_steps'], {'frac': 'self.pct_warmup', 'min_lr': '(1e-05)'}), '(loop.optimizer, loop.n_optim_steps, frac=self.\n pct_warmup, min_lr=1e-05)\n', (2665, 2742), False, 'from pytorch_extras import RAdam, SingleCycleScheduler\n'), ((6184, 6260), 'tqdm.tqdm', 'tqdm', ([], {'total': 'loop.n_batches', 'desc': 'f"""{loop.epoch_desc} epoch {loop.epoch_num}"""'}), "(total=loop.n_batches, desc=f'{loop.epoch_desc} epoch {loop.epoch_num}')\n", (6188, 6260), False, 'from tqdm import tqdm\n'), ((8068, 8097), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indcs'], {}), '(self.indcs)\n', (8085, 8097), True, 'import numpy as np\n'), ((1062, 1080), 'lxml.etree.parse', 'etree.parse', (['fname'], {}), '(fname)\n', (1073, 1080), False, 'from lxml import etree\n'), ((2312, 2334), 'torch.tensor', 'torch.tensor', (['mixup[0]'], {}), '(mixup[0])\n', (2324, 2334), False, 'import torch\n'), ((2336, 2358), 'torch.tensor', 'torch.tensor', (['mixup[1]'], {}), '(mixup[1])\n', (2348, 2358), False, 'import torch\n'), ((4009, 4056), 'torch.sum', 'torch.sum', (['((pred_ids == i) * (batch.label == i))'], {}), '((pred_ids == i) * (batch.label == i))\n', (4018, 4056), False, 'import torch\n'), ((4081, 4128), 'torch.sum', 'torch.sum', (['((pred_ids == i) * (batch.label != i))'], {}), '((pred_ids == i) * (batch.label != i))\n', (4090, 4128), False, 'import torch\n'), ((4153, 4200), 'torch.sum', 'torch.sum', (['((pred_ids != i) * (batch.label == i))'], {}), '((pred_ids != i) * (batch.label == i))\n', (4162, 4200), False, 'import torch\n'), ((5052, 5091), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['loop.pred_scores'], {'dim': '(-1)'}), '(loop.pred_scores, dim=-1)\n', (5065, 5091), True, 'import torch.nn.functional as F\n'), ((7477, 7493), 'numpy.prod', 'np.prod', (['p.shape'], {}), '(p.shape)\n', (7484, 7493), True, 'import numpy as np\n'), ((8744, 8769), 'torch.tensor', 'torch.tensor', (['batch.label'], {}), '(batch.label)\n', (8756, 8769), False, 'import torch\n'), ((8613, 8640), 'torch.tensor', 'torch.tensor', (['batch.text[0]'], {}), '(batch.text[0])\n', (8625, 8640), False, 'import torch\n'), ((8674, 8701), 'torch.tensor', 'torch.tensor', (['batch.text[1]'], {}), '(batch.text[1])\n', (8686, 8701), False, 'import torch\n')]
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_geotiff.py
Created by <NAME> on 2013-09-18.
"""
from __future__ import division, print_function, absolute_import
from builtins import str
import os
import numpy as np
import pytest
from pygaarst import geotiff
from pygaarst.rasterhelpers import PygaarstRasterError
DATADIR = "tests/data"
rgbgeotiff = os.path.join(DATADIR, 'LC8_754_8bit.tiff')
basicgeotiff = os.path.join(DATADIR, 'bogota_crop.tif')
def test_valid_geotiff_open():
a = geotiff.GeoTIFF(rgbgeotiff)
assert a.ncol == 15
assert a.nrow == 15
def test_basic_geotiff_properties():
a = geotiff.GeoTIFF(basicgeotiff)
assert type(a) == geotiff.GeoTIFF
assert a.delx == 60.0
def test_rgb_geotiff_properties():
a = geotiff.GeoTIFF(rgbgeotiff)
assert a.lrx == 501105.0
assert a.coordtrans.srs == u'+proj=utm +zone=6 +datum=WGS84 +units=m +no_defs'
assert isinstance(a, geotiff.GeoTIFF)
assert a.data[0][10][5] == 55
assert a.projection == u'PROJCS["WGS 84 / UTM zone 6N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-147],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32606"]]'
assert a.proj4 == u'+proj=utm +zone=6 +datum=WGS84 +units=m +no_defs'
assert a.coordtrans(-145, 65) == (594301.0123902344, 7209946.446071797)
assert a.delx == 30.0
assert a.dely == -30.0
assert a.easting[0] == 500655.0
assert a.northing[0] == 7200285.0
assert a.x_pxcenter[-1] == 501090.0
assert a.y_pxcenter[-1] == 7200720.0
assert np.isclose(a.Lon[0][0], -146.98614807600831)
assert np.isclose(a.Lon_pxcenter[-1][0], -146.98582879544685)
assert np.isclose(a.Lat[0][0], 64.926695025329934)
assert np.isclose(a.Lat_pxcenter[-1][0], 64.930598198154968)
def test_geotiff_methods():
a = geotiff.GeoTIFF(rgbgeotiff)
assert a.ij2xy(1, 1) == (500685.0, 7200705.0)
assert a.xy2ij(500750, 7200725) == (0, 3)
def test_geotiff_error():
a = geotiff.GeoTIFF(rgbgeotiff)
with pytest.raises(PygaarstRasterError):
a.ij2xy(250, 1)
def test_geotiff_plotting():
a = geotiff.GeoTIFF(rgbgeotiff)
a.simpleplot()
def test_geotiff_cloning(tmpdir):
a = geotiff.GeoTIFF(rgbgeotiff)
fn = tmpdir.mkdir("sub").join("clone.tif")
b = a.clone(str(fn), a.data)
assert type(b) == geotiff.GeoTIFF
|
[
"pytest.raises",
"numpy.isclose",
"pygaarst.geotiff.GeoTIFF",
"builtins.str",
"os.path.join"
] |
[((353, 395), 'os.path.join', 'os.path.join', (['DATADIR', '"""LC8_754_8bit.tiff"""'], {}), "(DATADIR, 'LC8_754_8bit.tiff')\n", (365, 395), False, 'import os\n'), ((411, 451), 'os.path.join', 'os.path.join', (['DATADIR', '"""bogota_crop.tif"""'], {}), "(DATADIR, 'bogota_crop.tif')\n", (423, 451), False, 'import os\n'), ((493, 520), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['rgbgeotiff'], {}), '(rgbgeotiff)\n', (508, 520), False, 'from pygaarst import geotiff\n'), ((616, 645), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['basicgeotiff'], {}), '(basicgeotiff)\n', (631, 645), False, 'from pygaarst import geotiff\n'), ((755, 782), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['rgbgeotiff'], {}), '(rgbgeotiff)\n', (770, 782), False, 'from pygaarst import geotiff\n'), ((1969, 2012), 'numpy.isclose', 'np.isclose', (['a.Lon[0][0]', '(-146.9861480760083)'], {}), '(a.Lon[0][0], -146.9861480760083)\n', (1979, 2012), True, 'import numpy as np\n'), ((2025, 2079), 'numpy.isclose', 'np.isclose', (['a.Lon_pxcenter[-1][0]', '(-146.98582879544685)'], {}), '(a.Lon_pxcenter[-1][0], -146.98582879544685)\n', (2035, 2079), True, 'import numpy as np\n'), ((2091, 2133), 'numpy.isclose', 'np.isclose', (['a.Lat[0][0]', '(64.92669502532993)'], {}), '(a.Lat[0][0], 64.92669502532993)\n', (2101, 2133), True, 'import numpy as np\n'), ((2146, 2198), 'numpy.isclose', 'np.isclose', (['a.Lat_pxcenter[-1][0]', '(64.93059819815497)'], {}), '(a.Lat_pxcenter[-1][0], 64.93059819815497)\n', (2156, 2198), True, 'import numpy as np\n'), ((2238, 2265), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['rgbgeotiff'], {}), '(rgbgeotiff)\n', (2253, 2265), False, 'from pygaarst import geotiff\n'), ((2398, 2425), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['rgbgeotiff'], {}), '(rgbgeotiff)\n', (2413, 2425), False, 'from pygaarst import geotiff\n'), ((2534, 2561), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['rgbgeotiff'], {}), '(rgbgeotiff)\n', (2549, 2561), False, 'from pygaarst import geotiff\n'), ((2625, 2652), 'pygaarst.geotiff.GeoTIFF', 'geotiff.GeoTIFF', (['rgbgeotiff'], {}), '(rgbgeotiff)\n', (2640, 2652), False, 'from pygaarst import geotiff\n'), ((2435, 2469), 'pytest.raises', 'pytest.raises', (['PygaarstRasterError'], {}), '(PygaarstRasterError)\n', (2448, 2469), False, 'import pytest\n'), ((2716, 2723), 'builtins.str', 'str', (['fn'], {}), '(fn)\n', (2719, 2723), False, 'from builtins import str\n')]
|
"""
UNSUPERVISED LEARNING METRICS
Author: <NAME>
License: MIT License
Source: http://www.github.com/bstienen/AUMVC
Implementation of the Area under the Mass-Volume Curve algorithm as by
- <NAME> and <NAME>, Scoring anomalies: a M-estimation
formulation approach. 2013-04
Implementation is inspired by
https://github.com/albertcthomas/anomaly_tuning
"""
import warnings
import numpy as np
from scipy.special import comb
from sklearn.metrics import auc
def aumvc(scoring_function,
X_test,
N_mc=100000,
N_levelsets=100,
normalise=True):
""" Calculate the area under the mass-volume curve for an anomaly detection
function or algorithm
    This function uses Monte Carlo sampling in the parameter-space box spanned
    by the provided test data in order to estimate the level sets of the
    scoring function. For higher dimensionalities the number of sampled points
    required would make this algorithm intractable; in these cases the use of
    the `aumvc_hd` function is advised instead.
Parameters
----------
scoring_function: function
Function that takes datapoints as numpy.ndarray (nPoints, nFeatures)
and returns an anomaly score. This score should be in range [0,1],
where 1 indicates the point not being an anomaly (and 0 that the point
*is* an anomaly).
X_test: numpy.ndarray of shape (nPoints, nFeatures)
Datapoints used for testing the algorithm.
N_mc: int (default: 100,000)
Number of datapoints to sample in the parameter space to estimate the
level sets of the scoring function.
N_levelsets: int (default: 100)
Number of level sets to evaluate.
normalise: bool (default: True)
Indicates if output scores of the scoring_function should be normalised
before calculating the mass-volume curve. """
# Get ranges for the test data
mins = np.amin(X_test, axis=0)
maxs = np.amax(X_test, axis=0)
# Generate uniform MC data
U = np.random.rand(N_mc, len(mins))*(maxs-mins)+mins
# Calculate volume of total cube
vol_tot_cube = np.prod(maxs - mins)
# Score test and MC data
score_U = scoring_function(U)
score_test = scoring_function(X_test)
# Do normalising if needed
if normalise:
minimum = min(np.amin(score_U), np.amin(score_test))
maximum = max(np.amax(score_U), np.amax(score_test))
score_U = (score_U - minimum) / (maximum - minimum)
score_test = (score_test - minimum) / (maximum - minimum)
# Calculate alphas to use
alphas = np.linspace(0, 1, N_levelsets)
# Compute offsets
offsets = np.percentile(score_test, 100 * (1 - alphas))
# Compute volumes of associated level sets
volumes = (np.array([np.mean(score_U >= offset)
for offset in offsets]) * vol_tot_cube)
# Calculating area under the curve
area = auc(alphas, volumes)
# Return area and curve variables
return (area, alphas, volumes)
def aumvc_hd(scoring_function_generator,
X_train,
X_test,
N_selected_dim=5,
N_iterations=100,
N_mc=100000,
N_levelsets=1000,
normalise=True):
""" Calculate the area under the mass-volume curve for an anomaly detection
function or algorithm working in high-dimensional parameter spaces
The curse of dimensionality is avoided by taking the average over multiple
AUMVC values for randomly selected subspaces of the parameter space under
consideration. The AUMVCs are calculated using the `aumvc` function above.
As this requires a retraining of the scoring function for each random
subspace, the `aumvc_hd` function does not take a scoring function as
input, but rather a generator of scoring functions. This function should
take the training data as input and return a scoring function (see
description of `aumvc` for requirements of this function).
Parameters
----------
scoring_function_generator: function
Function that takes training datapoints as numpy.ndarray of shape
(nPoints, nFeatures) and returns a scoring function. See description of
`aumvc` function for requirements on the scoring function.
X_train: numpy.ndarray of shape (nPoints, nFeatures)
Data points for which randomly selected subspaces are passed to the
scoring function generator for creation of the scoring function.
X_test: numpy.ndarray of shape (nPoints, nFeatures)
Data points used for testing the algorithm. Number of data points does
not have to match the number of training points, but the number of
features *does* have to match.
N_selected_dim: int (default=5)
Number of dimensions selected for the random subspace generation. This
number should be equal to or smaller than the number of features in
the testing data.
N_iterations: int (default=100)
        Number of random subspaces to be evaluated. A warning will be
raised if this number is higher than the total number of unique
combinations that can be randomly selected from the provided parameter
space.
N_mc: int (default=100,000)
Number of datapoints to sample in the parameter space to estimate the
level sets of the scoring function.
N_levelsets: int (default=100)
Number of level sets to evaluate.
normalise: bool (default: True)
Indicates if output scores of the scoring_function should be normalised
before calculating the mass-volume curve. """
# Check if N_selected_dim <= dim(X_test)
data_dim = X_test.shape[1]
    if N_selected_dim > data_dim:
raise Exception("""The number of dimensions to select in each iteration
is larger than the number of dimensions in the provided data.""")
# Check if the dimensionality of training data matches the dimensionality
# of the testing data
if X_train.shape[1] != data_dim:
raise Exception("""The number of features in the training data does not
match the number of features in the testing data.""")
# Check if the number of unique random subspaces is significantly larger
# (i.e. > a factor of 2) than the requested number of iterations
    N_unique = comb(data_dim, N_selected_dim)
    if N_unique < 2 * N_iterations:
        warnings.warn("""The number of unique subspace combinations that can be
drawn from the input dimensions is less than twice the requested number of
iterations, so the randomly selected subspaces will largely overlap.""")
# Initialise final AUMVC variable
area_hd = 0
# Run over each iteration
for _ in range(N_iterations):
# Make feature subselection
features = np.random.choice(data_dim, N_selected_dim, replace=False)
X_selection = X_test[:, features]
X_train_selection = X_train[:, features]
# Train scoring function
scoring_function = scoring_function_generator(X_train_selection)
# Calculate area under curve and collect it in final variable
area, _, _ = aumvc(scoring_function,
X_selection,
N_mc,
N_levelsets,
normalise)
area_hd += area
# Return mean area
return area_hd / N_iterations
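# --- Illustrative usage (added example, not part of the original module) ---
# A minimal sketch assuming a toy scoring function that scores points by their
# negated distance to the data mean; any callable that maps an (n, d) array to
# per-point scores works, as described in the `aumvc` docstring.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(500, 2)
    center = X_demo.mean(axis=0)
    def toy_score(points):
        # Higher score means "less anomalous", hence the negated distance.
        return -np.linalg.norm(points - center, axis=1)
    area, alphas, volumes = aumvc(toy_score, X_demo, N_mc=10000, N_levelsets=50)
    print('AUMVC of the toy scorer: %.4f' % area)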
|
[
"numpy.amin",
"numpy.amax",
"numpy.percentile",
"sklearn.metrics.auc",
"numpy.mean",
"numpy.linspace",
"numpy.random.choice",
"warnings.warn",
"numpy.prod"
] |
[((1928, 1951), 'numpy.amin', 'np.amin', (['X_test'], {'axis': '(0)'}), '(X_test, axis=0)\n', (1935, 1951), True, 'import numpy as np\n'), ((1963, 1986), 'numpy.amax', 'np.amax', (['X_test'], {'axis': '(0)'}), '(X_test, axis=0)\n', (1970, 1986), True, 'import numpy as np\n'), ((2133, 2153), 'numpy.prod', 'np.prod', (['(maxs - mins)'], {}), '(maxs - mins)\n', (2140, 2153), True, 'import numpy as np\n'), ((2602, 2632), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N_levelsets'], {}), '(0, 1, N_levelsets)\n', (2613, 2632), True, 'import numpy as np\n'), ((2670, 2715), 'numpy.percentile', 'np.percentile', (['score_test', '(100 * (1 - alphas))'], {}), '(score_test, 100 * (1 - alphas))\n', (2683, 2715), True, 'import numpy as np\n'), ((2931, 2951), 'sklearn.metrics.auc', 'auc', (['alphas', 'volumes'], {}), '(alphas, volumes)\n', (2934, 2951), False, 'from sklearn.metrics import auc\n'), ((6346, 6403), 'numpy.random.choice', 'np.random.choice', (['data_dim', 'N_selected_dim'], {'replace': '(False)'}), '(data_dim, N_selected_dim, replace=False)\n', (6362, 6403), True, 'import numpy as np\n'), ((6450, 6622), 'warnings.warn', 'warnings.warn', (['"""The number of unique combinations of the dimensions of\nthe input space is smaller than the number of dimensions to select in each\niterations."""'], {}), '(\n """The number of unique combinations of the dimensions of\nthe input space is smaller than the number of dimensions to select in each\niterations."""\n )\n', (6463, 6622), False, 'import warnings\n'), ((6789, 6846), 'numpy.random.choice', 'np.random.choice', (['data_dim', 'N_selected_dim'], {'replace': '(False)'}), '(data_dim, N_selected_dim, replace=False)\n', (6805, 6846), True, 'import numpy as np\n'), ((2332, 2348), 'numpy.amin', 'np.amin', (['score_U'], {}), '(score_U)\n', (2339, 2348), True, 'import numpy as np\n'), ((2350, 2369), 'numpy.amin', 'np.amin', (['score_test'], {}), '(score_test)\n', (2357, 2369), True, 'import numpy as np\n'), ((2393, 2409), 'numpy.amax', 'np.amax', (['score_U'], {}), '(score_U)\n', (2400, 2409), True, 'import numpy as np\n'), ((2411, 2430), 'numpy.amax', 'np.amax', (['score_test'], {}), '(score_test)\n', (2418, 2430), True, 'import numpy as np\n'), ((2789, 2815), 'numpy.mean', 'np.mean', (['(score_U >= offset)'], {}), '(score_U >= offset)\n', (2796, 2815), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Slightech Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import os
import sys
TOOLBOX_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(TOOLBOX_DIR, 'internal'))
# pylint: disable=import-error,wrong-import-position
from data import ROSBag, MYNTEYE, What
ANGLE_DEGREES = 'd'
ANGLE_RADIANS = 'r'
ANGLE_UNITS = (ANGLE_DEGREES, ANGLE_RADIANS)
BIN_IMG_NAME = 'stamp_analytics_img.bin'
BIN_IMU_NAME = 'stamp_analytics_imu.bin'
RESULT_FIGURE = 'stamp_analytics.png'
class BinDataset(object):
def __init__(self, path, dataset_creator):
self.path = path
self.dataset_creator = dataset_creator
self._digest()
def _digest(self):
bindir = os.path.splitext(self.path)[0]
binimg = os.path.join(bindir, BIN_IMG_NAME)
binimu = os.path.join(bindir, BIN_IMU_NAME)
if os.path.isfile(binimg) and os.path.isfile(binimu):
print('find binary files ...')
print(' binimg: {}'.format(binimg))
print(' binimu: {}'.format(binimu))
while True:
sys.stdout.write('Do you want to use it directly? [Y/n] ')
choice = raw_input().lower()
if choice == '' or choice == 'y':
self._binimg = binimg
self._binimu = binimu
self._has_img = True
self._has_imu = True
return
elif choice == 'n':
break
else:
print('Please respond with \'y\' or \'n\'.')
self._convert()
def _convert(self):
import numpy as np
dataset = self.dataset_creator(self.path)
bindir = os.path.splitext(self.path)[0]
if not os.path.exists(bindir):
os.makedirs(bindir)
binimg = os.path.join(bindir, BIN_IMG_NAME)
binimu = os.path.join(bindir, BIN_IMU_NAME)
print('save to binary files ...')
print(' binimg: {}'.format(binimg))
print(' binimu: {}'.format(binimu))
has_img = False
has_imu = False
with open(binimg, 'wb') as f_img, open(binimu, 'wb') as f_imu:
img_count = 0
imu_count = 0
for result in dataset.generate(What.img_left, What.imu):
if What.img_left in result:
img = result[What.img_left]
np.array([(
img.timestamp
)], dtype="f8").tofile(f_img)
img_count = img_count + 1
has_img = True
if What.imu in result:
imu = result[What.imu]
np.array([(
imu.timestamp,
imu.accel_x, imu.accel_y, imu.accel_z,
imu.gyro_x, imu.gyro_y, imu.gyro_z
)], dtype="f8, f8, f8, f8, f8, f8, f8").tofile(f_imu)
imu_count = imu_count + 1
has_imu = True
sys.stdout.write('\r img: {}, imu: {}'.format(img_count, imu_count))
sys.stdout.write('\n')
# pylint: disable=attribute-defined-outside-init
self._binimg = binimg
self._binimu = binimu
self._has_img = has_img
self._has_imu = has_imu
def stamp_analytics(self, args):
outdir = args.outdir
import numpy as np
if self.has_img:
# pd.cut fails on readonly arrays
# https://github.com/pandas-dev/pandas/issues/18773
# imgs = np.memmap(self._binimg, dtype=[
# ('t', 'f8')
# ], mode='r')
imgs = np.fromfile(self._binimg, dtype=[
('t', 'f8')
])
else:
sys.exit("Error: there are no imgs.")
if self.has_imu:
imus = np.memmap(self._binimu, dtype=[
('t', 'f8'),
('accel_x', 'f8'), ('accel_y', 'f8'), ('accel_z', 'f8'),
('gyro_x', 'f8'), ('gyro_y', 'f8'), ('gyro_z', 'f8'),
], mode='r')
else:
sys.exit("Error: there are no imus.")
period_img = 1. / args.rate_img
period_imu = 1. / args.rate_imu
print('\nrate (Hz)')
print(' img: {}, imu: {}'.format(args.rate_img, args.rate_imu))
print('sample period (s)')
print(' img: {}, imu: {}'.format(period_img, period_imu))
imgs_t_diff = np.diff(imgs['t'])
imus_t_diff = np.diff(imus['t'])
print('\ndiff count')
print(' imgs: {}, imus: {}'.format(imgs['t'].size, imus['t'].size))
print(' imgs_t_diff: {}, imus_t_diff: {}'
.format(imgs_t_diff.size, imus_t_diff.size))
print('\ndiff where (factor={})'.format(args.factor))
where = np.argwhere(imgs_t_diff > period_img * (1 + args.factor))
print(' imgs where diff > {}*{} ({})'.format(period_img,
1 + args.factor, where.size))
for x in where:
print(' {:8d}: {:.16f}'.format(x[0], imgs_t_diff[x][0]))
where = np.argwhere(imgs_t_diff < period_img * (1 - args.factor))
print(' imgs where diff < {}*{} ({})'.format(period_img,
1 - args.factor, where.size))
for x in where:
print(' {:8d}: {:.16f}'.format(x[0], imgs_t_diff[x][0]))
where = np.argwhere(imus_t_diff > period_imu * (1 + args.factor))
print(' imus where diff > {}*{} ({})'.format(period_imu,
1 + args.factor, where.size))
for x in where:
print(' {:8d}: {:.16f}'.format(x[0], imus_t_diff[x][0]))
where = np.argwhere(imus_t_diff < period_imu * (1 - args.factor))
print(' imus where diff < {}*{} ({})'.format(period_imu,
1 - args.factor, where.size))
for x in where:
print(' {:8d}: {:.16f}'.format(x[0], imus_t_diff[x][0]))
import pandas as pd
bins = imgs['t']
bins_n = imgs['t'].size
bins = pd.Series(data=bins).drop_duplicates(keep='first')
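    # pd.cut bins the imu timestamps by the deduplicated image timestamps, so
    # value_counts() below yields the number of imu samples that fall between
    # consecutive image frames.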
cats = pd.cut(imus['t'], bins)
print('\nimage timestamp duplicates: {}'.format(bins_n - bins.size))
self._plot(outdir, imgs_t_diff, imus_t_diff, cats.value_counts())
def _plot(self, outdir, imgs_t_diff, imus_t_diff, imgs_t_imus):
import matplotlib.pyplot as plt
import numpy as np
fig_1 = plt.figure(1, [16, 6])
fig_1.suptitle('Stamp Analytics')
fig_1.subplots_adjust(
left=0.1,
right=0.95,
top=0.85,
bottom=0.15,
wspace=0.4)
ax_imgs_t_diff = fig_1.add_subplot(131)
ax_imgs_t_diff.set_title('Image Timestamp Diff')
ax_imgs_t_diff.set_xlabel('diff index')
ax_imgs_t_diff.set_ylabel('diff (s)')
ax_imgs_t_diff.axis('auto')
ax_imus_t_diff = fig_1.add_subplot(132)
ax_imus_t_diff.set_title('Imu Timestamp Diff')
ax_imus_t_diff.set_xlabel('diff index')
ax_imus_t_diff.set_ylabel('diff (s)')
ax_imus_t_diff.axis('auto')
ax_imgs_t_imus = fig_1.add_subplot(133)
    ax_imgs_t_imus.set_title('Imu Count Per Image Interval')
    ax_imgs_t_imus.set_xlabel('interval index')
ax_imgs_t_imus.set_ylabel('imu count')
ax_imgs_t_imus.axis('auto')
ax_imgs_t_diff.set_xlim([0, imgs_t_diff.size])
ax_imgs_t_diff.plot(imgs_t_diff)
ax_imus_t_diff.set_xlim([0, imus_t_diff.size])
ax_imus_t_diff.plot(imus_t_diff)
# print(imgs_t_imus.values)
# imgs_t_imus.plot(kind='line', ax=ax_imgs_t_imus)
data = imgs_t_imus.values
ax_imgs_t_imus.set_xlim([0, data.size])
ax_imgs_t_imus.set_ylim([np.min(data) - 1, np.max(data) + 1])
ax_imgs_t_imus.plot(data)
if outdir:
figpath = os.path.join(outdir, RESULT_FIGURE)
print('\nsave figure to:\n {}'.format(figpath))
if not os.path.exists(outdir):
os.makedirs(outdir)
fig_1.savefig(figpath, dpi=100)
plt.show()
@property
def has_img(self):
return self._has_img
@property
def has_imu(self):
return self._has_imu
def _parse_args():
import argparse
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
formatter_class=argparse.RawTextHelpFormatter,
description='usage examples:'
'\n python %(prog)s -i DATASET')
parser.add_argument(
'-i',
'--input',
dest='input',
metavar='DATASET',
required=True,
help='the input dataset path')
parser.add_argument(
'-o',
'--outdir',
dest='outdir',
metavar='OUTDIR',
help='the output directory')
parser.add_argument(
'-c',
'--config',
dest='config',
metavar='CONFIG',
help='yaml config file about input dataset')
parser.add_argument(
'-f',
'--factor',
dest='factor',
metavar='FACTOR',
default=0.1,
type=float,
help='the wave factor (default: %(default)s)')
parser.add_argument(
'--rate-img',
dest='rate_img',
metavar='RATE',
default=25,
type=int,
help='the img rate (default: %(default)s)')
parser.add_argument(
'--rate-imu',
dest='rate_imu',
metavar='RATE',
default=500,
type=int,
help='the imu rate (default: %(default)s)')
return parser.parse_args()
def _main():
args = _parse_args()
dataset_path = args.input
if not dataset_path or not os.path.exists(dataset_path):
sys.exit('Error: the dataset path not exists, %s' % dataset_path)
dataset_path = os.path.normpath(dataset_path)
outdir = args.outdir
if not args.outdir:
outdir = os.path.splitext(dataset_path)[0]
else:
outdir = os.path.abspath(outdir)
args.outdir = outdir
print('stamp analytics ...')
print(' input: %s' % dataset_path)
print(' outdir: %s' % outdir)
def dataset_creator(path):
print('open dataset ...')
if args.config:
import yaml
config = yaml.load(file(args.config, 'r'))
model = config['dataset']
if model == 'rosbag':
dataset = ROSBag(path, **config['rosbag'])
elif model == 'mynteye':
dataset = MYNTEYE(path)
else:
sys.exit('Error: dataset model not supported {}'.format(model))
else:
dataset = ROSBag(path,
topic_img_left='/mynteye/left/image_raw',
topic_imu='/mynteye/imu/data_raw')
return dataset
dataset = BinDataset(dataset_path, dataset_creator)
dataset.stamp_analytics(args)
print('stamp analytics done')
if __name__ == '__main__':
_main()
|
[
"sys.stdout.write",
"matplotlib.pyplot.figure",
"os.path.isfile",
"os.path.join",
"os.path.abspath",
"os.path.exists",
"numpy.max",
"os.path.normpath",
"matplotlib.pyplot.show",
"os.path.basename",
"data.ROSBag",
"pandas.cut",
"numpy.min",
"pandas.Series",
"numpy.argwhere",
"numpy.memmap",
"sys.exit",
"os.makedirs",
"numpy.fromfile",
"numpy.diff",
"numpy.array",
"os.path.splitext",
"data.MYNTEYE"
] |
[((839, 876), 'os.path.join', 'os.path.join', (['TOOLBOX_DIR', '"""internal"""'], {}), "(TOOLBOX_DIR, 'internal')\n", (851, 876), False, 'import os\n'), ((9640, 9670), 'os.path.normpath', 'os.path.normpath', (['dataset_path'], {}), '(dataset_path)\n', (9656, 9670), False, 'import os\n'), ((795, 820), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (810, 820), False, 'import os\n'), ((1416, 1450), 'os.path.join', 'os.path.join', (['bindir', 'BIN_IMG_NAME'], {}), '(bindir, BIN_IMG_NAME)\n', (1428, 1450), False, 'import os\n'), ((1464, 1498), 'os.path.join', 'os.path.join', (['bindir', 'BIN_IMU_NAME'], {}), '(bindir, BIN_IMU_NAME)\n', (1476, 1498), False, 'import os\n'), ((2331, 2365), 'os.path.join', 'os.path.join', (['bindir', 'BIN_IMG_NAME'], {}), '(bindir, BIN_IMG_NAME)\n', (2343, 2365), False, 'import os\n'), ((2379, 2413), 'os.path.join', 'os.path.join', (['bindir', 'BIN_IMU_NAME'], {}), '(bindir, BIN_IMU_NAME)\n', (2391, 2413), False, 'import os\n'), ((4585, 4603), 'numpy.diff', 'np.diff', (["imgs['t']"], {}), "(imgs['t'])\n", (4592, 4603), True, 'import numpy as np\n'), ((4622, 4640), 'numpy.diff', 'np.diff', (["imus['t']"], {}), "(imus['t'])\n", (4629, 4640), True, 'import numpy as np\n'), ((4915, 4972), 'numpy.argwhere', 'np.argwhere', (['(imgs_t_diff > period_img * (1 + args.factor))'], {}), '(imgs_t_diff > period_img * (1 + args.factor))\n', (4926, 4972), True, 'import numpy as np\n'), ((5212, 5269), 'numpy.argwhere', 'np.argwhere', (['(imgs_t_diff < period_img * (1 - args.factor))'], {}), '(imgs_t_diff < period_img * (1 - args.factor))\n', (5223, 5269), True, 'import numpy as np\n'), ((5509, 5566), 'numpy.argwhere', 'np.argwhere', (['(imus_t_diff > period_imu * (1 + args.factor))'], {}), '(imus_t_diff > period_imu * (1 + args.factor))\n', (5520, 5566), True, 'import numpy as np\n'), ((5806, 5863), 'numpy.argwhere', 'np.argwhere', (['(imus_t_diff < period_imu * (1 - args.factor))'], {}), '(imus_t_diff < period_imu * (1 - args.factor))\n', (5817, 5863), True, 'import numpy as np\n'), ((6237, 6260), 'pandas.cut', 'pd.cut', (["imus['t']", 'bins'], {}), "(imus['t'], bins)\n", (6243, 6260), True, 'import pandas as pd\n'), ((6545, 6567), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '[16, 6]'], {}), '(1, [16, 6])\n', (6555, 6567), True, 'import matplotlib.pyplot as plt\n'), ((8056, 8066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8064, 8066), True, 'import matplotlib.pyplot as plt\n'), ((9557, 9622), 'sys.exit', 'sys.exit', (["('Error: the dataset path not exists, %s' % dataset_path)"], {}), "('Error: the dataset path not exists, %s' % dataset_path)\n", (9565, 9622), False, 'import sys\n'), ((9785, 9808), 'os.path.abspath', 'os.path.abspath', (['outdir'], {}), '(outdir)\n', (9800, 9808), False, 'import os\n'), ((1372, 1399), 'os.path.splitext', 'os.path.splitext', (['self.path'], {}), '(self.path)\n', (1388, 1399), False, 'import os\n'), ((1506, 1528), 'os.path.isfile', 'os.path.isfile', (['binimg'], {}), '(binimg)\n', (1520, 1528), False, 'import os\n'), ((1533, 1555), 'os.path.isfile', 'os.path.isfile', (['binimu'], {}), '(binimu)\n', (1547, 1555), False, 'import os\n'), ((2226, 2253), 'os.path.splitext', 'os.path.splitext', (['self.path'], {}), '(self.path)\n', (2242, 2253), False, 'import os\n'), ((2268, 2290), 'os.path.exists', 'os.path.exists', (['bindir'], {}), '(bindir)\n', (2282, 2290), False, 'import os\n'), ((2298, 2317), 'os.makedirs', 'os.makedirs', (['bindir'], {}), '(bindir)\n', (2309, 2317), False, 'import os\n'), ((3396, 
3418), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3412, 3418), False, 'import sys\n'), ((3892, 3938), 'numpy.fromfile', 'np.fromfile', (['self._binimg'], {'dtype': "[('t', 'f8')]"}), "(self._binimg, dtype=[('t', 'f8')])\n", (3903, 3938), True, 'import numpy as np\n'), ((3973, 4010), 'sys.exit', 'sys.exit', (['"""Error: there are no imgs."""'], {}), "('Error: there are no imgs.')\n", (3981, 4010), False, 'import sys\n'), ((4046, 4219), 'numpy.memmap', 'np.memmap', (['self._binimu'], {'dtype': "[('t', 'f8'), ('accel_x', 'f8'), ('accel_y', 'f8'), ('accel_z', 'f8'), (\n 'gyro_x', 'f8'), ('gyro_y', 'f8'), ('gyro_z', 'f8')]", 'mode': '"""r"""'}), "(self._binimu, dtype=[('t', 'f8'), ('accel_x', 'f8'), ('accel_y',\n 'f8'), ('accel_z', 'f8'), ('gyro_x', 'f8'), ('gyro_y', 'f8'), ('gyro_z',\n 'f8')], mode='r')\n", (4055, 4219), True, 'import numpy as np\n'), ((4267, 4304), 'sys.exit', 'sys.exit', (['"""Error: there are no imus."""'], {}), "('Error: there are no imus.')\n", (4275, 4304), False, 'import sys\n'), ((7857, 7892), 'os.path.join', 'os.path.join', (['outdir', 'RESULT_FIGURE'], {}), '(outdir, RESULT_FIGURE)\n', (7869, 7892), False, 'import os\n'), ((8271, 8297), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (8287, 8297), False, 'import os\n'), ((9523, 9551), 'os.path.exists', 'os.path.exists', (['dataset_path'], {}), '(dataset_path)\n', (9537, 9551), False, 'import os\n'), ((9730, 9760), 'os.path.splitext', 'os.path.splitext', (['dataset_path'], {}), '(dataset_path)\n', (9746, 9760), False, 'import os\n'), ((10366, 10460), 'data.ROSBag', 'ROSBag', (['path'], {'topic_img_left': '"""/mynteye/left/image_raw"""', 'topic_imu': '"""/mynteye/imu/data_raw"""'}), "(path, topic_img_left='/mynteye/left/image_raw', topic_imu=\n '/mynteye/imu/data_raw')\n", (10372, 10460), False, 'from data import ROSBag, MYNTEYE, What\n'), ((1706, 1764), 'sys.stdout.write', 'sys.stdout.write', (['"""Do you want to use it directly? [Y/n] """'], {}), "('Do you want to use it directly? [Y/n] ')\n", (1722, 1764), False, 'import sys\n'), ((6175, 6195), 'pandas.Series', 'pd.Series', ([], {'data': 'bins'}), '(data=bins)\n', (6184, 6195), True, 'import pandas as pd\n'), ((7961, 7983), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7975, 7983), False, 'import os\n'), ((7993, 8012), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (8004, 8012), False, 'import os\n'), ((10160, 10192), 'data.ROSBag', 'ROSBag', (['path'], {}), "(path, **config['rosbag'])\n", (10166, 10192), False, 'from data import ROSBag, MYNTEYE, What\n'), ((7758, 7770), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (7764, 7770), True, 'import numpy as np\n'), ((7776, 7788), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (7782, 7788), True, 'import numpy as np\n'), ((10242, 10255), 'data.MYNTEYE', 'MYNTEYE', (['path'], {}), '(path)\n', (10249, 10255), False, 'from data import ROSBag, MYNTEYE, What\n'), ((2829, 2866), 'numpy.array', 'np.array', (['[img.timestamp]'], {'dtype': '"""f8"""'}), "([img.timestamp], dtype='f8')\n", (2837, 2866), True, 'import numpy as np\n'), ((3044, 3186), 'numpy.array', 'np.array', (['[(imu.timestamp, imu.accel_x, imu.accel_y, imu.accel_z, imu.gyro_x, imu.\n gyro_y, imu.gyro_z)]'], {'dtype': '"""f8, f8, f8, f8, f8, f8, f8"""'}), "([(imu.timestamp, imu.accel_x, imu.accel_y, imu.accel_z, imu.gyro_x,\n imu.gyro_y, imu.gyro_z)], dtype='f8, f8, f8, f8, f8, f8, f8')\n", (3052, 3186), True, 'import numpy as np\n')]
|
import json
import numpy as np
import pkg_resources
import pickle
from .. import utils
from .. import ExcursionProblem
datafile = pkg_resources.resource_filename('excursion','testcases/data/checkmate_dense.json')
def modify(zv):
    return np.log(zv) - np.log(0.05)
truthX, truthy_obs, truthy_exp = [], [], []
for p, _, result in json.load(open(datafile))['precomputed']:
    if p[0] < p[1] + 200: continue
    truthX.append(p)
    truthy_obs.append(max(float(result[1]['observed_CLs']), 0.001) if result[1] else 0.001)
    truthy_exp.append(max(float(result[1]['expected_CLs']), 0.001) if result[1] else 0.001)
truthX = np.array(truthX)
truthy_obs = np.array(truthy_obs)
truthy_obs = modify(truthy_obs)
truthy_exp = np.array(truthy_exp)
truthy_exp = modify(truthy_exp)
import sklearn.preprocessing
scaler = sklearn.preprocessing.MinMaxScaler()
scaler.fit(truthX)
truthX = scaler.transform(truthX)
picklefile = pkg_resources.resource_filename('excursion','testcases/data/checkmate.pkl')
d = pickle.load(open(picklefile,'rb'))
def truth_obs(X):
    return 2*d['obs'].predict(X)
def truth_exp(X):
    return 2*d['exp'].predict(X)
thresholds = [modify(0.05)]
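# Editorial note: modify(0.05) = log(0.05) - log(0.05) = 0, so this threshold marks the
# excursion level at which the (observed/expected) CLs value crosses 0.05.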
functions = [truth_obs, truth_exp]
def invalid_region(x):
    oX = scaler.inverse_transform(x)
    return oX[:,0] < oX[:,1] + 202
exp_and_obs = ExcursionProblem(
functions, thresholds, ndim = 2,
invalid_region = invalid_region,
plot_npoints=[350,350]
)
|
[
"numpy.array",
"numpy.log",
"pkg_resources.resource_filename"
] |
[((132, 219), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""excursion"""', '"""testcases/data/checkmate_dense.json"""'], {}), "('excursion',\n 'testcases/data/checkmate_dense.json')\n", (163, 219), False, 'import pkg_resources\n'), ((618, 634), 'numpy.array', 'np.array', (['truthX'], {}), '(truthX)\n', (626, 634), True, 'import numpy as np\n'), ((649, 669), 'numpy.array', 'np.array', (['truthy_obs'], {}), '(truthy_obs)\n', (657, 669), True, 'import numpy as np\n'), ((716, 736), 'numpy.array', 'np.array', (['truthy_exp'], {}), '(truthy_exp)\n', (724, 736), True, 'import numpy as np\n'), ((913, 989), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""excursion"""', '"""testcases/data/checkmate.pkl"""'], {}), "('excursion', 'testcases/data/checkmate.pkl')\n", (944, 989), False, 'import pkg_resources\n'), ((243, 253), 'numpy.log', 'np.log', (['zv'], {}), '(zv)\n', (249, 253), True, 'import numpy as np\n'), ((254, 266), 'numpy.log', 'np.log', (['(0.05)'], {}), '(0.05)\n', (260, 266), True, 'import numpy as np\n')]
|
from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import pandas as pd
import ta
from ta import add_all_ta_features
from ta.utils import dropna
from ta.volatility import BollingerBands
# Load datas
df = pd.read_csv(r'C:\mlvisualtrader\financial_data\Binance_BTCUSDT_1h_Backtest.csv', sep=',')
df = df.iloc[: , 1:]
# Clean NaN values
df = dropna(df)
df["ema"] = ta.trend.EMAIndicator(df["Close"], window = 14, fillna = False).ema_indicator()
df["sma"] = ta.trend.SMAIndicator(df["Close"], window = 14, fillna = False).sma_indicator()
# Add bollinger band high indicator filling nans values
df["bb_high_indicator"] = ta.volatility.bollinger_hband_indicator(
df["Close"], window=20, window_dev=2, fillna=True
).bollinger_hband_indicator()
# Add bollinger band low indicator filling nans values
df["bb_low_indicator"] = ta.volatility.bollinger_lband_indicator(
df["Close"], window=20, window_dev=2, fillna=True
).bollinger_lband_indicator()
asd = ta.volatility.bollinger_lband_indicator(
df["Close"], window=20, window_dev=2, fillna=True
)
df = df.to_csv(r'C:\mlvisualtrader\financial_data\Binance_BTCUSDT_1h_Backtest_with_indicators.csv')
ad = genfromtxt(r'C:/mlvisualtrader/financial_data/Binance_BTCUSDT_1h_Backtest_with_indicators.csv', delimiter=',' ,dtype=str, skip_header=1)
ad = np.delete(ad,0,1)
pd = np.flipud(ad)
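# Note (editorial): from here on the name `pd` is rebound to this numpy array and no
# longer refers to the pandas module imported above.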
buy_dir = 'C://mlvisualtrader//data//train//buy//'
sell_dir = 'C://mlvisualtrader//data//train//sell//'
neutral_dir = 'C://mlvisualtrader//data//train//neutral//'
def convolve_sma(array, period):
    return np.convolve(array, np.ones((period,))/period, mode='valid')
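# Illustrative example (not from the original script): convolve_sma([1, 2, 3, 4, 5], 3)
# averages each 3-sample window and returns array([2., 3., 4.]), i.e. a simple moving average.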
def graphwerk(start, finish):
    open = []
    high = []
    low = []
    close = []
    volume = []
    date = []
    hlc3 = []
    bbupper = []
    bblower = []
    mySMA = []
    myEMA = []
    for x in range(finish-start):
        # The column layout below is valid for the eurusd.csv file. Other financial data files
        # use different column orders, so check which columns hold open, high and close for your data.
        start = start + 1
        open.append(float(pd[start][1]))
        high.append(float(pd[start][2]))
        low.append(float(pd[start][3]))
        close.append(float(pd[start][4]))
        volume.append(float(pd[start][5])*100)
        date.append(pd[start][0])
        hlc3temp = (float(pd[start][2]) + float(pd[start][3]) + float(pd[start][4]))/3
        hlc3.append(hlc3temp)
        myEMA.append(float(pd[start][6]))
        mySMA.append(float(pd[start][7]))
        bbupper.append(float(pd[start][8]))
        bblower.append(float(pd[start][9]))
    close_next = float(pd[finish][4])
    sma = convolve_sma(hlc3, 7)
    smb = list(sma)
    diff = sma[-1] - sma[-2]
    for x in range(len(close)-len(smb)):
        smb.append(smb[-1]+diff)
    fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor='w', edgecolor='k')
    dx = fig.add_subplot(111)
    dx.grid(False)
    dx.set_xticklabels([])
    dx.set_yticklabels([])
    dx.xaxis.set_visible(False)
    dx.yaxis.set_visible(False)
    dx.axis('off')
    ax2 = dx.twinx()
    a = mpl_finance.volume_overlay(ax2, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=0)
    ax2.add_collection(a)
    ax2.grid(False)
    ax2.set_xticklabels([])
    ax2.set_yticklabels([])
    ax2.xaxis.set_visible(False)
    ax2.yaxis.set_visible(False)
    ax2.axis('off')
    mpl_finance.candlestick2_ochl(dx, open, close, high, low, width=1.5, colorup='g', colordown='r', alpha=0.5)
    plt.autoscale()
    plt.autoscale(ax2)
    plt.plot(smb, color="blue", linewidth=10, alpha=0.25)
    plt.plot(bbupper, color="black", linewidth=10, alpha=0.25)
    plt.plot(bblower, color="orange", linewidth=10, alpha=0.25)
    plt.plot(myEMA, color="green", linewidth=10, alpha=0.25)
    plt.plot(mySMA, color="yellow", linewidth=10, alpha=0.25)
    #plt.plot(smblong, color="black", linewidth=10, alpha=0.5)
    plt.axis('off')
    comp_ratio = close_next / close[-1]
    print(comp_ratio)
    if close_next/close[-1] > 1.01:
        print('last value: ' + str(close[-1]))
        print('next value: ' + str(close_next))
        print('buy')
        plt.savefig(buy_dir + str(uuid.uuid4()) + '.jpg', bbox_inches='tight')
    elif close_next/close[-1] < 0.99:
        print('last value: ' + str(close[-1]))
        print('next value: ' + str(close_next))
        print('sell')
        plt.savefig(sell_dir + str(uuid.uuid4()) + '.jpg', bbox_inches='tight')
    else:
        print('price change within the neutral band')
        print('last value: ' + str(close[-1]))
        print('next value: ' + str(close_next))
        print('neutral')
        plt.savefig(neutral_dir + str(uuid.uuid4()) + '.jpg', bbox_inches='tight')
    #plt.show()
    open.clear()
    close.clear()
    volume.clear()
    high.clear()
    low.clear()
    hlc3.clear()
    plt.cla()
    plt.clf()
iter_count = int(len(pd)/4)
print(iter_count)
iter = 0
for x in range(len(pd)-4):
    graphwerk(iter, iter+12)  # actually only 12; 13 was used as a check
    iter = iter + 2
|
[
"matplotlib.pyplot.clf",
"pandas.read_csv",
"numpy.ones",
"matplotlib.pyplot.figure",
"mpl_finance.candlestick2_ochl",
"ta.utils.dropna",
"ta.volatility.bollinger_hband_indicator",
"ta.trend.EMAIndicator",
"numpy.genfromtxt",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.cla",
"ta.trend.SMAIndicator",
"numpy.flipud",
"ta.volatility.bollinger_lband_indicator",
"numpy.delete",
"uuid.uuid4",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"mpl_finance.volume_overlay"
] |
[((264, 364), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\mlvisualtrader\\\\financial_data\\\\Binance_BTCUSDT_1h_Backtest.csv"""'], {'sep': '""","""'}), "(\n 'C:\\\\mlvisualtrader\\\\financial_data\\\\Binance_BTCUSDT_1h_Backtest.csv',\n sep=',')\n", (275, 364), True, 'import pandas as pd\n'), ((399, 409), 'ta.utils.dropna', 'dropna', (['df'], {}), '(df)\n', (405, 409), False, 'from ta.utils import dropna\n'), ((1017, 1112), 'ta.volatility.bollinger_lband_indicator', 'ta.volatility.bollinger_lband_indicator', (["df['Close']"], {'window': '(20)', 'window_dev': '(2)', 'fillna': '(True)'}), "(df['Close'], window=20, window_dev=\n 2, fillna=True)\n", (1056, 1112), False, 'import ta\n'), ((1220, 1365), 'numpy.genfromtxt', 'genfromtxt', (['"""C:/mlvisualtrader/financial_data/Binance_BTCUSDT_1h_Backtest_with_indicators.csv"""'], {'delimiter': '""","""', 'dtype': 'str', 'skip_header': '(1)'}), "(\n 'C:/mlvisualtrader/financial_data/Binance_BTCUSDT_1h_Backtest_with_indicators.csv'\n , delimiter=',', dtype=str, skip_header=1)\n", (1230, 1365), False, 'from numpy import genfromtxt\n'), ((1362, 1381), 'numpy.delete', 'np.delete', (['ad', '(0)', '(1)'], {}), '(ad, 0, 1)\n', (1371, 1381), True, 'import numpy as np\n'), ((1385, 1398), 'numpy.flipud', 'np.flipud', (['ad'], {}), '(ad)\n', (1394, 1398), True, 'import numpy as np\n'), ((2860, 2931), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': '(3, 3)', 'dpi': '(50)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=1, figsize=(3, 3), dpi=50, facecolor='w', edgecolor='k')\n", (2870, 2931), True, 'import matplotlib.pyplot as plt\n'), ((3147, 3251), 'mpl_finance.volume_overlay', 'mpl_finance.volume_overlay', (['ax2', 'open', 'close', 'volume'], {'width': '(0.4)', 'colorup': '"""b"""', 'colordown': '"""b"""', 'alpha': '(0)'}), "(ax2, open, close, volume, width=0.4, colorup='b',\n colordown='b', alpha=0)\n", (3173, 3251), False, 'import mpl_finance\n'), ((3440, 3551), 'mpl_finance.candlestick2_ochl', 'mpl_finance.candlestick2_ochl', (['dx', 'open', 'close', 'high', 'low'], {'width': '(1.5)', 'colorup': '"""g"""', 'colordown': '"""r"""', 'alpha': '(0.5)'}), "(dx, open, close, high, low, width=1.5,\n colorup='g', colordown='r', alpha=0.5)\n", (3469, 3551), False, 'import mpl_finance\n'), ((3552, 3567), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (3565, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3572, 3590), 'matplotlib.pyplot.autoscale', 'plt.autoscale', (['ax2'], {}), '(ax2)\n', (3585, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3595, 3648), 'matplotlib.pyplot.plot', 'plt.plot', (['smb'], {'color': '"""blue"""', 'linewidth': '(10)', 'alpha': '(0.25)'}), "(smb, color='blue', linewidth=10, alpha=0.25)\n", (3603, 3648), True, 'import matplotlib.pyplot as plt\n'), ((3653, 3711), 'matplotlib.pyplot.plot', 'plt.plot', (['bbupper'], {'color': '"""black"""', 'linewidth': '(10)', 'alpha': '(0.25)'}), "(bbupper, color='black', linewidth=10, alpha=0.25)\n", (3661, 3711), True, 'import matplotlib.pyplot as plt\n'), ((3716, 3775), 'matplotlib.pyplot.plot', 'plt.plot', (['bblower'], {'color': '"""orange"""', 'linewidth': '(10)', 'alpha': '(0.25)'}), "(bblower, color='orange', linewidth=10, alpha=0.25)\n", (3724, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3836), 'matplotlib.pyplot.plot', 'plt.plot', (['myEMA'], {'color': '"""green"""', 'linewidth': '(10)', 'alpha': '(0.25)'}), "(myEMA, color='green', linewidth=10, alpha=0.25)\n", (3788, 3836), True, 'import matplotlib.pyplot as plt\n'), 
((3841, 3898), 'matplotlib.pyplot.plot', 'plt.plot', (['mySMA'], {'color': '"""yellow"""', 'linewidth': '(10)', 'alpha': '(0.25)'}), "(mySMA, color='yellow', linewidth=10, alpha=0.25)\n", (3849, 3898), True, 'import matplotlib.pyplot as plt\n'), ((3971, 3986), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3979, 3986), True, 'import matplotlib.pyplot as plt\n'), ((4938, 4947), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4945, 4947), True, 'import matplotlib.pyplot as plt\n'), ((4952, 4961), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4959, 4961), True, 'import matplotlib.pyplot as plt\n'), ((423, 482), 'ta.trend.EMAIndicator', 'ta.trend.EMAIndicator', (["df['Close']"], {'window': '(14)', 'fillna': '(False)'}), "(df['Close'], window=14, fillna=False)\n", (444, 482), False, 'import ta\n'), ((515, 574), 'ta.trend.SMAIndicator', 'ta.trend.SMAIndicator', (["df['Close']"], {'window': '(14)', 'fillna': '(False)'}), "(df['Close'], window=14, fillna=False)\n", (536, 574), False, 'import ta\n'), ((678, 773), 'ta.volatility.bollinger_hband_indicator', 'ta.volatility.bollinger_hband_indicator', (["df['Close']"], {'window': '(20)', 'window_dev': '(2)', 'fillna': '(True)'}), "(df['Close'], window=20, window_dev=\n 2, fillna=True)\n", (717, 773), False, 'import ta\n'), ((884, 979), 'ta.volatility.bollinger_lband_indicator', 'ta.volatility.bollinger_lband_indicator', (["df['Close']"], {'window': '(20)', 'window_dev': '(2)', 'fillna': '(True)'}), "(df['Close'], window=20, window_dev=\n 2, fillna=True)\n", (923, 979), False, 'import ta\n'), ((1627, 1645), 'numpy.ones', 'np.ones', (['(period,)'], {}), '((period,))\n', (1634, 1645), True, 'import numpy as np\n'), ((4252, 4264), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4262, 4264), False, 'import uuid\n'), ((4499, 4511), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4509, 4511), False, 'import uuid\n'), ((4769, 4781), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4779, 4781), False, 'import uuid\n')]
|
# This scripts assumes that the dataframe has been created and saved in data.txt
import sys
sys.path.insert(1, '../src/MyAIGuide/utilities')
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from dataFrameUtilities import check_if_zero_then_adjust_var_and_place_in_data, insert_data_to_tracker_mean_steps, subset_period, transformPain, predict_values
from sklearn.preprocessing import MinMaxScaler
import peaksAnalysisFunctions
# Getting data
input = open("../data/preprocessed/preprocessedDataParticipant2.txt", "rb")
data = pickle.load(input)
input.close()
data = data[data.index >= '2018-05-11']
data = data[data.index <= '2020-05-04']
# data["kneepain"] = transformPain(data["kneepain"])
data["kneepain"] = data["kneepain"].fillna(1)
# Steps
cols = ['movessteps', 'cum_gain_walking', 'googlefitsteps', 'elevation_gain', 'oruxcumulatedelevationgain', 'kneepain']
for idx, val in enumerate(cols):
    data[val] = data[val] / np.max(data[val])
data["steps"] = data[["movessteps", "googlefitsteps"]].max(axis=1)
data["denivelation"] = data[["cum_gain_walking", "elevation_gain", "oruxcumulatedelevationgain"]].max(axis=1)
fig, axes = plt.subplots(nrows=8, ncols=1)
cols = cols + ["steps", "denivelation"]
for idx, val in enumerate(cols):
    if val == 'oruxcumulatedelevationgain':
        data[val].plot(ax=axes[idx], color='green', marker='o', linestyle='dashed', markersize=2)
    else:
        data[val].plot(ax=axes[idx])
    axes[idx].legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# Analysis parameters
parameters = {'rollingMeanWindow': 15, 'rollingMinMaxScalerWindow': 90, 'rollingMedianWindow': 15, 'minProminenceForPeakDetect': 0.05, 'windowForLocalPeakMinMaxFind': 7, 'plotGraph': True, 'plotZoomedGraph': False, 'plotGraphStrainDuringDescendingPain': False}
plotGraphs = True
peaksAnalysisFunctions.calculateForAllRegionsParticipant2(data, parameters, plotGraphs)
|
[
"peaksAnalysisFunctions.calculateForAllRegionsParticipant2",
"matplotlib.pyplot.show",
"sys.path.insert",
"numpy.max",
"pickle.load",
"matplotlib.pyplot.subplots"
] |
[((93, 141), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../src/MyAIGuide/utilities"""'], {}), "(1, '../src/MyAIGuide/utilities')\n", (108, 141), False, 'import sys\n'), ((589, 607), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (600, 607), False, 'import pickle\n'), ((1208, 1238), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(8)', 'ncols': '(1)'}), '(nrows=8, ncols=1)\n', (1220, 1238), True, 'import matplotlib.pyplot as plt\n'), ((1553, 1563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1561, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1959), 'peaksAnalysisFunctions.calculateForAllRegionsParticipant2', 'peaksAnalysisFunctions.calculateForAllRegionsParticipant2', (['data', 'parameters', 'plotGraphs'], {}), '(data, parameters,\n plotGraphs)\n', (1925, 1959), False, 'import peaksAnalysisFunctions\n'), ((991, 1008), 'numpy.max', 'np.max', (['data[val]'], {}), '(data[val])\n', (997, 1008), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as pyplot
import matplotlib as mplot
from matplotlib import rc
import easygui as eg
import matplotlib.cm as cm
def time_series(f):
line = f.readline().split('\t')
# For some reason the last element of 'line' comes with a '\n' last element. Removing this '\n' character:
line[-1] = line[-1][:-1]
# Storing the names of the curves in the signal_names variable
signal_names = line
# If the second line contains the "Step information" string, this means that the file contains a stepped dataset.
line = f.readline()
if 'Step Information' in line:
stepped_dataset = True
f.seek(0)
f.readline()
# step_data stores the raw data, including time values, as a list of two-dimensional blocks (one per step). For a stepped dataset this gives a three-dimensional structure: the first index is the step run, the second is the data sample, and the third is the column (signal) index.
temp_series_data = []
step_data = []
step_info = []
for line in f:
if 'Step Information' in line:
# if the line contains "Step Information" then a step timeseries has ended. The temp time series is stored and the temp_series_data variable is cleared
step_info.append(line.replace('Step Information: ', '').replace('\n',''))
if temp_series_data != []:
step_data.append(temp_series_data)
temp_series_data = []
else:
line_temp = [float(x) for x in line.split('\t')]
temp_series_data.append(line_temp)
# The last step does not have a following "Step Information" line to trigger the append of temp_series_data into step_data. Hence, when the file ends, this line forcefully appends the last temp_series_data
step_data.append(temp_series_data)
# Selecting target curves
step_choice = eg.multchoicebox('What steps do you want to plot?' , 'LT2PY step selecion', step_info)
if step_choice == None:
print('No plot steps selected!')
raise SystemExit(0)
# Once the choices are chosen, step_choice_index stores the indexes of the curves chosen relative to the curve data stored either in step_data or series_data
step_choice_index = []
for x in step_choice:
for k in range(len(step_info)):
if x == step_info[k]:
step_choice_index.append('{}'.format(k))
step_choice_index = [int(x) for x in step_choice_index]
# Same drill now for choosing the target curves
curve_choice = eg.multchoicebox('What curves do you want to plot?' , 'LT2PY curve plot selecion', signal_names[1:])
if curve_choice == None:
print('No plot curves selected!')
raise SystemExit(0)
curve_choice_index = []
for x in curve_choice:
for k in range(len(signal_names)):
if x == signal_names[k]:
curve_choice_index.append('{}'.format(k))
curve_choice_index = [int(x) for x in curve_choice_index]
# In stepped data each step has a different time series with different time stamps, so the timestamp vector needs to be stored for every step
timestamps = []
for step in step_data:
timestamps.append([x[0] for x in step])
if eg.ynbox('Do you want to plot steps in individual figures?','Please confirm'): # show a Continue/Cancel dialog
fig_list = []
ax_list = []
for choice in step_choice:
temp_fig = pyplot.figure()
temp_ax = temp_fig.add_axes([0.1,0.1,0.8,0.8]);
fig_list.append(temp_fig)
ax_list.append(temp_ax)
for step_choice_number, ax_choice in zip(step_choice_index, ax_list):
for curve_choice_number in curve_choice_index:
ax_choice.plot(timestamps[step_choice_number], [x[curve_choice_number] for x in step_data[step_choice_number]],label=signal_names[curve_choice_number],linewidth=1)
for step_choice_number, ax in zip(step_choice_index, ax_list):
ax.legend(loc='best')
ax.grid(which='major')
ax.grid(which='minor',dashes=(5,2))
ax.set_xlim([min([min(x) for x in timestamps]),max([max(x) for x in timestamps])])
ax.set_title(step_info[step_choice_number])
else:
fig1 = pyplot.figure()
ax1 = fig1.add_axes([0.1,0.1,0.8,0.8]);
# If the data set is stepped but the user has only
if len(curve_choice) == 1:
colors = [cm.rainbow(x) for x in np.linspace(0, 1, len(step_choice))]
for step_choice_number, color_index in zip(step_choice_index, colors):
ax1.plot(timestamps[step_choice_number], [x[curve_choice_index[0]] for x in step_data[step_choice_number]],linewidth=1, label=step_info[step_choice_number], color = color_index)
else:
colors = [cm.rainbow(x) for x in np.linspace(0, 1, len(curve_choice))]
temp_signal_names = signal_names
for step_choice_number in step_choice_index:
for curve_choice_number, color_index in zip(curve_choice_index, colors):
ax1.plot(timestamps[step_choice_number], [x[curve_choice_number] for x in step_data[step_choice_number]],label=temp_signal_names[curve_choice_number],linewidth=1, color=color_index)
temp_signal_names[curve_choice_number] = '_nolegend_'
ax1.legend(loc='best')
ax1.grid(which='major')
ax1.grid(which='minor',dashes=(5,2))
ax1.set_xlim([min([min(x) for x in timestamps]),max([max(x) for x in timestamps])])
else: #If the second line does not contain "Step information" then the dataset is not stepped.
stepped_dataset = False
f.seek(0)
f.readline()
# series_data stores the raw data, including time values. For a non-stepped dataset it is a two-dimensional array where the rows are the data samples and the columns are the curve (signal) indices.
series_data = []
for line in f:
line_temp = [float(x) for x in line.split('\t')]
series_data.append(line_temp)
series_data = np.array(series_data)
choice = eg.multchoicebox('What curves do you want to plot?' , 'LT2PY plot selecion', signal_names[1:])
if choice == None:
print('No plot curves selected!')
raise SystemExit(0)
choice_index = []
for x in choice:
for k in range(len(signal_names)):
if x == signal_names[k]:
choice_index.append('{}'.format(k))
choice_index = [int(x) for x in choice_index]
time_values = [x[0] for x in series_data]
fig1 = pyplot.figure();
ax1 = fig1.add_axes([0.1,0.1,0.8,0.8]);
for choice_number in choice_index:
ax1.plot(time_values, [x[choice_number] for x in series_data],label=signal_names[choice_number],linewidth=1)
ax1.legend(loc='best')
ax1.grid(which='major')
ax1.grid(which='minor',dashes=(5,2))
ax1.set_xlim([min(time_values),max(time_values)])
pyplot.show()
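# Usage sketch (illustrative only; the file name is hypothetical): this module expects an
# already-open, tab-separated LTspice text export, e.g.
#   with open('sim_output.txt') as f:
#       time_series(f)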
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.array",
"easygui.ynbox",
"matplotlib.cm.rainbow",
"easygui.multchoicebox"
] |
[((6534, 6547), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (6545, 6547), True, 'import matplotlib.pyplot as pyplot\n'), ((1805, 1894), 'easygui.multchoicebox', 'eg.multchoicebox', (['"""What steps do you want to plot?"""', '"""LT2PY step selecion"""', 'step_info'], {}), "('What steps do you want to plot?', 'LT2PY step selecion',\n step_info)\n", (1821, 1894), True, 'import easygui as eg\n'), ((2416, 2519), 'easygui.multchoicebox', 'eg.multchoicebox', (['"""What curves do you want to plot?"""', '"""LT2PY curve plot selecion"""', 'signal_names[1:]'], {}), "('What curves do you want to plot?',\n 'LT2PY curve plot selecion', signal_names[1:])\n", (2432, 2519), True, 'import easygui as eg\n'), ((3089, 3167), 'easygui.ynbox', 'eg.ynbox', (['"""Do you want to plot steps in individual figures?"""', '"""Please confirm"""'], {}), "('Do you want to plot steps in individual figures?', 'Please confirm')\n", (3097, 3167), True, 'import easygui as eg\n'), ((5717, 5738), 'numpy.array', 'np.array', (['series_data'], {}), '(series_data)\n', (5725, 5738), True, 'import numpy as np\n'), ((5751, 5848), 'easygui.multchoicebox', 'eg.multchoicebox', (['"""What curves do you want to plot?"""', '"""LT2PY plot selecion"""', 'signal_names[1:]'], {}), "('What curves do you want to plot?', 'LT2PY plot selecion',\n signal_names[1:])\n", (5767, 5848), True, 'import easygui as eg\n'), ((6179, 6194), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (6192, 6194), True, 'import matplotlib.pyplot as pyplot\n'), ((4020, 4035), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (4033, 4035), True, 'import matplotlib.pyplot as pyplot\n'), ((3283, 3298), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (3296, 3298), True, 'import matplotlib.pyplot as pyplot\n'), ((4178, 4191), 'matplotlib.cm.rainbow', 'cm.rainbow', (['x'], {}), '(x)\n', (4188, 4191), True, 'import matplotlib.cm as cm\n'), ((4519, 4532), 'matplotlib.cm.rainbow', 'cm.rainbow', (['x'], {}), '(x)\n', (4529, 4532), True, 'import matplotlib.cm as cm\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Bridge for using cclib data in PySCF (https://github.com/pyscf/pyscf)."""
from cclib.parser.utils import find_package, PeriodicTable, convertor
import numpy as np
l_sym2num = {"S": 0, "P": 1, "D": 2, "F": 3, "G": 4}
class MissingAttributeError(Exception):
    pass
_found_pyscf = find_package("pyscf")
if _found_pyscf:
    from pyscf import gto
def _check_pyscf(found_pyscf):
    if not found_pyscf:
        raise ImportError("You must install `pyscf` to use this function")
def makepyscf(data, charge=0, mult=1):
    """Create a Pyscf Molecule."""
    _check_pyscf(_found_pyscf)
    inputattrs = data.__dict__
    required_attrs = {"atomcoords", "atomnos"}
    missing = [x for x in required_attrs if not hasattr(data, x)]
    if missing:
        missing = " ".join(missing)
        raise MissingAttributeError(
            f"Could not create pyscf molecule due to missing attribute: {missing}"
        )
    mol = gto.Mole(
        atom=[
            [f"{data.atomnos[i]}", data.atomcoords[-1][i]] for i in range(data.natom)
        ],
        unit="Angstrom",
        charge=charge,
        multiplicity=mult
    )
    inputattr = data.__dict__
    pt = PeriodicTable()
    if "gbasis" in inputattr:
        basis = {}  # object for internal PySCF format
        uatoms, uatoms_idx = np.unique(
            data.atomnos, return_index=True
        )  # find unique atoms
        for idx, i in enumerate(uatoms_idx):
            curr_atom_basis = data.gbasis[i]
            for jdx, j in enumerate(curr_atom_basis):
                curr_l = j[0]
                curr_e_prim = j[1]
                new_list = [l_sym2num[f"{curr_l}"]]
                new_list += curr_e_prim
                if not f"{pt.element[uatoms[idx]]}" in basis:
                    basis[f"{pt.element[uatoms[idx]]}"] = [new_list]
                else:
                    basis[f"{pt.element[uatoms[idx]]}"].append(new_list)
        mol.basis = basis
        mol.cart = True
    return mol
def makepyscf_mos(ccdata, mol):
    """
    Returns pyscf formatted MO properties from a cclib object.

    Parameters
    ----------
    ccdata: cclib object
        cclib object from parsed output
    mol: pyscf Molecule object
        molecule object that must contain the mol.basis attribute

    Returns
    -------
    mo_coeff : n_spin x nmo x nao ndarray
        molecular coefficients, unnormalized according to pyscf standards
    mo_occ : array
        molecular orbital occupations
    mo_syms : array
        molecular orbital symmetry labels
    mo_energies : array
        molecular orbital energies in units of Hartree
    """
    inputattrs = ccdata.__dict__
    if "mocoeffs" in inputattrs:
        mol.build()
        s = mol.intor('int1e_ovlp')
        if np.shape(ccdata.mocoeffs)[0] == 1:
            mo_coeffs = np.einsum('i,ij->ij', np.sqrt(1/s.diagonal()), ccdata.mocoeffs[0].T)
            mo_occ = np.zeros(ccdata.nmo)
            mo_occ[:ccdata.homos[0]+1] = 2
            mo_energies = convertor(np.array(ccdata.moenergies), "eV", "hartree")
            if hasattr(ccdata, 'mosyms'):
                mo_syms = ccdata.mosyms
            else:
                mo_syms = np.full_like(ccdata.moenergies, 'A', dtype=str)
        elif np.shape(ccdata.mocoeffs)[0] == 2:
            mo_coeff_a = np.einsum('i,ij->ij', np.sqrt(1/s.diagonal()), ccdata.mocoeffs[0].T)
            mo_coeff_b = np.einsum('i,ij->ij', np.sqrt(1/s.diagonal()), ccdata.mocoeffs[1].T)
            mo_occ = np.zeros((2, ccdata.nmo))
            mo_occ[0, :ccdata.homos[0]+1] = 1
            mo_occ[1, :ccdata.homos[1]+1] = 1
            mo_coeffs = np.array([mo_coeff_a, mo_coeff_b])
            mo_energies = convertor(np.array(ccdata.moenergies), "eV", "hartree")
            if hasattr(ccdata, 'mosyms'):
                mo_syms = ccdata.mosyms
            else:
                mo_syms = np.full_like(ccdata.moenergies, 'A', dtype=str)
    return mo_coeffs, mo_occ, mo_syms, mo_energies
del find_package
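# Usage sketch (editorial addition, not part of cclib): how these helpers are typically
# combined with a parsed logfile and a pyscf SCF calculation. The logfile name is hypothetical.
#
#   from cclib.io import ccread
#   from pyscf import scf
#   data = ccread("water_b3lyp.log")          # any cclib-parsable output file
#   mol = makepyscf(data)                      # build the pyscf Molecule (uses gbasis if present)
#   mo_coeffs, mo_occ, mo_syms, mo_energies = makepyscf_mos(data, mol)
#   mf = scf.RHF(mol)                          # run a fresh SCF on the reconstructed molecule
#   mf.kernel()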
|
[
"numpy.full_like",
"numpy.zeros",
"numpy.shape",
"numpy.array",
"cclib.parser.utils.PeriodicTable",
"cclib.parser.utils.find_package",
"numpy.unique"
] |
[((504, 525), 'cclib.parser.utils.find_package', 'find_package', (['"""pyscf"""'], {}), "('pyscf')\n", (516, 525), False, 'from cclib.parser.utils import find_package, PeriodicTable, convertor\n'), ((1415, 1430), 'cclib.parser.utils.PeriodicTable', 'PeriodicTable', ([], {}), '()\n', (1428, 1430), False, 'from cclib.parser.utils import find_package, PeriodicTable, convertor\n'), ((1548, 1590), 'numpy.unique', 'np.unique', (['data.atomnos'], {'return_index': '(True)'}), '(data.atomnos, return_index=True)\n', (1557, 1590), True, 'import numpy as np\n'), ((3180, 3200), 'numpy.zeros', 'np.zeros', (['ccdata.nmo'], {}), '(ccdata.nmo)\n', (3188, 3200), True, 'import numpy as np\n'), ((3029, 3054), 'numpy.shape', 'np.shape', (['ccdata.mocoeffs'], {}), '(ccdata.mocoeffs)\n', (3037, 3054), True, 'import numpy as np\n'), ((3282, 3309), 'numpy.array', 'np.array', (['ccdata.moenergies'], {}), '(ccdata.moenergies)\n', (3290, 3309), True, 'import numpy as np\n'), ((3456, 3503), 'numpy.full_like', 'np.full_like', (['ccdata.moenergies', '"""A"""'], {'dtype': 'str'}), "(ccdata.moenergies, 'A', dtype=str)\n", (3468, 3503), True, 'import numpy as np\n'), ((3767, 3792), 'numpy.zeros', 'np.zeros', (['(2, ccdata.nmo)'], {}), '((2, ccdata.nmo))\n', (3775, 3792), True, 'import numpy as np\n'), ((3909, 3943), 'numpy.array', 'np.array', (['[mo_coeff_a, mo_coeff_b]'], {}), '([mo_coeff_a, mo_coeff_b])\n', (3917, 3943), True, 'import numpy as np\n'), ((3520, 3545), 'numpy.shape', 'np.shape', (['ccdata.mocoeffs'], {}), '(ccdata.mocoeffs)\n', (3528, 3545), True, 'import numpy as np\n'), ((3980, 4007), 'numpy.array', 'np.array', (['ccdata.moenergies'], {}), '(ccdata.moenergies)\n', (3988, 4007), True, 'import numpy as np\n'), ((4154, 4201), 'numpy.full_like', 'np.full_like', (['ccdata.moenergies', '"""A"""'], {'dtype': 'str'}), "(ccdata.moenergies, 'A', dtype=str)\n", (4166, 4201), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at <EMAIL>
# For commercial licensing contact, please contact <EMAIL>
import torch
import torch.nn as nn
import numpy as np
import pickle
import torch.nn.functional as F
class FLAMETex(nn.Module):
    """
    FLAME texture:
    https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64
    FLAME texture converted from BFM:
    https://github.com/TimoBolkart/BFM_to_FLAME
    """
    def __init__(self, config):
        super(FLAMETex, self).__init__()
        if config.tex_type == 'BFM':
            mu_key = 'MU'
            pc_key = 'PC'
            n_pc = 199
            tex_path = config.tex_path
            tex_space = np.load(tex_path)
            texture_mean = tex_space[mu_key].reshape(1, -1)
            texture_basis = tex_space[pc_key].reshape(-1, n_pc)
        elif config.tex_type == 'FLAME':
            mu_key = 'mean'
            pc_key = 'tex_dir'
            n_pc = 200
            tex_path = config.flame_tex_path
            tex_space = np.load(tex_path)
            texture_mean = tex_space[mu_key].reshape(1, -1)/255.
            texture_basis = tex_space[pc_key].reshape(-1, n_pc)/255.
        else:
            print('texture type ', config.tex_type, 'not exist!')
            raise NotImplementedError
        n_tex = config.n_tex
        num_components = texture_basis.shape[1]
        texture_mean = torch.from_numpy(texture_mean).float()[None, ...]
        texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...]
        self.register_buffer('texture_mean', texture_mean)
        self.register_buffer('texture_basis', texture_basis)

    def forward(self, texcode=None):
        '''
        texcode: [batchsize, n_tex]
        texture: [bz, 3, 256, 256], range: 0-1
        '''
        texture = self.texture_mean + (self.texture_basis*texcode[:, None, :]).sum(-1)
        texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)
        texture = F.interpolate(texture, [256, 256])
        texture = texture[:, [2, 1, 0], :, :]
        return texture
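# Minimal usage sketch (editorial; `cfg` stands in for the real config object and is not
# defined in this file):
#   flametex = FLAMETex(cfg)                 # cfg.tex_type in {'BFM', 'FLAME'}, plus n_tex and the texture path
#   texcode = torch.zeros(1, cfg.n_tex)      # [batchsize, n_tex] texture code
#   albedo = flametex(texcode)               # [1, 3, 256, 256] texture map, roughly in 0-1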
def texture_flame2smplx(cached_data, flame_texture, smplx_texture):
    ''' Convert flame texture map (face-only) into smplx texture map (includes body texture)
    TODO: pytorch version ==> grid sample
    '''
    if smplx_texture.shape[0] != smplx_texture.shape[1]:
        print('SMPL-X texture not squared (%d != %d)' % (smplx_texture.shape[0], smplx_texture.shape[1]))
        return
    if smplx_texture.shape[0] != cached_data['target_resolution']:
        print('SMPL-X texture size does not match cached image resolution (%d != %d)' % (smplx_texture.shape[0], cached_data['target_resolution']))
        return
    x_coords = cached_data['x_coords']
    y_coords = cached_data['y_coords']
    target_pixel_ids = cached_data['target_pixel_ids']
    source_uv_points = cached_data['source_uv_points']
    source_tex_coords = np.zeros_like(source_uv_points).astype(int)
    source_tex_coords[:, 0] = np.clip(flame_texture.shape[0]*(1.0-source_uv_points[:, 1]), 0.0, flame_texture.shape[0]).astype(int)
    source_tex_coords[:, 1] = np.clip(flame_texture.shape[1]*(source_uv_points[:, 0]), 0.0, flame_texture.shape[1]).astype(int)
    smplx_texture[y_coords[target_pixel_ids].astype(int), x_coords[target_pixel_ids].astype(int), :] = flame_texture[source_tex_coords[:, 0], source_tex_coords[:, 1]]
    return smplx_texture
|
[
"numpy.load",
"numpy.zeros_like",
"numpy.clip",
"torch.nn.functional.interpolate",
"torch.from_numpy"
] |
[((2552, 2586), 'torch.nn.functional.interpolate', 'F.interpolate', (['texture', '[256, 256]'], {}), '(texture, [256, 256])\n', (2565, 2586), True, 'import torch.nn.functional as F\n'), ((1263, 1280), 'numpy.load', 'np.load', (['tex_path'], {}), '(tex_path)\n', (1270, 1280), True, 'import numpy as np\n'), ((3474, 3505), 'numpy.zeros_like', 'np.zeros_like', (['source_uv_points'], {}), '(source_uv_points)\n', (3487, 3505), True, 'import numpy as np\n'), ((3550, 3647), 'numpy.clip', 'np.clip', (['(flame_texture.shape[0] * (1.0 - source_uv_points[:, 1]))', '(0.0)', 'flame_texture.shape[0]'], {}), '(flame_texture.shape[0] * (1.0 - source_uv_points[:, 1]), 0.0,\n flame_texture.shape[0])\n', (3557, 3647), True, 'import numpy as np\n'), ((3681, 3771), 'numpy.clip', 'np.clip', (['(flame_texture.shape[1] * source_uv_points[:, 0])', '(0.0)', 'flame_texture.shape[1]'], {}), '(flame_texture.shape[1] * source_uv_points[:, 0], 0.0, flame_texture\n .shape[1])\n', (3688, 3771), True, 'import numpy as np\n'), ((1598, 1615), 'numpy.load', 'np.load', (['tex_path'], {}), '(tex_path)\n', (1605, 1615), True, 'import numpy as np\n'), ((1969, 1999), 'torch.from_numpy', 'torch.from_numpy', (['texture_mean'], {}), '(texture_mean)\n', (1985, 1999), False, 'import torch\n'), ((2042, 2084), 'torch.from_numpy', 'torch.from_numpy', (['texture_basis[:, :n_tex]'], {}), '(texture_basis[:, :n_tex])\n', (2058, 2084), False, 'import torch\n')]
|
#################################################################################
# argument parser
#################################################################################
# --dataset: str, 'melanoma', 'oct', or 'chestx', dataset type
# --model_path: str, path to model weight '*/*.h5'
# --model_type: 'InceptionV3', 'VGG16', 'ResNet50'
# --norm_type: str, '2' or 'inf', norm type of UAPs
# --norm_rate: float, noise strength (zeta)
# --epsilon: float, attack strength (step size)
# --max_iter: int, maximum number of iterations for computing UAP (i_max)
# --freqdim: int, frequency dimension for Q_DCT
# --nb_samples: int, input dataset size
# --targeted: int, target class (negative value indicates non-targeted attacks)
# --save_path: str, path to output files
# --gpu: str, for os.environ["CUDA_VISIBLE_DEVICES"]
#################################################################################
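# Example invocation (editorial illustration; the script name and all argument values are
# hypothetical and should be replaced with your own):
#   python run_uap.py --dataset melanoma --model_path ./model/InceptionV3.h5 \
#     --model_type inceptionv3 --norm_type 2 --norm_rate 4.0 --epsilon 0.2 \
#     --max_iter 1000 --freqdim 28 --nb_samples 100 --targeted -1 \
#     --save_path ./results/run1 --gpu 0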
import warnings
warnings.filterwarnings('ignore')
import os, sys, gc, pdb, argparse
sys.stdout = os.fdopen(sys.stdout.fileno(), "w", buffering=1)
import numpy as np
import logging
import tensorflow as tf
from tensorflow.keras import utils
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.optimizers import SGD
from art.classifiers import KerasClassifier
from art.attacks.evasion import Universal_SimBA
from art.utils import random_sphere
from art.utils import projection
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import random  # needed by set_seed below; missing from the original import list
def set_seed(seed=200):
    tf.random.set_seed(seed)
    # optional
    # for numpy.random
    np.random.seed(seed)
    # for built-in random
    random.seed(seed)
    # for hash seed
    os.environ["PYTHONHASHSEED"] = str(seed)
seed=123
# Configure a logger to capture ART outputs; these are printed in console and the level of detail is set to INFO
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# check the starting time for computing total processing time
import time
start_time = time.time()
# set labels
label2nb_dict = {
'chestx':
{'NORMAL': 0, 'PNEUMONIA': 1},
'oct':
{'CNV': 0, 'DME': 1, 'DRUSEN': 2, 'NORMAL': 3},
'melanoma':
{'MEL': 0, 'NV': 1, 'BCC': 2,
'AKIEC': 3, 'BKL': 4, 'DF': 5, 'VASC': 6}
}
batch_size= 256
### UAP class ###
# classifier: classifier
# X_train: ndarray, input images
# y_train: ndarray, the labels of the input images
# X_test: ndarray, validation images
# y_test: ndarray, the labels of the validation images
# X_original_train: ndarray, training images
# y_original_train: ndarray, the labels of the training images
# norm_type: 2 or np.inf, norm type of UAPs
# norm_size: float, noise size (xi)
# epsilon: float, attack strength (step size)
# freqdim: int, frequency dimension for Q_DCT
# max_iter: int, maximum number of iterations for computing UAP.
# targeted: int, target class (negative value indicates non-targeted attacks)
# save_path: str, path to output files
class my_UAP:
def __init__(
self,
classifier,
X_train, y_train,
X_test, y_test,
X_original_train, y_original_train,
norm_type,
norm_size,
epsilon,
freqdim,
max_iter,
targeted,
save_path,
):
self.classifier = classifier
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
self.X_original_train = X_original_train
self.y_original_train = y_original_train
self.norm_type = norm_type
self.norm_size = norm_size
self.epsilon = epsilon
self.freqdim = freqdim
self.max_iter = max_iter
self.targeted = targeted
self.save_path = save_path
### compute the attack success rate
# images: ndarray, target image set
# noise: ndarray, UAP
def my_calc_fooling_ratio(self, images=0, noise=0):
adv_images = images + noise
if self.targeted < 0:
preds = np.argmax(self.classifier.predict(images, batch_size=batch_size), axis=1)
preds_adv = np.argmax(self.classifier.predict(adv_images, batch_size=batch_size), axis=1)
fooling_ratio = np.sum(preds_adv != preds) / images.shape[0]
return fooling_ratio
else:
preds_adv = np.argmax(self.classifier.predict(adv_images, batch_size=batch_size), axis=1)
fooling_ratio_targeted = np.sum(preds_adv == self.targeted) / adv_images.shape[0]
return fooling_ratio_targeted
### generate the labels (in one-hot vector representation) for targeted attacks
# length: int, number of target images
def my_target_labels(self, length=0):
classes = self.y_train.shape[1]
return utils.to_categorical([self.targeted] * length, classes)
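# Illustrative example (editorial): with 4 classes and target class 2,
# utils.to_categorical([2, 2, 2], 4) yields [[0,0,1,0], [0,0,1,0], [0,0,1,0]],
# i.e. one identical one-hot target row per input image.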
### generate UAP
def my_gen_UAP(self):
imshape = self.X_train[0].shape
if self.targeted >= 0:
print(" *** targeted attack *** \n")
adv_crafter = Universal_SimBA(
self.classifier,
attack='dct',
epsilon=self.epsilon,
freq_dim=self.freqdim,
max_iter=self.max_iter,
eps=self.norm_size,
norm=self.norm_type,
targeted=True,
batch_size=batch_size
)
else:
print(" *** non-targeted attack *** \n")
adv_crafter = Universal_SimBA(
self.classifier,
attack='dct',
epsilon=self.epsilon,
freq_dim=self.freqdim,
max_iter=self.max_iter,
eps=self.norm_size,
norm=self.norm_type,
targeted=False,
batch_size=batch_size
)
# initialization
LOG = []
X_materials_cnt = 0
X_materials = self.X_train
# craft UAP
if self.targeted >= 0:
# generate the one-hot vector of the target label
Y_materials_tar = self.my_target_labels(length=len(X_materials))
_ = adv_crafter.generate(X_materials, y=Y_materials_tar)
else:
_ = adv_crafter.generate(X_materials)
# handling for no noise
if type(adv_crafter.noise[0,:]) == int:
noise = np.zeros(imshape)
else:
noise = np.copy(adv_crafter.noise)
noise = np.reshape(noise, imshape)
# generate random UAP whose size equals to the size of the UAP
noise_size = float(np.linalg.norm(noise.reshape(-1), ord=self.norm_type))
noise_random = random_sphere(
nb_points=1,
nb_dims=np.prod(X_materials[0].shape),
radius=noise_size,
norm=self.norm_type
).reshape(imshape)
# compute attack success rate of UAP
# for input data
fr_train = self.my_calc_fooling_ratio(images=self.X_train, noise=noise)
# for validation data
fr_test = self.my_calc_fooling_ratio(images=self.X_test, noise=noise)
# for training data
fr_m = self.my_calc_fooling_ratio(images=self.X_original_train, noise=noise)
# compute attack success rate of random UAP
# for input data
fr_train_r = self.my_calc_fooling_ratio(images=self.X_train, noise=noise_random)
# for validation data
fr_test_r = self.my_calc_fooling_ratio(images=self.X_test, noise=noise_random)
# for training data
fr_m_r = self.my_calc_fooling_ratio(images=self.X_original_train, noise=noise_random)
# compute UAP size
norm_2 = np.linalg.norm(noise)
norm_inf = abs(noise).max()
LOG.append([X_materials_cnt, norm_2, norm_inf, fr_train, fr_test, fr_m, fr_train_r, fr_test_r, fr_m_r])
print("LOG: {} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(X_materials_cnt, norm_2, norm_inf, fr_train, fr_test, fr_m, fr_train_r, fr_test_r, fr_m_r))
np.save(self.save_path+'/noise', noise)
np.save(self.save_path+'/LOG', np.array(LOG))
return noise, np.array(LOG)
### configuration of classifier
# model_type: 'InceptionV3', 'VGG16', 'ResNet50'
# model_path: str, path to model weight
# output_class: int, number of classes
# mono: int, monochrome images if mono = 1, RGB images otherwise
# silence: int, suppress the model summary output if silence = 1, print it otherwise
class my_DNN:
def __init__(
self,
model_type,
model_path,
output_class,
mono,
silence
):
self.model_type = model_type
self.model_path = model_path
self.output_class = output_class
self.mono = mono
self.silence = silence
def my_classifier(self):
if self.mono==1:
input_shape = (299, 299, 3)
if self.model_type == 'inceptionv3':
print(" MODEL: InceptionV3")
base_model = InceptionV3(weights='imagenet', input_shape=input_shape, include_top=False)
elif self.model_type == 'vgg16':
print(" MODEL: VGG16")
base_model = VGG16(weights=None, input_shape=input_shape, include_top=False)
elif self.model_type == "resnet50":
print(" MODEL: ResNet50")
base_model = ResNet50(weights='imagenet', input_shape=input_shape, include_top=False)
else:
print(" --- ERROR : UNKNOWN MODEL TYPE --- ")
base_model.layers.pop(0)
newInput = Input(batch_shape=(None, 299,299,1))
x = Lambda(lambda image: tf.image.grayscale_to_rgb(image))(newInput)
tmp_out = base_model(x)
tmpModel = Model(newInput, tmp_out)
x = tmpModel.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(self.output_class, activation='softmax')(x)
model = Model(tmpModel.input, predictions)
else:
input_shape = (299, 299, 3)
if self.model_type == 'inceptionv3':
print(" MODEL: InceptionV3")
base_model = InceptionV3(weights='imagenet', input_shape=input_shape, include_top=False)
elif self.model_type == 'vgg16':
print(" MODEL: VGG16")
base_model = VGG16(weights='imagenet', input_shape=input_shape, include_top=False)
elif self.model_type == "resnet50":
print(" MODEL: ResNet50")
base_model = ResNet50(weights='imagenet', input_shape=input_shape, include_top=False)
else:
print(" --- ERROR: UNKNOWN MODEL TYPE --- ")
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(self.output_class, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
for layer in model.layers:
layer.trainable = True
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
model.load_weights(self.model_path,)
if self.silence != 1:
model.summary()
classifier = KerasClassifier(model=model)
print("Finish Load Model")
return classifier
### Main ###
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str)
parser.add_argument('--model_path', type=str)
parser.add_argument('--model_type', type=str)
parser.add_argument('--norm_type', type=str)
parser.add_argument('--norm_rate', type=float)
parser.add_argument('--epsilon', type=float)
parser.add_argument('--max_iter', type=int)
parser.add_argument('--freqdim', type=int)
parser.add_argument('--nb_samples', type=int)
parser.add_argument('--targeted', type=int)
parser.add_argument('--save_path', type=str)
parser.add_argument('--gpu', type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
physical_devices = tf.config.list_physical_devices('GPU')
if len(physical_devices) > 0:
for device in physical_devices:
tf.config.experimental.set_memory_growth(device, True)
print('{} memory growth: {}'.format(device, tf.config.experimental.get_memory_growth(device)))
else:
print("Not enough GPU hardware devices available")
os.makedirs(args.save_path, exist_ok=False)
handler2 = logging.FileHandler(filename=f"{args.save_path}/log.txt")
handler2.setLevel(logging.INFO)
handler2.setFormatter(logging.Formatter("%(asctime)s %(levelname)8s %(message)s"))
logger.addHandler(handler2)
if args.norm_type == '2':
norm_type = 2
elif args.norm_type == 'inf':
norm_type = np.inf
norm_rate = args.norm_rate
# load data
X_train = np.load(f"./data/{args.dataset}/X_train.npy")
y_train = np.load(f"./data/{args.dataset}/y_train.npy")
X_test_1 = np.load(f"./data/{args.dataset}/X_test_1_{args.nb_samples}.npy")
y_test_1 = np.load(f"./data/{args.dataset}/y_test_1_{args.nb_samples}.npy")
X_test_2 = np.load(f"./data/{args.dataset}/X_test_2_{args.nb_samples}.npy")
y_test_2 = np.load(f"./data/{args.dataset}/y_test_2_{args.nb_samples}.npy")
# check color type (mono or RGB)
if X_train.shape[-1] != 3:
mono = 1
else:
mono = 0
# compute the actual norm size from the ratio `norm_rate` of the Lp of the UAP to the average Lp norm of an image in the dataset (training images)
if norm_type == np.inf:
norm_mean = 0
for img in X_test_1:
norm_mean += abs(img).max()
norm_mean = norm_mean/X_test_1.shape[0]
norm_size = float(norm_rate*norm_mean/128.0)
logger.info("\n ------------------------------------")
logger.info(" Linf norm: {:.2f} ".format(norm_size))
else:
norm_mean = 0
for img in X_test_1:
norm_mean += np.linalg.norm(img)
norm_mean = norm_mean/X_test_1.shape[0]
norm_size = float(norm_rate*norm_mean/128.0)
logger.info(" L2 norm: {:.2f} ".format(norm_size))
# normalization
X_train -= 128.0
X_train /= 128.0
X_test_1 -= 128.0
X_test_1 /= 128.0
X_test_2 -= 128.0
X_test_2 /= 128.0
logger.info(f"Train Size: {y_train.shape}")
logger.info(f"Test Size: {y_test_1.shape}")
logger.info(f"Eval Size: {y_test_2.shape}")
dnn = my_DNN(
model_type=args.model_type,
model_path=args.model_path,
output_class=y_train.shape[1],
mono=mono,
silence=1
)
classifier = dnn.my_classifier()
# compute the accuracies for clean images
preds_train = np.argmax(classifier.predict(X_train, batch_size=batch_size), axis=1)
acc = np.sum(preds_train == np.argmax(y_train, axis=1)) / y_train.shape[0]
logger.info(" Accuracy [train]: {:.2f}".format(acc))
preds_test1 = np.argmax(classifier.predict(X_test_1, batch_size=batch_size), axis=1)
acc = np.sum(preds_test1 == np.argmax(y_test_1, axis=1)) / y_test_1.shape[0]
logger.info(" Accuracy [test 1]: {:.2f}".format(acc))
preds_test2 = np.argmax(classifier.predict(X_test_2, batch_size=batch_size), axis=1)
acc = np.sum(preds_test2 == np.argmax(y_test_2, axis=1)) / y_test_2.shape[0]
logger.info(" Accuracy [test 2]: {:.2f}".format(acc))
logger.info(" ------------------------------------\n")
# generate UAP
uap = my_UAP(
classifier=classifier,
X_train=X_test_1, y_train=y_test_1,
X_test=X_test_2, y_test=y_test_2,
X_original_train=X_train, y_original_train=y_train,
norm_type=norm_type,
norm_size=norm_size,
epsilon=args.epsilon,
freqdim=args.freqdim,
max_iter=args.max_iter,
targeted=args.targeted,
save_path=args.save_path,
)
noise, LOG = uap.my_gen_UAP()
# output processing time
processing_time = time.time() - start_time
logger.info("\n\t ------------------------------------")
logger.info("\t total processing time : {:.2f} h.".format(processing_time/3600))
logger.info("\t ------------------------------------\n")
# save figures
save_f_img = f'{args.save_path}/sample.png'
make_adv_img(
clean_img=X_test_1[0],
noise=noise,
adv_img=X_test_1[0] + noise,
save_file_name=save_f_img,
nlz="11"
)
|
[
"tensorflow.random.set_seed",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.image.grayscale_to_rgb",
"tensorflow.keras.optimizers.SGD",
"logging.Formatter",
"numpy.linalg.norm",
"numpy.prod",
"logging.FileHandler",
"numpy.copy",
"sys.stdout.fileno",
"art.attacks.evasion.Universal_SimBA",
"numpy.reshape",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"numpy.save",
"tensorflow.keras.utils.to_categorical",
"art.classifiers.KerasClassifier",
"logging.StreamHandler",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.models.Model",
"tensorflow.keras.applications.vgg16.VGG16",
"matplotlib.use",
"os.makedirs",
"tensorflow.config.experimental.get_memory_growth",
"warnings.filterwarnings",
"tensorflow.keras.applications.inception_v3.InceptionV3",
"tensorflow.config.list_physical_devices",
"numpy.zeros",
"time.time",
"numpy.array",
"logging.getLogger"
] |
[((963, 996), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (986, 996), False, 'import warnings\n'), ((1840, 1854), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (1847, 1854), True, 'import matplotlib as mpl\n'), ((2250, 2269), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2267, 2269), False, 'import logging\n'), ((2310, 2333), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2331, 2333), False, 'import logging\n'), ((2346, 2394), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] %(message)s"""'], {}), "('[%(levelname)s] %(message)s')\n", (2363, 2394), False, 'import logging\n'), ((2543, 2554), 'time.time', 'time.time', ([], {}), '()\n', (2552, 2554), False, 'import time\n'), ((1054, 1073), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (1071, 1073), False, 'import os, sys, gc, pdb, argparse\n'), ((1916, 1940), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (1934, 1940), True, 'import tensorflow as tf\n'), ((1983, 2003), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1997, 2003), True, 'import numpy as np\n'), ((12121, 12146), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12144, 12146), False, 'import os, sys, gc, pdb, argparse\n'), ((12834, 12872), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (12865, 12872), True, 'import tensorflow as tf\n'), ((13195, 13238), 'os.makedirs', 'os.makedirs', (['args.save_path'], {'exist_ok': '(False)'}), '(args.save_path, exist_ok=False)\n', (13206, 13238), False, 'import os, sys, gc, pdb, argparse\n'), ((13254, 13311), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'f"""{args.save_path}/log.txt"""'}), "(filename=f'{args.save_path}/log.txt')\n", (13273, 13311), False, 'import logging\n'), ((13643, 13688), 'numpy.load', 'np.load', (['f"""./data/{args.dataset}/X_train.npy"""'], {}), "(f'./data/{args.dataset}/X_train.npy')\n", (13650, 13688), True, 'import numpy as np\n'), ((13703, 13748), 'numpy.load', 'np.load', (['f"""./data/{args.dataset}/y_train.npy"""'], {}), "(f'./data/{args.dataset}/y_train.npy')\n", (13710, 13748), True, 'import numpy as np\n'), ((13764, 13828), 'numpy.load', 'np.load', (['f"""./data/{args.dataset}/X_test_1_{args.nb_samples}.npy"""'], {}), "(f'./data/{args.dataset}/X_test_1_{args.nb_samples}.npy')\n", (13771, 13828), True, 'import numpy as np\n'), ((13844, 13908), 'numpy.load', 'np.load', (['f"""./data/{args.dataset}/y_test_1_{args.nb_samples}.npy"""'], {}), "(f'./data/{args.dataset}/y_test_1_{args.nb_samples}.npy')\n", (13851, 13908), True, 'import numpy as np\n'), ((13924, 13988), 'numpy.load', 'np.load', (['f"""./data/{args.dataset}/X_test_2_{args.nb_samples}.npy"""'], {}), "(f'./data/{args.dataset}/X_test_2_{args.nb_samples}.npy')\n", (13931, 13988), True, 'import numpy as np\n'), ((14004, 14068), 'numpy.load', 'np.load', (['f"""./data/{args.dataset}/y_test_2_{args.nb_samples}.npy"""'], {}), "(f'./data/{args.dataset}/y_test_2_{args.nb_samples}.npy')\n", (14011, 14068), True, 'import numpy as np\n'), ((5413, 5468), 'tensorflow.keras.utils.to_categorical', 'utils.to_categorical', (['([self.targeted] * length)', 'classes'], {}), '([self.targeted] * length, classes)\n', (5433, 5468), False, 'from tensorflow.keras import utils\n'), ((8303, 8324), 'numpy.linalg.norm', 'np.linalg.norm', (['noise'], {}), '(noise)\n', (8317, 8324), True, 'import numpy 
as np\n'), ((8662, 8703), 'numpy.save', 'np.save', (["(self.save_path + '/noise')", 'noise'], {}), "(self.save_path + '/noise', noise)\n", (8669, 8703), True, 'import numpy as np\n'), ((11704, 11759), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)\n', (11707, 11759), False, 'from tensorflow.keras.optimizers import SGD\n'), ((11974, 12002), 'art.classifiers.KerasClassifier', 'KerasClassifier', ([], {'model': 'model'}), '(model=model)\n', (11989, 12002), False, 'from art.classifiers import KerasClassifier\n'), ((13374, 13433), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)8s %(message)s"""'], {}), "('%(asctime)s %(levelname)8s %(message)s')\n", (13391, 13433), False, 'import logging\n'), ((16860, 16871), 'time.time', 'time.time', ([], {}), '()\n', (16869, 16871), False, 'import time\n'), ((5664, 5867), 'art.attacks.evasion.Universal_SimBA', 'Universal_SimBA', (['self.classifier'], {'attack': '"""dct"""', 'epsilon': 'self.epsilon', 'freq_dim': 'self.freqdim', 'max_iter': 'self.max_iter', 'eps': 'self.norm_size', 'norm': 'self.norm_type', 'targeted': '(True)', 'batch_size': 'batch_size'}), "(self.classifier, attack='dct', epsilon=self.epsilon,\n freq_dim=self.freqdim, max_iter=self.max_iter, eps=self.norm_size, norm\n =self.norm_type, targeted=True, batch_size=batch_size)\n", (5679, 5867), False, 'from art.attacks.evasion import Universal_SimBA\n'), ((6110, 6314), 'art.attacks.evasion.Universal_SimBA', 'Universal_SimBA', (['self.classifier'], {'attack': '"""dct"""', 'epsilon': 'self.epsilon', 'freq_dim': 'self.freqdim', 'max_iter': 'self.max_iter', 'eps': 'self.norm_size', 'norm': 'self.norm_type', 'targeted': '(False)', 'batch_size': 'batch_size'}), "(self.classifier, attack='dct', epsilon=self.epsilon,\n freq_dim=self.freqdim, max_iter=self.max_iter, eps=self.norm_size, norm\n =self.norm_type, targeted=False, batch_size=batch_size)\n", (6125, 6314), False, 'from art.attacks.evasion import Universal_SimBA\n'), ((6996, 7013), 'numpy.zeros', 'np.zeros', (['imshape'], {}), '(imshape)\n', (7004, 7013), True, 'import numpy as np\n'), ((7048, 7074), 'numpy.copy', 'np.copy', (['adv_crafter.noise'], {}), '(adv_crafter.noise)\n', (7055, 7074), True, 'import numpy as np\n'), ((7095, 7121), 'numpy.reshape', 'np.reshape', (['noise', 'imshape'], {}), '(noise, imshape)\n', (7105, 7121), True, 'import numpy as np\n'), ((8741, 8754), 'numpy.array', 'np.array', (['LOG'], {}), '(LOG)\n', (8749, 8754), True, 'import numpy as np\n'), ((8778, 8791), 'numpy.array', 'np.array', (['LOG'], {}), '(LOG)\n', (8786, 8791), True, 'import numpy as np\n'), ((10275, 10313), 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': '(None, 299, 299, 1)'}), '(batch_shape=(None, 299, 299, 1))\n', (10280, 10313), False, 'from tensorflow.keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D\n'), ((10452, 10476), 'tensorflow.keras.models.Model', 'Model', (['newInput', 'tmp_out'], {}), '(newInput, tmp_out)\n', (10457, 10476), False, 'from tensorflow.keras.models import Model\n'), ((10649, 10683), 'tensorflow.keras.models.Model', 'Model', (['tmpModel.input', 'predictions'], {}), '(tmpModel.input, predictions)\n', (10654, 10683), False, 'from tensorflow.keras.models import Model\n'), ((11566, 11617), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', 
(11571, 11617), False, 'from tensorflow.keras.models import Model\n'), ((12959, 13013), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['device', '(True)'], {}), '(device, True)\n', (12999, 13013), True, 'import tensorflow as tf\n'), ((14764, 14783), 'numpy.linalg.norm', 'np.linalg.norm', (['img'], {}), '(img)\n', (14778, 14783), True, 'import numpy as np\n'), ((4858, 4884), 'numpy.sum', 'np.sum', (['(preds_adv != preds)'], {}), '(preds_adv != preds)\n', (4864, 4884), True, 'import numpy as np\n'), ((5089, 5123), 'numpy.sum', 'np.sum', (['(preds_adv == self.targeted)'], {}), '(preds_adv == self.targeted)\n', (5095, 5123), True, 'import numpy as np\n'), ((9690, 9765), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'input_shape': 'input_shape', 'include_top': '(False)'}), "(weights='imagenet', input_shape=input_shape, include_top=False)\n", (9701, 9765), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3\n'), ((10525, 10549), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (10547, 10549), False, 'from tensorflow.keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D\n'), ((10579, 10625), 'tensorflow.keras.layers.Dense', 'Dense', (['self.output_class'], {'activation': '"""softmax"""'}), "(self.output_class, activation='softmax')\n", (10584, 10625), False, 'from tensorflow.keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D\n'), ((10862, 10937), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'input_shape': 'input_shape', 'include_top': '(False)'}), "(weights='imagenet', input_shape=input_shape, include_top=False)\n", (10873, 10937), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3\n'), ((11442, 11466), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (11464, 11466), False, 'from tensorflow.keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D\n'), ((11496, 11542), 'tensorflow.keras.layers.Dense', 'Dense', (['self.output_class'], {'activation': '"""softmax"""'}), "(self.output_class, activation='softmax')\n", (11501, 11542), False, 'from tensorflow.keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D\n'), ((15618, 15644), 'numpy.argmax', 'np.argmax', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (15627, 15644), True, 'import numpy as np\n'), ((15843, 15870), 'numpy.argmax', 'np.argmax', (['y_test_1'], {'axis': '(1)'}), '(y_test_1, axis=1)\n', (15852, 15870), True, 'import numpy as np\n'), ((16071, 16098), 'numpy.argmax', 'np.argmax', (['y_test_2'], {'axis': '(1)'}), '(y_test_2, axis=1)\n', (16080, 16098), True, 'import numpy as np\n'), ((9879, 9942), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': 'None', 'input_shape': 'input_shape', 'include_top': '(False)'}), '(weights=None, input_shape=input_shape, include_top=False)\n', (9884, 9942), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((11051, 11120), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'input_shape': 'input_shape', 'include_top': '(False)'}), "(weights='imagenet', input_shape=input_shape, include_top=False)\n", (11056, 11120), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((13070, 13118), 'tensorflow.config.experimental.get_memory_growth', 
'tf.config.experimental.get_memory_growth', (['device'], {}), '(device)\n', (13110, 13118), True, 'import tensorflow as tf\n'), ((7359, 7388), 'numpy.prod', 'np.prod', (['X_materials[0].shape'], {}), '(X_materials[0].shape)\n', (7366, 7388), True, 'import numpy as np\n'), ((10062, 10134), 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'input_shape': 'input_shape', 'include_top': '(False)'}), "(weights='imagenet', input_shape=input_shape, include_top=False)\n", (10070, 10134), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), ((10349, 10381), 'tensorflow.image.grayscale_to_rgb', 'tf.image.grayscale_to_rgb', (['image'], {}), '(image)\n', (10374, 10381), True, 'import tensorflow as tf\n'), ((11240, 11312), 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'input_shape': 'input_shape', 'include_top': '(False)'}), "(weights='imagenet', input_shape=input_shape, include_top=False)\n", (11248, 11312), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n')]
|
import numpy as np
from scipy.stats import norm
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
import warnings
import keras.backend as K
from keras.initializers import glorot_uniform
import tensorflow as tf
from sklearn.model_selection import KFold
import os
class HPermT(object):
"""Class for holdout permutation test (HPT) based on deep neural networks.
Parameters
----------
inf_feats : list-like of shape (num of tests, dim of features)
		List of covariates/features under the hypothesis tests, one element corresponding to one hypothesis test.
model : {keras-defined neural network}
A neural network for original full dataset
alpha: float (0,1), default=0.05
		The nominal level of the hypothesis testing
	num_perm: int, default=1000
		Number of permutations used to generate the permutation (null) distribution.
verbose: {0, 1}, default=0
If print the testing results, 1 indicates YES, 0 indicates NO.
eva_metric: {'mse', 'zero-one', 'cross-entropy', or custom metric function}
The evaluation metric, ``'mse'`` is the l2-loss for regression, ``'zero-one'`` is the zero-one loss for classification, ``'cross-entropy'`` is log-loss for classification. It can also be custom metric function as ``eva_metric(y_true, y_pred)``.
cp_path: {string}, default='./HPT_checkpoints'
The checkpoints path to save the models
"""
def __init__(self, inf_feats, model, alpha=.05, num_perm=1000, verbose=0, eva_metric='mse', cp_path = './HPT_checkpoints'):
self.inf_feats = inf_feats
self.model = model
self.alpha = alpha
self.num_perm = num_perm
self.eva_metric = eva_metric
self.cp_path = cp_path
def metric(self, y_true, y_pred):
if self.eva_metric == 'mse':
metric_tmp = ((y_true - y_pred)**2).flatten()
elif self.eva_metric == 'mae':
metric_tmp = abs(y_true - y_pred).flatten()
elif self.eva_metric == 'zero-one':
label_pred = np.argmax(y_pred, 1)
label_true = np.argmax(y_true, 1)
metric_tmp = 1. - 1.*(label_true == label_pred)
elif self.eva_metric == 'cross-entropy':
label_true = np.argmax(y_true, 1)
			metric_tmp = -np.log(y_pred[range(len(y_pred)),label_true])  # log-loss (negative log-likelihood)
else:
metric_tmp = self.eva_metric(y_true, y_pred)
return metric_tmp
def save_init(self):
"""
		Save the initialization for the network model under class HPermT
"""
self.model.save_weights(self.cp_path+'/model_init.h5')
# self.model_mask.save_weights(self.cp_path+'/model_mask_init.h5')
def reset_model(self):
"""
		Reset the network model under class HPermT
"""
self.model.load_weights(self.cp_path+'/model_init.h5')
# self.model_mask.load_weights(self.cp_path+'/model_mask_init.h5')
# def reset_model(self):
# if int(tf.__version__[0]) == 2:
# # for layer in self.model.layers:
# # if isinstance(layer, tf.keras.Model):
# # reset_weights(layer)
# # continue
# # for k, initializer in layer.__dict__.items():
# # if "initializer" not in k:
# # continue
# # # find the corresponding variable
# # var = getattr(layer, k.replace("_initializer", ""))
# # var.assign(initializer(var.shape, var.dtype))
#
# for layer in self.model.layers:
# if isinstance(layer, tf.keras.Model): #if you're using a model as a layer
# reset_weights(layer) #apply function recursively
# continue
#
# #where are the initializers?
# if hasattr(layer, 'cell'):
# init_container = layer.cell
# else:
# init_container = layer
#
# for key, initializer in init_container.__dict__.items():
# if "initializer" not in key: #is this item an initializer?
# continue #if no, skip it
#
# # find the corresponding variable, like the kernel or the bias
# if key == 'recurrent_initializer': #special case check
# var = getattr(init_container, 'recurrent_kernel')
# else:
# var = getattr(init_container, key.replace("_initializer", ""))
#
# if var is None:
# continue
# else:
# var.assign(initializer(var.shape, var.dtype))
#
# if int(tf.__version__[0]) == 1:
# session = K.get_session()
# for layer in self.model.layers:
# if hasattr(layer, 'kernel_initializer'):
# layer.kernel.initializer.run(session=session)
# if hasattr(layer, 'bias_initializer'):
# layer.bias.initializer.run(session=session)
# for layer in self.model_perm.layers:
# if hasattr(layer, 'kernel_initializer'):
# layer.kernel.initializer.run(session=session)
# if hasattr(layer, 'bias_initializer'):
# layer.bias.initializer.run(session=session)
## can be extent to @abstractmethod
# def mask_cov(self, X, k=0):
# """
# Return instances with masked k-th hypothesized features.
#
# Parameters
# ----------
# X : array-like
# Target instances.
#
# k : integer, default = 0
# k-th hypothesized features in inf_feats
# """
# Z = X.copy()
# if type(self.inf_feats[k]) is list:
# Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], 0] = 0.
# else:
# Z[:,self.inf_feats[k]]= 0.
# return Z
def perm_cov(self, X, k=0):
"""
Return instances with permuted k-th hypothesized features.
Parameters
----------
X : array-like
Target instances.
k : integer, default = 0
k-th hypothesized features in inf_feats
"""
Z = X.copy()
if type(self.inf_feats[k]) is list:
## for channels_last image data: shape should be (#samples, img_rows, img_cols, channel)
Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], :] = np.random.permutation(Z[:,self.inf_feats[k][0][:,None], self.inf_feats[k][1], :])
else:
Z[:,self.inf_feats[k]]= np.random.permutation(Z[:,self.inf_feats[k]])
return Z
def noise_cov(self, X, k=0):
Z = X.copy()
Z[:,self.inf_feats[k]] = np.random.randn(len(X), len(self.inf_feats[k]))
return Z
def testing(self, X, y, fit_params, cv_num=5, cp='hommel', inf_ratio=.2):
"""
		Return p-values of the hypothesis tests for inf_feats in class HPermT.
Parameters
----------
X : array-like | shape = (n_samples, dim_features)
Instances matrix/tensor, where n_samples in the number of samples and dim_features is the dimension of the features.
			If X is a vectorized feature, ``shape`` should be ``(#samples, dim of features)``.
			If X is image/matrix data, ``shape`` should be ``(#samples, img_rows, img_cols, channel)``, that is, **X must be channel_last image data**.
		y : array-like of shape (n_samples,)
			Output vector/matrix relative to X.
fit_params: {dict of fitting parameters}
See keras ``fit``: (https://keras.rstudio.com/reference/fit.html), including ``batch_size``, ``epoch``, ``callbacks``, ``validation_split``, ``validation_data``, and so on.
cv_num: int, default=5
			The number of cross-validation repetitions used to reshuffle the estimation/inference split in testing.
cp: {'gmean', 'min', 'hmean', 'Q1', 'hommel', 'cauchy'}, default ='hommel'
A method to combine p-values obtained from cross-validation.
		inf_ratio: float, default=0.2
			A pre-specified ratio of samples held out for inference (permutation evaluation); the remaining samples are used to fit the model.
Return
------
P_value: array of float [0, 1]
			The p-values for the target hypothesis tests.
"""
## create checkpoints path
if not os.path.exists(self.cp_path):
os.mkdir(self.cp_path)
## save initial weights
self.save_init()
P_value = []
for k in range(len(self.inf_feats)):
self.reset_model()
m, n = int(inf_ratio * len(X)), len(X) - int(inf_ratio * len(X))
P_value_cv = []
score_cv, score_perm_cv = [], []
for h in range(cv_num):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=n, random_state=h)
## prediction and inference in full model
self.reset_model()
history = self.model.fit(X_train, y_train, **fit_params)
## save and load model
path_tmp = self.cp_path+'/model'+'_inf'+str(k)+'_cv'+str(h)+'.h5'
self.model.save_weights(path_tmp)
self.model.load_weights(path_tmp)
## prediction for original dataset
pred_y = self.model.predict(X_test)
metric_full = self.metric(y_test, pred_y)
score = metric_full.mean()
score_cv.append(score)
score_perm = []
for l in range(self.num_perm):
Z_test = self.perm_cov(X_test, k)
pred_y_perm = self.model.predict(Z_test)
# pred_y_perm = self.model(Z_tmp, training=False)
metric_perm = self.metric(y_test, pred_y_perm)
score_perm.append(metric_perm.mean())
score_perm_cv.append(score_perm)
score_cv = np.array(score_cv)
score_perm_cv = np.array(score_perm_cv)
cv_ave_score, cv_ave_score_perm = score_cv.mean(), np.mean(score_perm_cv, axis=0)
# print(cv_ave_score_perm)
## compute p-value
print("%d th inf-feats perf score: %.3f, perf permutation score: %.3f(%.3f)" %(k, cv_ave_score, cv_ave_score_perm.mean(), cv_ave_score_perm.std()))
p_value_tmp = (np.sum(cv_ave_score_perm <= cv_ave_score) + 1.0) / (self.num_perm + 1.0)
if p_value_tmp < self.alpha:
print('reject %d th H0 with p_value: %.3f' %(k, p_value_tmp))
else:
print('accept %d th H0 with p_value: %.3f' %(k, p_value_tmp))
P_value.append(p_value_tmp)
# return P_value
self.p_values = P_value
return P_value
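
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# small standalone-keras Sequential regression model and synthetic data; the
# names `toy_model`, `X_demo`, `y_demo` and the feature groups passed to
# `inf_feats` are hypothetical and only show how HPermT might be driven.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Dense

    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 5)
    y_demo = 2.0 * X_demo[:, :1] + 0.1 * rng.randn(200, 1)  # only feature 0 carries signal

    toy_model = Sequential([Dense(8, activation='relu', input_shape=(5,)), Dense(1)])
    toy_model.compile(optimizer='adam', loss='mse')

    # test feature 0 (signal) and feature 3 (pure noise); the index sets are passed
    # as arrays so that perm_cov takes its vectorized-feature branch
    tester = HPermT(inf_feats=[np.array([0]), np.array([3])], model=toy_model,
                    num_perm=100, eva_metric='mse')
    fit_params = {'epochs': 20, 'batch_size': 32, 'verbose': 0}
    p_values = tester.testing(X_demo, y_demo, fit_params, cv_num=2, inf_ratio=0.2)
    print(p_values)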
|
[
"os.mkdir",
"numpy.sum",
"numpy.argmax",
"sklearn.model_selection.train_test_split",
"os.path.exists",
"numpy.mean",
"numpy.array",
"numpy.random.permutation"
] |
[((5414, 5502), 'numpy.random.permutation', 'np.random.permutation', (['Z[:, self.inf_feats[k][0][:, None], self.inf_feats[k][1], :]'], {}), '(Z[:, self.inf_feats[k][0][:, None], self.inf_feats[k]\n [1], :])\n', (5435, 5502), True, 'import numpy as np\n'), ((5531, 5577), 'numpy.random.permutation', 'np.random.permutation', (['Z[:, self.inf_feats[k]]'], {}), '(Z[:, self.inf_feats[k]])\n', (5552, 5577), True, 'import numpy as np\n'), ((7171, 7199), 'os.path.exists', 'os.path.exists', (['self.cp_path'], {}), '(self.cp_path)\n', (7185, 7199), False, 'import os\n'), ((7204, 7226), 'os.mkdir', 'os.mkdir', (['self.cp_path'], {}), '(self.cp_path)\n', (7212, 7226), False, 'import os\n'), ((8420, 8438), 'numpy.array', 'np.array', (['score_cv'], {}), '(score_cv)\n', (8428, 8438), True, 'import numpy as np\n'), ((8458, 8481), 'numpy.array', 'np.array', (['score_perm_cv'], {}), '(score_perm_cv)\n', (8466, 8481), True, 'import numpy as np\n'), ((7538, 7590), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': 'n', 'random_state': 'h'}), '(X, y, train_size=n, random_state=h)\n', (7554, 7590), False, 'from sklearn.model_selection import train_test_split\n'), ((8536, 8566), 'numpy.mean', 'np.mean', (['score_perm_cv'], {'axis': '(0)'}), '(score_perm_cv, axis=0)\n', (8543, 8566), True, 'import numpy as np\n'), ((1816, 1836), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1825, 1836), True, 'import numpy as np\n'), ((1853, 1873), 'numpy.argmax', 'np.argmax', (['y_true', '(1)'], {}), '(y_true, 1)\n', (1862, 1873), True, 'import numpy as np\n'), ((8788, 8829), 'numpy.sum', 'np.sum', (['(cv_ave_score_perm <= cv_ave_score)'], {}), '(cv_ave_score_perm <= cv_ave_score)\n', (8794, 8829), True, 'import numpy as np\n'), ((1984, 2004), 'numpy.argmax', 'np.argmax', (['y_true', '(1)'], {}), '(y_true, 1)\n', (1993, 2004), True, 'import numpy as np\n')]
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.arrays import *
from mos3d.models.world.objects import GridWorldObject
import mos3d.util as util
import numpy as np
class Robot(GridWorldObject):
# By default, the robot has a camera, and
# it looks into the direction (-1, 0, 0)
def __init__(self, id_,
camera_pose, # 6D camera pose relative to the robot
camera_model,
objtype="robot"):
super().__init__(id_, objtype=objtype)
self._camera_pose = camera_pose
self._camera_model = camera_model
sx, sy, sz, sthx, sthy, sthz = self._camera_pose
        self._camera_model.transform_camera(self._camera_pose, permanent=True)
def init_render(self):
vertices, indices, colors = util.cube(color=(1,0,0))
self._vertex_vbo, self._index_vbo, self._color_vbo\
= util.generate_vbo_elements(vertices, indices, colors)
self._num_indices = len(indices)
# vertices for axes
axes_vertices = np.array([0,0,0,
0,0,0,
0,0,0,
2,0,0,
0,2,0,
0,0,2])
axes_colors = np.array([0.8,0.2,0.2, # origin - red
0.2,0.8,0.2, # origin - green
0.2,0.2,0.8, # origin - blue
0.8,0.2,0.2, # Red
0.2,0.8,0.2, # Green
0.2,0.2,0.8]) # Blue
axes_indices = np.array([0,3,1,4,2,5])
self._axes_vertex_vbo, self._axes_index_vbo, self._axes_color_vbo \
= util.generate_vbo_elements(axes_vertices, axes_indices, axes_colors)
self._camera_model.init_render()
def _render_axes(self):
# Draw axes
glEnableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, self._axes_vertex_vbo);
glVertexPointer(3, GL_FLOAT, 0, None);
glBindBuffer(GL_ARRAY_BUFFER, self._axes_color_vbo);
glColorPointer(3, GL_FLOAT, 0, None);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self._axes_index_vbo);
glDrawElements(GL_LINES, 6, GL_UNSIGNED_INT, None)
glDisableClientState(GL_COLOR_ARRAY)
def render(self, render_fov=False):
if render_fov:
glPushMatrix()
sx, sy, sz, sthx, sthy, sthz = self._camera_pose
glTranslatef(sx, sy, sz)
glRotatef(sthz, 0, 0, 1)
glRotatef(sthy, 0, 1, 0)
glRotatef(sthx, 1, 0, 0)
self._camera_model.render()
glPopMatrix()
# render robot cube
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glEnableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, self._vertex_vbo);
glVertexPointer(3, GL_FLOAT, 0, None);
glBindBuffer(GL_ARRAY_BUFFER, self._color_vbo);
glColorPointer(3, GL_FLOAT, 0, None);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self._index_vbo);
glDrawElements(GL_QUADS, self._num_indices, GL_UNSIGNED_INT, None)
glDisableClientState(GL_COLOR_ARRAY)
# render axis
self._render_axes()
def cleanup(self):
glDeleteBuffers(3, np.array([self._vertex_vbo, self._index_vbo, self._color_vbo]))
@property
def camera_model(self):
return self._camera_model
def camera_pose(self, robot_pose):
"""
Returns world-frame camera pose, with rotation in quaternion.
"""
# robot pose with respect to world frame
rx, ry, rz, qx, qy, qz, qw = robot_pose
rthx, rthy, rthz = util.quat_to_euler(qx, qy, qz, qw)
# camera pose with respect to robot frame
sx, sy, sz, sthx, sthy, sthz = self._camera_pose
# camera pose with respect to world frame
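        # position: the camera offset is added directly to the robot position (the
        # offset is not rotated into the world frame); orientation: the robot
        # quaternion is composed with the camera's relative Euler rotation and the
        # product is returned as a quaternion.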
cam_pose_world = [rx + sx, ry + sy, rz + sz]\
+ util.R_to_quat(util.R_quat(qx, qy, qz, qw) * util.R_euler(sthx, sthy, sthz)).tolist()
return cam_pose_world
|
[
"mos3d.util.R_euler",
"mos3d.util.cube",
"numpy.array",
"mos3d.util.generate_vbo_elements",
"mos3d.util.R_quat",
"mos3d.util.quat_to_euler"
] |
[((867, 893), 'mos3d.util.cube', 'util.cube', ([], {'color': '(1, 0, 0)'}), '(color=(1, 0, 0))\n', (876, 893), True, 'import mos3d.util as util\n'), ((966, 1019), 'mos3d.util.generate_vbo_elements', 'util.generate_vbo_elements', (['vertices', 'indices', 'colors'], {}), '(vertices, indices, colors)\n', (992, 1019), True, 'import mos3d.util as util\n'), ((1114, 1178), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2])\n', (1122, 1178), True, 'import numpy as np\n'), ((1359, 1464), 'numpy.array', 'np.array', (['[0.8, 0.2, 0.2, 0.2, 0.8, 0.2, 0.2, 0.2, 0.8, 0.8, 0.2, 0.2, 0.2, 0.8, 0.2,\n 0.2, 0.2, 0.8]'], {}), '([0.8, 0.2, 0.2, 0.2, 0.8, 0.2, 0.2, 0.2, 0.8, 0.8, 0.2, 0.2, 0.2, \n 0.8, 0.2, 0.2, 0.2, 0.8])\n', (1367, 1464), True, 'import numpy as np\n'), ((1705, 1733), 'numpy.array', 'np.array', (['[0, 3, 1, 4, 2, 5]'], {}), '([0, 3, 1, 4, 2, 5])\n', (1713, 1733), True, 'import numpy as np\n'), ((1819, 1887), 'mos3d.util.generate_vbo_elements', 'util.generate_vbo_elements', (['axes_vertices', 'axes_indices', 'axes_colors'], {}), '(axes_vertices, axes_indices, axes_colors)\n', (1845, 1887), True, 'import mos3d.util as util\n'), ((3795, 3829), 'mos3d.util.quat_to_euler', 'util.quat_to_euler', (['qx', 'qy', 'qz', 'qw'], {}), '(qx, qy, qz, qw)\n', (3813, 3829), True, 'import mos3d.util as util\n'), ((3396, 3458), 'numpy.array', 'np.array', (['[self._vertex_vbo, self._index_vbo, self._color_vbo]'], {}), '([self._vertex_vbo, self._index_vbo, self._color_vbo])\n', (3404, 3458), True, 'import numpy as np\n'), ((4070, 4097), 'mos3d.util.R_quat', 'util.R_quat', (['qx', 'qy', 'qz', 'qw'], {}), '(qx, qy, qz, qw)\n', (4081, 4097), True, 'import mos3d.util as util\n'), ((4100, 4130), 'mos3d.util.R_euler', 'util.R_euler', (['sthx', 'sthy', 'sthz'], {}), '(sthx, sthy, sthz)\n', (4112, 4130), True, 'import mos3d.util as util\n')]
|
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from qutip import *
from torch.utils.data import Dataset
def normalize(a):
a_oo = a - np.real(a).min()
return a_oo/np.abs(a_oo).max()
def get_state(theta, phi):
ket0, ket1 = np.array([[1.],[0.]]), np.array([[0.],[1.]])
    # standard Bloch-sphere parametrisation: |psi> = cos(theta/2)|0> + exp(i*phi)*sin(theta/2)|1>
    bloch_state = np.cos(theta/2) * ket0 + np.exp(1j * phi) * np.sin(theta/2) * ket1
return Qobj(bloch_state)
def get_spherical(theta, phi):
return np.array([np.sin(theta) * np.cos(phi), np.sin(theta) * np.sin(phi), np.cos(theta)])
def sample_bloch(n_samples=50, rand=True):
if rand:
thetas = np.sort(np.pi * np.random.rand(n_samples))
phis = np.sort(2 * np.pi * np.random.rand(n_samples))
else:
thetas = np.linspace(0, np.pi, n_samples)
phis = np.linspace(0, 2 * np.pi, n_samples)
bloch_vec = np.dstack(np.meshgrid(thetas, phis)) # [n_samples, n_samples, 2]
return bloch_vec.reshape(n_samples * n_samples, 2) # [n_samples^2, 2]
def sample_initial_states(n_samples=50, rand=True):
" sample initial states "
bloch_vecs = sample_bloch(n_samples, rand)
states = [get_state(*bvec) for bvec in bloch_vecs]
spherical = np.asarray([get_spherical(*bvec) for bvec in bloch_vecs])
return states, bloch_vecs, spherical
def final_states_to_numpy(states):
"convert list of quantum objects to numpy array [2, num_time_steps]"
return np.concatenate([state.full() for state in states], axis=-1)
class StochasticTwoLevelDataset(Dataset):
def __init__(self, num_batches=30, batched_samples=6, validation_samples=10, start=0, stop=2, last=10, time_steps=300, mc_samples=250, dataset_type='closed'):
self.total_time_steps = np.linspace(start, last, time_steps)
self.initial_states, _, self.spherical = sample_initial_states(batched_samples, rand=True)
self.validation_points = sample_initial_states(validation_samples, rand=False)
self.num_per_batch = batched_samples ** 2
self.num_batches = num_batches
self.num_trajs = self.num_per_batch * self.num_batches
self.dataset_type = dataset_type
if dataset_type == 'closed':
self.rand_parameters = np.zeros((num_batches, 2))
elif dataset_type == 'open':
self.rand_parameters = np.zeros((num_batches, 4))
expect_data = []
for i in range(num_batches):
samp_z = np.random.uniform(1, 2.5, 1)
samp_x = np.random.uniform(1, 2.5, 1)
self.rand_parameters[i, 0] = samp_z
self.rand_parameters[i, 1] = samp_x
H = samp_z[0] * sigmaz() + samp_x[0] * sigmax()
if dataset_type == 'closed':
solve = lambda state : sesolve(H, state, self.total_time_steps, e_ops=[sigmax(), sigmay(), sigmaz()], progress_bar=None)
elif dataset_type == 'open':
decay_samp = np.random.uniform(0.1, 0.3, 2)
self.rand_parameters[i, 2:] = decay_samp
c_ops = [np.sqrt(decay_samp[0]) * sigmax(), np.sqrt(decay_samp[1]) * sigmaz()]
solve = lambda state : mesolve(H, state, self.total_time_steps, e_ops=[sigmax(), sigmay(), sigmaz()], c_ops=c_ops)
all_states = [solve(state).expect for state in self.initial_states]
states = [np.asarray(states, dtype='double') for states in all_states]
states = np.asarray([np.column_stack([state[0], state[1], state[2]]) for state in states])
expect_data.append(states)
self.expect_data = np.asarray(expect_data)
self.total_expect_data = self.expect_data.reshape(self.num_trajs, time_steps, 3)
self.train_time_steps = self.total_time_steps[np.where(self.total_time_steps <= stop)]
self.train_expect_data = self.total_expect_data[:,:self.train_time_steps.shape[0],:]
def plot_trajs(self):
for i in range(self.num_batches):
for j in range(self.num_per_batch):
                ts = self.total_time_steps  # the class stores total_time_steps, not time_steps
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.plot(ts, self.expect_data[i, j, :, 0])
ax1.set_ylim(-1, 1)
ax1.set_ylabel('$\sigma_x$')
ax2.plot(ts, self.expect_data[i, j, :, 1])
ax2.set_ylim(-1, 1)
ax2.set_ylabel('$\sigma_y$')
ax3.plot(ts, self.expect_data[i, j, :, 2])
ax3.set_ylim(-1, 1)
ax3.set_ylabel('$\sigma_z$')
if self.dataset_type == 'closed':
ax3.set_xlabel('H = {}z + {}x'.format(self.rand_parameters[i, 0], self.rand_parameters[i, 1]))
else:
ax3.set_xlabel('H = {}z + {}x decay: {} {}'.format(*self.rand_parameters[i]))
plt.savefig('plots/stochastic_closed_noise/traj_{}_{}.png'.format(i, j))
plt.close(fig)
def render_initial_states(self, directory):
bloch = Bloch()
colors = normalize(self.spherical)
bloch.point_color = colors
bloch.add_points([self.spherical[:, 0], self.spherical[:, 1], self.spherical[:, 2]], 'm')
bloch.save(directory)
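
# Illustrative sketch (not part of the original module): a deliberately tiny
# closed-system dataset, useful only to sanity-check the shapes produced by the
# class above. The helper name and parameter values are arbitrary.
def _tiny_closed_dataset_demo():
    ds = StochasticTwoLevelDataset(num_batches=2, batched_samples=2, time_steps=50)
    # num_trajs = num_batches * batched_samples**2 = 8 trajectories
    print(ds.total_expect_data.shape)  # (8, 50, 3)
    print(ds.train_expect_data.shape)  # (8, n_steps_with_t_below_stop, 3)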
# two qubit functions
def random_u(N):
#Return a Haar distributed random unitary NxN
#N being the system dimension
Z = np.random.randn(N,N) + 1.0j * np.random.randn(N,N)
[Q,R] = np.linalg.qr(Z) # QR decomposition
D = np.diag(np.diagonal(R) / np.abs(np.diagonal(R)))
return np.dot(Q, D)
def random_psi():
#Return random state, within computational subspace {|0>,|1>}
Ur = random_u(2)
alpha = Ur[0,0]
beta = Ur[1,0]
ket0, ket1 = np.array([[1.],[0.]]), np.array([[0.],[1.]])
rand_vector = alpha * ket0 + beta * ket1 # alpha |0> + beta |1>
return alpha, beta, rand_vector
def two_qubit_initial(num):
initial_states = []
for i in range(num):
_, _, vec1 = random_psi()
_, _, vec2 = random_psi()
initial_states.append(Qobj(np.kron(vec1, vec2)))
return initial_states
class TwoQubitDataset(Dataset):
def __init__(self, omega=1, delta=1, J=1, num_batches=30, num_trajs=36, time_steps=300, stop=2, end=10):
sigmaz1, sigmaz2 = Qobj(np.kron(sigmaz(), np.eye(2))), Qobj(np.kron(np.eye(2), sigmaz()))
sigmax1, sigmax2 = Qobj(np.kron(sigmax(), np.eye(2))), Qobj(np.kron(np.eye(2), sigmax()))
self.num_trajs = num_batches * num_trajs
self.initial_states = two_qubit_initial(num_trajs)
self.total_time_steps = np.linspace(0, end, time_steps)
expect_data = []
for i in range(num_batches):
samp_z = np.random.uniform(1, 2.5, 1)[0]
samp_x = np.random.uniform(1, 2.5, 1)[0]
self.H = (omega / 2 * sigmaz1 * samp_z) + (delta / 2 * sigmax1 * samp_x) + (omega / 2 * sigmaz2 * samp_z) + (delta / 2 * sigmax2 * samp_x) + (J * sigmax1 * sigmax2)
solve = lambda state : sesolve(self.H, state, self.total_time_steps, e_ops=[sigmax1, sigmax2, sigmaz1, sigmaz2], progress_bar=None)
all_states = [solve(state).expect for state in self.initial_states]
states = [np.asarray(states, dtype='double') for states in all_states]
states = np.asarray([np.column_stack([state[0], state[1], state[2], state[3]]) for state in states])
expect_data.append(states)
expect_data = np.asarray(expect_data)
self.total_expect_data = expect_data.reshape(self.num_trajs, time_steps, 4)
self.train_time_steps = self.total_time_steps[np.where(self.total_time_steps <= stop)]
self.train_expect_data = self.total_expect_data[:,:self.train_time_steps.shape[0],:]
if __name__ == '__main__':
data = TwoQubitDataset()
print(data.total_expect_data.shape[0])
|
[
"numpy.abs",
"numpy.linalg.qr",
"numpy.sin",
"numpy.meshgrid",
"numpy.random.randn",
"matplotlib.pyplot.close",
"numpy.kron",
"numpy.linspace",
"numpy.real",
"matplotlib.pyplot.subplots",
"numpy.complex",
"numpy.diagonal",
"numpy.asarray",
"numpy.cos",
"numpy.dot",
"numpy.random.uniform",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.column_stack",
"numpy.random.rand",
"numpy.eye",
"numpy.sqrt"
] |
[((5417, 5432), 'numpy.linalg.qr', 'np.linalg.qr', (['Z'], {}), '(Z)\n', (5429, 5432), True, 'import numpy as np\n'), ((5523, 5535), 'numpy.dot', 'np.dot', (['Q', 'D'], {}), '(Q, D)\n', (5529, 5535), True, 'import numpy as np\n'), ((285, 309), 'numpy.array', 'np.array', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (293, 309), True, 'import numpy as np\n'), ((308, 332), 'numpy.array', 'np.array', (['[[0.0], [1.0]]'], {}), '([[0.0], [1.0]])\n', (316, 332), True, 'import numpy as np\n'), ((776, 808), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_samples'], {}), '(0, np.pi, n_samples)\n', (787, 808), True, 'import numpy as np\n'), ((824, 860), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_samples'], {}), '(0, 2 * np.pi, n_samples)\n', (835, 860), True, 'import numpy as np\n'), ((892, 917), 'numpy.meshgrid', 'np.meshgrid', (['thetas', 'phis'], {}), '(thetas, phis)\n', (903, 917), True, 'import numpy as np\n'), ((1741, 1777), 'numpy.linspace', 'np.linspace', (['start', 'last', 'time_steps'], {}), '(start, last, time_steps)\n', (1752, 1777), True, 'import numpy as np\n'), ((3600, 3623), 'numpy.asarray', 'np.asarray', (['expect_data'], {}), '(expect_data)\n', (3610, 3623), True, 'import numpy as np\n'), ((5354, 5375), 'numpy.random.randn', 'np.random.randn', (['N', 'N'], {}), '(N, N)\n', (5369, 5375), True, 'import numpy as np\n'), ((5699, 5723), 'numpy.array', 'np.array', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (5707, 5723), True, 'import numpy as np\n'), ((5722, 5746), 'numpy.array', 'np.array', (['[[0.0], [1.0]]'], {}), '([[0.0], [1.0]])\n', (5730, 5746), True, 'import numpy as np\n'), ((6557, 6588), 'numpy.linspace', 'np.linspace', (['(0)', 'end', 'time_steps'], {}), '(0, end, time_steps)\n', (6568, 6588), True, 'import numpy as np\n'), ((7426, 7449), 'numpy.asarray', 'np.asarray', (['expect_data'], {}), '(expect_data)\n', (7436, 7449), True, 'import numpy as np\n'), ((348, 365), 'numpy.cos', 'np.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (354, 365), True, 'import numpy as np\n'), ((545, 558), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (551, 558), True, 'import numpy as np\n'), ((2230, 2256), 'numpy.zeros', 'np.zeros', (['(num_batches, 2)'], {}), '((num_batches, 2))\n', (2238, 2256), True, 'import numpy as np\n'), ((2439, 2467), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2.5)', '(1)'], {}), '(1, 2.5, 1)\n', (2456, 2467), True, 'import numpy as np\n'), ((2489, 2517), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2.5)', '(1)'], {}), '(1, 2.5, 1)\n', (2506, 2517), True, 'import numpy as np\n'), ((3767, 3806), 'numpy.where', 'np.where', (['(self.total_time_steps <= stop)'], {}), '(self.total_time_steps <= stop)\n', (3775, 3806), True, 'import numpy as np\n'), ((5384, 5405), 'numpy.random.randn', 'np.random.randn', (['N', 'N'], {}), '(N, N)\n', (5399, 5405), True, 'import numpy as np\n'), ((5471, 5485), 'numpy.diagonal', 'np.diagonal', (['R'], {}), '(R)\n', (5482, 5485), True, 'import numpy as np\n'), ((7588, 7627), 'numpy.where', 'np.where', (['(self.total_time_steps <= stop)'], {}), '(self.total_time_steps <= stop)\n', (7596, 7627), True, 'import numpy as np\n'), ((191, 201), 'numpy.real', 'np.real', (['a'], {}), '(a)\n', (198, 201), True, 'import numpy as np\n'), ((221, 233), 'numpy.abs', 'np.abs', (['a_oo'], {}), '(a_oo)\n', (227, 233), True, 'import numpy as np\n'), ((380, 398), 'numpy.complex', 'np.complex', (['(0)', 'phi'], {}), '(0, phi)\n', (390, 398), True, 'import numpy as np\n'), ((487, 500), 'numpy.sin', 'np.sin', 
(['theta'], {}), '(theta)\n', (493, 500), True, 'import numpy as np\n'), ((503, 514), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (509, 514), True, 'import numpy as np\n'), ((516, 529), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (522, 529), True, 'import numpy as np\n'), ((532, 543), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (538, 543), True, 'import numpy as np\n'), ((651, 676), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (665, 676), True, 'import numpy as np\n'), ((713, 738), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (727, 738), True, 'import numpy as np\n'), ((2329, 2355), 'numpy.zeros', 'np.zeros', (['(num_batches, 4)'], {}), '((num_batches, 4))\n', (2337, 2355), True, 'import numpy as np\n'), ((3356, 3390), 'numpy.asarray', 'np.asarray', (['states'], {'dtype': '"""double"""'}), "(states, dtype='double')\n", (3366, 3390), True, 'import numpy as np\n'), ((4094, 4112), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (4106, 4112), True, 'import matplotlib.pyplot as plt\n'), ((4927, 4941), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4936, 4941), True, 'import matplotlib.pyplot as plt\n'), ((5495, 5509), 'numpy.diagonal', 'np.diagonal', (['R'], {}), '(R)\n', (5506, 5509), True, 'import numpy as np\n'), ((6029, 6048), 'numpy.kron', 'np.kron', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (6036, 6048), True, 'import numpy as np\n'), ((6673, 6701), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2.5)', '(1)'], {}), '(1, 2.5, 1)\n', (6690, 6701), True, 'import numpy as np\n'), ((6726, 6754), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2.5)', '(1)'], {}), '(1, 2.5, 1)\n', (6743, 6754), True, 'import numpy as np\n'), ((7181, 7215), 'numpy.asarray', 'np.asarray', (['states'], {'dtype': '"""double"""'}), "(states, dtype='double')\n", (7191, 7215), True, 'import numpy as np\n'), ((2923, 2953), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(0.3)', '(2)'], {}), '(0.1, 0.3, 2)\n', (2940, 2953), True, 'import numpy as np\n'), ((3451, 3498), 'numpy.column_stack', 'np.column_stack', (['[state[0], state[1], state[2]]'], {}), '([state[0], state[1], state[2]])\n', (3466, 3498), True, 'import numpy as np\n'), ((6270, 6279), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6276, 6279), True, 'import numpy as np\n'), ((6296, 6305), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6302, 6305), True, 'import numpy as np\n'), ((6368, 6377), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6374, 6377), True, 'import numpy as np\n'), ((6394, 6403), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6400, 6403), True, 'import numpy as np\n'), ((7276, 7333), 'numpy.column_stack', 'np.column_stack', (['[state[0], state[1], state[2], state[3]]'], {}), '([state[0], state[1], state[2], state[3]])\n', (7291, 7333), True, 'import numpy as np\n'), ((3036, 3058), 'numpy.sqrt', 'np.sqrt', (['decay_samp[0]'], {}), '(decay_samp[0])\n', (3043, 3058), True, 'import numpy as np\n'), ((3071, 3093), 'numpy.sqrt', 'np.sqrt', (['decay_samp[1]'], {}), '(decay_samp[1])\n', (3078, 3093), True, 'import numpy as np\n')]
|
import os
from os.path import join
import sys
import socket
import subprocess
import json
import importlib
import numpy as np
import pyneal_helper_tools as helper_tools
# get dictionary with relevant paths for tests within this module
paths = helper_tools.get_pyneal_test_paths()
if paths['pynealDir'] not in sys.path:
sys.path.insert(0, paths['pynealDir'])
# import the pynealResults_sim module
spec = importlib.util.spec_from_file_location("pynealResults_sim",
join(paths['pynealDir'], 'utils/simulation/pynealResults_sim.py'))
pynealResults_sim = importlib.util.module_from_spec(spec)
spec.loader.exec_module(pynealResults_sim)
TR = 0
host = '127.0.0.1'
class Test_launchPynealSim():
""" test for utils.simulation.pynealResults_sim """
def test_launchPynealSim(self):
""" test pynealResults_sim.launchPynealSim
test the function that launches the simulator and populates it with
fake data all in one method
"""
port = 6000
pynealResults_sim.launchPynealSim(TR, host, port, keepAlive=False)
def test_pynealResultsSim_resultsServer(self):
""" test pynealResults_sim.ResultsServer
test the class the actually runs the simulated results server
"""
port = 6001
# launch the simulated results server
settings = {'pynealHost': host, 'resultsServerPort': port}
resultsServer = pynealResults_sim.ResultsServer(settings)
resultsServer.daemon = True
resultsServer.start()
# test updating the results server with results
fakeResults = np.array([5000.1, 5000.2, 5000.3])
for volIdx in range(3):
thisResult = {'testResult': fakeResults[volIdx]}
resultsServer.updateResults(volIdx, thisResult)
# test retrieving values from the results server
for volIdx in range(3):
result = resultsServer.requestLookup(volIdx)
assert result['testResult'] == fakeResults[volIdx]
# test sending a request from a remote socket connection
requestedVolIdx = 1 # vol that exists
result = fakeEndUserRequest(requestedVolIdx, port)
assert result['foundResults'] == True
assert result['testResult'] == fakeResults[requestedVolIdx]
requestedVolIdx = 99 # vol that doesn't exist
result = fakeEndUserRequest(requestedVolIdx, port)
assert result['foundResults'] == False
# assuming nothing crashed, close the socket
resultsServer.killServer()
def fakeEndUserRequest(requestedVolIdx, port):
""" Function to mimic the behavior of the end user, which sends a request
to the simulated results server
Parameters
----------
volIdx : int
the volIdx of the volume you'd like to request results for
"""
# socket configs
host = '127.0.0.1' # ip of where Pyneal is running
# connect to the results server of Pyneal
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((host, port))
    # send request for volume number. Request must be a 4-char string representing
# the volume number requested
request = str(requestedVolIdx).zfill(4)
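    # e.g. requestedVolIdx=7 is sent as the 4-character string '0007'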
clientSocket.send(request.encode())
# now read the full response from the server
serverResp = clientSocket.recv(1024)
clientSocket.close()
# format at JSON
serverResp = json.loads(serverResp.decode())
return serverResp
|
[
"pyneal_helper_tools.get_pyneal_test_paths",
"socket.socket",
"sys.path.insert",
"numpy.array",
"os.path.join",
"importlib.util.module_from_spec"
] |
[((246, 282), 'pyneal_helper_tools.get_pyneal_test_paths', 'helper_tools.get_pyneal_test_paths', ([], {}), '()\n', (280, 282), True, 'import pyneal_helper_tools as helper_tools\n'), ((574, 611), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (605, 611), False, 'import importlib\n'), ((330, 368), 'sys.path.insert', 'sys.path.insert', (['(0)', "paths['pynealDir']"], {}), "(0, paths['pynealDir'])\n", (345, 368), False, 'import sys\n'), ((487, 552), 'os.path.join', 'join', (["paths['pynealDir']", '"""utils/simulation/pynealResults_sim.py"""'], {}), "(paths['pynealDir'], 'utils/simulation/pynealResults_sim.py')\n", (491, 552), False, 'from os.path import join\n'), ((2975, 3024), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2988, 3024), False, 'import socket\n'), ((1605, 1639), 'numpy.array', 'np.array', (['[5000.1, 5000.2, 5000.3]'], {}), '([5000.1, 5000.2, 5000.3])\n', (1613, 1639), True, 'import numpy as np\n')]
|
import numpy as np
import random
from argparse import ArgumentParser
from keras.models import Model
from keras.utils import plot_model, to_categorical
from keras import backend as K
import tensorflow as tf
import os, shutil
from data import load_npz, convert_to_npz
from net import attach_classifier, create_base_network
from siam import contrastive_loss
from bhtsne import tsne
# workaround
from keras import losses
losses.contrastive_loss = contrastive_loss
# ---------- #
batch_size = 128
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
# config.gpu_options.allow_growth = True
sess = tf.Session(config = config)
K.set_session(sess)
def GetInliers(preds, mean, std):
pts = (preds - mean) / std
return np.power(pts, 2).sum(axis = 1) <= 1.0
__all__ = ['GetInliers']
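# GetInliers marks the rows of `preds` that fall inside the axis-aligned unit
# ellipsoid centred at `mean` with semi-axes `std`, i.e. points satisfying
# sum_i(((p_i - mean_i) / std_i) ** 2) <= 1. Illustrative example (not part of
# the original script):
#   GetInliers(np.array([[0.2, 0.1], [2.0, 2.0]]), np.array([0., 0.]), np.array([1., 1.]))
#   # -> array([ True, False])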
if __name__ == '__main__':
parser = ArgumentParser(description = 'Data runner')
parser.add_argument('--input-data', '-i',
type = str,
help = 'Path to input data',
required = True)
parser.add_argument('--model', '-m',
type = str,
help = 'Path to trained classifier',
required = True)
parser.add_argument('--output-dir', '-o',
type = str,
help = 'Path to output directory',
required = True)
parser.add_argument('--max-iter', '-t',
type = int,
help = 'TSNE max iterations',
default = 1000)
args = parser.parse_args()
input_path = args.input_data
model_path = args.model
output_dir = args.output_dir
max_iter = args.max_iter
X_input, y_input = load_npz(input_path)
X_input_train = X_input.astype(np.float32) / 255.0
model = create_base_network(X_input_train.shape[1:])
model.load_weights(model_path)
preds = model.predict(X_input_train, verbose = 1).astype(np.float64)
red_preds = tsne(preds, max_iter = max_iter)
num_classes = y_input.max() + 1
generations = [[] for j in range(3)]
for c in range(num_classes):
cur_preds = red_preds[y_input == c]
cur_mean = cur_preds.mean(0)
cur_std = cur_preds.std(0)
X_input_c = X_input.copy()[y_input == c]
print ('Original shape: %s' % cur_preds.shape)
for j in range(3):
inliers = GetInliers(cur_preds, cur_mean, cur_std)
generations[j].append(X_input_c[inliers])
cur_preds = cur_preds[inliers]
print ('Generation %d shape: %s' % (j, cur_preds.shape))
X_input_c = X_input_c[inliers]
cur_mean = cur_preds.mean(0)
cur_std = cur_preds.std(0)
base_name = os.path.split(input_path)[1].split('.')[0]
for i, g in enumerate(generations):
convert_to_npz(
np.concatenate(g),
np.concatenate([[0]*len(g[0]), [1]*len(g[1])]),
os.path.join(output_dir, '%s_gen%d.npz' % (base_name, i))
)
|
[
"argparse.ArgumentParser",
"net.create_base_network",
"bhtsne.tsne",
"numpy.power",
"keras.backend.set_session",
"tensorflow.Session",
"data.load_npz",
"tensorflow.ConfigProto",
"os.path.split",
"os.path.join",
"numpy.concatenate"
] |
[((507, 523), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (521, 523), True, 'import tensorflow as tf\n'), ((631, 656), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (641, 656), True, 'import tensorflow as tf\n'), ((659, 678), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (672, 678), True, 'from keras import backend as K\n'), ((861, 902), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Data runner"""'}), "(description='Data runner')\n", (875, 902), False, 'from argparse import ArgumentParser\n'), ((1749, 1769), 'data.load_npz', 'load_npz', (['input_path'], {}), '(input_path)\n', (1757, 1769), False, 'from data import load_npz, convert_to_npz\n'), ((1836, 1880), 'net.create_base_network', 'create_base_network', (['X_input_train.shape[1:]'], {}), '(X_input_train.shape[1:])\n', (1855, 1880), False, 'from net import attach_classifier, create_base_network\n'), ((2000, 2030), 'bhtsne.tsne', 'tsne', (['preds'], {'max_iter': 'max_iter'}), '(preds, max_iter=max_iter)\n', (2004, 2030), False, 'from bhtsne import tsne\n'), ((2797, 2814), 'numpy.concatenate', 'np.concatenate', (['g'], {}), '(g)\n', (2811, 2814), True, 'import numpy as np\n'), ((2877, 2934), 'os.path.join', 'os.path.join', (['output_dir', "('%s_gen%d.npz' % (base_name, i))"], {}), "(output_dir, '%s_gen%d.npz' % (base_name, i))\n", (2889, 2934), False, 'import os, shutil\n'), ((756, 772), 'numpy.power', 'np.power', (['pts', '(2)'], {}), '(pts, 2)\n', (764, 772), True, 'import numpy as np\n'), ((2690, 2715), 'os.path.split', 'os.path.split', (['input_path'], {}), '(input_path)\n', (2703, 2715), False, 'import os, shutil\n')]
|
import sys
from pathlib import Path
from PIL import Image
import numpy as np
RESOLUTIONS = ['HR']
DIML_PATH = Path(sys.argv[1]).resolve()
SAVE_PATH = DIML_PATH / 'npy'
TRAIN_RATIO = 0.9
assert DIML_PATH.exists()
SAVE_PATH.mkdir(exist_ok=True, parents=True)
for split in ('train', 'val', 'test'):
for resolution in RESOLUTIONS:
images, depth_maps = [], []
# use last images of each train scene as val data
split_file = 'train' if split == 'val' else split
scenes = sorted([s for s in (DIML_PATH / f'{split_file}/{resolution}').iterdir() if not s.stem.startswith('.')])
for scene in scenes:
image_files = sorted([s for s in scene.glob(f'color/*.png') if not s.stem.startswith('.')])
boundary = int(len(image_files) * TRAIN_RATIO)
if split == 'train':
slc = slice(0, boundary)
elif split == 'val':
slc = slice(boundary, None)
else:
# use all images from test set
slc = slice(None)
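            # e.g. a scene with 100 colour frames: boundary = int(100 * 0.9) = 90, so
            # frames 0-89 go to train and frames 90-99 to val; test scenes keep all frames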
for image_file in image_files[slc]:
images.append(np.array(Image.open(image_file)).transpose((2, 0, 1)))
depth_file = image_file.parent.parent / f'depth_filled/{image_file.stem[:-2]}_depth_filled.png'
depth_map = np.array(Image.open(depth_file))
# fit in uint16 to save memory
assert depth_map.max() <= np.iinfo(np.uint16).max
depth_maps.append(depth_map.astype(np.uint16))
print(f'{split}/{resolution}: {len(images)} images')
np.save(str(SAVE_PATH / f'images_{split}_{resolution}.npy'), images)
np.save(str(SAVE_PATH / f'depth_{split}_{resolution}.npy'), depth_maps)
|
[
"pathlib.Path",
"numpy.iinfo",
"PIL.Image.open"
] |
[((112, 129), 'pathlib.Path', 'Path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (116, 129), False, 'from pathlib import Path\n'), ((1336, 1358), 'PIL.Image.open', 'Image.open', (['depth_file'], {}), '(depth_file)\n', (1346, 1358), False, 'from PIL import Image\n'), ((1449, 1468), 'numpy.iinfo', 'np.iinfo', (['np.uint16'], {}), '(np.uint16)\n', (1457, 1468), True, 'import numpy as np\n'), ((1140, 1162), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (1150, 1162), False, 'from PIL import Image\n')]
|
import arcpy
import numpy
import scipy
from scipy import spatial
#inputs
inFeatures = arcpy.GetParameterAsText(0) #input features (type: feature class, required)
targetFeatures = arcpy.GetParameterAsText(1) #near features (type: feature class, required)
toReturn = arcpy.GetParameter(2) #number of near features to return (type: long, optional, default:2)
outLocation = arcpy.GetParameterAsText(3) #output table location (type: workspace, required)
snapped = arcpy.GetParameterAsText(4) #return snapped features (type: boolean, default:false)
#mapping variables
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd)[0]
#uniqueID
OID = arcpy.Describe(inFeatures).OIDFieldName
nearOID = arcpy.Describe(targetFeatures).OIDFieldName
#list of X,Y coordinates
fields = ['OID@','SHAPE@X','SHAPE@Y']
inXY = {row[0]: (round(row[1],4),round(row[2],4)) for row in arcpy.da.SearchCursor(inFeatures,fields)}
targetXY = ((int(row[0]),round(row[1],4),round(row[2],4)) for row in arcpy.da.SearchCursor(targetFeatures,fields))
#convert to array
pntArray = numpy.array(list(targetXY))
#built KDTree
tree = scipy.spatial.cKDTree(pntArray[:, [1,2]]) #specify only last x,y columns
#list to add to then create array from later
listArray = []
#loop over orig points, convert each to array and compare
count = 0
if snapped == 'true':
for k,p in inXY.iteritems():
distances = [] #distance container
xy = numpy.asarray(p)
index = tree.query_ball_point(xy, r=100)
for r in index:
XA = xy.reshape(-1,2)
XB = pntArray[r][1:].reshape(-1,2) #split array, reshape
distance = scipy.spatial.distance.cdist(XA,XB,metric = 'euclidean')#get distances
row = list(pntArray[r])
row.insert(0, int(k)) #insert key value at [0] index
row.append(round(float(distance),3)) #append distances to list
del row[2:4] #delete x,y values since we don't need them anymore
distances.append(tuple(row)) #append to master list
f = sorted(distances, key = lambda x:x[-1]) #sort based on dist values
for i in f[:toReturn]:
listArray.append(i)
else:
for k,p in inXY.iteritems():
distances = [] #distance container
xy = numpy.asarray(p)
index = tree.query_ball_point(xy, r=100)
for r in index:
XA = xy.reshape(-1,2)
XB = pntArray[r][1:].reshape(-1,2) #split array, reshape
distance = scipy.spatial.distance.cdist(XA,XB,metric = 'euclidean')#get distances
if numpy.all(pntArray[r][1:] != xy):#check to see if it's comparing itself
row = list(pntArray[r])
row.insert(0, int(k)) #insert key value at [0] index
row.append(round(float(distance),3)) #append distances to list
del row[2:4] #delete x,y values since we don't need them anymore
distances.append(tuple(row)) #append to master list
f = sorted(distances, key = lambda x:x[-1]) #sort based on dist values
for i in f[:toReturn]:
listArray.append(i)
if listArray:
#format array to create table from
dts = [('IN_{0}'.format(OID),'uint64'),('NEAR_{0}'.format(nearOID),'uint64'),('NEAR_DIST','<f8')]
formArray = numpy.array(listArray, dtype = dts)
    #export to gdb table or dbf
if '.gdb' in outLocation:
arcpy.da.NumPyArrayToTable(formArray, outLocation)
else:
arcpy.da.NumPyArrayToTable(formArray, outLocation + '.dbf')
outLocation = outLocation + '.dbf'
try:
#mapping variables, add table to map
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
table = arcpy.mapping.TableView(r'{0}'.format(outLocation))
arcpy.mapping.AddTableView(df, table)
arcpy.RefreshActiveView()
arcpy.AddMessage("Output table added to map")
except:
pass
arcpy.AddMessage("Output location:{0}".format(outLocation))
else:
arcpy.AddWarning("No output table was made.")
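
# Illustrative aside (not part of the original tool): the core spatial query used
# above, stripped of the arcpy bookkeeping. cKDTree.query_ball_point returns the
# indices of every target point within radius r of the query point; the exact
# euclidean distances are then computed with scipy.spatial.distance.cdist so the
# closest ones can be kept. The helper name and sample coordinates are made up.
def _kdtree_query_demo():
    pts = numpy.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
    demo_tree = scipy.spatial.cKDTree(pts)
    idx = demo_tree.query_ball_point(numpy.array([0.2, 0.2]), r=2.0)
    print(sorted(idx))  # -> [0, 1]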
|
[
"scipy.spatial.distance.cdist",
"arcpy.GetParameter",
"arcpy.GetParameterAsText",
"arcpy.da.SearchCursor",
"arcpy.mapping.ListDataFrames",
"arcpy.Describe",
"numpy.asarray",
"arcpy.AddMessage",
"arcpy.RefreshActiveView",
"numpy.array",
"arcpy.AddWarning",
"scipy.spatial.cKDTree",
"arcpy.mapping.MapDocument",
"arcpy.mapping.AddTableView",
"numpy.all",
"arcpy.da.NumPyArrayToTable"
] |
[((88, 115), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(0)'], {}), '(0)\n', (112, 115), False, 'import arcpy\n'), ((181, 208), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(1)'], {}), '(1)\n', (205, 208), False, 'import arcpy\n'), ((267, 288), 'arcpy.GetParameter', 'arcpy.GetParameter', (['(2)'], {}), '(2)\n', (285, 288), False, 'import arcpy\n'), ((372, 399), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(3)'], {}), '(3)\n', (396, 399), False, 'import arcpy\n'), ((461, 488), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(4)'], {}), '(4)\n', (485, 488), False, 'import arcpy\n'), ((571, 607), 'arcpy.mapping.MapDocument', 'arcpy.mapping.MapDocument', (['"""CURRENT"""'], {}), "('CURRENT')\n", (596, 607), False, 'import arcpy\n'), ((1123, 1165), 'scipy.spatial.cKDTree', 'scipy.spatial.cKDTree', (['pntArray[:, [1, 2]]'], {}), '(pntArray[:, [1, 2]])\n', (1144, 1165), False, 'import scipy\n'), ((613, 646), 'arcpy.mapping.ListDataFrames', 'arcpy.mapping.ListDataFrames', (['mxd'], {}), '(mxd)\n', (641, 646), False, 'import arcpy\n'), ((667, 693), 'arcpy.Describe', 'arcpy.Describe', (['inFeatures'], {}), '(inFeatures)\n', (681, 693), False, 'import arcpy\n'), ((717, 747), 'arcpy.Describe', 'arcpy.Describe', (['targetFeatures'], {}), '(targetFeatures)\n', (731, 747), False, 'import arcpy\n'), ((3360, 3393), 'numpy.array', 'numpy.array', (['listArray'], {'dtype': 'dts'}), '(listArray, dtype=dts)\n', (3371, 3393), False, 'import numpy\n'), ((4135, 4180), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""No output table was made."""'], {}), "('No output table was made.')\n", (4151, 4180), False, 'import arcpy\n'), ((886, 927), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['inFeatures', 'fields'], {}), '(inFeatures, fields)\n', (907, 927), False, 'import arcpy\n'), ((997, 1042), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['targetFeatures', 'fields'], {}), '(targetFeatures, fields)\n', (1018, 1042), False, 'import arcpy\n'), ((1438, 1454), 'numpy.asarray', 'numpy.asarray', (['p'], {}), '(p)\n', (1451, 1454), False, 'import numpy\n'), ((2308, 2324), 'numpy.asarray', 'numpy.asarray', (['p'], {}), '(p)\n', (2321, 2324), False, 'import numpy\n'), ((3492, 3542), 'arcpy.da.NumPyArrayToTable', 'arcpy.da.NumPyArrayToTable', (['formArray', 'outLocation'], {}), '(formArray, outLocation)\n', (3518, 3542), False, 'import arcpy\n'), ((3561, 3620), 'arcpy.da.NumPyArrayToTable', 'arcpy.da.NumPyArrayToTable', (['formArray', "(outLocation + '.dbf')"], {}), "(formArray, outLocation + '.dbf')\n", (3587, 3620), False, 'import arcpy\n'), ((3733, 3769), 'arcpy.mapping.MapDocument', 'arcpy.mapping.MapDocument', (['"""CURRENT"""'], {}), "('CURRENT')\n", (3758, 3769), False, 'import arcpy\n'), ((3901, 3938), 'arcpy.mapping.AddTableView', 'arcpy.mapping.AddTableView', (['df', 'table'], {}), '(df, table)\n', (3927, 3938), False, 'import arcpy\n'), ((3947, 3972), 'arcpy.RefreshActiveView', 'arcpy.RefreshActiveView', ([], {}), '()\n', (3970, 3972), False, 'import arcpy\n'), ((3981, 4026), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Output table added to map"""'], {}), "('Output table added to map')\n", (3997, 4026), False, 'import arcpy\n'), ((1667, 1723), 'scipy.spatial.distance.cdist', 'scipy.spatial.distance.cdist', (['XA', 'XB'], {'metric': '"""euclidean"""'}), "(XA, XB, metric='euclidean')\n", (1695, 1723), False, 'import scipy\n'), ((2537, 2593), 'scipy.spatial.distance.cdist', 'scipy.spatial.distance.cdist', (['XA', 'XB'], {'metric': '"""euclidean"""'}), 
"(XA, XB, metric='euclidean')\n", (2565, 2593), False, 'import scipy\n'), ((2624, 2656), 'numpy.all', 'numpy.all', (['(pntArray[r][1:] != xy)'], {}), '(pntArray[r][1:] != xy)\n', (2633, 2656), False, 'import numpy\n'), ((3783, 3821), 'arcpy.mapping.ListDataFrames', 'arcpy.mapping.ListDataFrames', (['mxd', '"""*"""'], {}), "(mxd, '*')\n", (3811, 3821), False, 'import arcpy\n')]
|
import os
import re
import fileinput
import time
import subprocess
import numpy as np
from sklearn.utils import class_weight
import nibabel as nib
from nibabel import load as load_nii
import torch
from torch.autograd import Variable
from skimage import transform as skt
def deepMask(args, model, id, t1w_np, t2w_np, t1w_fname, t2w_fname, nifti=True):
dst = args.outdir
case_id = id
model.eval()
start_time = time.time()
data = normalize_resize_to_tensor(t1w_np, t2w_np, args)
# load original input with header and affine
_, header, affine, out_shape = get_nii_hdr_affine(t1w_fname)
shape = data.size()
    # move the batch tensor to the GPU when CUDA is requested
if args.cuda:
data.pin_memory()
data = data.cuda().float()
else:
data = data.float()
data = Variable(data, volatile=True)
output = model(data)
output = torch.argmax(output, dim=1)
output = output.view(shape[2:])
output = output.cpu()
output = output.data.numpy()
print("save {}".format(case_id))
if not os.path.exists(os.path.join(dst)):
os.makedirs(os.path.join(dst), exist_ok=True)
if nifti:
affine = header.get_qform()
output = skt.resize(output, output_shape=out_shape, order=1, mode='wrap', preserve_range=1, anti_aliasing=True)
nii_out = nib.Nifti1Image(output, affine, header)
nii_out.to_filename(os.path.join(dst, case_id+"_vnet_maskpred.nii.gz"))
elapsed_time = time.time() - start_time
print("="*70)
print("=> inference time: {} seconds".format(round(elapsed_time,2)))
print("="*70)
# config = './utils/dense3dCrf/config_densecrf.txt'
cwd = os.path.dirname(__file__)
config = os.path.join(cwd, 'dense3dCrf/config_densecrf.txt')
start_time = time.time()
denseCRF(case_id, t1w_fname, t2w_fname, out_shape, config, dst, os.path.join(dst, case_id+"_vnet_maskpred.nii.gz"))
elapsed_time = time.time() - start_time
print("="*70)
print("=> dense 3D-CRF inference time: {} seconds".format(round(elapsed_time,2)))
print("="*70)
fname = os.path.join(dst, case_id + '_denseCrf3dSegmMap.nii.gz')
seg_map = load_nii(fname).get_fdata()
return seg_map
def normalize_resize_to_tensor(t1w_np, t2w_np, args):
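    # z-score each volume over its nonzero voxels, resize both volumes to args.resize,
    # then stack them into a (1, 2, *args.resize) tensor for the network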
t1w_np = (t1w_np.astype(dtype=np.float32) - t1w_np[np.nonzero(t1w_np)].mean()) / t1w_np[np.nonzero(t1w_np)].std()
t2w_np = (t2w_np.astype(dtype=np.float32) - t2w_np[np.nonzero(t2w_np)].mean()) / t2w_np[np.nonzero(t2w_np)].std()
t1w_np = skt.resize(t1w_np, args.resize, mode='constant', preserve_range=1)
t2w_np = skt.resize(t2w_np, args.resize, mode='constant', preserve_range=1)
data = torch.unsqueeze(torch.from_numpy(np.stack((t1w_np, t2w_np), axis=0)), 0)
return data
def denseCRF(id, t1, t2, input_shape, config, out_dir, pred_labels):
cwd = os.path.dirname(__file__)
X, Y, Z = input_shape
config_tmp = "/tmp/" + id + "_config_densecrf.txt"
subprocess.call(["cp", "-f", config, config_tmp])
# find and replace placeholder variables in the config file with actual filenames
find_str = [
"<ID_PLACEHOLDER>", "<T1_FILE_PLACEHOLDER>", "<FLAIR_FILE_PLACEHOLDER>",
"<OUTDIR_PLACEHOLDER>", "<PRED_LABELS_PLACEHOLDER>",
"<X_PLACEHOLDER>", "<Y_PLACEHOLDER>", "<Z_PLACEHOLDER>"
]
replace_str = [
str(id), str(t1), str(t2),
str(out_dir), str(pred_labels),
str(X), str(Y), str(Z)
]
for fs, rs in zip(find_str, replace_str):
find_replace_re(config_tmp, fs, rs)
# subprocess.call(["./utils/dense3dCrf/dense3DCrfInferenceOnNiis", "-c", config_tmp])
subprocess.call([os.path.join(cwd, 'dense3dCrf/dense3DCrfInferenceOnNiis'), "-c", config_tmp])
def datestr():
now = time.gmtime()
return '{}{:02}{:02}_{:02}{:02}'.format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
def find_replace_re(config_tmp, find_str, replace_str):
with fileinput.FileInput(config_tmp, inplace=True, backup='.bak') as file:
for line in file:
print(re.sub(find_str, str(replace_str), line.rstrip(), flags=re.MULTILINE), end='\n')
def compute_weights(labels, binary=False):
if binary:
labels = (labels > 0).astype(np.int_)
weights = class_weight.compute_class_weight('balanced', np.unique(labels.flatten()), labels.flatten())
return weights
def dice_gross(image, label, empty_score=1.0):
image = (image > 0).astype(np.int_)
label = (label > 0).astype(np.int_)
image = np.asarray(image).astype(np.bool)
label = np.asarray(label).astype(np.bool)
if image.shape != label.shape:
raise ValueError("Shape mismatch: image {0} and label {1} must have the same shape.".format(image.shape, label.shape))
im_sum = image.sum() + label.sum()
if im_sum == 0:
return empty_score
# compute Dice coefficient
intersection = np.logical_and(image, label)
return 2. * intersection.sum() / im_sum
def get_nii_hdr_affine(t1w_fname):
nifti = load_nii(t1w_fname)
shape = nifti.get_fdata().shape
header = load_nii(t1w_fname).header
affine = header.get_qform()
return nifti, header, affine, shape
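# --- usage sketch added by the editor, not part of the original module ---
# dice_gross() is self-contained given a NumPy version where np.bool is still
# available (as the function above assumes); the synthetic masks below are
# illustrative, not data from the deepMask pipeline.
if __name__ == '__main__':
    _pred = np.zeros((4, 4, 4))
    _pred[1:3, 1:3, 1:3] = 1
    _ref = np.zeros((4, 4, 4))
    _ref[1:3, 1:3, 2:4] = 1
    print('gross Dice overlap:', dice_gross(_pred, _ref))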
|
[
"nibabel.Nifti1Image",
"numpy.stack",
"numpy.logical_and",
"nibabel.load",
"time.gmtime",
"torch.autograd.Variable",
"os.path.dirname",
"torch.argmax",
"numpy.asarray",
"fileinput.FileInput",
"time.time",
"numpy.nonzero",
"skimage.transform.resize",
"subprocess.call",
"os.path.join"
] |
[((428, 439), 'time.time', 'time.time', ([], {}), '()\n', (437, 439), False, 'import time\n'), ((802, 831), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (810, 831), False, 'from torch.autograd import Variable\n'), ((870, 897), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (882, 897), False, 'import torch\n'), ((1661, 1686), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1676, 1686), False, 'import os\n'), ((1700, 1751), 'os.path.join', 'os.path.join', (['cwd', '"""dense3dCrf/config_densecrf.txt"""'], {}), "(cwd, 'dense3dCrf/config_densecrf.txt')\n", (1712, 1751), False, 'import os\n'), ((1770, 1781), 'time.time', 'time.time', ([], {}), '()\n', (1779, 1781), False, 'import time\n'), ((2081, 2137), 'os.path.join', 'os.path.join', (['dst', "(case_id + '_denseCrf3dSegmMap.nii.gz')"], {}), "(dst, case_id + '_denseCrf3dSegmMap.nii.gz')\n", (2093, 2137), False, 'import os\n'), ((2504, 2570), 'skimage.transform.resize', 'skt.resize', (['t1w_np', 'args.resize'], {'mode': '"""constant"""', 'preserve_range': '(1)'}), "(t1w_np, args.resize, mode='constant', preserve_range=1)\n", (2514, 2570), True, 'from skimage import transform as skt\n'), ((2584, 2650), 'skimage.transform.resize', 'skt.resize', (['t2w_np', 'args.resize'], {'mode': '"""constant"""', 'preserve_range': '(1)'}), "(t2w_np, args.resize, mode='constant', preserve_range=1)\n", (2594, 2650), True, 'from skimage import transform as skt\n'), ((2832, 2857), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2847, 2857), False, 'import os\n'), ((2943, 2992), 'subprocess.call', 'subprocess.call', (["['cp', '-f', config, config_tmp]"], {}), "(['cp', '-f', config, config_tmp])\n", (2958, 2992), False, 'import subprocess\n'), ((3840, 3853), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (3851, 3853), False, 'import time\n'), ((4965, 4993), 'numpy.logical_and', 'np.logical_and', (['image', 'label'], {}), '(image, label)\n', (4979, 4993), True, 'import numpy as np\n'), ((5088, 5107), 'nibabel.load', 'load_nii', (['t1w_fname'], {}), '(t1w_fname)\n', (5096, 5107), True, 'from nibabel import load as load_nii\n'), ((1199, 1305), 'skimage.transform.resize', 'skt.resize', (['output'], {'output_shape': 'out_shape', 'order': '(1)', 'mode': '"""wrap"""', 'preserve_range': '(1)', 'anti_aliasing': '(True)'}), "(output, output_shape=out_shape, order=1, mode='wrap',\n preserve_range=1, anti_aliasing=True)\n", (1209, 1305), True, 'from skimage import transform as skt\n'), ((1320, 1359), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['output', 'affine', 'header'], {}), '(output, affine, header)\n', (1335, 1359), True, 'import nibabel as nib\n'), ((1460, 1471), 'time.time', 'time.time', ([], {}), '()\n', (1469, 1471), False, 'import time\n'), ((1850, 1902), 'os.path.join', 'os.path.join', (['dst', "(case_id + '_vnet_maskpred.nii.gz')"], {}), "(dst, case_id + '_vnet_maskpred.nii.gz')\n", (1862, 1902), False, 'import os\n'), ((1921, 1932), 'time.time', 'time.time', ([], {}), '()\n', (1930, 1932), False, 'import time\n'), ((4028, 4088), 'fileinput.FileInput', 'fileinput.FileInput', (['config_tmp'], {'inplace': '(True)', 'backup': '""".bak"""'}), "(config_tmp, inplace=True, backup='.bak')\n", (4047, 4088), False, 'import fileinput\n'), ((5157, 5176), 'nibabel.load', 'load_nii', (['t1w_fname'], {}), '(t1w_fname)\n', (5165, 5176), True, 'from nibabel import load as load_nii\n'), ((1057, 1074), 'os.path.join', 'os.path.join', 
(['dst'], {}), '(dst)\n', (1069, 1074), False, 'import os\n'), ((1097, 1114), 'os.path.join', 'os.path.join', (['dst'], {}), '(dst)\n', (1109, 1114), False, 'import os\n'), ((1388, 1440), 'os.path.join', 'os.path.join', (['dst', "(case_id + '_vnet_maskpred.nii.gz')"], {}), "(dst, case_id + '_vnet_maskpred.nii.gz')\n", (1400, 1440), False, 'import os\n'), ((2152, 2167), 'nibabel.load', 'load_nii', (['fname'], {}), '(fname)\n', (2160, 2167), True, 'from nibabel import load as load_nii\n'), ((2695, 2729), 'numpy.stack', 'np.stack', (['(t1w_np, t2w_np)'], {'axis': '(0)'}), '((t1w_np, t2w_np), axis=0)\n', (2703, 2729), True, 'import numpy as np\n'), ((3735, 3792), 'os.path.join', 'os.path.join', (['cwd', '"""dense3dCrf/dense3DCrfInferenceOnNiis"""'], {}), "(cwd, 'dense3dCrf/dense3DCrfInferenceOnNiis')\n", (3747, 3792), False, 'import os\n'), ((4584, 4601), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (4594, 4601), True, 'import numpy as np\n'), ((4630, 4647), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (4640, 4647), True, 'import numpy as np\n'), ((2347, 2365), 'numpy.nonzero', 'np.nonzero', (['t1w_np'], {}), '(t1w_np)\n', (2357, 2365), True, 'import numpy as np\n'), ((2465, 2483), 'numpy.nonzero', 'np.nonzero', (['t2w_np'], {}), '(t2w_np)\n', (2475, 2483), True, 'import numpy as np\n'), ((2310, 2328), 'numpy.nonzero', 'np.nonzero', (['t1w_np'], {}), '(t1w_np)\n', (2320, 2328), True, 'import numpy as np\n'), ((2428, 2446), 'numpy.nonzero', 'np.nonzero', (['t2w_np'], {}), '(t2w_np)\n', (2438, 2446), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
The Evolved Transformer and large-scale evolution of image classifiers
======================================================================
Implement evolution to exploit configurations with fixed resource efficiently
"""
import copy
import importlib
import logging
import numpy as np
from orion.algo.hyperband import Hyperband, HyperbandBracket
from orion.core.utils import format_trials
logger = logging.getLogger(__name__)
REGISTRATION_ERROR = """
Bad fidelity level {fidelity}. Should be in {budgets}.
Params: {params}
"""
SPACE_ERROR = """
EvolutionES cannot be used if space does not contain a fidelity dimension.
"""
BUDGET_ERROR = """
Cannot build budgets below max_resources;
(max: {}) - (min: {}) > (num_rungs: {})
"""
def compute_budgets(
min_resources, max_resources, reduction_factor, nums_population, pairs
):
"""Compute the budgets used for each execution of hyperband"""
budgets_eves = []
if reduction_factor == 1:
for i in range(min_resources, max_resources + 1):
if i == min_resources:
budgets_eves.append([(nums_population, i)])
else:
budgets_eves[0].append((pairs * 2, i))
else:
num_brackets = int(np.log(max_resources) / np.log(reduction_factor))
budgets = []
budgets_tab = {} # just for display consideration
for bracket_id in range(0, num_brackets + 1):
bracket_budgets = []
num_trials = int(
np.ceil(
int((num_brackets + 1) / (num_brackets - bracket_id + 1))
* (reduction_factor ** (num_brackets - bracket_id))
)
)
min_resources = max_resources / reduction_factor ** (
num_brackets - bracket_id
)
for i in range(0, num_brackets - bracket_id + 1):
n_i = int(num_trials / reduction_factor ** i)
min_i = int(min_resources * reduction_factor ** i)
bracket_budgets.append((n_i, min_i))
if budgets_tab.get(i):
budgets_tab[i].append((n_i, min_i))
else:
budgets_tab[i] = [(n_i, min_i)]
budgets.append(bracket_budgets)
for i in range(len(budgets[0])):
if i == 0:
budgets_eves.append([(nums_population, budgets[0][i][1])])
else:
budgets_eves[0].append((pairs * 2, budgets[0][i][1]))
return budgets_eves
class EvolutionES(Hyperband):
"""EvolutionES formulates hyperparameter optimization as an evolution.
For more information on the algorithm,
    see the original papers at
https://arxiv.org/pdf/1703.01041.pdf and
https://arxiv.org/pdf/1901.11117.pdf
Real et al. "Large-Scale Evolution of Image Classifiers"
    So et al. "The Evolved Transformer"
Parameters
----------
space: `orion.algo.space.Space`
Optimisation space with priors for each dimension.
seed: None, int or sequence of int
Seed for the random number generator used to sample new trials.
Default: ``None``
repetitions: int
        Number of executions of Hyperband. Default is numpy.inf, which means to
run Hyperband until no new trials can be suggested.
nums_population: int
        Population size for EvolutionES. A larger population often achieves better
        performance but requires more computation, so there is a trade-off that depends
        on the search space and the available budget of your problem.
Default: 20
mutate: str or None, optional
        A customized mutate function can be provided together with its mutate factors, such as a
        multiply factor (multiply/divide by that factor) and an add factor (add/subtract that
        amount). The function must be given as an importable string. If None, the default
        mutate function ``orion.algo.mutate_functions.default_mutate`` is used.
"""
requires_type = None
requires_dist = None
requires_shape = "flattened"
def __init__(
self,
space,
seed=None,
repetitions=np.inf,
nums_population=20,
mutate=None,
max_retries=1000,
):
super(EvolutionES, self).__init__(space, seed=seed, repetitions=repetitions)
pair = nums_population // 2
mutate_ratio = 0.3
self.nums_population = nums_population
self.nums_comp_pairs = pair
self.max_retries = max_retries
self.mutate_ratio = mutate_ratio
self.mutate = mutate
self.nums_mutate_gene = (
int((len(self.space.values()) - 1) * mutate_ratio)
if int((len(self.space.values()) - 1) * mutate_ratio) > 0
else 1
)
self._param_names += ["nums_population", "mutate", "max_retries"]
self.hurdles = []
self.population = {}
for i, dim in enumerate(self.space.values()):
if dim.type != "fidelity":
self.population[i] = [-1] * nums_population
self.performance = np.inf * np.ones(nums_population)
self.budgets = compute_budgets(
self.min_resources,
self.max_resources,
self.reduction_factor,
nums_population,
pair,
)
self.brackets = [
BracketEVES(self, bracket_budgets, 1) for bracket_budgets in self.budgets
]
self.seed_rng(seed)
@property
def state_dict(self):
"""Return a state dict that can be used to reset the state of the algorithm."""
state_dict = super(EvolutionES, self).state_dict
state_dict["population"] = copy.deepcopy(self.population)
state_dict["performance"] = copy.deepcopy(self.performance)
state_dict["hurdles"] = copy.deepcopy(self.hurdles)
return state_dict
def set_state(self, state_dict):
"""Reset the state of the algorithm based on the given state_dict"""
super(EvolutionES, self).set_state(state_dict)
self.population = state_dict["population"]
self.performance = state_dict["performance"]
self.hurdles = state_dict["hurdles"]
def _get_bracket(self, trial):
"""Get the bracket of a trial during observe"""
return self.brackets[-1]
class BracketEVES(HyperbandBracket):
"""Bracket of rungs for the algorithm Hyperband.
Parameters
----------
evolutiones: `evolutiones` algorithm
The evolutiones algorithm object which this bracket will be part of.
budgets: list of tuple
Each tuple gives the (n_trials, resource_budget) for the respective rung.
repetition_id: int
The id of hyperband execution this bracket belongs to
"""
def __init__(self, evolution_es, budgets, repetition_id):
super(BracketEVES, self).__init__(evolution_es, budgets, repetition_id)
self.eves = self.hyperband
self.search_space_without_fidelity = []
self._candidates = {}
if evolution_es.mutate:
self.mutate_attr = copy.deepcopy(evolution_es.mutate)
else:
self.mutate_attr = {}
function_string = self.mutate_attr.pop(
"function", "orion.algo.mutate_functions.default_mutate"
)
mod_name, func_name = function_string.rsplit(".", 1)
mod = importlib.import_module(mod_name)
self.mutate_func = getattr(mod, func_name)
for i, dim in enumerate(self.space.values()):
if dim.type != "fidelity":
self.search_space_without_fidelity.append(i)
@property
def space(self):
return self.eves.space
@property
def state_dict(self):
state_dict = super(BracketEVES, self).state_dict
state_dict["candidates"] = copy.deepcopy(self._candidates)
return state_dict
def set_state(self, state_dict):
super(BracketEVES, self).set_state(state_dict)
self._candidates = state_dict["candidates"]
def _get_teams(self, rung_id):
"""Get the red team and blue team"""
if self.has_rung_filled(rung_id + 1):
return []
rung = self.rungs[rung_id]["results"]
population_range = (
self.eves.nums_population
if len(list(rung.values())) > self.eves.nums_population
else len(list(rung.values()))
)
rung_trials = list(rung.values())
for trial_index in range(population_range):
objective, trial = rung_trials[trial_index]
self.eves.performance[trial_index] = objective
for ith_dim in self.search_space_without_fidelity:
self.eves.population[ith_dim][trial_index] = trial.params[
self.space[ith_dim].name
]
population_index = list(range(self.eves.nums_population))
red_team = self.eves.rng.choice(
population_index, self.eves.nums_comp_pairs, replace=False
)
diff_list = list(set(population_index).difference(set(red_team)))
blue_team = self.eves.rng.choice(
diff_list, self.eves.nums_comp_pairs, replace=False
)
return rung, population_range, red_team, blue_team
def _mutate_population(self, red_team, blue_team, rung, population_range, fidelity):
"""Get the mutated population and hurdles"""
winner_list = []
loser_list = []
if set(red_team) != set(blue_team):
hurdles = 0
for i, _ in enumerate(red_team):
winner, loser = (
(red_team, blue_team)
if self.eves.performance[red_team[i]]
< self.eves.performance[blue_team[i]]
else (blue_team, red_team)
)
winner_list.append(winner[i])
loser_list.append(loser[i])
hurdles += self.eves.performance[winner[i]]
self._mutate(winner[i], loser[i])
hurdles /= len(red_team)
self.eves.hurdles.append(hurdles)
logger.debug("Evolution hurdles are: %s", str(self.eves.hurdles))
trials = []
trial_ids = set()
nums_all_equal = [0] * population_range
for i in range(population_range):
point = [0] * len(self.space)
while True:
point = list(point)
point[
list(self.space.keys()).index(self.eves.fidelity_index)
] = fidelity
for j in self.search_space_without_fidelity:
point[j] = self.eves.population[j][i]
trial = format_trials.tuple_to_trial(point, self.space)
trial = self.eves.format_trial(trial)
trial_id = self.eves.get_id(trial)
if trial_id in trial_ids:
nums_all_equal[i] += 1
logger.debug("find equal one, continue to mutate.")
self._mutate(i, i)
elif self.eves.has_suggested(trial):
nums_all_equal[i] += 1
logger.debug("find one already suggested, continue to mutate.")
self._mutate(i, i)
else:
break
if nums_all_equal[i] > self.eves.max_retries:
logger.warning(
"Can not Evolve any more. You can make an early stop."
)
break
if nums_all_equal[i] < self.eves.max_retries:
trials.append(trial)
trial_ids.add(trial_id)
else:
logger.debug("Dropping trial %s", trial)
return trials, np.array(nums_all_equal)
def get_candidates(self, rung_id):
"""Get a candidate for promotion"""
if rung_id not in self._candidates:
rung, population_range, red_team, blue_team = self._get_teams(rung_id)
fidelity = self.rungs[rung_id + 1]["resources"]
self._candidates[rung_id] = self._mutate_population(
red_team, blue_team, rung, population_range, fidelity
)[0]
candidates = []
for candidate in self._candidates[rung_id]:
if not self.eves.has_suggested(candidate):
candidates.append(candidate)
return candidates
def _mutate(self, winner_id, loser_id):
select_genes_key_list = self.eves.rng.choice(
self.search_space_without_fidelity,
self.eves.nums_mutate_gene,
replace=False,
)
self.copy_winner(winner_id, loser_id)
kwargs = copy.deepcopy(self.mutate_attr)
for i, _ in enumerate(select_genes_key_list):
space = self.space.values()[select_genes_key_list[i]]
old = self.eves.population[select_genes_key_list[i]][loser_id]
new = self.mutate_func(space, self.eves.rng, old, **kwargs)
self.eves.population[select_genes_key_list[i]][loser_id] = new
self.eves.performance[loser_id] = -1
def copy_winner(self, winner_id, loser_id):
"""Copy winner to loser"""
for key in self.search_space_without_fidelity:
self.eves.population[key][loser_id] = self.eves.population[key][winner_id]
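# --- usage sketch added by the editor, not part of the original module ---
# compute_budgets() needs nothing beyond NumPy, although running this file
# directly still requires orion to be importable; the resource values below are
# illustrative assumptions, not Orion defaults.
if __name__ == "__main__":
    budgets_eves = compute_budgets(
        min_resources=1, max_resources=8, reduction_factor=2,
        nums_population=20, pairs=10)
    # a single "eve" is returned: a list of (number_of_trials, fidelity) rungs
    print(budgets_eves)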
|
[
"copy.deepcopy",
"numpy.log",
"importlib.import_module",
"numpy.ones",
"numpy.array",
"orion.core.utils.format_trials.tuple_to_trial",
"logging.getLogger"
] |
[((433, 460), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (450, 460), False, 'import logging\n'), ((5757, 5787), 'copy.deepcopy', 'copy.deepcopy', (['self.population'], {}), '(self.population)\n', (5770, 5787), False, 'import copy\n'), ((5824, 5855), 'copy.deepcopy', 'copy.deepcopy', (['self.performance'], {}), '(self.performance)\n', (5837, 5855), False, 'import copy\n'), ((5888, 5915), 'copy.deepcopy', 'copy.deepcopy', (['self.hurdles'], {}), '(self.hurdles)\n', (5901, 5915), False, 'import copy\n'), ((7436, 7469), 'importlib.import_module', 'importlib.import_module', (['mod_name'], {}), '(mod_name)\n', (7459, 7469), False, 'import importlib\n'), ((7876, 7907), 'copy.deepcopy', 'copy.deepcopy', (['self._candidates'], {}), '(self._candidates)\n', (7889, 7907), False, 'import copy\n'), ((12767, 12798), 'copy.deepcopy', 'copy.deepcopy', (['self.mutate_attr'], {}), '(self.mutate_attr)\n', (12780, 12798), False, 'import copy\n'), ((5163, 5187), 'numpy.ones', 'np.ones', (['nums_population'], {}), '(nums_population)\n', (5170, 5187), True, 'import numpy as np\n'), ((7150, 7184), 'copy.deepcopy', 'copy.deepcopy', (['evolution_es.mutate'], {}), '(evolution_es.mutate)\n', (7163, 7184), False, 'import copy\n'), ((11829, 11853), 'numpy.array', 'np.array', (['nums_all_equal'], {}), '(nums_all_equal)\n', (11837, 11853), True, 'import numpy as np\n'), ((1250, 1271), 'numpy.log', 'np.log', (['max_resources'], {}), '(max_resources)\n', (1256, 1271), True, 'import numpy as np\n'), ((1274, 1298), 'numpy.log', 'np.log', (['reduction_factor'], {}), '(reduction_factor)\n', (1280, 1298), True, 'import numpy as np\n'), ((10752, 10799), 'orion.core.utils.format_trials.tuple_to_trial', 'format_trials.tuple_to_trial', (['point', 'self.space'], {}), '(point, self.space)\n', (10780, 10799), False, 'from orion.core.utils import format_trials\n')]
|
'''.
Jul 03, 2018.
<NAME>, <EMAIL>
'''
import numpy as np
from random import shuffle
import os
import pandas as pd
from abc import abstractmethod
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler, SMOTE
from FAE.DataContainer.DataContainer import DataContainer
class DataBalance:
'''
To deal with the data imbalance.
'''
def __init__(self):
pass
def Run(self, data_container, store_path=''):
if store_path:
if os.path.isdir(store_path):
data_container.Save(os.path.join(store_path, 'non_balance_features.csv'))
else:
data_container.Save(store_path)
return data_container
class DownSampling(DataBalance):
def __init__(self):
super(DownSampling, self).__init__()
def GetCaseNameFromAllCaseNames(self, data_container, one_case_data):
one_case_data = np.squeeze(one_case_data)
all_case_data = data_container.GetArray()
all_case_name = data_container.GetCaseName()
if one_case_data.size != all_case_data.shape[1]:
            print('The number of features should be the same in DataBalance!')
for case_index in range(len(all_case_name)):
if (one_case_data == all_case_data[case_index, :]).all():
return all_case_name[case_index]
print('Not Find Case Name')
return 'Not Find Case Name'
def Run(self, data_container, store_path=''):
data, label, feature_name, label_name = data_container.GetData()
rus = RandomUnderSampler(random_state=0)
data_resampled, label_resampled = rus.fit_sample(data, label)
new_case_name = []
for index in range(data_resampled.shape[0]):
new_case_name.append(self.GetCaseNameFromAllCaseNames(data_container, data_resampled[index, :]))
new_data_container = DataContainer(data_resampled, label_resampled, data_container.GetFeatureName(), new_case_name)
if store_path != '':
if os.path.isdir(store_path):
new_data_container.Save(os.path.join(store_path, 'downsampling_features.csv'))
else:
new_data_container.Save(store_path)
return new_data_container
class UpSampling(DataBalance):
def __init__(self):
super(UpSampling, self).__init__()
def GetCaseNameFromAllCaseNames(self, data_container, one_case_data):
one_case_data = np.squeeze(one_case_data)
all_case_data = data_container.GetArray()
all_case_name = data_container.GetCaseName()
if one_case_data.size != all_case_data.shape[1]:
            print('The number of features should be the same in DataBalance!')
for case_index in range(len(all_case_name)):
if (one_case_data == all_case_data[case_index, :]).all():
return all_case_name[case_index]
print('Not Find Case Name')
return 'Not Find Case Name'
def Run(self, data_container, store_path=''):
data, label, feature_name, label_name = data_container.GetData()
rus = RandomOverSampler(random_state=0)
data_resampled, label_resampled = rus.fit_sample(data, label)
new_case_name = []
for index in range(data_resampled.shape[0]):
new_case_name.append(self.GetCaseNameFromAllCaseNames(data_container, data_resampled[index, :]))
new_data_container = DataContainer(data_resampled, label_resampled, data_container.GetFeatureName(),
new_case_name)
if store_path != '':
if os.path.isdir(store_path):
new_data_container.Save(os.path.join(store_path, 'upsampling_features.csv'))
else:
new_data_container.Save(store_path)
return new_data_container
class SmoteSampling(DataBalance):
def __init__(self, **kwargs):
super(SmoteSampling, self).__init__()
self.__model = SMOTE(**kwargs, random_state=0)
def Run(self, data_container, store_path=''):
data, label, feature_name, label_name = data_container.GetData()
data_resampled, label_resampled = self.__model.fit_sample(data, label)
new_case_name = ['Generate' + str(index) for index in range(data_resampled.shape[0])]
new_data_container = DataContainer(data_resampled, label_resampled, data_container.GetFeatureName(),
new_case_name)
if store_path != '':
if os.path.isdir(store_path):
new_data_container.Save(os.path.join(store_path, 'smote_features.csv'))
else:
new_data_container.Save(store_path)
return new_data_container
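# --- usage sketch added by the editor, not part of the original module ---
# The constructor arguments mirror the DataContainer calls above
# (array, label, feature_names, case_names); the synthetic data are assumptions,
# and running this requires FAE plus the imbalanced-learn version (with fit_sample)
# this module was written for.
if __name__ == '__main__':
    demo_array = np.random.rand(20, 3)
    demo_label = np.array([0] * 15 + [1] * 5)
    demo_container = DataContainer(demo_array, demo_label,
                                   ['feature_' + str(i) for i in range(3)],
                                   ['case_' + str(i) for i in range(20)])
    balanced = UpSampling().Run(demo_container)
    print(balanced.GetArray().shape)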
|
[
"imblearn.under_sampling.RandomUnderSampler",
"os.path.isdir",
"imblearn.over_sampling.RandomOverSampler",
"imblearn.over_sampling.SMOTE",
"numpy.squeeze",
"os.path.join"
] |
[((939, 964), 'numpy.squeeze', 'np.squeeze', (['one_case_data'], {}), '(one_case_data)\n', (949, 964), True, 'import numpy as np\n'), ((1584, 1618), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1602, 1618), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((2472, 2497), 'numpy.squeeze', 'np.squeeze', (['one_case_data'], {}), '(one_case_data)\n', (2482, 2497), True, 'import numpy as np\n'), ((3117, 3150), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3134, 3150), False, 'from imblearn.over_sampling import RandomOverSampler, SMOTE\n'), ((3985, 4016), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(0)'}), '(**kwargs, random_state=0)\n', (3990, 4016), False, 'from imblearn.over_sampling import RandomOverSampler, SMOTE\n'), ((522, 547), 'os.path.isdir', 'os.path.isdir', (['store_path'], {}), '(store_path)\n', (535, 547), False, 'import os\n'), ((2048, 2073), 'os.path.isdir', 'os.path.isdir', (['store_path'], {}), '(store_path)\n', (2061, 2073), False, 'import os\n'), ((3623, 3648), 'os.path.isdir', 'os.path.isdir', (['store_path'], {}), '(store_path)\n', (3636, 3648), False, 'import os\n'), ((4526, 4551), 'os.path.isdir', 'os.path.isdir', (['store_path'], {}), '(store_path)\n', (4539, 4551), False, 'import os\n'), ((585, 637), 'os.path.join', 'os.path.join', (['store_path', '"""non_balance_features.csv"""'], {}), "(store_path, 'non_balance_features.csv')\n", (597, 637), False, 'import os\n'), ((2115, 2168), 'os.path.join', 'os.path.join', (['store_path', '"""downsampling_features.csv"""'], {}), "(store_path, 'downsampling_features.csv')\n", (2127, 2168), False, 'import os\n'), ((3690, 3741), 'os.path.join', 'os.path.join', (['store_path', '"""upsampling_features.csv"""'], {}), "(store_path, 'upsampling_features.csv')\n", (3702, 3741), False, 'import os\n'), ((4593, 4639), 'os.path.join', 'os.path.join', (['store_path', '"""smote_features.csv"""'], {}), "(store_path, 'smote_features.csv')\n", (4605, 4639), False, 'import os\n')]
|
from sklearn.naive_bayes import GaussianNB
import numpy as np
import itertools
import csv
import pickle
import psutil
def __loadData(dataFile, isNumericData = False):
data = []
with open(dataFile, 'rt') as csvfile:
datas = csv.reader(csvfile, delimiter = ',')
for row in datas:
if row is None or len(row) == 0:
continue
if isNumericData:
                data.append(list(map(float, row)))  # list() so rows are concrete sequences under Python 3
else:
data.append(row)
return data
def normalization(sample):
"""one sample pass in"""
sample = sample + 100
# 2^20 = 1048576
return np.log2(sample * 1048576/np.sum(sample))
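# Editor's note: the transform above is log2 of (counts + 100) rescaled to a total of
# 2^20, i.e. log2((x + 100) * 1048576 / sum(x + 100)) applied to one sample at a time.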
def getMemoUsage():
return str(psutil.virtual_memory().percent)
def fileLoad(log):
DataSet = {}
DataList = __loadData('gtex_data.csv', isNumericData = 'True')
LabelList = __loadData('gtex_label.csv')
#LabelList = __loadData('label.csv')
log.write('File load Finished. \n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
for i in range(0, 9662):
lb = LabelList[i][0]
#if lb in ['small', 'minor', 'whole']:
# meaning less label are removed from the tests
# continue
if lb in DataSet.keys():
DataSet[lb].append(DataList[i])
else:
DataSet[lb] = [DataList[i]]
log.write('Data Load Finished.\n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
return DataSet
log = open('excludeOnelog.txt', 'w')
log.write('Program Start \n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
DataSet = fileLoad(log)
log.write('Data Pass Finished.\n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
#
#with open('./normalizedmodel/gtex/pickledRawData/Gtex.pkl', 'wb') as tr:
# pickle.dump(DataSet, tr, pickle.HIGHEST_PROTOCOL)
log.write('Data Save Finished.\n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
labels = DataSet.keys()
#for i in range( 21, 40 ):
# l = labels[i]
for l in labels:
model = GaussianNB()
    log.write('Building pickle for: ' + l + ' excluded.\n')
Data = []
Label = []
for j in labels:
if l != j:
Data = Data + DataSet[j]
Label = Label + [j] * len(DataSet[j])
testTraining = np.array(Data).astype(np.float)
# do normalization here
testTraining = np.apply_along_axis(normalization, 1, testTraining )
testlabeling = np.array(Label)
model.fit(testTraining,testlabeling)
log.write('Model: ' + l + ' training finished.\n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
with open('./normalizedmodel/gtex/excludeOne/'+l+'.pkl', 'wb') as tr:
pickle.dump(model, tr, pickle.HIGHEST_PROTOCOL)
    log.write('pickle: ' + l + ' write done.\n')
log.write('Memo usage: '+ getMemoUsage() +'\n')
|
[
"sklearn.naive_bayes.GaussianNB",
"pickle.dump",
"csv.reader",
"numpy.sum",
"psutil.virtual_memory",
"numpy.apply_along_axis",
"numpy.array"
] |
[((2019, 2031), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2029, 2031), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((2359, 2410), 'numpy.apply_along_axis', 'np.apply_along_axis', (['normalization', '(1)', 'testTraining'], {}), '(normalization, 1, testTraining)\n', (2378, 2410), True, 'import numpy as np\n'), ((2431, 2446), 'numpy.array', 'np.array', (['Label'], {}), '(Label)\n', (2439, 2446), True, 'import numpy as np\n'), ((240, 274), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (250, 274), False, 'import csv\n'), ((2687, 2734), 'pickle.dump', 'pickle.dump', (['model', 'tr', 'pickle.HIGHEST_PROTOCOL'], {}), '(model, tr, pickle.HIGHEST_PROTOCOL)\n', (2698, 2734), False, 'import pickle\n'), ((662, 676), 'numpy.sum', 'np.sum', (['sample'], {}), '(sample)\n', (668, 676), True, 'import numpy as np\n'), ((714, 737), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (735, 737), False, 'import psutil\n'), ((2280, 2294), 'numpy.array', 'np.array', (['Data'], {}), '(Data)\n', (2288, 2294), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_psst
----------------------------------
Tests for `psst` module.
"""
import numpy as np
import pytest as pt
import traitlets as T
import psst
from psst.case.generator import Generator, GeneratorView, GeneratorCostView
from .test_generator import default_generator
@pt.fixture(scope="module")
def dg():
return default_generator()
@pt.fixture()
def default_generator_view(dg):
gv = GeneratorView(
model=dg
)
return gv
@pt.fixture()
def default_generator_cost_view(dg):
gv = GeneratorCostView(
model=dg
)
return gv
def test_generator_view(default_generator_view):
gv = default_generator_view
g = gv.model
assert isinstance(gv.model, Generator)
assert gv._title.value == 'Generator:'
assert gv._name.value == g.name
assert gv._maximum_real_power.value == gv._initial_real_power.max
assert gv._maximum_real_power.value == gv._minimum_real_power.max
assert gv._maximum_real_power.value == gv._ramp_up_rate.max
assert gv._maximum_real_power.value == gv._ramp_down_rate.max
assert g.maximum_real_power == gv._maximum_real_power.value
assert g.name == gv._name.value
assert g.generation_type == gv._generation_type.value
assert g.initial_status == gv._initial_status.value
assert g.minimum_real_power == gv._minimum_real_power.value
assert g.initial_real_power == gv._initial_real_power.value
assert g.minimum_up_time == gv._minimum_up_time.value
assert g.minimum_down_time == gv._minimum_down_time.value
assert g.nsegments == gv._nsegments.value
assert g.ramp_up_rate == gv._ramp_up_rate.value
assert g.ramp_down_rate == gv._ramp_down_rate.value
assert g.startup_time == gv._startup_time.value
assert g.shutdown_time == gv._shutdown_time.value
assert g.noload_cost == gv._noload_cost.value
assert g.startup_cost == gv._startup_cost.value
def test_generator_costview_generator_view(
default_generator_cost_view,
default_generator_view
):
gcv = default_generator_cost_view
gv = default_generator_view
assert gv.model == gcv.model
assert gcv._scale_x.max == gv._maximum_real_power.value
assert np.all(gcv._scatter.x == gv.model.cost_curve_points)
assert np.all(gcv._scatter.y == gv.model.cost_curve_values)
assert np.all(gcv._scatter.x == gcv._lines.x)
assert np.all(gcv._scatter.y == gcv._lines.y)
gcv._lines.x = [0, 10, 20, 30]
gcv._lines.y = [0, 10, 20, 30]
assert np.all(gcv._scatter.x == gv.model.cost_curve_points)
assert np.all(gcv._scatter.y == gv.model.cost_curve_values)
assert np.all(gcv._scatter.x == gcv._lines.x)
assert np.all(gcv._scatter.y == gcv._lines.y)
|
[
"pytest.fixture",
"psst.case.generator.GeneratorCostView",
"numpy.all",
"psst.case.generator.GeneratorView"
] |
[((327, 353), 'pytest.fixture', 'pt.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (337, 353), True, 'import pytest as pt\n'), ((397, 409), 'pytest.fixture', 'pt.fixture', ([], {}), '()\n', (407, 409), True, 'import pytest as pt\n'), ((507, 519), 'pytest.fixture', 'pt.fixture', ([], {}), '()\n', (517, 519), True, 'import pytest as pt\n'), ((452, 475), 'psst.case.generator.GeneratorView', 'GeneratorView', ([], {'model': 'dg'}), '(model=dg)\n', (465, 475), False, 'from psst.case.generator import Generator, GeneratorView, GeneratorCostView\n'), ((567, 594), 'psst.case.generator.GeneratorCostView', 'GeneratorCostView', ([], {'model': 'dg'}), '(model=dg)\n', (584, 594), False, 'from psst.case.generator import Generator, GeneratorView, GeneratorCostView\n'), ((2243, 2295), 'numpy.all', 'np.all', (['(gcv._scatter.x == gv.model.cost_curve_points)'], {}), '(gcv._scatter.x == gv.model.cost_curve_points)\n', (2249, 2295), True, 'import numpy as np\n'), ((2307, 2359), 'numpy.all', 'np.all', (['(gcv._scatter.y == gv.model.cost_curve_values)'], {}), '(gcv._scatter.y == gv.model.cost_curve_values)\n', (2313, 2359), True, 'import numpy as np\n'), ((2372, 2410), 'numpy.all', 'np.all', (['(gcv._scatter.x == gcv._lines.x)'], {}), '(gcv._scatter.x == gcv._lines.x)\n', (2378, 2410), True, 'import numpy as np\n'), ((2422, 2460), 'numpy.all', 'np.all', (['(gcv._scatter.y == gcv._lines.y)'], {}), '(gcv._scatter.y == gcv._lines.y)\n', (2428, 2460), True, 'import numpy as np\n'), ((2544, 2596), 'numpy.all', 'np.all', (['(gcv._scatter.x == gv.model.cost_curve_points)'], {}), '(gcv._scatter.x == gv.model.cost_curve_points)\n', (2550, 2596), True, 'import numpy as np\n'), ((2608, 2660), 'numpy.all', 'np.all', (['(gcv._scatter.y == gv.model.cost_curve_values)'], {}), '(gcv._scatter.y == gv.model.cost_curve_values)\n', (2614, 2660), True, 'import numpy as np\n'), ((2673, 2711), 'numpy.all', 'np.all', (['(gcv._scatter.x == gcv._lines.x)'], {}), '(gcv._scatter.x == gcv._lines.x)\n', (2679, 2711), True, 'import numpy as np\n'), ((2723, 2761), 'numpy.all', 'np.all', (['(gcv._scatter.y == gcv._lines.y)'], {}), '(gcv._scatter.y == gcv._lines.y)\n', (2729, 2761), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Functions to detected aggregated spots and foci.
"""
import numpy as np
import bigfish.stack as stack
from sklearn.cluster import DBSCAN
# ### Detect foci ###
def detect_foci(spots, voxel_size_z=None, voxel_size_yx=100, radius=350,
nb_min_spots=4):
"""Detect clustered spots we can define as foci.
    1) If two spots lie within a specific radius of each other, we consider them
    related to each other.
    2) A minimum number of related spots defines a focus.
Parameters
----------
spots : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 3) or
(nb_spots, 2).
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, spots are
considered in 2-d.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
radius : int
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Radius expressed in nanometer.
nb_min_spots : int
The number of spots in a neighborhood for a point to be considered as
a core point (from which a cluster is expanded). This includes the
point itself.
Returns
-------
clustered_spots : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the cluster assigned to the spot. If no cluster was
assigned, value is -1.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates), the number of
spots detected in the foci and its index.
"""
# check parameters
stack.check_array(spots, ndim=2, dtype=np.int64)
stack.check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
radius=int,
nb_min_spots=int)
# check number of dimensions
ndim = spots.shape[1]
if ndim not in [2, 3]:
raise ValueError("Spot coordinates should be in 2 or 3 dimensions, "
"not {0}.".format(ndim))
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided spot coordinates has {0} dimensions but "
"'voxel_size_z' parameter is missing.".format(ndim))
if ndim == 2:
voxel_size_z = None
# case where no spot were detected
if spots.size == 0:
clustered_spots = np.array([], dtype=np.int64).reshape((0, ndim + 1))
foci = np.array([], dtype=np.int64).reshape((0, ndim + 2))
return clustered_spots, foci
# cluster spots
clustered_spots = _cluster_spots(
spots, voxel_size_z, voxel_size_yx, radius, nb_min_spots)
# extract and shape foci information
foci = _extract_information(clustered_spots)
return clustered_spots, foci
def _convert_spot_coordinates(spots, voxel_size_z, voxel_size_yx):
"""Convert spots coordinates from pixel to nanometer.
Parameters
----------
spots : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 3) or
(nb_spots, 2).
voxel_size_z : int or float
Height of a voxel, along the z axis, in nanometer.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
Returns
-------
spots_nanometer : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 3) or
        (nb_spots, 2), in nanometer.
"""
# convert spots coordinates in nanometer
spots_nanometer = spots.copy()
if spots.shape[1] == 3:
spots_nanometer[:, 0] *= voxel_size_z
spots_nanometer[:, 1:] *= voxel_size_yx
else:
spots_nanometer *= voxel_size_yx
return spots_nanometer
def _cluster_spots(spots, voxel_size_z, voxel_size_yx, radius, nb_min_spots):
"""Assign a cluster to each spot.
Parameters
----------
spots : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 3) or
(nb_spots, 2).
voxel_size_z : int or float
Height of a voxel, along the z axis, in nanometer.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
radius : int
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Radius expressed in nanometer.
nb_min_spots : int
The number of spots in a neighborhood for a point to be considered as
a core point (from which a cluster is expanded). This includes the
point itself.
Returns
-------
clustered_spots : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the cluster assigned to the spot. If no cluster was
assigned, value is -1.
"""
# convert spots coordinates in nanometer
spots_nanometer = _convert_spot_coordinates(spots=spots,
voxel_size_z=voxel_size_z,
voxel_size_yx=voxel_size_yx)
# fit a DBSCAN clustering algorithm with a specific radius
dbscan = DBSCAN(eps=radius, min_samples=nb_min_spots)
dbscan.fit(spots_nanometer)
labels = dbscan.labels_
labels = labels[:, np.newaxis]
# assign a cluster to each spot if possible
clustered_spots = spots.copy()
clustered_spots = np.concatenate((clustered_spots, labels), axis=1)
return clustered_spots
def _extract_information(clustered_spots):
"""Extract foci information from clustered spots.
Parameters
----------
clustered_spots : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the cluster assigned to the spot. If no cluster was
assigned, value is -1.
Returns
-------
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates), the number of
spots detected in the foci and its index.
"""
# extract information for 3-d foci...
if clustered_spots.shape[1] == 4:
# get 3-d foci labels
labels_foci = np.unique(
clustered_spots[clustered_spots[:, 3] != -1, 3])
if labels_foci.size == 0:
foci = np.array([], dtype=np.int64).reshape((0, 5))
return foci
# shape information
foci = []
for label in labels_foci:
spots_in_foci = clustered_spots[clustered_spots[:, 3] == label, :3]
z_foci, y_foci, x_foci = spots_in_foci.mean(axis=0)
nb_spots_foci = len(spots_in_foci)
foci.append([z_foci, y_foci, x_foci, nb_spots_foci, label])
foci = np.array(foci, dtype=np.int64)
# ... and 2-d foci
else:
# get 2-d foci labels
labels_foci = np.unique(
clustered_spots[clustered_spots[:, 2] != -1, 2])
if labels_foci.size == 0:
foci = np.array([], dtype=np.int64).reshape((0, 4))
return foci
# shape information
foci = []
for label in labels_foci:
spots_in_foci = clustered_spots[clustered_spots[:, 2] == label, :2]
y_foci, x_foci = spots_in_foci.mean(axis=0)
nb_spots_foci = len(spots_in_foci)
foci.append([y_foci, x_foci, nb_spots_foci, label])
foci = np.array(foci, dtype=np.int64)
return foci
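# --- usage sketch added by the editor, not part of the original module ---
# Synthetic 3-d spot coordinates in pixels; the voxel sizes and radius below are
# illustrative assumptions, not values recommended by Big-FISH. Running this
# directly still requires bigfish.stack and scikit-learn to be importable.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_spots = rng.randint(0, 50, size=(300, 3)).astype(np.int64)
    clustered_spots, foci = detect_foci(
        demo_spots, voxel_size_z=300, voxel_size_yx=100, radius=350, nb_min_spots=4)
    print(clustered_spots.shape, foci.shape)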
|
[
"sklearn.cluster.DBSCAN",
"numpy.concatenate",
"numpy.array",
"bigfish.stack.check_array",
"numpy.unique"
] |
[((1926, 1974), 'bigfish.stack.check_array', 'stack.check_array', (['spots'], {'ndim': '(2)', 'dtype': 'np.int64'}), '(spots, ndim=2, dtype=np.int64)\n', (1943, 1974), True, 'import bigfish.stack as stack\n'), ((5543, 5587), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'radius', 'min_samples': 'nb_min_spots'}), '(eps=radius, min_samples=nb_min_spots)\n', (5549, 5587), False, 'from sklearn.cluster import DBSCAN\n'), ((5789, 5838), 'numpy.concatenate', 'np.concatenate', (['(clustered_spots, labels)'], {'axis': '(1)'}), '((clustered_spots, labels), axis=1)\n', (5803, 5838), True, 'import numpy as np\n'), ((6697, 6755), 'numpy.unique', 'np.unique', (['clustered_spots[clustered_spots[:, 3] != -1, 3]'], {}), '(clustered_spots[clustered_spots[:, 3] != -1, 3])\n', (6706, 6755), True, 'import numpy as np\n'), ((7250, 7280), 'numpy.array', 'np.array', (['foci'], {'dtype': 'np.int64'}), '(foci, dtype=np.int64)\n', (7258, 7280), True, 'import numpy as np\n'), ((7368, 7426), 'numpy.unique', 'np.unique', (['clustered_spots[clustered_spots[:, 2] != -1, 2]'], {}), '(clustered_spots[clustered_spots[:, 2] != -1, 2])\n', (7377, 7426), True, 'import numpy as np\n'), ((7905, 7935), 'numpy.array', 'np.array', (['foci'], {'dtype': 'np.int64'}), '(foci, dtype=np.int64)\n', (7913, 7935), True, 'import numpy as np\n'), ((2724, 2752), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (2732, 2752), True, 'import numpy as np\n'), ((2791, 2819), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (2799, 2819), True, 'import numpy as np\n'), ((6822, 6850), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (6830, 6850), True, 'import numpy as np\n'), ((7493, 7521), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (7501, 7521), True, 'import numpy as np\n')]
|
#! /usr/bin/python3
# -*- coding:utf-8 -*-
import numpy as np
from outils import *
class Layer :
def __init__(self, entry, neurones, activation, learning_rate) :
# entry is the number of neurones of the previous layer
# neurones is the number of neurones of the current layer
# activation is the activation function of this layer
sigma = 1 / np.sqrt(entry)
self.weights = np.random.normal(0, sigma, size=[neurones,entry])
self.bias = np.random.normal(0, sigma, size=[neurones])
self.activation = activation
self.activation_level = np.zeros(neurones)
self.output = np.zeros(neurones)
self.learning_rate = learning_rate
def compute(self, Input) :
self.activation_level = np.dot(self.weights, Input) - self.bias
self.output=self.activation.f(self.activation_level)
def __len__(self) :
return len(self.output)
def update(self, delta_weights, delta_bias) :
self.weights += delta_weights
self.bias += delta_bias
def set_weights(self, weights, bias) :
for i in range(len(weights)) :
for j in range(len(weights[i])) :
self.weights[i,j] = weights[i,j]
for i in range(len(bias)) :
self.bias[i] = bias[i]
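# --- usage sketch added by the editor, not part of the original module ---
# 'outils' normally supplies the activation objects; the _Sigmoid stand-in below is
# an editor's assumption so the example stays self-contained.
if __name__ == "__main__" :
    class _Sigmoid :
        def f(self, x) :
            return 1.0 / (1.0 + np.exp(-x))
    layer = Layer(entry=3, neurones=2, activation=_Sigmoid(), learning_rate=0.1)
    layer.compute(np.array([0.5, -1.0, 2.0]))
    print(layer.output)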
|
[
"numpy.dot",
"numpy.zeros",
"numpy.random.normal",
"numpy.sqrt"
] |
[((421, 471), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {'size': '[neurones, entry]'}), '(0, sigma, size=[neurones, entry])\n', (437, 471), True, 'import numpy as np\n'), ((491, 534), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {'size': '[neurones]'}), '(0, sigma, size=[neurones])\n', (507, 534), True, 'import numpy as np\n'), ((604, 622), 'numpy.zeros', 'np.zeros', (['neurones'], {}), '(neurones)\n', (612, 622), True, 'import numpy as np\n'), ((645, 663), 'numpy.zeros', 'np.zeros', (['neurones'], {}), '(neurones)\n', (653, 663), True, 'import numpy as np\n'), ((382, 396), 'numpy.sqrt', 'np.sqrt', (['entry'], {}), '(entry)\n', (389, 396), True, 'import numpy as np\n'), ((771, 798), 'numpy.dot', 'np.dot', (['self.weights', 'Input'], {}), '(self.weights, Input)\n', (777, 798), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from keras import backend as K
from keras.datasets import mnist
from keras.utils import np_utils
import keras
import numpy as np
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# allow GPU memory growth instead of pre-allocating the whole GPU (session config workaround)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
def get_mnist():
image_size = 28
num_channels = 1
num_classes = 10
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_test = X_test.reshape(X_test.shape[0], image_size, image_size, num_channels)
X_test = X_test.astype('float32')
X_test /= 255
Y_test = np_utils.to_categorical(y_test, num_classes)
X_train = X_train.reshape(X_train.shape[0], image_size, image_size, num_channels)
X_train = X_train.astype('float32') / 255
Y_train = np_utils.to_categorical(y_train, num_classes)
return X_train, Y_train, X_test, Y_test
# Get output of one specific layer
def getlayer_output(l_in, l_out, x, model):
# get_k_layer_output = K.function([model.layers[l_in].input, K.learning_phase()],[model.layers[l_out].output])
get_k_layer_output = K.function([model.layers[l_in].input, 0], [model.layers[l_out].output])
return get_k_layer_output([x])[0]
if __name__ == '__main__':
# 1. Get 'mnist' dataset.
X_train, Y_train, X_test, Y_test = get_mnist()
print(X_train.shape)
# 2. Load a trained 'MNIST_carlini' model.
dataset_name = 'MNIST'
model_name = 'carlini'
model_weights_fpath = "%s_%s.keras_weights.h5" % (dataset_name, model_name)
model_weights_fpath = os.path.join('downloads/trained_models', model_weights_fpath)
from models.carlini_models import carlini_mnist_model
model = carlini_mnist_model(logits=False, input_range_type=1, pre_filter=lambda x:x)
model.load_weights(model_weights_fpath)
model.summary()
# # 3. Evaluate the trained model on test set.
# print ("Evaluating the pre-trained model...")
# labels_true = np.argmax(Y_test, axis=1)
# labels_test = np.argmax(model.predict(X_test), axis=1)
# print('Accuracy test set: %.2f%%' % (np.sum(labels_test == labels_true) / X_test.shape[0] * 100))
# 4. get output of each layer and store them in npy
start_layer = 18
outputs = {}
for hl_idx in range(start_layer, len(model.layers)):
n_neurons = model.layers[hl_idx].output_shape[-1] # neurons in every layer
print('layer name: ', model.layers[hl_idx].name, '# of neurons: ', n_neurons)
h_layer = np.array([])
for i in range(10):
layer = getlayer_output(0, hl_idx, X_train[6000*i:6000*(i+1), :, :, :], model).copy()
if h_layer.size == 0:
h_layer = layer
else:
h_layer = np.concatenate((layer, h_layer), axis=0)
print('h layer', model.layers[hl_idx].name, h_layer.shape)
file_name = './output/{0}_check_values.npy'.format(model.layers[hl_idx].name)
np.save(file_name, h_layer)
# start_layer = 0
# name = []
# for hl_idx in range(start_layer, len(model.layers) - 1):
# name.append(model.layers[hl_idx].name)
#
# file_name = './output/file_name.npy'
# np.save(file_name, name)
|
[
"numpy.save",
"keras.datasets.mnist.load_data",
"keras.backend.function",
"tensorflow.Session",
"tensorflow.ConfigProto",
"keras.utils.np_utils.to_categorical",
"numpy.array",
"models.carlini_models.carlini_mnist_model",
"os.path.join",
"numpy.concatenate"
] |
[((416, 432), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (430, 432), True, 'import tensorflow as tf\n'), ((479, 504), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (489, 504), True, 'import tensorflow as tf\n'), ((630, 647), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (645, 647), False, 'from keras.datasets import mnist\n'), ((801, 845), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (824, 845), False, 'from keras.utils import np_utils\n'), ((994, 1039), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (1017, 1039), False, 'from keras.utils import np_utils\n'), ((1306, 1377), 'keras.backend.function', 'K.function', (['[model.layers[l_in].input, 0]', '[model.layers[l_out].output]'], {}), '([model.layers[l_in].input, 0], [model.layers[l_out].output])\n', (1316, 1377), True, 'from keras import backend as K\n'), ((1760, 1821), 'os.path.join', 'os.path.join', (['"""downloads/trained_models"""', 'model_weights_fpath'], {}), "('downloads/trained_models', model_weights_fpath)\n", (1772, 1821), False, 'import os\n'), ((1893, 1970), 'models.carlini_models.carlini_mnist_model', 'carlini_mnist_model', ([], {'logits': '(False)', 'input_range_type': '(1)', 'pre_filter': '(lambda x: x)'}), '(logits=False, input_range_type=1, pre_filter=lambda x: x)\n', (1912, 1970), False, 'from models.carlini_models import carlini_mnist_model\n'), ((2690, 2702), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2698, 2702), True, 'import numpy as np\n'), ((3142, 3169), 'numpy.save', 'np.save', (['file_name', 'h_layer'], {}), '(file_name, h_layer)\n', (3149, 3169), True, 'import numpy as np\n'), ((2939, 2979), 'numpy.concatenate', 'np.concatenate', (['(layer, h_layer)'], {'axis': '(0)'}), '((layer, h_layer), axis=0)\n', (2953, 2979), True, 'import numpy as np\n')]
|
from typing import Tuple, Sequence
import numpy as np
from . import DefaultAgent
from mdde.agent.abc import NodeAgentMapping
class SingleNodeDefaultAgent(DefaultAgent):
"""Default agent allowing only a single data node to be managed. Generates a uniform action space."""
def __init__(self,
agent_name: str,
agent_id: int,
data_node_id: str,
group: str = DefaultAgent.DEFAULT_GROUP,
write_stats: bool = False,
allow_do_nothing: bool = True):
"""
Single node default agent constructor
:param agent_name: Name of the agent instance.
:param agent_id: Unique ID of the agent within the experimental run.
:param data_node_id: Data node managed by the agent.
:param group: (optional) Group of the agent.
        :param write_stats: (optional) If True - agent will write some data (a description of its action space) to the
results folder provided in `self._config` for later analysis.
:param allow_do_nothing: (optional) If True - when the agent generates its action space, it will add a
'do_nothing' action at 0. Otherwise the agent must always take an action.
"""
super().__init__(agent_name=agent_name,
agent_id=agent_id,
data_node_ids=[data_node_id],
group=group,
write_stats=write_stats,
allow_do_nothing=allow_do_nothing)
def create_action_space(self,
nodes: Tuple[NodeAgentMapping, ...],
fragments: Sequence[str],
obs: np.ndarray,
) -> int:
"""
        Generates an action space of size: 1 + len(fragments) + len(nodes) * len(fragments), where the
        leading 1 is the 'do nothing' action and is only present when it's allowed. For every fragment a
        delete-from-own-node action is created, and for every (node, fragment) pair a copy-to-own-node
        action is created. It's not allowed, however, to remove fragments from other agents or to copy a
        fragment from a node onto that very same node; these constraints are enforced by the registry. It's
        beneficial to have ownership of the node indicated in the observation space.
"""
own_node = self.data_node_ids[0]
n_frags = len(fragments)
cur_act_idx = 0
if self._allow_do_nothing:
a_actions = np.empty(1 + len(nodes) * n_frags + n_frags, dtype=object)
a_actions[0] = self.Action(node_source_id=None,
node_destination_id=None,
fragment_id=None,
is_del=False) # do nothing action
cur_act_idx += 1
else:
a_actions = np.empty(len(nodes) * n_frags + n_frags, dtype=object)
# Delete actions
for frag_idx, frag_reg_id in enumerate(fragments, cur_act_idx):
# Delete
a_actions[frag_idx] = self.Action(node_source_id=own_node,
node_destination_id=None,
fragment_id=frag_reg_id,
is_del=True)
cur_act_idx = cur_act_idx + n_frags
# Copy actions per node
for node in nodes:
for frag_idx, frag_reg_id in enumerate(fragments, cur_act_idx):
a_actions[frag_idx] = self.Action(node_source_id=node.node_id,
node_destination_id=own_node,
fragment_id=frag_reg_id,
is_del=False)
cur_act_idx = cur_act_idx + n_frags
self._actions = a_actions
if self._write_stats: # Save descriptions for later analysis
self._dumpActions()
return len(self._actions)
def filter_observation(self,
obs_descr: Tuple[NodeAgentMapping, ...],
fragments: Tuple[str, ...],
obs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Expecting the observation space generated by the default scenario. Agent appends a new axis to the
observation array marking own nodes with 1, foreign with 0."""
own = np.zeros((len(obs_descr), 1), dtype=np.int8)
legal_act = np.zeros((len(self._actions)), dtype=np.int8)
node_idx = -1
for own_node_idx in [obs_descr.index(own_node) for own_node in obs_descr if own_node.agent_id == self.id]:
own[own_node_idx][0] = 1
node_idx = own_node_idx
# TODO: Refactor legal actions portion (make efficient after the logic of this experimental code stabilizes)
if node_idx > -1: # Generate a binary map of actions, where 1 means action should be legal in current state.
# own node and observation
own_node = self.data_node_ids[0]
own_observation = obs[node_idx]
# collect foreign nodes observations into a map
foreign_nodes_map = {}
for f_node_idx, f_node_id in {obs_descr.index(node): node.node_id
for node in obs_descr if node.agent_id != self.id}.items():
foreign_nodes_map[f_node_id] = obs[f_node_idx]
if self._allow_do_nothing: # agent can always do nothing if it's allowed in the current scenario
legal_act[0] = 1
for action_idx, action in enumerate(self._actions):
action_fragment_id = action.fragment_id
if action_fragment_id is None:
# Skip the actions that don't deal with any fragment, for this agent it's only `do_nothing`
continue
action_fragment_obs_idx = fragments.index(action_fragment_id)
node_has_copy = own_observation[action_fragment_obs_idx][0]
# delete actions
if action.is_del:
if node_has_copy:
                        # verify whether the fragment is unique (a unique fragment can't be removed)
f_unique = True
for f_node_id, f_node_obs in foreign_nodes_map.items():
if f_node_obs[action_fragment_obs_idx][1]:
f_unique = False
break
if not f_unique:
legal_act[action_idx] = 1
# copy actions
else:
if action.node_source_id != own_node \
and action.node_destination_id is not None \
and not node_has_copy:
                        # check if the designated source node holds a copy of the fragment
source_has_copy = foreign_nodes_map[action.node_source_id][action_fragment_obs_idx][0]
if source_has_copy:
legal_act[action_idx] = 1
return np.insert(obs, 2, own, axis=2), legal_act
|
[
"numpy.insert"
] |
[((7145, 7175), 'numpy.insert', 'np.insert', (['obs', '(2)', 'own'], {'axis': '(2)'}), '(obs, 2, own, axis=2)\n', (7154, 7175), True, 'import numpy as np\n')]
|
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for pixel_control_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from trfl import pixel_control_ops
class PixelControlRewardsTest(tf.test.TestCase):
"""Test the `pixel_control_rewards` op."""
def setUp(self):
"""Defines example data and expected result for the op."""
super(PixelControlRewardsTest, self).setUp()
# Configure.
self._cell = 2
obs_size = (5, 2, 4, 4, 3, 2)
y = obs_size[2] // self._cell
x = obs_size[3] // self._cell
channels = np.prod(obs_size[4:])
rew_size = (obs_size[0]-1, obs_size[1], x, y)
# Input data.
self._obs_np = np.random.uniform(size=obs_size)
self._obs_tf = tf.placeholder(tf.float32, obs_size)
# Expected pseudo-rewards.
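    # The pseudo-reward is the absolute difference between consecutive frames,
    # averaged over each cell of size `self._cell` x `self._cell` and over all
    # channels (the trailing dims of obs_size are flattened into `channels`).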
abs_diff = np.absolute(self._obs_np[1:] - self._obs_np[:-1])
abs_diff = abs_diff.reshape((-1,) + obs_size[2:4] + (channels,))
abs_diff = abs_diff.reshape((-1, y, self._cell, x, self._cell, channels))
avg_abs_diff = abs_diff.mean(axis=(2, 4, 5))
self._expected_pseudo_rewards = avg_abs_diff.reshape(rew_size)
def testPixelControlRewards(self):
"""Compute pseudo rewards from observations."""
pseudo_rewards_tf = pixel_control_ops.pixel_control_rewards(
self._obs_tf, self._cell)
with self.test_session() as sess:
self.assertAllClose(
sess.run(pseudo_rewards_tf, feed_dict={self._obs_tf: self._obs_np}),
self._expected_pseudo_rewards)
class PixelControlLossTest(tf.test.TestCase):
"""Test the `pixel_control_loss` op."""
def setUp(self):
"""Defines example data and expected result for the op."""
super(PixelControlLossTest, self).setUp()
# Observation shape is (2,2,3) (i.e., height 2, width 2, and 3 channels).
# We will use no cropping, and a cell size of 1. We have num_actions = 3,
# meaning our Q values should be (2,2,3). We will set the Q value equal to
# the observation.
self.seq_length = 3
self.batch_size = 1
num_actions = 3
obs_shape = (2, 2, num_actions)
self.discount = 0.9
self.cell_size = 1
self.scale = 1.0
# Create ops to feed actions and rewards.
self.observations_ph = tf.placeholder(
shape=(self.seq_length+1, self.batch_size)+obs_shape, dtype=tf.float32)
self.action_values_ph = tf.placeholder(
shape=(self.seq_length+1, self.batch_size)+obs_shape, dtype=tf.float32)
self.actions_ph = tf.placeholder(
shape=(self.seq_length, self.batch_size), dtype=tf.int32)
# Observations.
obs1 = np.array([[[1, 2, 3], [3, 4, 5]], [[5, 6, 7], [7, 8, 9]]])
obs2 = np.array([[[7, 8, 9], [1, 2, 3]], [[3, 4, 5], [5, 6, 7]]])
obs3 = np.array([[[5, 6, 7], [7, 8, 9]], [[1, 2, 3], [3, 4, 5]]])
obs4 = np.array([[[3, 4, 5], [5, 6, 7]], [[7, 8, 9], [1, 2, 3]]])
# Actions.
action1 = 0
action2 = 1
action3 = 2
# Compute loss for constant discount.
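    # Targets follow the bootstrapped recursion target_t = reward_t + discount * target_{t+1},
    # seeded with the max action value at the final step; each error is target - Q(s, a).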
qa_tm1 = obs3[:, :, action3]
reward3 = np.mean(np.abs(obs4 - obs3), axis=2)
qmax_t = np.amax(obs4, axis=2)
target = reward3 + self.discount * qmax_t
error3 = target - qa_tm1
qa_tm1 = obs2[:, :, action2]
reward2 = np.mean(np.abs(obs3 - obs2), axis=2)
target = reward2 + self.discount * target
error2 = target - qa_tm1
qa_tm1 = obs1[:, :, action1]
reward1 = np.mean(np.abs(obs2 - obs1), axis=2)
target = reward1 + self.discount * target
error1 = target - qa_tm1
# Compute loss for episode termination with discount 0.
qa_tm1 = obs1[:, :, action1]
reward1 = np.mean(np.abs(obs2 - obs1), axis=2)
target = reward1 + 0. * target
error1_term = target - qa_tm1
self.error = np.sum(
np.square(error1) + np.square(error2) + np.square(error3)) * 0.5
self.error_term = np.sum(
np.square(error1_term) + np.square(error2) + np.square(error3)) * 0.5
# Placeholder data.
self.observations = np.expand_dims(
np.stack([obs1, obs2, obs3, obs4], axis=0), axis=1)
self.action_values = self.observations
self.actions = np.stack(
[np.array([action1]), np.array([action2]), np.array([action3])], axis=0)
def testPixelControlLossScalarDiscount(self):
"""Compute loss for given observations, actions, values, scalar discount."""
loss, _ = pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph, self.action_values_ph,
self.cell_size, self.discount, self.scale)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
feed_dict = {
self.observations_ph: self.observations,
self.action_values_ph: self.action_values,
self.actions_ph: self.actions}
loss_np = sess.run(loss, feed_dict=feed_dict)
self.assertNear(loss_np, self.error, 1e-3)
def testPixelControlLossTensorDiscount(self):
"""Compute loss for given observations, actions, values, tensor discount."""
zero_discount = tf.zeros((1, self.batch_size))
non_zero_discount = tf.tile(
tf.reshape(self.discount, [1, 1]),
[self.seq_length - 1, self.batch_size])
tensor_discount = tf.concat([zero_discount, non_zero_discount], axis=0)
loss, _ = pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph, self.action_values_ph,
self.cell_size, tensor_discount, self.scale)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
feed_dict = {
self.observations_ph: self.observations,
self.action_values_ph: self.action_values,
self.actions_ph: self.actions}
loss_np = sess.run(loss, feed_dict=feed_dict)
self.assertNear(loss_np, self.error_term, 1e-3)
def testPixelControlLossShapes(self):
with self.assertRaisesRegexp(
ValueError, "Pixel Control values are not compatible"):
pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph,
self.action_values_ph[:, :, :-1], self.cell_size, self.discount,
self.scale)
def testTensorDiscountShape(self):
with self.assertRaisesRegexp(
ValueError, "discount_factor must be a scalar or a tensor of rank 2"):
tensor_discount = tf.tile(
tf.reshape(self.discount, [1, 1, 1]),
[self.seq_length, self.batch_size, 1])
pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph,
self.action_values_ph, self.cell_size, tensor_discount,
self.scale)
if __name__ == "__main__":
tf.test.main()
|
[
"tensorflow.test.main",
"numpy.random.uniform",
"numpy.absolute",
"numpy.stack",
"numpy.abs",
"tensorflow.global_variables_initializer",
"tensorflow.reshape",
"numpy.square",
"tensorflow.concat",
"numpy.amax",
"tensorflow.placeholder",
"tensorflow.zeros",
"numpy.array",
"trfl.pixel_control_ops.pixel_control_loss",
"trfl.pixel_control_ops.pixel_control_rewards",
"numpy.prod"
] |
[((7341, 7355), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (7353, 7355), True, 'import tensorflow as tf\n'), ((1308, 1329), 'numpy.prod', 'np.prod', (['obs_size[4:]'], {}), '(obs_size[4:])\n', (1315, 1329), True, 'import numpy as np\n'), ((1418, 1450), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'obs_size'}), '(size=obs_size)\n', (1435, 1450), True, 'import numpy as np\n'), ((1470, 1506), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'obs_size'], {}), '(tf.float32, obs_size)\n', (1484, 1506), True, 'import tensorflow as tf\n'), ((1554, 1603), 'numpy.absolute', 'np.absolute', (['(self._obs_np[1:] - self._obs_np[:-1])'], {}), '(self._obs_np[1:] - self._obs_np[:-1])\n', (1565, 1603), True, 'import numpy as np\n'), ((1981, 2046), 'trfl.pixel_control_ops.pixel_control_rewards', 'pixel_control_ops.pixel_control_rewards', (['self._obs_tf', 'self._cell'], {}), '(self._obs_tf, self._cell)\n', (2020, 2046), False, 'from trfl import pixel_control_ops\n'), ((2966, 3060), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '((self.seq_length + 1, self.batch_size) + obs_shape)', 'dtype': 'tf.float32'}), '(shape=(self.seq_length + 1, self.batch_size) + obs_shape,\n dtype=tf.float32)\n', (2980, 3060), True, 'import tensorflow as tf\n'), ((3090, 3184), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '((self.seq_length + 1, self.batch_size) + obs_shape)', 'dtype': 'tf.float32'}), '(shape=(self.seq_length + 1, self.batch_size) + obs_shape,\n dtype=tf.float32)\n', (3104, 3184), True, 'import tensorflow as tf\n'), ((3208, 3280), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(self.seq_length, self.batch_size)', 'dtype': 'tf.int32'}), '(shape=(self.seq_length, self.batch_size), dtype=tf.int32)\n', (3222, 3280), True, 'import tensorflow as tf\n'), ((3322, 3380), 'numpy.array', 'np.array', (['[[[1, 2, 3], [3, 4, 5]], [[5, 6, 7], [7, 8, 9]]]'], {}), '([[[1, 2, 3], [3, 4, 5]], [[5, 6, 7], [7, 8, 9]]])\n', (3330, 3380), True, 'import numpy as np\n'), ((3392, 3450), 'numpy.array', 'np.array', (['[[[7, 8, 9], [1, 2, 3]], [[3, 4, 5], [5, 6, 7]]]'], {}), '([[[7, 8, 9], [1, 2, 3]], [[3, 4, 5], [5, 6, 7]]])\n', (3400, 3450), True, 'import numpy as np\n'), ((3462, 3520), 'numpy.array', 'np.array', (['[[[5, 6, 7], [7, 8, 9]], [[1, 2, 3], [3, 4, 5]]]'], {}), '([[[5, 6, 7], [7, 8, 9]], [[1, 2, 3], [3, 4, 5]]])\n', (3470, 3520), True, 'import numpy as np\n'), ((3532, 3590), 'numpy.array', 'np.array', (['[[[3, 4, 5], [5, 6, 7]], [[7, 8, 9], [1, 2, 3]]]'], {}), '([[[3, 4, 5], [5, 6, 7]], [[7, 8, 9], [1, 2, 3]]])\n', (3540, 3590), True, 'import numpy as np\n'), ((3795, 3816), 'numpy.amax', 'np.amax', (['obs4'], {'axis': '(2)'}), '(obs4, axis=2)\n', (3802, 3816), True, 'import numpy as np\n'), ((5056, 5201), 'trfl.pixel_control_ops.pixel_control_loss', 'pixel_control_ops.pixel_control_loss', (['self.observations_ph', 'self.actions_ph', 'self.action_values_ph', 'self.cell_size', 'self.discount', 'self.scale'], {}), '(self.observations_ph, self.actions_ph,\n self.action_values_ph, self.cell_size, self.discount, self.scale)\n', (5092, 5201), False, 'from trfl import pixel_control_ops\n'), ((5226, 5259), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5257, 5259), True, 'import tensorflow as tf\n'), ((5737, 5767), 'tensorflow.zeros', 'tf.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (5745, 5767), True, 'import tensorflow as tf\n'), ((5914, 5967), 'tensorflow.concat', 'tf.concat', 
(['[zero_discount, non_zero_discount]'], {'axis': '(0)'}), '([zero_discount, non_zero_discount], axis=0)\n', (5923, 5967), True, 'import tensorflow as tf\n'), ((5982, 6129), 'trfl.pixel_control_ops.pixel_control_loss', 'pixel_control_ops.pixel_control_loss', (['self.observations_ph', 'self.actions_ph', 'self.action_values_ph', 'self.cell_size', 'tensor_discount', 'self.scale'], {}), '(self.observations_ph, self.actions_ph,\n self.action_values_ph, self.cell_size, tensor_discount, self.scale)\n', (6018, 6129), False, 'from trfl import pixel_control_ops\n'), ((6154, 6187), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6185, 6187), True, 'import tensorflow as tf\n'), ((3753, 3772), 'numpy.abs', 'np.abs', (['(obs4 - obs3)'], {}), '(obs4 - obs3)\n', (3759, 3772), True, 'import numpy as np\n'), ((3948, 3967), 'numpy.abs', 'np.abs', (['(obs3 - obs2)'], {}), '(obs3 - obs2)\n', (3954, 3967), True, 'import numpy as np\n'), ((4108, 4127), 'numpy.abs', 'np.abs', (['(obs2 - obs1)'], {}), '(obs2 - obs1)\n', (4114, 4127), True, 'import numpy as np\n'), ((4328, 4347), 'numpy.abs', 'np.abs', (['(obs2 - obs1)'], {}), '(obs2 - obs1)\n', (4334, 4347), True, 'import numpy as np\n'), ((4706, 4748), 'numpy.stack', 'np.stack', (['[obs1, obs2, obs3, obs4]'], {'axis': '(0)'}), '([obs1, obs2, obs3, obs4], axis=0)\n', (4714, 4748), True, 'import numpy as np\n'), ((5809, 5842), 'tensorflow.reshape', 'tf.reshape', (['self.discount', '[1, 1]'], {}), '(self.discount, [1, 1])\n', (5819, 5842), True, 'import tensorflow as tf\n'), ((6664, 6825), 'trfl.pixel_control_ops.pixel_control_loss', 'pixel_control_ops.pixel_control_loss', (['self.observations_ph', 'self.actions_ph', 'self.action_values_ph[:, :, :-1]', 'self.cell_size', 'self.discount', 'self.scale'], {}), '(self.observations_ph, self.actions_ph,\n self.action_values_ph[:, :, :-1], self.cell_size, self.discount, self.scale\n )\n', (6700, 6825), False, 'from trfl import pixel_control_ops\n'), ((7135, 7282), 'trfl.pixel_control_ops.pixel_control_loss', 'pixel_control_ops.pixel_control_loss', (['self.observations_ph', 'self.actions_ph', 'self.action_values_ph', 'self.cell_size', 'tensor_discount', 'self.scale'], {}), '(self.observations_ph, self.actions_ph,\n self.action_values_ph, self.cell_size, tensor_discount, self.scale)\n', (7171, 7282), False, 'from trfl import pixel_control_ops\n'), ((4839, 4858), 'numpy.array', 'np.array', (['[action1]'], {}), '([action1])\n', (4847, 4858), True, 'import numpy as np\n'), ((4860, 4879), 'numpy.array', 'np.array', (['[action2]'], {}), '([action2])\n', (4868, 4879), True, 'import numpy as np\n'), ((4881, 4900), 'numpy.array', 'np.array', (['[action3]'], {}), '([action3])\n', (4889, 4900), True, 'import numpy as np\n'), ((7042, 7078), 'tensorflow.reshape', 'tf.reshape', (['self.discount', '[1, 1, 1]'], {}), '(self.discount, [1, 1, 1])\n', (7052, 7078), True, 'import tensorflow as tf\n'), ((4500, 4517), 'numpy.square', 'np.square', (['error3'], {}), '(error3)\n', (4509, 4517), True, 'import numpy as np\n'), ((4608, 4625), 'numpy.square', 'np.square', (['error3'], {}), '(error3)\n', (4617, 4625), True, 'import numpy as np\n'), ((4460, 4477), 'numpy.square', 'np.square', (['error1'], {}), '(error1)\n', (4469, 4477), True, 'import numpy as np\n'), ((4480, 4497), 'numpy.square', 'np.square', (['error2'], {}), '(error2)\n', (4489, 4497), True, 'import numpy as np\n'), ((4563, 4585), 'numpy.square', 'np.square', (['error1_term'], {}), '(error1_term)\n', (4572, 4585), True, 'import numpy as 
np\n'), ((4588, 4605), 'numpy.square', 'np.square', (['error2'], {}), '(error2)\n', (4597, 4605), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.keras import preprocessing
import streamlit as st
import numpy as np
import webbrowser
from PIL import Image
url = "https://github.com/NavinBondade/Identifying-Nine-Tomato-Disease-With-Deep-Learning"
st.set_page_config(page_title='Tomato Diseases Identification Tool', initial_sidebar_state = 'auto')
st.title("Nine Tomato Diseases Identification Tool")
st.write("A machine learning powered system that tells accurately whether a tomato plant is infected with Bacterial Spot, Early Blight, Late Blight, Leaf Mold, Septoria Leaf Spot, Spider Mites, Target Spot, Tomato Yellow Leaf Curl Virus, Tomato Mosaic Virus, Healthy. Check out code here [link](%s)." % url)
with open("Pictures.zip", "rb") as fp:
col1, col2, col3 = st.columns(3)
with col2:
btn = st.download_button(
label="Download Test Data",
data=fp,
file_name="Pictures.zip",
mime="application/zip"
)
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
file = st.sidebar.file_uploader("Upload Image", type=['jpeg','jpg','png'])
cat = ['Bacterial Spot', 'Early Blight', 'Late Blight', 'Leaf Mold', 'Septoria Leaf Spot', 'Spider Mites', 'Target Spot', 'Tomato Yellow Leaf Curl Virus', 'Tomato Mosaic Virus', 'Healthy']
def prediction(image, model):
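    # Resize to the model's expected 200x200 input, scale pixels to [0, 1],
    # add a batch dimension, and map the argmax of the prediction to a label.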
test_image = image.resize((200,200))
test_image = preprocessing.image.img_to_array(test_image)
test_image = test_image / 255.0
test_image = np.expand_dims(test_image, axis=0)
result=model.predict(test_image)
result=np.argmax(result)
Pred=cat[result]
return Pred
if file is not None:
img = Image.open(file)
model = tf.keras.models.load_model("tomato_disease.h5")
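    # Convert to RGB so uploads with an alpha channel (e.g. PNG) match the model's 3-channel input.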
img_jpeg = img.convert('RGB')
pred = prediction(img_jpeg, model)
#score = tf.nn.softmax(prediction[0])
st.markdown(f"<h2 style='text-align: center; color: black;'>{pred}</h2>", unsafe_allow_html=True)
st.image(img, use_column_width=True)
|
[
"streamlit.set_page_config",
"streamlit.markdown",
"streamlit.columns",
"tensorflow.keras.models.load_model",
"streamlit.image",
"numpy.argmax",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.expand_dims",
"streamlit.write",
"streamlit.title",
"PIL.Image.open",
"streamlit.download_button",
"streamlit.sidebar.file_uploader"
] |
[((250, 352), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Tomato Diseases Identification Tool"""', 'initial_sidebar_state': '"""auto"""'}), "(page_title='Tomato Diseases Identification Tool',\n initial_sidebar_state='auto')\n", (268, 352), True, 'import streamlit as st\n'), ((352, 404), 'streamlit.title', 'st.title', (['"""Nine Tomato Diseases Identification Tool"""'], {}), "('Nine Tomato Diseases Identification Tool')\n", (360, 404), True, 'import streamlit as st\n'), ((406, 723), 'streamlit.write', 'st.write', (["('A machine learning powered system that tells accurately whether a tomato plant is infected with Bacterial Spot, Early Blight, Late Blight, Leaf Mold, Septoria Leaf Spot, Spider Mites, Target Spot, Tomato Yellow Leaf Curl Virus, Tomato Mosaic Virus, Healthy. Check out code here [link](%s).'\n % url)"], {}), "(\n 'A machine learning powered system that tells accurately whether a tomato plant is infected with Bacterial Spot, Early Blight, Late Blight, Leaf Mold, Septoria Leaf Spot, Spider Mites, Target Spot, Tomato Yellow Leaf Curl Virus, Tomato Mosaic Virus, Healthy. Check out code here [link](%s).'\n % url)\n", (414, 723), True, 'import streamlit as st\n'), ((1161, 1218), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (1172, 1218), True, 'import streamlit as st\n'), ((1237, 1306), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload Image"""'], {'type': "['jpeg', 'jpg', 'png']"}), "('Upload Image', type=['jpeg', 'jpg', 'png'])\n", (1261, 1306), True, 'import streamlit as st\n'), ((780, 793), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (790, 793), True, 'import streamlit as st\n'), ((1590, 1634), 'tensorflow.keras.preprocessing.image.img_to_array', 'preprocessing.image.img_to_array', (['test_image'], {}), '(test_image)\n', (1622, 1634), False, 'from tensorflow.keras import preprocessing\n'), ((1690, 1724), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (1704, 1724), True, 'import numpy as np\n'), ((1775, 1792), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (1784, 1792), True, 'import numpy as np\n'), ((1871, 1887), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (1881, 1887), False, 'from PIL import Image\n'), ((1901, 1948), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""tomato_disease.h5"""'], {}), "('tomato_disease.h5')\n", (1927, 1948), True, 'import tensorflow as tf\n'), ((2072, 2173), 'streamlit.markdown', 'st.markdown', (['f"""<h2 style=\'text-align: center; color: black;\'>{pred}</h2>"""'], {'unsafe_allow_html': '(True)'}), '(f"<h2 style=\'text-align: center; color: black;\'>{pred}</h2>",\n unsafe_allow_html=True)\n', (2083, 2173), True, 'import streamlit as st\n'), ((2175, 2211), 'streamlit.image', 'st.image', (['img'], {'use_column_width': '(True)'}), '(img, use_column_width=True)\n', (2183, 2211), True, 'import streamlit as st\n'), ((825, 935), 'streamlit.download_button', 'st.download_button', ([], {'label': '"""Download Test Data"""', 'data': 'fp', 'file_name': '"""Pictures.zip"""', 'mime': '"""application/zip"""'}), "(label='Download Test Data', data=fp, file_name=\n 'Pictures.zip', mime='application/zip')\n", (843, 935), True, 'import streamlit as st\n')]
|
# Copyright 2020 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications for Guinet et al.
from absl import flags
import numpy as np
import six
import tensorflow as tf
import tensorflow_ranking as tfr
from tensorflow_ranking.python import utils
flags.DEFINE_string("train_path", None, "Input file path used for training.")
flags.DEFINE_string("vali_path", None, "Input file path used for validation.")
flags.DEFINE_string("test_path", None, "Input file path used for testing.")
flags.DEFINE_string("output_dir", None, "Output directory for models.")
flags.DEFINE_string(
"query_relevance_type",
"binary",
"Type of relevance for the queries, binary ou continuous.",
)
flags.DEFINE_integer("query_size", 10, "Number of words per query.")
flags.DEFINE_integer("train_batch_size", 32, "The batch size for training.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of steps for training.")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate for optimizer.")
flags.DEFINE_float("dropout_rate", 0.5, "The dropout rate before output layer.")
flags.DEFINE_list("hidden_layer_dims", ["512", "256", "128"], "Sizes for hidden layers.")
flags.DEFINE_integer("num_features", 600, "Number of features per document.")
flags.DEFINE_integer("list_size", 10, "List size used for training.")
flags.DEFINE_integer("group_size", 1, "Group size used in score function.")
flags.DEFINE_string(
"loss",
"pairwise_logistic_loss",
"The RankingLossKey for the primary loss function.",
)
flags.DEFINE_string(
"secondary_loss",
None,
"The RankingLossKey for the secondary loss for " "multi-objective learning.",
)
flags.DEFINE_float(
"secondary_loss_weight",
0.5,
"The weight for the secondary loss in " "multi-objective learning.",
)
flags.DEFINE_bool(
"use_document_interactions", False,
"If true, uses cross-document interactions to generate scores.")
FLAGS = flags.FLAGS
_PRIMARY_HEAD = "primary_head"
_SECONDARY_HEAD = "secondary_head"
def _use_multi_head():
"""Returns True if using multi-head."""
return FLAGS.secondary_loss is not None
class IteratorInitializerHook(tf.estimator.SessionRunHook):
"""Hook to initialize data iterator after session is created."""
def __init__(self):
super(IteratorInitializerHook, self).__init__()
self.iterator_initializer_fn = None
def after_create_session(self, session, coord):
"""Initialize the iterator after the session has been created."""
del coord
self.iterator_initializer_fn(session)
def example_feature_columns():
"""Returns the example feature columns."""
feature_names = ["{}".format(i) for i in range(FLAGS.num_features)]
return {
name: tf.feature_column.numeric_column(name, shape=(1,), default_value=0.0)
for name in feature_names
}
def load_libsvm_data(path, list_size):
"""Returns features and labels in numpy.array."""
def _parse_line(line):
"""Parses a single line in LibSVM format."""
tokens = line.split("#")[0].split()
assert len(tokens) >= 2, "Ill-formatted line: {}".format(line)
label = float(tokens[0])
qid = tokens[1]
kv_pairs = [kv.split(":") for kv in tokens[2:]]
features = {k: float(v) for (k, v) in kv_pairs}
return qid, features, label
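    # Illustrative example of an input line in this LibSVM-like format
    # (label, query-id token, feature:value pairs, optional trailing comment):
    #   2 qid:42 1:0.73 5:0.21 17:1.0 # doc-xyz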
tf.compat.v1.logging.info("Loading data from {}".format(path))
# The 0-based index assigned to a query.
qid_to_index = {}
# The number of docs seen so far for a query.
qid_to_ndoc = {}
    # Each feature is mapped to an array of shape [num_queries, list_size, 1]. Labels
    # have shape [num_queries, list_size]. We use a list for each of them because the
    # number of queries is unknown in advance.
feature_map = {k: [] for k in example_feature_columns()}
label_list = []
total_docs = 0
discarded_docs = 0
with open(path, "rt") as f:
for line in f:
qid, features, label = _parse_line(line)
if qid not in qid_to_index:
# Create index and allocate space for a new query.
qid_to_index[qid] = len(qid_to_index)
qid_to_ndoc[qid] = 0
for k in feature_map:
feature_map[k].append(np.zeros([list_size, 1], dtype=np.float32))
label_list.append(np.ones([list_size], dtype=np.float32) * -1.0)
total_docs += 1
batch_idx = qid_to_index[qid]
doc_idx = qid_to_ndoc[qid]
qid_to_ndoc[qid] += 1
# Keep the first 'list_size' docs only.
if doc_idx >= list_size:
discarded_docs += 1
continue
for k, v in six.iteritems(features):
assert k in feature_map, "Key {} not found in features.".format(k)
feature_map[k][batch_idx][doc_idx, 0] = v
label_list[batch_idx][doc_idx] = label
tf.compat.v1.logging.info("Number of queries: {}".format(len(qid_to_index)))
tf.compat.v1.logging.info("Number of documents in total: {}".format(total_docs))
tf.compat.v1.logging.info(
"Number of documents discarded: {}".format(discarded_docs)
)
# Convert everything to np.array.
for k in feature_map:
feature_map[k] = np.array(feature_map[k])
return feature_map, np.array(label_list)
def get_train_inputs(features, labels, batch_size):
"""Set up training input in batches."""
iterator_initializer_hook = IteratorInitializerHook()
def _train_input_fn():
"""Defines training input fn."""
features_placeholder = {
k: tf.compat.v1.placeholder(v.dtype, v.shape)
for k, v in six.iteritems(features)
}
if _use_multi_head():
placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
labels_placeholder = {
_PRIMARY_HEAD: placeholder,
_SECONDARY_HEAD: placeholder,
}
else:
labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(features_placeholder, labels_placeholder)
)
dataset = dataset.shuffle(5000).repeat().batch(batch_size)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
if _use_multi_head():
feed_dict = {
labels_placeholder[head_name]: labels
for head_name in labels_placeholder
}
else:
feed_dict = {labels_placeholder: labels}
feed_dict.update(
{features_placeholder[k]: features[k] for k in features_placeholder}
)
iterator_initializer_hook.iterator_initializer_fn = lambda sess: sess.run(
iterator.initializer, feed_dict=feed_dict
)
return iterator.get_next()
return _train_input_fn, iterator_initializer_hook
def get_eval_inputs(features, labels):
"""Set up eval inputs in a single batch."""
iterator_initializer_hook = IteratorInitializerHook()
def _eval_input_fn():
"""Defines eval input fn."""
features_placeholder = {
k: tf.compat.v1.placeholder(v.dtype, v.shape)
for k, v in six.iteritems(features)
}
if _use_multi_head():
placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
labels_placeholder = {
_PRIMARY_HEAD: placeholder,
_SECONDARY_HEAD: placeholder,
}
else:
labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
dataset = tf.data.Dataset.from_tensors(
(features_placeholder, labels_placeholder)
)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
if _use_multi_head():
feed_dict = {
labels_placeholder[head_name]: labels
for head_name in labels_placeholder
}
else:
feed_dict = {labels_placeholder: labels}
feed_dict.update(
{features_placeholder[k]: features[k] for k in features_placeholder}
)
iterator_initializer_hook.iterator_initializer_fn = lambda sess: sess.run(
iterator.initializer, feed_dict=feed_dict
)
return iterator.get_next()
return _eval_input_fn, iterator_initializer_hook
def make_serving_input_fn():
"""Returns serving input fn to receive tf.Example."""
feature_spec = tf.feature_column.make_parse_example_spec(
example_feature_columns().values()
)
return tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
def make_transform_fn():
"""Returns a transform_fn that converts features to dense Tensors."""
def _transform_fn(features, mode):
"""Defines transform_fn."""
if mode == tf.estimator.ModeKeys.PREDICT:
# We expect tf.Example as input during serving. In this case, group_size
# must be set to 1.
if FLAGS.group_size != 1:
raise ValueError(
"group_size should be 1 to be able to export model, but get %s"
% FLAGS.group_size
)
context_features, example_features = tfr.feature.encode_pointwise_features(
features=features,
context_feature_columns=None,
example_feature_columns=example_feature_columns(),
mode=mode,
scope="transform_layer",
)
else:
context_features, example_features = tfr.feature.encode_listwise_features(
features=features,
context_feature_columns=None,
example_feature_columns=example_feature_columns(),
mode=mode,
scope="transform_layer",
)
return context_features, example_features
return _transform_fn
def make_score_fn():
"""Returns a groupwise score fn to build `EstimatorSpec`."""
def _score_fn(
unused_context_features, group_features, mode, unused_params, unused_config
):
"""Defines the network to score a group of documents."""
with tf.compat.v1.name_scope("input_layer"):
group_input = [
tf.compat.v1.layers.flatten(group_features[name])
for name in sorted(example_feature_columns())
]
input_layer = tf.concat(group_input, 1)
tf.compat.v1.summary.scalar(
"input_sparsity", tf.nn.zero_fraction(input_layer)
)
tf.compat.v1.summary.scalar(
"input_max", tf.reduce_max(input_tensor=input_layer)
)
tf.compat.v1.summary.scalar(
"input_min", tf.reduce_min(input_tensor=input_layer)
)
is_training = mode == tf.estimator.ModeKeys.TRAIN
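        # Scoring network: batch norm on the input, then dense -> batch norm -> ReLU
        # per hidden layer, followed by dropout and a final linear layer that
        # outputs `group_size` scores.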
cur_layer = tf.compat.v1.layers.batch_normalization(
input_layer, training=is_training
)
for i, layer_width in enumerate(int(d) for d in FLAGS.hidden_layer_dims):
cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)
cur_layer = tf.compat.v1.layers.batch_normalization(
cur_layer, training=is_training
)
cur_layer = tf.nn.relu(cur_layer)
tf.compat.v1.summary.scalar(
"fully_connected_{}_sparsity".format(i), tf.nn.zero_fraction(cur_layer)
)
cur_layer = tf.compat.v1.layers.dropout(
cur_layer, rate=FLAGS.dropout_rate, training=is_training
)
logits = tf.compat.v1.layers.dense(cur_layer, units=FLAGS.group_size)
if _use_multi_head():
# Duplicate the logits for both heads.
return {_PRIMARY_HEAD: logits, _SECONDARY_HEAD: logits}
else:
return logits
return _score_fn
def bilingual_lexical_induction(labels, predictions, features):
"""Compute the BLI. We do not make all the needed verifications as they were already made for previous metrics."""
if FLAGS.query_relevance_type == "binary":
ground_truth = 2
else:
ground_truth = FLAGS.query_size
# We get the label of the highest ranked word by the model
    sorted_labels = utils.sort_by_scores(predictions, [labels], topn=1)[0]
# We check if the label is equal to ground truth
relevance = tf.cast(tf.equal(sorted_labels, ground_truth), dtype=tf.float32)
# We return it
return tf.compat.v1.metrics.mean(relevance)
def get_eval_metric_fns():
"""Returns a dict from name to metric functions."""
metric_fns = {}
metric_fns.update(
{
"metric/%s" % name: tfr.metrics.make_ranking_metric_fn(name)
for name in [
tfr.metrics.RankingMetricKey.ARP,
tfr.metrics.RankingMetricKey.ORDERED_PAIR_ACCURACY,
]
}
)
metric_fns.update(
{
"metric/ndcg@%d"
% topn: tfr.metrics.make_ranking_metric_fn(
tfr.metrics.RankingMetricKey.NDCG, topn=topn
)
for topn in [1, 3, 5, 10]
}
)
# Adding the new metric
metric_fns.update(
{
"metric/bli": bilingual_lexical_induction
}
)
return metric_fns
def train_and_eval():
"""Train and Evaluate."""
features, labels = load_libsvm_data(FLAGS.train_path, FLAGS.list_size)
train_input_fn, train_hook = get_train_inputs(
features, labels, FLAGS.train_batch_size
)
features_vali, labels_vali = load_libsvm_data(FLAGS.vali_path, FLAGS.list_size)
vali_input_fn, vali_hook = get_eval_inputs(features_vali, labels_vali)
features_test, labels_test = load_libsvm_data(FLAGS.test_path, FLAGS.list_size)
test_input_fn, test_hook = get_eval_inputs(features_test, labels_test)
optimizer = tf.compat.v1.train.AdagradOptimizer(learning_rate=FLAGS.learning_rate)
def _train_op_fn(loss):
"""Defines train op used in ranking head."""
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
minimize_op = optimizer.minimize(
loss=loss, global_step=tf.compat.v1.train.get_global_step()
)
train_op = tf.group([minimize_op, update_ops])
return train_op
if _use_multi_head():
primary_head = tfr.head.create_ranking_head(
loss_fn=tfr.losses.make_loss_fn(FLAGS.loss),
eval_metric_fns=get_eval_metric_fns(),
train_op_fn=_train_op_fn,
name=_PRIMARY_HEAD,
)
secondary_head = tfr.head.create_ranking_head(
loss_fn=tfr.losses.make_loss_fn(FLAGS.secondary_loss),
eval_metric_fns=get_eval_metric_fns(),
train_op_fn=_train_op_fn,
name=_SECONDARY_HEAD,
)
ranking_head = tfr.head.create_multi_ranking_head(
[primary_head, secondary_head], [1.0, FLAGS.secondary_loss_weight]
)
else:
ranking_head = tfr.head.create_ranking_head(
loss_fn=tfr.losses.make_loss_fn(FLAGS.loss),
eval_metric_fns=get_eval_metric_fns(),
train_op_fn=_train_op_fn,
)
estimator = tf.estimator.Estimator(
model_fn=tfr.model.make_groupwise_ranking_fn(
group_score_fn=make_score_fn(),
group_size=FLAGS.group_size,
transform_fn=make_transform_fn(),
ranking_head=ranking_head,
),
config=tf.estimator.RunConfig(FLAGS.output_dir, save_checkpoints_steps=1000),
)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, hooks=[train_hook], max_steps=FLAGS.num_train_steps
)
# Export model to accept tf.Example when group_size = 1.
if FLAGS.group_size == 1:
vali_spec = tf.estimator.EvalSpec(
input_fn=vali_input_fn,
hooks=[vali_hook],
steps=1,
exporters=tf.estimator.LatestExporter(
"latest_exporter", serving_input_receiver_fn=make_serving_input_fn()
),
start_delay_secs=0,
throttle_secs=30,
)
else:
vali_spec = tf.estimator.EvalSpec(
input_fn=vali_input_fn,
hooks=[vali_hook],
steps=1,
start_delay_secs=0,
throttle_secs=30,
)
# Train and validate
tf.estimator.train_and_evaluate(estimator, train_spec, vali_spec)
# Evaluate on the test data.
estimator.evaluate(input_fn=test_input_fn, hooks=[test_hook])
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
train_and_eval()
if __name__ == "__main__":
flags.mark_flag_as_required("train_path")
flags.mark_flag_as_required("vali_path")
flags.mark_flag_as_required("test_path")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
"""
WIP - make predictions :
def predict_input_fn(path):
context_feature_spec = tf.feature_column.make_parse_example_spec(
context_feature_columns().values())
example_feature_spec = tf.feature_column.make_parse_example_spec(
list(example_feature_columns().values()))
dataset = tfr.data.build_ranking_dataset(
file_pattern=path,
data_format=tfr.data.EIE,
batch_size=_BATCH_SIZE,
list_size=_LIST_SIZE,
context_feature_spec=context_feature_spec,
example_feature_spec=example_feature_spec,
reader=tf.data.TFRecordDataset,
shuffle=False,
num_epochs=1)
features = tf.data.make_one_shot_iterator(dataset).get_next()
return features
ranker -> trained model
predictions = ranker.predict(input_fn=lambda: predict_input_fn("/tmp/test.tfrecords"))
x = next(predictions)
"""
|
[
"tensorflow.nn.zero_fraction",
"tensorflow_ranking.python.utils.sort_by_scores",
"numpy.ones",
"tensorflow.estimator.TrainSpec",
"tensorflow.reduce_max",
"absl.flags.DEFINE_list",
"six.iteritems",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.train.AdagradOptimizer",
"tensorflow.compat.v1.layers.dense",
"tensorflow.nn.relu",
"tensorflow_ranking.metrics.make_ranking_metric_fn",
"tensorflow.compat.v1.placeholder",
"absl.flags.DEFINE_bool",
"tensorflow.compat.v1.name_scope",
"absl.flags.mark_flag_as_required",
"tensorflow.concat",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_float",
"tensorflow.compat.v1.metrics.mean",
"tensorflow.compat.v1.layers.dropout",
"tensorflow.reduce_min",
"tensorflow.equal",
"tensorflow.data.Dataset.from_tensors",
"tensorflow.feature_column.numeric_column",
"tensorflow.estimator.EvalSpec",
"tensorflow.group",
"tensorflow.estimator.RunConfig",
"tensorflow.compat.v1.data.make_initializable_iterator",
"tensorflow.compat.v1.layers.flatten",
"numpy.zeros",
"tensorflow.compat.v1.train.get_global_step",
"tensorflow.data.Dataset.from_tensor_slices",
"absl.flags.DEFINE_string",
"tensorflow.compat.v1.layers.batch_normalization",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.estimator.export.build_parsing_serving_input_receiver_fn",
"numpy.array",
"tensorflow.estimator.train_and_evaluate",
"tensorflow_ranking.losses.make_loss_fn",
"tensorflow_ranking.head.create_multi_ranking_head"
] |
[((788, 865), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""train_path"""', 'None', '"""Input file path used for training."""'], {}), "('train_path', None, 'Input file path used for training.')\n", (807, 865), False, 'from absl import flags\n'), ((866, 944), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""vali_path"""', 'None', '"""Input file path used for validation."""'], {}), "('vali_path', None, 'Input file path used for validation.')\n", (885, 944), False, 'from absl import flags\n'), ((945, 1020), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""test_path"""', 'None', '"""Input file path used for testing."""'], {}), "('test_path', None, 'Input file path used for testing.')\n", (964, 1020), False, 'from absl import flags\n'), ((1021, 1092), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', 'None', '"""Output directory for models."""'], {}), "('output_dir', None, 'Output directory for models.')\n", (1040, 1092), False, 'from absl import flags\n'), ((1093, 1210), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""query_relevance_type"""', '"""binary"""', '"""Type of relevance for the queries, binary ou continuous."""'], {}), "('query_relevance_type', 'binary',\n 'Type of relevance for the queries, binary ou continuous.')\n", (1112, 1210), False, 'from absl import flags\n'), ((1223, 1291), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""query_size"""', '(10)', '"""Number of words per query."""'], {}), "('query_size', 10, 'Number of words per query.')\n", (1243, 1291), False, 'from absl import flags\n'), ((1292, 1368), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_batch_size"""', '(32)', '"""The batch size for training."""'], {}), "('train_batch_size', 32, 'The batch size for training.')\n", (1312, 1368), False, 'from absl import flags\n'), ((1369, 1454), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_train_steps"""', '(100000)', '"""Number of steps for training."""'], {}), "('num_train_steps', 100000, 'Number of steps for training.'\n )\n", (1389, 1454), False, 'from absl import flags\n'), ((1451, 1524), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.01)', '"""Learning rate for optimizer."""'], {}), "('learning_rate', 0.01, 'Learning rate for optimizer.')\n", (1469, 1524), False, 'from absl import flags\n'), ((1525, 1610), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""dropout_rate"""', '(0.5)', '"""The dropout rate before output layer."""'], {}), "('dropout_rate', 0.5, 'The dropout rate before output layer.'\n )\n", (1543, 1610), False, 'from absl import flags\n'), ((1606, 1699), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""hidden_layer_dims"""', "['512', '256', '128']", '"""Sizes for hidden layers."""'], {}), "('hidden_layer_dims', ['512', '256', '128'],\n 'Sizes for hidden layers.')\n", (1623, 1699), False, 'from absl import flags\n'), ((1697, 1774), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_features"""', '(600)', '"""Number of features per document."""'], {}), "('num_features', 600, 'Number of features per document.')\n", (1717, 1774), False, 'from absl import flags\n'), ((1775, 1844), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""list_size"""', '(10)', '"""List size used for training."""'], {}), "('list_size', 10, 'List size used for training.')\n", (1795, 1844), False, 'from absl import flags\n'), ((1845, 1920), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""group_size"""', '(1)', 
'"""Group size used in score function."""'], {}), "('group_size', 1, 'Group size used in score function.')\n", (1865, 1920), False, 'from absl import flags\n'), ((1922, 2032), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""loss"""', '"""pairwise_logistic_loss"""', '"""The RankingLossKey for the primary loss function."""'], {}), "('loss', 'pairwise_logistic_loss',\n 'The RankingLossKey for the primary loss function.')\n", (1941, 2032), False, 'from absl import flags\n'), ((2044, 2166), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""secondary_loss"""', 'None', '"""The RankingLossKey for the secondary loss for multi-objective learning."""'], {}), "('secondary_loss', None,\n 'The RankingLossKey for the secondary loss for multi-objective learning.')\n", (2063, 2166), False, 'from absl import flags\n'), ((2181, 2299), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""secondary_loss_weight"""', '(0.5)', '"""The weight for the secondary loss in multi-objective learning."""'], {}), "('secondary_loss_weight', 0.5,\n 'The weight for the secondary loss in multi-objective learning.')\n", (2199, 2299), False, 'from absl import flags\n'), ((2314, 2436), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_document_interactions"""', '(False)', '"""If true, uses cross-document interactions to generate scores."""'], {}), "('use_document_interactions', False,\n 'If true, uses cross-document interactions to generate scores.')\n", (2331, 2436), False, 'from absl import flags\n'), ((9159, 9232), 'tensorflow.estimator.export.build_parsing_serving_input_receiver_fn', 'tf.estimator.export.build_parsing_serving_input_receiver_fn', (['feature_spec'], {}), '(feature_spec)\n', (9218, 9232), True, 'import tensorflow as tf\n'), ((13107, 13143), 'tensorflow.compat.v1.metrics.mean', 'tf.compat.v1.metrics.mean', (['relevance'], {}), '(relevance)\n', (13132, 13143), True, 'import tensorflow as tf\n'), ((14507, 14577), 'tensorflow.compat.v1.train.AdagradOptimizer', 'tf.compat.v1.train.AdagradOptimizer', ([], {'learning_rate': 'FLAGS.learning_rate'}), '(learning_rate=FLAGS.learning_rate)\n', (14542, 14577), True, 'import tensorflow as tf\n'), ((16223, 16327), 'tensorflow.estimator.TrainSpec', 'tf.estimator.TrainSpec', ([], {'input_fn': 'train_input_fn', 'hooks': '[train_hook]', 'max_steps': 'FLAGS.num_train_steps'}), '(input_fn=train_input_fn, hooks=[train_hook],\n max_steps=FLAGS.num_train_steps)\n', (16245, 16327), True, 'import tensorflow as tf\n'), ((17026, 17091), 'tensorflow.estimator.train_and_evaluate', 'tf.estimator.train_and_evaluate', (['estimator', 'train_spec', 'vali_spec'], {}), '(estimator, train_spec, vali_spec)\n', (17057, 17091), True, 'import tensorflow as tf\n'), ((17211, 17272), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.INFO'], {}), '(tf.compat.v1.logging.INFO)\n', (17245, 17272), True, 'import tensorflow as tf\n'), ((17328, 17369), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""train_path"""'], {}), "('train_path')\n", (17355, 17369), False, 'from absl import flags\n'), ((17374, 17414), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""vali_path"""'], {}), "('vali_path')\n", (17401, 17414), False, 'from absl import flags\n'), ((17419, 17459), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""test_path"""'], {}), "('test_path')\n", (17446, 17459), False, 'from absl import flags\n'), ((17464, 17505), 'absl.flags.mark_flag_as_required', 
'flags.mark_flag_as_required', (['"""output_dir"""'], {}), "('output_dir')\n", (17491, 17505), False, 'from absl import flags\n'), ((17511, 17533), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (17531, 17533), True, 'import tensorflow as tf\n'), ((3269, 3338), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['name'], {'shape': '(1,)', 'default_value': '(0.0)'}), '(name, shape=(1,), default_value=0.0)\n', (3301, 3338), True, 'import tensorflow as tf\n'), ((5808, 5832), 'numpy.array', 'np.array', (['feature_map[k]'], {}), '(feature_map[k])\n', (5816, 5832), True, 'import numpy as np\n'), ((5857, 5877), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (5865, 5877), True, 'import numpy as np\n'), ((6618, 6696), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder)'], {}), '((features_placeholder, labels_placeholder))\n', (6652, 6696), True, 'import tensorflow as tf\n'), ((6805, 6859), 'tensorflow.compat.v1.data.make_initializable_iterator', 'tf.compat.v1.data.make_initializable_iterator', (['dataset'], {}), '(dataset)\n', (6850, 6859), True, 'import tensorflow as tf\n'), ((8183, 8255), 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (['(features_placeholder, labels_placeholder)'], {}), '((features_placeholder, labels_placeholder))\n', (8211, 8255), True, 'import tensorflow as tf\n'), ((8297, 8351), 'tensorflow.compat.v1.data.make_initializable_iterator', 'tf.compat.v1.data.make_initializable_iterator', (['dataset'], {}), '(dataset)\n', (8342, 8351), True, 'import tensorflow as tf\n'), ((11504, 11578), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.compat.v1.layers.batch_normalization', (['input_layer'], {'training': 'is_training'}), '(input_layer, training=is_training)\n', (11543, 11578), True, 'import tensorflow as tf\n'), ((12099, 12189), 'tensorflow.compat.v1.layers.dropout', 'tf.compat.v1.layers.dropout', (['cur_layer'], {'rate': 'FLAGS.dropout_rate', 'training': 'is_training'}), '(cur_layer, rate=FLAGS.dropout_rate, training=\n is_training)\n', (12126, 12189), True, 'import tensorflow as tf\n'), ((12224, 12284), 'tensorflow.compat.v1.layers.dense', 'tf.compat.v1.layers.dense', (['cur_layer'], {'units': 'FLAGS.group_size'}), '(cur_layer, units=FLAGS.group_size)\n', (12249, 12284), True, 'import tensorflow as tf\n'), ((12887, 12938), 'tensorflow_ranking.python.utils.sort_by_scores', 'utils.sort_by_scores', (['predictions', '[labels]'], {'topn': '(1)'}), '(predictions, [labels], topn=1)\n', (12907, 12938), False, 'from tensorflow_ranking.python import utils\n'), ((13020, 13057), 'tensorflow.equal', 'tf.equal', (['sorted_labels', 'ground_truth'], {}), '(sorted_labels, ground_truth)\n', (13028, 13057), True, 'import tensorflow as tf\n'), ((14681, 14743), 'tensorflow.compat.v1.get_collection', 'tf.compat.v1.get_collection', (['tf.compat.v1.GraphKeys.UPDATE_OPS'], {}), '(tf.compat.v1.GraphKeys.UPDATE_OPS)\n', (14708, 14743), True, 'import tensorflow as tf\n'), ((14887, 14922), 'tensorflow.group', 'tf.group', (['[minimize_op, update_ops]'], {}), '([minimize_op, update_ops])\n', (14895, 14922), True, 'import tensorflow as tf\n'), ((15493, 15599), 'tensorflow_ranking.head.create_multi_ranking_head', 'tfr.head.create_multi_ranking_head', (['[primary_head, secondary_head]', '[1.0, FLAGS.secondary_loss_weight]'], {}), '([primary_head, secondary_head], [1.0,\n FLAGS.secondary_loss_weight])\n', (15527, 15599), True, 
'import tensorflow_ranking as tfr\n'), ((16813, 16928), 'tensorflow.estimator.EvalSpec', 'tf.estimator.EvalSpec', ([], {'input_fn': 'vali_input_fn', 'hooks': '[vali_hook]', 'steps': '(1)', 'start_delay_secs': '(0)', 'throttle_secs': '(30)'}), '(input_fn=vali_input_fn, hooks=[vali_hook], steps=1,\n start_delay_secs=0, throttle_secs=30)\n', (16834, 16928), True, 'import tensorflow as tf\n'), ((5230, 5253), 'six.iteritems', 'six.iteritems', (['features'], {}), '(features)\n', (5243, 5253), False, 'import six\n'), ((6151, 6193), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['v.dtype', 'v.shape'], {}), '(v.dtype, v.shape)\n', (6175, 6193), True, 'import tensorflow as tf\n'), ((6308, 6360), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['labels.dtype', 'labels.shape'], {}), '(labels.dtype, labels.shape)\n', (6332, 6360), True, 'import tensorflow as tf\n'), ((6547, 6599), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['labels.dtype', 'labels.shape'], {}), '(labels.dtype, labels.shape)\n', (6571, 6599), True, 'import tensorflow as tf\n'), ((7716, 7758), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['v.dtype', 'v.shape'], {}), '(v.dtype, v.shape)\n', (7740, 7758), True, 'import tensorflow as tf\n'), ((7873, 7925), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['labels.dtype', 'labels.shape'], {}), '(labels.dtype, labels.shape)\n', (7897, 7925), True, 'import tensorflow as tf\n'), ((8112, 8164), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['labels.dtype', 'labels.shape'], {}), '(labels.dtype, labels.shape)\n', (8136, 8164), True, 'import tensorflow as tf\n'), ((10793, 10831), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""input_layer"""'], {}), "('input_layer')\n", (10816, 10831), True, 'import tensorflow as tf\n'), ((11029, 11054), 'tensorflow.concat', 'tf.concat', (['group_input', '(1)'], {}), '(group_input, 1)\n', (11038, 11054), True, 'import tensorflow as tf\n'), ((11707, 11762), 'tensorflow.compat.v1.layers.dense', 'tf.compat.v1.layers.dense', (['cur_layer'], {'units': 'layer_width'}), '(cur_layer, units=layer_width)\n', (11732, 11762), True, 'import tensorflow as tf\n'), ((11787, 11859), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.compat.v1.layers.batch_normalization', (['cur_layer'], {'training': 'is_training'}), '(cur_layer, training=is_training)\n', (11826, 11859), True, 'import tensorflow as tf\n'), ((11914, 11935), 'tensorflow.nn.relu', 'tf.nn.relu', (['cur_layer'], {}), '(cur_layer)\n', (11924, 11935), True, 'import tensorflow as tf\n'), ((13313, 13353), 'tensorflow_ranking.metrics.make_ranking_metric_fn', 'tfr.metrics.make_ranking_metric_fn', (['name'], {}), '(name)\n', (13347, 13353), True, 'import tensorflow_ranking as tfr\n'), ((13610, 13695), 'tensorflow_ranking.metrics.make_ranking_metric_fn', 'tfr.metrics.make_ranking_metric_fn', (['tfr.metrics.RankingMetricKey.NDCG'], {'topn': 'topn'}), '(tfr.metrics.RankingMetricKey.NDCG, topn=topn\n )\n', (13644, 13695), True, 'import tensorflow_ranking as tfr\n'), ((16128, 16197), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', (['FLAGS.output_dir'], {'save_checkpoints_steps': '(1000)'}), '(FLAGS.output_dir, save_checkpoints_steps=1000)\n', (16150, 16197), True, 'import tensorflow as tf\n'), ((6218, 6241), 'six.iteritems', 'six.iteritems', (['features'], {}), '(features)\n', (6231, 6241), False, 'import six\n'), ((7783, 7806), 'six.iteritems', 'six.iteritems', (['features'], {}), 
'(features)\n', (7796, 7806), False, 'import six\n'), ((10877, 10926), 'tensorflow.compat.v1.layers.flatten', 'tf.compat.v1.layers.flatten', (['group_features[name]'], {}), '(group_features[name])\n', (10904, 10926), True, 'import tensorflow as tf\n'), ((11130, 11162), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['input_layer'], {}), '(input_layer)\n', (11149, 11162), True, 'import tensorflow as tf\n'), ((11247, 11286), 'tensorflow.reduce_max', 'tf.reduce_max', ([], {'input_tensor': 'input_layer'}), '(input_tensor=input_layer)\n', (11260, 11286), True, 'import tensorflow as tf\n'), ((11371, 11410), 'tensorflow.reduce_min', 'tf.reduce_min', ([], {'input_tensor': 'input_layer'}), '(input_tensor=input_layer)\n', (11384, 11410), True, 'import tensorflow as tf\n'), ((12034, 12064), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['cur_layer'], {}), '(cur_layer)\n', (12053, 12064), True, 'import tensorflow as tf\n'), ((14821, 14857), 'tensorflow.compat.v1.train.get_global_step', 'tf.compat.v1.train.get_global_step', ([], {}), '()\n', (14855, 14857), True, 'import tensorflow as tf\n'), ((15047, 15082), 'tensorflow_ranking.losses.make_loss_fn', 'tfr.losses.make_loss_fn', (['FLAGS.loss'], {}), '(FLAGS.loss)\n', (15070, 15082), True, 'import tensorflow_ranking as tfr\n'), ((15290, 15335), 'tensorflow_ranking.losses.make_loss_fn', 'tfr.losses.make_loss_fn', (['FLAGS.secondary_loss'], {}), '(FLAGS.secondary_loss)\n', (15313, 15335), True, 'import tensorflow_ranking as tfr\n'), ((15701, 15736), 'tensorflow_ranking.losses.make_loss_fn', 'tfr.losses.make_loss_fn', (['FLAGS.loss'], {}), '(FLAGS.loss)\n', (15724, 15736), True, 'import tensorflow_ranking as tfr\n'), ((4788, 4830), 'numpy.zeros', 'np.zeros', (['[list_size, 1]'], {'dtype': 'np.float32'}), '([list_size, 1], dtype=np.float32)\n', (4796, 4830), True, 'import numpy as np\n'), ((4866, 4904), 'numpy.ones', 'np.ones', (['[list_size]'], {'dtype': 'np.float32'}), '([list_size], dtype=np.float32)\n', (4873, 4904), True, 'import numpy as np\n')]
|
import argparse
import os
import pickle
import time
# import warnings
import numpy as np
from csv import writer
import warnings
import matplotlib.pyplot as plt
# utils imports
from power_planner.utils.utils_ksp import KspUtils
from power_planner.utils.utils_costs import CostUtils
from power_planner.evaluate_path import save_path_cost_csv
from power_planner import graphs
def convert_instance(instance, instance_corr):
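    # Keep only corridor rows/columns that contain at least one valid pixel and
    # assign the maximum cost (1) to every cell outside the original corridor mask.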
tuned_inst_corr = np.ones(instance_corr.shape)
for i in range(instance_corr.shape[0]):
if not np.any(instance_corr[i, :]):
tuned_inst_corr[i, :] = 0
for i in range(instance_corr.shape[1]):
if not np.any(instance_corr[:, i]):
tuned_inst_corr[:, i] = 0
# put high costs in the edge area
tuned_inst = instance.copy()
inverted = np.absolute(instance_corr - 1).astype("bool")
tuned_inst[:, inverted] = 1
return tuned_inst, tuned_inst_corr
def logging(ID, graph, path, path_costs, cfg, time_pipeline, comp_path=None):
if comp_path is None:
max_eucl = 0
mean_eucl = 0
else:
# compute path distances and multiply with resolution to get meters
max_eucl = (
KspUtils.path_distance(path, comp_path, mode="eucl_max") *
cfg.scale * 10
)
mean_eucl = (
KspUtils.path_distance(path, comp_path, mode="eucl_mean") *
cfg.scale * 10
)
# get unnormalized costs
angle_cost = round(np.sum(CostUtils.compute_angle_costs(path)), 2)
n_categories = len(cfg.class_weights)
path_costs = np.asarray(path_costs)
# get normalization weights
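    # The angle weight is scaled by the summed class weights, then all weights are
    # normalized to sum to 1; `together` below is the resulting weighted total cost.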
ang_weight_norm = cfg.angle_weight * np.sum(cfg.class_weights)
all_cost_weights = np.array([ang_weight_norm] + list(cfg.class_weights))
all_cost_weights = all_cost_weights / np.sum(all_cost_weights)
print([ang_weight_norm] + list(cfg.class_weights), all_cost_weights)
# compute normalized path costs
summed_costs = np.around(np.sum(path_costs[:, -n_categories:], axis=0), 2)
weighted_sum = round(np.dot(summed_costs, all_cost_weights[1:]), 2)
together = all_cost_weights[0] * angle_cost + weighted_sum
n_pixels = np.sum(instance_corr > 0)
# csv_header = ["ID", "instance", "resolution", "graph", "number pixels"
# "space edges", "overall time",
# "time vertex adding", "time edge adding", "time shortest path",
# "angle cost", "category costs", "sum of costs"]
logs = [
ID, INST, SCALE_PARAM * 10, n_pixels, graphtype, graph.n_nodes,
graph.n_edges, time_pipeline, graph.time_logs["add_nodes"],
graph.time_logs["add_all_edges"], graph.time_logs["shortest_path"],
cfg.angle_weight, angle_cost, summed_costs, weighted_sum, together,
mean_eucl, max_eucl
]
with open(cfg.csv_times, 'a+', newline='') as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(logs)
parser = argparse.ArgumentParser()
parser.add_argument('-cluster', action='store_true')
parser.add_argument('-i', '--instance', type=str, default="ch")
parser.add_argument('-s', '--scale', help="resolution", type=int, default=1)
args = parser.parse_args()
# define output save name
ID = "results_" + args.instance # str(round(time.time() / 60))[-5:]
OUT_DIR = os.path.join("..", "outputs")
OUT_PATH = os.path.join(OUT_DIR, ID)
SCALE_PARAM = args.scale
SCENARIO = 1
INST = args.instance
height_resistance_path = None # "../data/Instance_CH.nosync/dtm_10m.tif"
PIPELINE = [(1, 0)]
USE_KSP = 0
# summarize: mean/max/min, remove: all/surrounding, sample: simple/watershed
NOTES = "None" # "mean-all-simple"
# define IO paths
PATH_FILES = "data"
# Iterate over all graphs
graph_names = ["Normal graph", "Implicit line graph", "Line graph"]
for INST in ["belgium", "de", "ch"]:
for g, GRAPH_TYPE in enumerate(
[graphs.WeightedGraph, graphs.ImplicitLG, graphs.LineGraph]
):
for SCALE_PARAM in [5, 2, 1]: # TODO
print("")
print("---------------------------------------------------")
print("---------------", INST, SCALE_PARAM, "-------------")
IOPATH = os.path.join(
PATH_FILES, f"{INST}_data_{SCENARIO}_{SCALE_PARAM}.dat"
)
# LOAD DATA
with open(IOPATH, "rb") as infile:
data = pickle.load(infile)
(instance, edge_cost, instance_corr, config) = data
cfg = config.graph
start_inds = config.graph.start_inds
dest_inds = config.graph.dest_inds
instance_vertices = len(np.where(instance_corr > 0)[0])
OUT_PATH_orig = OUT_PATH
# for a, angle_weight in enumerate([0.2 ]): # TODO
# print("PROCESS ", graph_names[g], angle_weight)
cfg.edge_weight = 0
# cfg.angle_weight = angle_weight
cfg.csv_times = "../outputs/graph_compare.csv"
# ID
graphtype = graph_names[g]
ID = f"{graph_names[g]}_{SCALE_PARAM}_{INST}_{int(cfg.angle_weight*100)}"
OUT_PATH = os.path.join(OUT_DIR, ID + ".csv")
graph_gt = GRAPH_TYPE(instance, instance_corr, verbose=0)
graph_gt.set_shift(cfg.start_inds, cfg.dest_inds, **vars(cfg))
estimated_edges = instance_vertices * len(graph_gt.shift_tuples)
print("will have ", estimated_edges, "edges")
# ABORT if too many edges
if estimated_edges > 1000000000:
print("ABORT bc of memory!")
logs = [
ID, INST, SCALE_PARAM * 10, instance_vertices, graphtype,
instance_vertices, estimated_edges
]
with open(cfg.csv_times, 'a+', newline='') as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(logs)
break
tic = time.time()
path_gt, path_costs_gt, cost_sum_gt = graph_gt.single_sp(
**vars(cfg)
)
print("vertices:", graph_gt.n_nodes, "edges", graph_gt.n_edges)
time_pipeline = round(time.time() - tic, 3)
print("DONE SP")
if g == 0:
path_bl = path_gt
logging(
ID,
graph_gt,
path_gt,
path_costs_gt,
cfg,
time_pipeline,
comp_path=path_bl
)
save_path_cost_csv(OUT_PATH, [path_gt], instance, **vars(cfg))
# if g == 0 and a == 0:
# angle weight only needs to be varied for the implicit lg
# break
|
[
"numpy.absolute",
"numpy.sum",
"argparse.ArgumentParser",
"csv.writer",
"numpy.asarray",
"power_planner.utils.utils_costs.CostUtils.compute_angle_costs",
"numpy.ones",
"power_planner.utils.utils_ksp.KspUtils.path_distance",
"time.time",
"numpy.any",
"pickle.load",
"numpy.where",
"numpy.dot",
"os.path.join"
] |
[((3044, 3069), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3067, 3069), False, 'import argparse\n'), ((3394, 3423), 'os.path.join', 'os.path.join', (['""".."""', '"""outputs"""'], {}), "('..', 'outputs')\n", (3406, 3423), False, 'import os\n'), ((3435, 3460), 'os.path.join', 'os.path.join', (['OUT_DIR', 'ID'], {}), '(OUT_DIR, ID)\n', (3447, 3460), False, 'import os\n'), ((444, 472), 'numpy.ones', 'np.ones', (['instance_corr.shape'], {}), '(instance_corr.shape)\n', (451, 472), True, 'import numpy as np\n'), ((1582, 1604), 'numpy.asarray', 'np.asarray', (['path_costs'], {}), '(path_costs)\n', (1592, 1604), True, 'import numpy as np\n'), ((2186, 2211), 'numpy.sum', 'np.sum', (['(instance_corr > 0)'], {}), '(instance_corr > 0)\n', (2192, 2211), True, 'import numpy as np\n'), ((1678, 1703), 'numpy.sum', 'np.sum', (['cfg.class_weights'], {}), '(cfg.class_weights)\n', (1684, 1703), True, 'import numpy as np\n'), ((1823, 1847), 'numpy.sum', 'np.sum', (['all_cost_weights'], {}), '(all_cost_weights)\n', (1829, 1847), True, 'import numpy as np\n'), ((1986, 2031), 'numpy.sum', 'np.sum', (['path_costs[:, -n_categories:]'], {'axis': '(0)'}), '(path_costs[:, -n_categories:], axis=0)\n', (1992, 2031), True, 'import numpy as np\n'), ((2061, 2103), 'numpy.dot', 'np.dot', (['summed_costs', 'all_cost_weights[1:]'], {}), '(summed_costs, all_cost_weights[1:])\n', (2067, 2103), True, 'import numpy as np\n'), ((2922, 2939), 'csv.writer', 'writer', (['write_obj'], {}), '(write_obj)\n', (2928, 2939), False, 'from csv import writer\n'), ((532, 559), 'numpy.any', 'np.any', (['instance_corr[i, :]'], {}), '(instance_corr[i, :])\n', (538, 559), True, 'import numpy as np\n'), ((658, 685), 'numpy.any', 'np.any', (['instance_corr[:, i]'], {}), '(instance_corr[:, i])\n', (664, 685), True, 'import numpy as np\n'), ((811, 841), 'numpy.absolute', 'np.absolute', (['(instance_corr - 1)'], {}), '(instance_corr - 1)\n', (822, 841), True, 'import numpy as np\n'), ((1482, 1517), 'power_planner.utils.utils_costs.CostUtils.compute_angle_costs', 'CostUtils.compute_angle_costs', (['path'], {}), '(path)\n', (1511, 1517), False, 'from power_planner.utils.utils_costs import CostUtils\n'), ((4257, 4326), 'os.path.join', 'os.path.join', (['PATH_FILES', 'f"""{INST}_data_{SCENARIO}_{SCALE_PARAM}.dat"""'], {}), "(PATH_FILES, f'{INST}_data_{SCENARIO}_{SCALE_PARAM}.dat')\n", (4269, 4326), False, 'import os\n'), ((5215, 5249), 'os.path.join', 'os.path.join', (['OUT_DIR', "(ID + '.csv')"], {}), "(OUT_DIR, ID + '.csv')\n", (5227, 5249), False, 'import os\n'), ((6178, 6189), 'time.time', 'time.time', ([], {}), '()\n', (6187, 6189), False, 'import time\n'), ((1196, 1252), 'power_planner.utils.utils_ksp.KspUtils.path_distance', 'KspUtils.path_distance', (['path', 'comp_path'], {'mode': '"""eucl_max"""'}), "(path, comp_path, mode='eucl_max')\n", (1218, 1252), False, 'from power_planner.utils.utils_ksp import KspUtils\n'), ((1326, 1383), 'power_planner.utils.utils_ksp.KspUtils.path_distance', 'KspUtils.path_distance', (['path', 'comp_path'], {'mode': '"""eucl_mean"""'}), "(path, comp_path, mode='eucl_mean')\n", (1348, 1383), False, 'from power_planner.utils.utils_ksp import KspUtils\n'), ((4452, 4471), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (4463, 4471), False, 'import pickle\n'), ((4716, 4743), 'numpy.where', 'np.where', (['(instance_corr > 0)'], {}), '(instance_corr > 0)\n', (4724, 4743), True, 'import numpy as np\n'), ((6003, 6020), 'csv.writer', 'writer', (['write_obj'], {}), '(write_obj)\n', 
(6009, 6020), False, 'from csv import writer\n'), ((6414, 6425), 'time.time', 'time.time', ([], {}), '()\n', (6423, 6425), False, 'import time\n')]
|
"""
this preprocessor, adds optical flow information to the input sequence to the
netwwork.
this works based on two ideas, an overall flow and a per 5 frame flow calculation
"""
from src.preprocessing.preprocessor import preprocessor
import numpy as np
import cv2
from numpy import array, zeros
import os
from glob import glob
import json
class OpticalFlow ( preprocessor ):
def __init__(self, exportPath, trainingPath , testPath , images_size=[640,640], importPath = None , skip_count =5):
self.exportPath = exportPath
self.trainingPath = trainingPath
self.testPath = testPath
self.image_size = images_size
self.importPath = importPath
self.skip_count = skip_count
self.name = "Optical_Flow_" + str(skip_count)
self.x_test = None
self.y_train = None
self.x_train = None
def preprocess(self):
"""
this funciton preopricess the imagaes into three arays, test_x tarin_x , train_y
:return:
"""
train_x = [] # None #np.array([])
train_y = [] # None # np.array([])
train_vars = []
train_of = []
# create the trainig set
        if self.trainingPath is not None:
for sample in sorted(os.listdir(self.trainingPath)) :
mask_path = os.path.join( self.trainingPath, sample + '/mask.png')
if os.path.exists(os.path.join( self.trainingPath, sample + '/OpticalFlow.png') ):
the_of = cv2.imread( os.path.join( self.trainingPath, sample + '/OpticalFlow.png') , 1 )
else:
frame1 = self.cv_resize( cv2.imread( os.path.join( self.trainingPath, sample + '/frame0001.png')))
frame2 = self.cv_resize( cv2.imread( os.path.join( self.trainingPath, sample + '/frame0050.png')))
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite( os.path.join( self.trainingPath, sample + '/OpticalFlow.png'), bgr )
the_of = bgr
the_of = np.expand_dims(the_of,
axis=0) # np.expand_dims(the_of.reshape(the_of.shape + (1,)), axis=0) # np.expand_dims(the_of, axis=0)
                # make variances
if os.path.exists(os.path.join( self.trainingPath, sample + '/basicVariance.png') ):
the_var = cv2.imread( os.path.join( self.trainingPath, sample + '/basicVariance.png') ,0 )
else:
files = sorted( glob( os.path.join(self.trainingPath, "%s/frame*.png" % sample) ) )
files = np.array([self.change_size(cv2.imread(x, 0)) for x in files])
variances = np.var(files, axis=0)
variances = (variances / np.max(variances)) * 255
del (files )
cv2.imwrite( os.path.join( self.trainingPath, sample + '/basicVariance.png'), variances )
the_var = variances
the_var = np.expand_dims(the_var.reshape(the_var.shape + (1,)), axis=0)
# load train_y
y = self.change_size(cv2.imread( mask_path, 0))
y= np.expand_dims( y, axis=0 )
y=( y==2 ).astype(int)
                # take the skip count into account and load the images
t = [ self.change_size(cv2.imread(os.path.join(self.trainingPath, "%s/frame%04d.png" % (sample, i)),0)) for i in range(0, 99, self.skip_count) ]
t = [ np.expand_dims(x, axis=0) for x in t ]
train_x.extend(t)
for i in range( len(t)):
train_y.append(y)
train_vars.append(the_var)
train_of.append(the_of)
# create the test set
# test_x = []
test_dic = {}
test_size_ref = {}
test_vars = {}
        test_ofs = {}
        if self.testPath is not None:
for sample in sorted(os.listdir(self.testPath)):
# image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0) #/ 255
# test_size_ref[sample]= image.shape
# image = self.change_size(image)
# image = image.reshape(image.shape + (1,))
# test_dic[sample] = np.expand_dims(image, axis=0)
                print(os.path.join(self.testPath, sample))  # log which test sample is being processed
if '.DS_Store' in sample : continue
the_var= None
if os.path.exists(os.path.join( self.testPath, sample + '/OpticalFlow.png') ):
the_of = cv2.imread( os.path.join( self.testPath, sample + '/OpticalFlow.png') ,1)
else:
frame1 = self.cv_resize( cv2.imread( os.path.join( self.testPath, sample + '/frame0001.png')))
frame2 = self.cv_resize( cv2.imread( os.path.join( self.testPath, sample + '/frame0050.png')))
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite( os.path.join( self.testPath, sample + '/OpticalFlow.png'), bgr )
the_of = bgr
the_of = np.expand_dims(the_of, axis=0) # np.expand_dims(the_of.reshape(the_of.shape + (1,)), axis=0) # np.expand_dims(the_of, axis=0)
                # make variances
if os.path.exists(os.path.join(self.testPath, sample + '/basicVariance.png')):
the_var = cv2.imread(os.path.join(self.testPath, sample + '/basicVariance.png'), 0)
else:
files = sorted(glob(os.path.join(self.testPath, "%s/frame*.png" % sample)))
files = np.array([self.change_size(cv2.imread(x, 0)) for x in files])
variances = np.var(files, axis=0)
variances = (variances / np.max(variances)) * 255
del (files )
cv2.imwrite(os.path.join(self.testPath, sample + '/basicVariance.png'), variances)
the_var = variances
the_var = np.expand_dims(the_var.reshape(the_var.shape + (1,)), axis=0)
test_vars[sample] = the_var
t = [cv2.imread(os.path.join(self.testPath, "%s/frame%04d.png" % (sample, i)), 0)
for i in range(0, 99, 25)]
test_size_ref[sample] = t[0].shape
t = [ self.change_size(x) for x in t ]
t = [np.expand_dims(x.reshape(x.shape + (1,)), axis=0) for x in t]
temp_vars = []
temp_ofs = []
for i in range ( len(t) ):
temp_vars.append(the_var)
temp_ofs.append( the_of )
test_vars[sample] = np.vstack(temp_vars)
                test_ofs[sample] = np.vstack( temp_ofs )
test_dic[sample] = np.vstack(t)
# test_x.append(np.expand_dims(image, axis=0))
train_x = np.vstack(train_x)
train_y = np.vstack(train_y)
train_vars = np.vstack(train_vars)
train_of = np.vstack( train_of )
# test_x = np.vstack(test_x)
train_x = train_x.reshape(train_x.shape + (1,))
train_y = train_y.reshape(train_y.shape + (1,))
#test_x = test_x.reshape(test_x.shape + (1,))
print(train_x.shape)
print(train_y.shape)
# print(test_x.shape)
self.x_train = train_x
# self.x_test = test_x
self.y_train = train_y
# if( not self.exportPath is None):
# self.save_to_file()
        return train_x, train_y, test_dic, test_size_ref, train_vars, test_vars, train_of, test_ofs
def cv_resize (self, im):
"""
code from : https://jdhao.github.io/2017/11/06/resize-image-to-square-with-padding/
:return:
"""
desired_size = 640
old_size = im.shape[:2] # old_size is in (height, width) format
ratio = desired_size/ max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = desired_size - new_size[1]
delta_h = desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
return new_im
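# Usage sketch (illustrative only; the directory names are placeholders and change_size()
# is assumed to be provided by the preprocessor base class):
#   prep = OpticalFlow(exportPath=None, trainingPath="data/train", testPath="data/test", skip_count=5)
#   (train_x, train_y, test_dic, test_size_ref,
#    train_vars, test_vars, train_of, test_ofs) = prep.preprocess()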
|
[
"numpy.zeros_like",
"cv2.cartToPolar",
"cv2.cvtColor",
"cv2.copyMakeBorder",
"numpy.expand_dims",
"numpy.var",
"cv2.imread",
"numpy.max",
"cv2.calcOpticalFlowFarneback",
"cv2.normalize",
"numpy.vstack",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((8096, 8114), 'numpy.vstack', 'np.vstack', (['train_x'], {}), '(train_x)\n', (8105, 8114), True, 'import numpy as np\n'), ((8133, 8151), 'numpy.vstack', 'np.vstack', (['train_y'], {}), '(train_y)\n', (8142, 8151), True, 'import numpy as np\n'), ((8173, 8194), 'numpy.vstack', 'np.vstack', (['train_vars'], {}), '(train_vars)\n', (8182, 8194), True, 'import numpy as np\n'), ((8214, 8233), 'numpy.vstack', 'np.vstack', (['train_of'], {}), '(train_of)\n', (8223, 8233), True, 'import numpy as np\n'), ((9245, 9287), 'cv2.resize', 'cv2.resize', (['im', '(new_size[1], new_size[0])'], {}), '(im, (new_size[1], new_size[0]))\n', (9255, 9287), False, 'import cv2\n'), ((9545, 9632), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['im', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'color'}), '(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value\n =color)\n', (9563, 9632), False, 'import cv2\n'), ((1272, 1301), 'os.listdir', 'os.listdir', (['self.trainingPath'], {}), '(self.trainingPath)\n', (1282, 1301), False, 'import os\n'), ((1334, 1387), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/mask.png')"], {}), "(self.trainingPath, sample + '/mask.png')\n", (1346, 1387), False, 'import os\n'), ((2624, 2654), 'numpy.expand_dims', 'np.expand_dims', (['the_of'], {'axis': '(0)'}), '(the_of, axis=0)\n', (2638, 2654), True, 'import numpy as np\n'), ((3774, 3799), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (3788, 3799), True, 'import numpy as np\n'), ((4559, 4584), 'os.listdir', 'os.listdir', (['self.testPath'], {}), '(self.testPath)\n', (4569, 4584), False, 'import os\n'), ((6304, 6334), 'numpy.expand_dims', 'np.expand_dims', (['the_of'], {'axis': '(0)'}), '(the_of, axis=0)\n', (6318, 6334), True, 'import numpy as np\n'), ((7885, 7905), 'numpy.vstack', 'np.vstack', (['temp_vars'], {}), '(temp_vars)\n', (7894, 7905), True, 'import numpy as np\n'), ((7941, 7960), 'numpy.vstack', 'np.vstack', (['temp_ofs'], {}), '(temp_ofs)\n', (7950, 7960), True, 'import numpy as np\n'), ((7999, 8011), 'numpy.vstack', 'np.vstack', (['t'], {}), '(t)\n', (8008, 8011), True, 'import numpy as np\n'), ((1426, 1486), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/OpticalFlow.png')"], {}), "(self.trainingPath, sample + '/OpticalFlow.png')\n", (1438, 1486), False, 'import os\n'), ((1888, 1928), 'cv2.cvtColor', 'cv2.cvtColor', (['frame1', 'cv2.COLOR_BGR2GRAY'], {}), '(frame1, cv2.COLOR_BGR2GRAY)\n', (1900, 1928), False, 'import cv2\n'), ((1955, 1976), 'numpy.zeros_like', 'np.zeros_like', (['frame1'], {}), '(frame1)\n', (1968, 1976), True, 'import numpy as np\n'), ((2043, 2083), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.COLOR_BGR2GRAY'], {}), '(frame2, cv2.COLOR_BGR2GRAY)\n', (2055, 2083), False, 'import cv2\n'), ((2111, 2183), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prvs', 'next', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n', (2139, 2183), False, 'import cv2\n'), ((2215, 2258), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (2230, 2258), False, 'import cv2\n'), ((2349, 2398), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (2362, 2398), False, 'import cv2\n'), ((2425, 2461), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (2437, 2461), 
False, 'import cv2\n'), ((2866, 2928), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/basicVariance.png')"], {}), "(self.trainingPath, sample + '/basicVariance.png')\n", (2878, 2928), False, 'import os\n'), ((3293, 3314), 'numpy.var', 'np.var', (['files'], {'axis': '(0)'}), '(files, axis=0)\n', (3299, 3314), True, 'import numpy as np\n'), ((3728, 3752), 'cv2.imread', 'cv2.imread', (['mask_path', '(0)'], {}), '(mask_path, 0)\n', (3738, 3752), False, 'import cv2\n'), ((4098, 4123), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4112, 4123), True, 'import numpy as np\n'), ((4944, 5005), 'os.path.join', 'os.path.join', (['self.testPath', "('%s/frame%04d.png' % (sample, i))"], {}), "(self.testPath, '%s/frame%04d.png' % (sample, i))\n", (4956, 5005), False, 'import os\n'), ((5126, 5182), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/OpticalFlow.png')"], {}), "(self.testPath, sample + '/OpticalFlow.png')\n", (5138, 5182), False, 'import os\n'), ((5570, 5610), 'cv2.cvtColor', 'cv2.cvtColor', (['frame1', 'cv2.COLOR_BGR2GRAY'], {}), '(frame1, cv2.COLOR_BGR2GRAY)\n', (5582, 5610), False, 'import cv2\n'), ((5637, 5658), 'numpy.zeros_like', 'np.zeros_like', (['frame1'], {}), '(frame1)\n', (5650, 5658), True, 'import numpy as np\n'), ((5725, 5765), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.COLOR_BGR2GRAY'], {}), '(frame2, cv2.COLOR_BGR2GRAY)\n', (5737, 5765), False, 'import cv2\n'), ((5793, 5865), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prvs', 'next', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n', (5821, 5865), False, 'import cv2\n'), ((5897, 5940), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (5912, 5940), False, 'import cv2\n'), ((6031, 6080), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (6044, 6080), False, 'import cv2\n'), ((6107, 6143), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (6119, 6143), False, 'import cv2\n'), ((6502, 6560), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/basicVariance.png')"], {}), "(self.testPath, sample + '/basicVariance.png')\n", (6514, 6560), False, 'import os\n'), ((6907, 6928), 'numpy.var', 'np.var', (['files'], {'axis': '(0)'}), '(files, axis=0)\n', (6913, 6928), True, 'import numpy as np\n'), ((1533, 1593), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/OpticalFlow.png')"], {}), "(self.trainingPath, sample + '/OpticalFlow.png')\n", (1545, 1593), False, 'import os\n'), ((2495, 2555), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/OpticalFlow.png')"], {}), "(self.trainingPath, sample + '/OpticalFlow.png')\n", (2507, 2555), False, 'import os\n'), ((2976, 3038), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/basicVariance.png')"], {}), "(self.trainingPath, sample + '/basicVariance.png')\n", (2988, 3038), False, 'import os\n'), ((3453, 3515), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/basicVariance.png')"], {}), "(self.trainingPath, sample + '/basicVariance.png')\n", (3465, 3515), False, 'import os\n'), ((5229, 5285), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/OpticalFlow.png')"], {}), "(self.testPath, sample + '/OpticalFlow.png')\n", (5241, 5285), False, 'import 
os\n'), ((6177, 6233), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/OpticalFlow.png')"], {}), "(self.testPath, sample + '/OpticalFlow.png')\n", (6189, 6233), False, 'import os\n'), ((6604, 6662), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/basicVariance.png')"], {}), "(self.testPath, sample + '/basicVariance.png')\n", (6616, 6662), False, 'import os\n'), ((7064, 7122), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/basicVariance.png')"], {}), "(self.testPath, sample + '/basicVariance.png')\n", (7076, 7122), False, 'import os\n'), ((7342, 7403), 'os.path.join', 'os.path.join', (['self.testPath', "('%s/frame%04d.png' % (sample, i))"], {}), "(self.testPath, '%s/frame%04d.png' % (sample, i))\n", (7354, 7403), False, 'import os\n'), ((1680, 1738), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/frame0001.png')"], {}), "(self.trainingPath, sample + '/frame0001.png')\n", (1692, 1738), False, 'import os\n'), ((1799, 1857), 'os.path.join', 'os.path.join', (['self.trainingPath', "(sample + '/frame0050.png')"], {}), "(self.trainingPath, sample + '/frame0050.png')\n", (1811, 1857), False, 'import os\n'), ((3109, 3166), 'os.path.join', 'os.path.join', (['self.trainingPath', "('%s/frame*.png' % sample)"], {}), "(self.trainingPath, '%s/frame*.png' % sample)\n", (3121, 3166), False, 'import os\n'), ((3360, 3377), 'numpy.max', 'np.max', (['variances'], {}), '(variances)\n', (3366, 3377), True, 'import numpy as np\n'), ((3964, 4029), 'os.path.join', 'os.path.join', (['self.trainingPath', "('%s/frame%04d.png' % (sample, i))"], {}), "(self.trainingPath, '%s/frame%04d.png' % (sample, i))\n", (3976, 4029), False, 'import os\n'), ((5370, 5424), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/frame0001.png')"], {}), "(self.testPath, sample + '/frame0001.png')\n", (5382, 5424), False, 'import os\n'), ((5485, 5539), 'os.path.join', 'os.path.join', (['self.testPath', "(sample + '/frame0050.png')"], {}), "(self.testPath, sample + '/frame0050.png')\n", (5497, 5539), False, 'import os\n'), ((6729, 6782), 'os.path.join', 'os.path.join', (['self.testPath', "('%s/frame*.png' % sample)"], {}), "(self.testPath, '%s/frame*.png' % sample)\n", (6741, 6782), False, 'import os\n'), ((6974, 6991), 'numpy.max', 'np.max', (['variances'], {}), '(variances)\n', (6980, 6991), True, 'import numpy as np\n'), ((3226, 3242), 'cv2.imread', 'cv2.imread', (['x', '(0)'], {}), '(x, 0)\n', (3236, 3242), False, 'import cv2\n'), ((6840, 6856), 'cv2.imread', 'cv2.imread', (['x', '(0)'], {}), '(x, 0)\n', (6850, 6856), False, 'import cv2\n')]
|
import GRT
import sys
import numpy as np
import argparse
def main():
# Parse the data filename from the argument list
parser = argparse.ArgumentParser(description='Process some data.')
parser.add_argument('filename', help='A data file')
args = parser.parse_args()
filename = args.filename
# Load some training data to train the ClusterTree model
trainingData = np.loadtxt(filename, delimiter=',')
# Create a new ClusterTree instance
ctree = GRT.ClusterTree()
# Set the number of steps that will be used to choose the best splitting values
# More steps will give you a better model, but will take longer to train
ctree.setNumSplittingSteps( 100 )
# Set the maximum depth of the tree
ctree.setMaxDepth( 10 )
# Set the minimum number of samples allowed per node
ctree.setMinNumSamplesPerNode( 10 )
# Set the minimum RMS error allowed per node
ctree.setMinRMSErrorPerNode( 0.1 )
# Train a cluster tree model
if not ctree.train( trainingData ):
print("Failed to train model!")
sys.exit(1)
# if not ctree.save("CTreeModel.grt"): # this fails for some reason
# print("Failed to save model!")
# sys.exit(1)
# if not ctree.load("CTreeModel.grt"):
# print("Failed to train model!")
# sys.exit(1)
# Print the tree
ctree._print()
if __name__ == '__main__':
main()
sys.exit(0)
|
[
"GRT.ClusterTree",
"numpy.loadtxt",
"argparse.ArgumentParser",
"sys.exit"
] |
[((136, 193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some data."""'}), "(description='Process some data.')\n", (159, 193), False, 'import argparse\n'), ((392, 427), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (402, 427), True, 'import numpy as np\n'), ((481, 498), 'GRT.ClusterTree', 'GRT.ClusterTree', ([], {}), '()\n', (496, 498), False, 'import GRT\n'), ((1446, 1457), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1454, 1457), False, 'import sys\n'), ((1097, 1108), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1105, 1108), False, 'import sys\n')]
|
#!/usr/bin/env python
import numpy as np
import util
delay = .005
Fs = 48000
reps = 100
tail_samples = int(Fs*2.)
delay_samples = int(delay*Fs)
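# sanity check: with Fs=48000 and delay=0.005, delay_samples == 240, i.e. the first output
# channel (built with leading zeros below) lags the second by 5 ms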
signal = np.tile(np.r_[np.random.standard_normal(Fs*2)*.1, np.zeros(Fs*2)], reps)
signal = np.vstack((np.r_[np.zeros(delay_samples), signal], np.r_[signal, np.zeros(delay_samples)])).T
signal = np.vstack((signal, np.zeros((tail_samples, 2))))
util.writewave('noise.wav', signal, Fs, 3)
|
[
"numpy.zeros",
"numpy.random.standard_normal",
"util.writewave"
] |
[((387, 429), 'util.writewave', 'util.writewave', (['"""noise.wav"""', 'signal', 'Fs', '(3)'], {}), "('noise.wav', signal, Fs, 3)\n", (401, 429), False, 'import util\n'), ((357, 384), 'numpy.zeros', 'np.zeros', (['(tail_samples, 2)'], {}), '((tail_samples, 2))\n', (365, 384), True, 'import numpy as np\n'), ((203, 219), 'numpy.zeros', 'np.zeros', (['(Fs * 2)'], {}), '(Fs * 2)\n', (211, 219), True, 'import numpy as np\n'), ((167, 200), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(Fs * 2)'], {}), '(Fs * 2)\n', (192, 200), True, 'import numpy as np\n'), ((252, 275), 'numpy.zeros', 'np.zeros', (['delay_samples'], {}), '(delay_samples)\n', (260, 275), True, 'import numpy as np\n'), ((300, 323), 'numpy.zeros', 'np.zeros', (['delay_samples'], {}), '(delay_samples)\n', (308, 323), True, 'import numpy as np\n')]
|
import cv2
import os
import glob
import numpy as np
import subprocess
IN_PATH = '/data5/xin/i3d/HDHIT310Q_4171741/png'
OUT_PATH = '/data5/xin/i3d/HDHIT310Q_4171741/npy'
# IN_PATH = '/data5/xin/i3d/'
# OUT_PATH = '/data5/xin/i3d/npy'
FRAME_RATE = 25
IMAGE_SIZE = 224
FRAME_PER_CHUNK = 100
OVERLAP = 20
EXPECTED_SHAPE = (FRAME_PER_CHUNK, IMAGE_SIZE, IMAGE_SIZE, 3)
BATCH_SIZE = 8
def resize(im, target_size=IMAGE_SIZE):
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_scale = float(IMAGE_SIZE) / float(im_size_min)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im
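# e.g. (illustrative) a 480x640 frame is scaled by 224/480 to roughly 224x299 so that the
# shorter side matches IMAGE_SIZE; the centre crop in the main loop then cuts a 224x224 window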
def fix(chunk):
if chunk.shape != EXPECTED_SHAPE:
tmp = np.zeros(EXPECTED_SHAPE)
# tmp[:chunk.shape[0], :chunk.shape[1], :chunk.shape[2], :chunk.shape[3], :chunk.shape[4]] = chunk
tmp[:chunk.shape[0], :chunk.shape[1], :chunk.shape[2], :chunk.shape[3]] = chunk
chunk = tmp
assert(chunk.shape == EXPECTED_SHAPE)
print('>>>>>>>>>>>', chunk.shape)
return chunk
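# e.g. (illustrative) a trailing chunk holding only 37 frames is zero-padded by fix() to
# (100, 224, 224, 3) so that every saved batch stacks into a uniform array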
image_paths = glob.glob(os.path.join(IN_PATH, '*.png'))
image_paths.sort(key=lambda x: int(os.path.splitext(x.split('-')[-1])[0]) )
# print(">>>>>>>>>>> image_paths", image_paths)
# print('>>>>>>>>>', len(image_paths))
result = []
chunk = []
chunk_idx = 0
start_idx = 0
for image_idx, image_path in enumerate(image_paths):
im = cv2.imread(image_path)
im = resize(im)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w, c = im.shape
    half = IMAGE_SIZE // 2
    h_start = max(0, h // 2 - half)
    w_start = max(0, w // 2 - half)
im_array = np.array(im)[h_start:h_start+IMAGE_SIZE, w_start:w_start+IMAGE_SIZE, :]
im_array = (np.float32(im_array) / 255.0 - 0.5) * 2
assert(im_array.shape == (IMAGE_SIZE, IMAGE_SIZE, 3))
chunk.append(im_array)
if len(chunk) == FRAME_PER_CHUNK:
# result.append(np.expand_dims(np.array(chunk), axis=0))
result.append(fix(np.array(chunk)))
chunk = chunk[-20:]
if len(result) == BATCH_SIZE:
end_idx = image_idx+1
save_path = os.path.join(OUT_PATH, 'chunk_{}_start_{}_end_{}.npy').format(chunk_idx, start_idx, end_idx)
np.save(save_path, np.stack(result, axis=0))
# print('>>>>>>>>>> saved to {}'.format(save_path))
# print('>>>>>>>>>>> here', np.stack(result, axis=0).shape)
chunk_idx += 1
result = []
start_idx = end_idx - 20
if chunk:
# result.append(np.expand_dims(np.array(chunk), axis=0))
    result.append(fix(np.array(chunk)))  # pad the trailing chunk so it stacks with the full-size chunks
chunk = []
if result:
end_idx = image_idx+1
save_path = os.path.join(OUT_PATH, 'chunk_{}_start_{}_end_{}.npy').format(chunk_idx, start_idx, end_idx)
    np.save(save_path, np.stack(result, axis=0))
# print('>>>>>>>>>> saved to {}'.format(save_path))
# print('>>>>>>>>>>> here', np.stack(chunks, axis=0).shape)
chunk_idx += 1
result = []
start_idx = end_idx - 20
|
[
"numpy.stack",
"cv2.cvtColor",
"numpy.float32",
"numpy.zeros",
"cv2.imread",
"numpy.min",
"numpy.array",
"os.path.join",
"cv2.resize"
] |
[((464, 485), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (470, 485), True, 'import numpy as np\n'), ((549, 638), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.\n INTER_LINEAR)\n', (559, 638), False, 'import cv2\n'), ((1079, 1109), 'os.path.join', 'os.path.join', (['IN_PATH', '"""*.png"""'], {}), "(IN_PATH, '*.png')\n", (1091, 1109), False, 'import os\n'), ((1392, 1414), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1402, 1414), False, 'import cv2\n'), ((1444, 1479), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (1456, 1479), False, 'import cv2\n'), ((717, 741), 'numpy.zeros', 'np.zeros', (['EXPECTED_SHAPE'], {}), '(EXPECTED_SHAPE)\n', (725, 741), True, 'import numpy as np\n'), ((1624, 1636), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1632, 1636), True, 'import numpy as np\n'), ((2551, 2566), 'numpy.array', 'np.array', (['chunk'], {}), '(chunk)\n', (2559, 2566), True, 'import numpy as np\n'), ((2757, 2781), 'numpy.stack', 'np.stack', (['chunks'], {'axis': '(0)'}), '(chunks, axis=0)\n', (2765, 2781), True, 'import numpy as np\n'), ((2231, 2255), 'numpy.stack', 'np.stack', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (2239, 2255), True, 'import numpy as np\n'), ((2641, 2695), 'os.path.join', 'os.path.join', (['OUT_PATH', '"""chunk_{}_start_{}_end_{}.npy"""'], {}), "(OUT_PATH, 'chunk_{}_start_{}_end_{}.npy')\n", (2653, 2695), False, 'import os\n'), ((1712, 1732), 'numpy.float32', 'np.float32', (['im_array'], {}), '(im_array)\n', (1722, 1732), True, 'import numpy as np\n'), ((1976, 1991), 'numpy.array', 'np.array', (['chunk'], {}), '(chunk)\n', (1984, 1991), True, 'import numpy as np\n'), ((2111, 2165), 'os.path.join', 'os.path.join', (['OUT_PATH', '"""chunk_{}_start_{}_end_{}.npy"""'], {}), "(OUT_PATH, 'chunk_{}_start_{}_end_{}.npy')\n", (2123, 2165), False, 'import os\n')]
|
# PointNetVLAD datasets: based on Oxford RobotCar and Inhouse
# Code adapted from PointNetVLAD repo: https://github.com/mikacuy/pointnetvlad
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import KDTree
import pickle
import argparse
# For training and test data splits
X_WIDTH = 150
Y_WIDTH = 150
# For Oxford
P1 = [5735712.768124, 620084.402381]
P2 = [5735611.299219, 620540.270327]
P3 = [5735237.358209, 620543.094379]
P4 = [5734749.303802, 619932.693364]
# For University Sector
P5 = [363621.292362, 142864.19756]
P6 = [364788.795462, 143125.746609]
P7 = [363597.507711, 144011.414174]
# For Residential Area
P8 = [360895.486453, 144999.915143]
P9 = [362357.024536, 144894.825301]
P10 = [361368.907155, 145209.663042]
P_DICT = {"oxford": [P1, P2, P3, P4], "university": [P5, P6, P7], "residential": [P8, P9, P10], "business": []}
def check_in_test_set(northing, easting, points):
in_test_set = False
for point in points:
if point[0] - X_WIDTH < northing < point[0] + X_WIDTH and point[1] - Y_WIDTH < easting < point[1] + Y_WIDTH:
in_test_set = True
break
return in_test_set
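# e.g. (illustrative) an Oxford submap at northing=5735700, easting=620100 lies inside the
# 2*X_WIDTH x 2*Y_WIDTH box around P1 and is therefore assigned to the test split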
def output_to_file(output, base_path, filename):
file_path = os.path.join(base_path, filename)
with open(file_path, 'wb') as handle:
pickle.dump(output, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done ", filename)
def construct_query_and_database_sets(base_path, runs_folder, folders, pointcloud_fols, filename, p, output_name):
database_trees = []
test_trees = []
for folder in folders:
print(folder)
df_database = pd.DataFrame(columns=['file', 'northing', 'easting'])
df_test = pd.DataFrame(columns=['file', 'northing', 'easting'])
df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
# df_locations['timestamp']=runs_folder+folder+pointcloud_fols+df_locations['timestamp'].astype(str)+'.bin'
# df_locations=df_locations.rename(columns={'timestamp':'file'})
for index, row in df_locations.iterrows():
# entire business district is in the test set
if output_name == "business":
df_test = df_test.append(row, ignore_index=True)
elif check_in_test_set(row['northing'], row['easting'], p):
df_test = df_test.append(row, ignore_index=True)
df_database = df_database.append(row, ignore_index=True)
database_tree = KDTree(df_database[['northing', 'easting']])
test_tree = KDTree(df_test[['northing', 'easting']])
database_trees.append(database_tree)
test_trees.append(test_tree)
test_sets = []
database_sets = []
for folder in folders:
database = {}
test = {}
df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
df_locations['timestamp'] = runs_folder + folder + pointcloud_fols + \
df_locations['timestamp'].astype(str) + '.bin'
df_locations = df_locations.rename(columns={'timestamp': 'file'})
for index, row in df_locations.iterrows():
# entire business district is in the test set
if output_name == "business":
test[len(test.keys())] = {'query': row['file'], 'northing': row['northing'], 'easting': row['easting']}
elif check_in_test_set(row['northing'], row['easting'], p):
test[len(test.keys())] = {'query': row['file'], 'northing': row['northing'], 'easting': row['easting']}
database[len(database.keys())] = {'query': row['file'], 'northing': row['northing'],
'easting': row['easting']}
database_sets.append(database)
test_sets.append(test)
for i in range(len(database_sets)):
tree = database_trees[i]
for j in range(len(test_sets)):
if i == j:
continue
for key in range(len(test_sets[j].keys())):
coor = np.array([[test_sets[j][key]["northing"], test_sets[j][key]["easting"]]])
index = tree.query_radius(coor, r=25)
# indices of the positive matches in database i of each query (key) in test set j
test_sets[j][key][i] = index[0].tolist()
output_to_file(database_sets, base_path, "minkloc_" + output_name + '_evaluation_database.pickle')
output_to_file(test_sets, base_path, "minkloc_" + output_name + '_evaluation_query.pickle')
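# The saved pickles are lists with one entry per run folder: each database entry maps an integer
# key to {'query': file, 'northing': ..., 'easting': ...}; each query entry additionally stores,
# for every other run index i, the list of database indices that lie within 25 m of the query.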
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate evaluation datasets')
parser.add_argument('--dataset_root', type=str, required=True, help='Dataset root folder')
args = parser.parse_args()
print('Dataset root: {}'.format(args.dataset_root))
assert os.path.exists(args.dataset_root), f"Cannot access dataset root folder: {args.dataset_root}"
base_path = args.dataset_root
# For Oxford
folders = []
runs_folder = "oxford/"
all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
index_list = [5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 24, 31, 32, 33, 38, 39, 43, 44]
print(len(index_list))
for index in index_list:
folders.append(all_folders[index])
print(folders)
construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_20m/",
"pointcloud_locations_20m.csv", P_DICT["oxford"], "oxford")
# For University Sector
folders = []
runs_folder = "inhouse_datasets/"
all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
uni_index = range(10, 15)
for index in uni_index:
folders.append(all_folders[index])
print(folders)
construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_25m_25/",
"pointcloud_centroids_25.csv", P_DICT["university"], "university")
# For Residential Area
folders = []
runs_folder = "inhouse_datasets/"
all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
res_index = range(5, 10)
for index in res_index:
folders.append(all_folders[index])
print(folders)
construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_25m_25/",
"pointcloud_centroids_25.csv", P_DICT["residential"], "residential")
# For Business District
folders = []
runs_folder = "inhouse_datasets/"
all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
bus_index = range(5)
for index in bus_index:
folders.append(all_folders[index])
print(folders)
construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_25m_25/",
"pointcloud_centroids_25.csv", P_DICT["business"], "business")
|
[
"pandas.DataFrame",
"pickle.dump",
"argparse.ArgumentParser",
"os.path.exists",
"sklearn.neighbors.KDTree",
"numpy.array",
"os.path.join"
] |
[((1220, 1253), 'os.path.join', 'os.path.join', (['base_path', 'filename'], {}), '(base_path, filename)\n', (1232, 1253), False, 'import os\n'), ((4593, 4660), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate evaluation datasets"""'}), "(description='Generate evaluation datasets')\n", (4616, 4660), False, 'import argparse\n'), ((4856, 4889), 'os.path.exists', 'os.path.exists', (['args.dataset_root'], {}), '(args.dataset_root)\n', (4870, 4889), False, 'import os\n'), ((1304, 1365), 'pickle.dump', 'pickle.dump', (['output', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(output, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (1315, 1365), False, 'import pickle\n'), ((1627, 1680), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['file', 'northing', 'easting']"}), "(columns=['file', 'northing', 'easting'])\n", (1639, 1680), True, 'import pandas as pd\n'), ((1699, 1752), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['file', 'northing', 'easting']"}), "(columns=['file', 'northing', 'easting'])\n", (1711, 1752), True, 'import pandas as pd\n'), ((2490, 2534), 'sklearn.neighbors.KDTree', 'KDTree', (["df_database[['northing', 'easting']]"], {}), "(df_database[['northing', 'easting']])\n", (2496, 2534), False, 'from sklearn.neighbors import KDTree\n'), ((2555, 2595), 'sklearn.neighbors.KDTree', 'KDTree', (["df_test[['northing', 'easting']]"], {}), "(df_test[['northing', 'easting']])\n", (2561, 2595), False, 'from sklearn.neighbors import KDTree\n'), ((1789, 1843), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder', 'folder', 'filename'], {}), '(base_path, runs_folder, folder, filename)\n', (1801, 1843), False, 'import os\n'), ((2823, 2877), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder', 'folder', 'filename'], {}), '(base_path, runs_folder, folder, filename)\n', (2835, 2877), False, 'import os\n'), ((5082, 5118), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder'], {}), '(base_path, runs_folder)\n', (5094, 5118), False, 'import os\n'), ((5655, 5691), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder'], {}), '(base_path, runs_folder)\n', (5667, 5691), False, 'import os\n'), ((6133, 6169), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder'], {}), '(base_path, runs_folder)\n', (6145, 6169), False, 'import os\n'), ((6613, 6649), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder'], {}), '(base_path, runs_folder)\n', (6625, 6649), False, 'import os\n'), ((4068, 4141), 'numpy.array', 'np.array', (["[[test_sets[j][key]['northing'], test_sets[j][key]['easting']]]"], {}), "([[test_sets[j][key]['northing'], test_sets[j][key]['easting']]])\n", (4076, 4141), True, 'import numpy as np\n')]
|
import numpy as np
# moves: up->1, down->2, left->3, right->4
# board: 0->wall, 1->empty, 2->destination,
#        3->destination with box, 4->box, 5->player
class BruteSolve:
def __init__(self, basic_state):
        # state id (int) -> board state (np.array)
self.state_by_id = {0:basic_state}
# tuple(np.array) -> state_index, previous_state_index,move_from_previous_to_me
self.state_data = {tuple(basic_state.reshape(-1)):(0,None,None)}
self.current_states = [basic_state]
self.next_states = list()
self.destinations = {tuple(i) for i in np.argwhere(basic_state == 2)}
self.move_functions = {1:self.move_up,
2:self.move_down,
3:self.move_left,
4:self.move_right}
self.solution = self.solve()
def solve(self):
while True:
self.next_states = list()
for state in self.current_states:
for move_number in self.move_functions:
next_state = self.apply_move(move_number,state)
next_state_as_tuple = tuple(next_state.reshape(-1))
if next_state_as_tuple not in self.state_data:
self.add_state(next_state,next_state_as_tuple,state,move_number)
if self.game_over_victory(next_state):
solution = self.rewind(next_state)
return solution
self.current_states = self.next_states
def add_state(self,state,state_tuple, previous_state,move):
self.next_states.append(state)
state_id = len(self.state_by_id)
self.state_by_id[state_id] = state
previous_state_id = self.state_data[tuple(previous_state.reshape(-1))][0]
self.state_data[state_tuple] = (state_id,previous_state_id,move)
def rewind(self,winning_state):
steps_to_victory = []
current = winning_state
while current is not None:
state_tuple = tuple(current.reshape(-1))
_,previous_state_id,move = self.state_data[state_tuple]
if move is None: break # state 0
steps_to_victory.append(move)
current = self.state_by_id[previous_state_id]
steps_to_victory.reverse()
return steps_to_victory
def move_down(self,state):
state = np.rot90(state,2)
state = self.move_up(state)
state = np.rot90(state,2)
return state
def move_right(self,state):
state = np.rot90(state)
state = self.move_up(state)
state = np.rot90(state,-1)
return state
def move_left(self,state):
state = np.rot90(state,-1)
state = self.move_up(state)
state = np.rot90(state)
return state
def move_up(self, state):
player_y, player_x = np.argwhere(state == 5)[0]
above = state[player_y - 1, player_x]
if above == 0: # wall
return state
elif above in (1, 2): # empty/destination
state[player_y - 1, player_x] = 5
state[player_y, player_x] = 1
return state
elif above in (3, 4): # box on destination / box
above_2 = state[player_y - 2, player_x]
if above_2 in (1, 2): # empty/destination
state[player_y - 2, player_x] = 4 if above_2 == 1 else 3
state[player_y - 1, player_x] = 5
state[player_y , player_x] = 1
                return state
            # the box is blocked by a wall or another box, so the move changes nothing
            return state
        error_message = f"don't know how to deal with above = {above}"
raise Exception(error_message)
def apply_move(self, move, state):
state = state.copy()
movment = self.move_functions[move]
state = movment(state)
state = self.fix_destinations(state)
return state
def fix_destinations(self,state):
for y,x in self.destinations:
if state[y,x] == 1:
state[y, x] = 2
return state
def game_over_victory(self,state):
for y,x in self.destinations:
if state[y,x] != 3:
return False
return True
if __name__ == '__main__':
state = np.array([[0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,1,1,1,1,0,0],
[0,0,0,0,2,4,4,2,1,0],
[0,0,0,0,0,0,2,1,2,0],
[0,0,0,0,0,0,1,1,1,0],
[0,0,0,0,0,0,0,4,1,0],
[0,0,0,0,0,0,0,1,4,0],
[0,0,0,0,0,0,0,0,5,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0]])
bs = BruteSolve(state)
solution = bs.solution
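    # bs.solution is a list of move codes (1=up, 2=down, 3=left, 4=right) that leads from the
    # start state to a board on which every destination square holds a box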
|
[
"numpy.argwhere",
"numpy.rot90",
"numpy.array"
] |
[((4209, 4556), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, \n 0, 2, 4, 4, 2, 1, 0], [0, 0, 0, 0, 0, 0, 2, 1, 2, 0], [0, 0, 0, 0, 0, 0,\n 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 4, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1, 4,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 5, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [\n 0, 0, 0, 0, 2, 4, 4, 2, 1, 0], [0, 0, 0, 0, 0, 0, 2, 1, 2, 0], [0, 0, 0,\n 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 4, 1, 0], [0, 0, 0, 0, 0, 0,\n 0, 1, 4, 0], [0, 0, 0, 0, 0, 0, 0, 0, 5, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n', (4217, 4556), True, 'import numpy as np\n'), ((2388, 2406), 'numpy.rot90', 'np.rot90', (['state', '(2)'], {}), '(state, 2)\n', (2396, 2406), True, 'import numpy as np\n'), ((2458, 2476), 'numpy.rot90', 'np.rot90', (['state', '(2)'], {}), '(state, 2)\n', (2466, 2476), True, 'import numpy as np\n'), ((2547, 2562), 'numpy.rot90', 'np.rot90', (['state'], {}), '(state)\n', (2555, 2562), True, 'import numpy as np\n'), ((2615, 2634), 'numpy.rot90', 'np.rot90', (['state', '(-1)'], {}), '(state, -1)\n', (2623, 2634), True, 'import numpy as np\n'), ((2703, 2722), 'numpy.rot90', 'np.rot90', (['state', '(-1)'], {}), '(state, -1)\n', (2711, 2722), True, 'import numpy as np\n'), ((2774, 2789), 'numpy.rot90', 'np.rot90', (['state'], {}), '(state)\n', (2782, 2789), True, 'import numpy as np\n'), ((2872, 2895), 'numpy.argwhere', 'np.argwhere', (['(state == 5)'], {}), '(state == 5)\n', (2883, 2895), True, 'import numpy as np\n'), ((574, 603), 'numpy.argwhere', 'np.argwhere', (['(basic_state == 2)'], {}), '(basic_state == 2)\n', (585, 603), True, 'import numpy as np\n')]
|
from utils.common import to_numpy, rec_fn_apply
import torch
import cv2
import torchvision
import os
import csv
from PIL import Image
from torchvision import transforms
MAX_LOG_IMAGES = 4
import numpy as np
def log_images_hook(model, iteration, loop_type, inp, target, output, meta, epoch, writer, classes=None, meter_dict=None):
if iteration != 0:
return
perm = torch.randperm(inp.size(0))[:MAX_LOG_IMAGES]
orig_img = meta['original_img']
for idx in perm:
fig = compose_logged_image(x=inp[idx], y=target[idx], yh=output[idx], orig_img=orig_img[idx], classes=classes)
writer.add_image(f'images/{loop_type}', fig, epoch)
def compose_logged_image(x, y, yh, orig_img, classes):
gt = y.detach().cpu().item()
pred = yh.argmax().detach().cpu().item()
if classes is not None:
gt = classes[gt]
pred = classes[pred]
pred_box = compose_prediction_box(x.shape[1:], gt, pred)
fig = torchvision.utils.make_grid([orig_img.detach().cpu(), x.detach().cpu(), pred_box])
return fig
def compose_prediction_box(img_shape, gt, pred):
h,w = img_shape
dy = int(20 * h/96.0)
fontscale = 0.5*h/96.0
text_image = np.ones((h, w, 3))*1
for i, word in enumerate(['gt', str(gt), 'pred', str(pred)]):
cv2.putText(text_image, word, org=(0,(i+1)*dy), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=fontscale, color=(0, 0, 0))
text_image = text_image.transpose(2, 0, 1)
return torch.FloatTensor(text_image)
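# Illustrative: compose_prediction_box((96, 96), gt='cat', pred='dog') draws the words 'gt',
# 'cat', 'pred', 'dog' onto an all-ones 96x96 panel and returns it as a 3x96x96 FloatTensor,
# ready to be placed next to the input image by torchvision.utils.make_grid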
|
[
"cv2.putText",
"torch.FloatTensor",
"numpy.ones"
] |
[((1461, 1490), 'torch.FloatTensor', 'torch.FloatTensor', (['text_image'], {}), '(text_image)\n', (1478, 1490), False, 'import torch\n'), ((1187, 1205), 'numpy.ones', 'np.ones', (['(h, w, 3)'], {}), '((h, w, 3))\n', (1194, 1205), True, 'import numpy as np\n'), ((1282, 1412), 'cv2.putText', 'cv2.putText', (['text_image', 'word'], {'org': '(0, (i + 1) * dy)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': 'fontscale', 'color': '(0, 0, 0)'}), '(text_image, word, org=(0, (i + 1) * dy), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=fontscale, color=(0, 0, 0))\n', (1293, 1412), False, 'import cv2\n')]
|
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
File description:
This file contains various code snippets with varying relevance focused on numpy point clouds
"""
import numpy as np
import random
# set random seeds
np.random.seed (1337 )
random.seed (1337 )
# prepare cloud
numpy_cloud = np.array([[1.1, 2.1, 3.1],
[1.2, 2.2, 3.2],
[1.3, 2.3, 3.3],
[1.4, 2.4, 3.4],
[1.5, 2.5, 3.5],
[1.6, 2.6, 3.6]] )
#
# # last test case
# from modules import input_output
# from modules import icp
#
#
# np_pointcloud = input_output.load_ascii_file ("clouds/tmp/noisy_reference.asc" )
# corresponding_pointcloud = input_output.load_ascii_file ("clouds/tmp/cut_correspondence.asc" )
#
# t, mse = icp.icp (np_pointcloud.points[:, 0:3], corresponding_pointcloud.points[:, 0:3] )
#
# print ("ICP Results: ")
# print (t)
# print (mse)
# ## apply noise
# from modules import input_output
#
#
# numpy_cloud = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
#
# # noise
# noise = np.random.uniform (-0.015, 0.015, size=(numpy_cloud.points.shape[0], 3 ))
# noise = np.concatenate ((noise.reshape (-1, 3), np.zeros (shape=(numpy_cloud.points.shape[0], 9) )), axis=1 )
# numpy_cloud.points = numpy_cloud.points + noise
#
# input_output.save_ascii_file (numpy_cloud.points, numpy_cloud.field_labels, "clouds/tmp/noisy_reference.asc")
# # # test gaussian filter
# from modules import accumulator, input_output, consensus
# import scipy.ndimage
# import matplotlib.pyplot as plt
#
#
# def morph_consensus_cube (cube ):
#
# # add the maximum value to the coordinates, so they are positive
# cube[:, 0:3] += np.max (cube[:, 0])
#
# # normalize by new maximum, so values are distributed from 0 to 1
# cube[:, 0:3] /= np.max (cube[:, 0])
#
# # spread the values again by step count (depends on original grid_size of the cube), so the coordinates
# # are now monotonically rising real numbers that can easily be used for array indexing
# steps_count = int (cube.shape[0] ** (1/3 ))
# cube[:, 0:3] *= steps_count - 1
#
# # create new cube
# new_cube = np.zeros (shape=(steps_count + 1, steps_count + 1, steps_count + 1 ))
# for row in cube:
# new_cube[int (row[0]), int (row[1]), int (row[2])] = row[3]
#
# return new_cube
#
#
# def morph_back (morphed_cube, grid_length=2.0 ):
#
# # get steps and fashion cube container in the style of a pointcloud
# steps_count = int (morphed_cube.shape[0] ** 3 )
# cube = np.zeros (shape=(steps_count, 4 ))
#
# #
# iterator = 0
# for x_dim in range (morphed_cube.shape[0] ):
# for y_dim in range (morphed_cube.shape[1] ):
# for z_dim in range (morphed_cube.shape[2] ):
#
# cube[iterator, :] = [x_dim, y_dim, z_dim, morphed_cube[int (x_dim), int (y_dim), int (z_dim)]]
# iterator += 1
#
# # normalize by steps_count, so values are distributed from 0 to 1
# cube[:, 0:3] /= np.max (cube[:, 0])
#
# # apply the original grid_length
# cube[:, 0:3] *= grid_length
#
# # add the maximum value to the coordinates, so they are positive
# cube[:, 0:3] -= np.max (cube[:, 0]) / 2
#
# return cube
#
#
# #test_cube = accumulator.create_closed_grid (0.5, 0.25 )
# #test_cube[:, 3] = test_cube [:, 2] + np.random.uniform (-.1, .1, size=(test_cube.shape[0] ))
#
# test_cube, _ = input_output.load_ascii_file (str ("docs/logs/unordered_cube_savefiles/Color_Houses Color Houses_dim16 "
# + "to Color Houses_als16_distance-accumulator_sphere_radius_1.0_"
# + "step_0.05.asc" ), return_separate=True )
#
# original = test_cube.copy ()
#
# test_cube = morph_consensus_cube (test_cube )
# sigma = 1
# test_cube = scipy.ndimage.gaussian_filter (test_cube, sigma, order=0 )
# test_cube = morph_back (test_cube )
#
# np.set_printoptions (precision=2, linewidth=380, suppress=True )
#
# combined_cube = np.concatenate ((original, test_cube[:, 3].reshape (-1, 1 )), axis=1 )
# print (combined_cube[-100:-1, :] )
#
# accumulator.display_consensus_cube (original, 1, (0, 0, 0 ), "original")
# accumulator.display_consensus_cube (test_cube, 1, (0, 0, 0 ), str ("gaussian filtered sigma=" + str (sigma )))
#
# #plt.show ()
# Test Cloud Pruning
# from modules import input_output, conversions, np_pointcloud
# a1 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
# d1 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_dim16_reduced_normals_r_1_cleared.asc" )
# a2 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
# d2 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_dim16_reduced_normals_r_1_cleared.asc" )
# a3 = input_output.load_ascii_file (
# "clouds/New Regions/Forest/Forest_als16_reduced_normals_r_1_cleared.asc" )
# d3 = input_output.load_ascii_file (
# "clouds/New Regions/Forest/Forest_dim16_reduced_normals_r_1_cleared.asc" )
# a4 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
# d4 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_dim16_reduced_normals_r_1_cleared.asc" )
# a5 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
# d5 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_dim16_reduced_normals_r_1_cleared.asc" )
# a6 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
# d6 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_dim16_reduced_normals_r_1_cleared.asc" )
# a7 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_als16_reduced_normals_r_1_cleared.asc" )
# d7 = input_output.load_ascii_file (
# "clouds/New Regions/Everything/Everything_dim16_reduced_normals_r_1_cleared.asc" )
# input_output.save_ascii_file (a7.points, a7.field_labels, "clouds/tmp/original_als.asc" )
# input_output.save_ascii_file (d7.points, d7.field_labels, "clouds/tmp/original_dim.asc" )
#
# # points
# cloud_points_ref, cloud_points_corr = conversions.prune_model_outliers (a1, d1 )
# input_output.save_ascii_file (cloud_points_ref.points, cloud_points_ref.field_labels, "clouds/tmp/points_ref.asc" )
# input_output.save_ascii_file (cloud_points_corr.points, cloud_points_corr.field_labels, "clouds/tmp/points_corr.asc" )
#
# # borders
# cloud_borders = conversions.prune_cloud_borders (a2.points )
# input_output.save_ascii_file (cloud_borders, a2.field_labels, "clouds/tmp/borders.asc" )
#
# water classes
# input_output.save_ascii_file (d3.points, d3.field_labels, "clouds/tmp/original_water.asc" )
# cloud_water = conversions.remove_water_classes (d3 )
# input_output.save_ascii_file (cloud_water.points, cloud_water.field_labels, "clouds/tmp/water.asc" )
#
# # sigma
# cloud_sigma = conversions.prune_sigma_quality (a4 )
# input_output.save_ascii_file (cloud_sigma.points, cloud_sigma.field_labels, "clouds/tmp/sigma.asc" )
#
# # vectors
# cloud_points_ref, cloud_points_corr = conversions.prune_normal_vectors (a5, d5 )
# input_output.save_ascii_file (cloud_points_ref.points, cloud_points_ref.field_labels, "clouds/tmp/vectors_ref.asc" )
# input_output.save_ascii_file (cloud_points_corr.points, cloud_points_corr.field_labels, "clouds/tmp/vectors_corr.asc" )
#
# # all
# cloud_points_ref, cloud_points_corr = conversions.prune_cloud_pair (a6, d6 )
# input_output.save_ascii_file (cloud_points_ref.points, cloud_points_ref.field_labels, "clouds/tmp/all_ref.asc" )
# input_output.save_ascii_file (cloud_points_corr.points, cloud_points_corr.field_labels, "clouds/tmp/all_corr.asc" )
# # speed test accumulator init
# import math
# import time
#
#
# def create_closed_grid (grid_length, step ):
#
# # grid variables
# steps_number = math.ceil (grid_length / step + 1 )
# grid_points_number = steps_number**3
#
# # make a grid in the style of a pointcloud
# grid = np.zeros ((grid_points_number, 4 ))
#
# # in intervals of step, create grid nodes
# general_iterator = 0
# minimum = -math.floor (steps_number / 2)
# maximum = math.ceil (steps_number / 2 )
# for x_iterator in range (minimum, maximum ):
# for y_iterator in range (minimum, maximum ):
# for z_iterator in range (minimum, maximum ):
#
# grid[general_iterator, 0:3] = [x_iterator * step,
# y_iterator * step,
# z_iterator * step]
#
# general_iterator += 1
#
# return grid
#
#
# measure = time.time ()
# grid = create_closed_grid (2, 0.1 )
# grid_time = time.time () - measure
#
# print ("grid.shape: " + str (grid.shape ))
# # print ("grid:\n" + str (grid ))
#
# measure = time.time ()
# lmax = 1
# lmin = -lmax
# step = 0.1
# xyz = np.transpose(np.reshape(np.mgrid[lmin:lmax+step:step, lmin:lmax+step:step, lmin:lmax+step:step], (3, -1)))
# print ("\nxyz.shape: " + str (xyz.shape ))
# # print ("xyz:\n" + str (xyz ))
# xyz_time = time.time () - measure
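#
# # A minimal vectorized alternative to create_closed_grid (a sketch only; the node layout is
# # assumed to match the nested-loop version above, i.e. nodes at i * step for
# # i in range (minimum, maximum )):
# def create_closed_grid_vectorized (grid_length, step ):
#     steps_number = math.ceil (grid_length / step + 1 )
#     axis = (np.arange (steps_number ) - math.floor (steps_number / 2 )) * step
#     # build all grid nodes at once instead of three nested loops
#     xs, ys, zs = np.meshgrid (axis, axis, axis, indexing='ij' )
#     grid = np.zeros ((steps_number**3, 4 ))
#     grid[:, 0:3] = np.column_stack ((xs.ravel (), ys.ravel (), zs.ravel () ))
#     return grid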
# # Test np_pointcloud class NumpyPointCloud - This will throw warnings (and expected errors, if # are removed )
# from modules.np_pointcloud import NumpyPointCloud
#
# numpy_cloud = np.concatenate ((numpy_cloud, numpy_cloud), axis=1 )
# my_cloud = NumpyPointCloud (numpy_cloud, ["X", "Y", "Z", "I", "Don't", "Know"] )
# #
# # print ("Test: Wrong Get." + str (my_cloud.get_fields (["X", "Y", "Z", "I", "Dont", "Know"] )))
# # print ("Test: Wrong Get." + str (my_cloud.get_fields (["Dont"] )))
# print ("\nTest: Get." + str (my_cloud.get_fields (["X", "Y", "Z", "I", "Don't", "Know"] )))
#
# # wrong replace
# # my_cloud.add_fields (np.array([1.1, 2.1, 3.1, 1.1, 2.1, 3.1]).reshape (-1, 1), "Know" )
# # my_cloud.add_fields (numpy_cloud[:, 2], "Know" )
# print ("\nTest: Wrong Replace." + str (my_cloud ))
#
# print (my_cloud.shape)
# # replace
# my_cloud.add_fields (np.array ([1.1, 2.1, 1337, 1.1, 2.1, 3.1]).reshape(-1, 1), "aaa", replace=True )
# my_cloud.add_fields (numpy_cloud[:, 1], "I", replace=True )
# print ("\nTest: Replace." + str (my_cloud ))
#
# print (my_cloud.shape)
#
# # add
# my_cloud.add_fields ([1.1, 2.1, 3.1, 1337, 2.1, 3.1], "Test1" )
# print ("\nTest: Add." + str (my_cloud ))
#
# my_cloud.delete_fields (["I", "Dont", "Know"] )
# print ("\nTest: Wrong Delete." + str (my_cloud ))
#
# my_cloud.delete_fields (["Test1"] )
# print ("\nTest: Delete." + str (my_cloud ))
# # basic accumulator
# import math
# import scipy.spatial
# from modules import input_output
#
#
# def create_closed_grid (grid_length, step ):
#
# # grid variables
# steps_number = math.ceil (grid_length / step + 1 )
# grid_points_number = steps_number**3
#
# # make a grid in the style of a pointcloud
# grid = np.zeros ((grid_points_number, 4 ))
#
# # in intervals of step, create grid nodes
# general_iterator = 0
# min = -math.floor (steps_number / 2)
# max = math.ceil (steps_number / 2 )
# for x_iterator in range (min, max ):
# for y_iterator in range (min, max ):
# for z_iterator in range (min, max ):
#
# grid[general_iterator, 0:3] = [x_iterator * step,
# y_iterator * step,
# z_iterator * step]
#
# general_iterator += 1
#
# return grid
#
#
# numpy_cloud = np.array([[1, 0, 0],
# [1, 0, 0],
# [20, 0, 0],
# [30, 0, 0],
# [40, 0, 0],
# [50, 0, 0]], dtype=float )
#
# numpy_cloud += np.random.uniform (-0.1, 0.1, size=(numpy_cloud.shape[0], 3 ))
#
# corresponding_cloud = np.array([[1, 2, 0],
# [10, 2, 0],
# [20, 2, 0],
# [30, 2, 0],
# [40, 2, 0],
# [50, 0, 2]], dtype=float )
#
# corresponding_cloud += np.random.uniform (-0.1, 0.1, size=(corresponding_cloud.shape[0], 3 ))
#
#
# accumulator_radius = 2
# grid_size = 0.1
#
# # build a grid as a kdtree to discretize the results
# consensus_cube = create_closed_grid (accumulator_radius * 2, grid_size )
# grid_kdtree = scipy.spatial.cKDTree (consensus_cube[:, 0:3] )
# print ("\nconsensus_cube shape: " + str (consensus_cube.shape ))
# #print ("\nconsensus_cube:\n" + str (consensus_cube ))
#
# # build kdtree and query it for points within radius
# scipy_kdtree = scipy.spatial.cKDTree (numpy_cloud[:, 0:3] )
# cloud_indices = scipy_kdtree.query_ball_point (corresponding_cloud[:, 0:3], accumulator_radius )
# #print ("\ncloud_indices: " + str (cloud_indices ))
#
# for i, point_indices in enumerate (cloud_indices ):
# if (len(point_indices ) > 0):
#
# # diff all points found near the corresponding point with corresponding point
# diff_vectors = numpy_cloud[point_indices, 0:3] - corresponding_cloud[i, 0:3]
# print ("\n-------------------------------------------------------\n\npoint_indices:\n" + str (point_indices ))
# print ("diff_vectors:\n" + str (diff_vectors ))
#
# # rasterize
# dists, point_matches = grid_kdtree.query (diff_vectors, k=1 )
# print ("dists from gridpoints: " + str (dists.T ))
# print ("grid point matches: " + str (point_matches.T ))
#
# # update the cube with the results of this point, ignore multiple hits
# consensus_cube[np.unique (point_matches ), 3] += 1
# print ("\nupdated consensus_cube >0:\n" + str (consensus_cube[consensus_cube[:, 3] > 0, :] ))
#
#
#
# best_alignment = consensus_cube[np.argmax (consensus_cube[:, 3] ), 0:3]
# print ("\nbest_alignment: \t" + str (best_alignment ))
# #print ("random_offset: \t\t" + str (random_offset ))
# # Plot an angle histogram of the differences of normal vectors
# from modules import input_output
# from modules.normals import normalize_vector_array, normalize_vector
# import matplotlib.pyplot as plt
# import scipy.spatial
#
#
# def load_example_cloud (folder ):
#
# # # big cloud
# numpy_pointcloud = input_output.conditionalized_load(
# 'clouds/Regions/' + folder + '/ALS16_Cloud_reduced_normals_cleared.asc' )
#
# corresponding_pointcloud = input_output.conditionalized_load (
# 'clouds/Regions/' + folder + '/DSM_Cloud_reduced_normals.asc' )
#
# return numpy_pointcloud, corresponding_pointcloud
#
#
# def einsum_angle_between (vector_array_1, vector_array_2 ):
#
# # diagonal of dot product
# diag = np.clip (np.einsum('ij,ij->i', vector_array_1, vector_array_2 ), -1, 1 )
#
# return np.arccos (diag )
#
#
# def plot_histogram (data, numer_of_bins, maximum ):
# # the histogram of the data
# n, bins, patches = plt.hist(data, numer_of_bins, density=False, range=(0, 180), facecolor='g', alpha=0.75 )
#
# plt.xlabel('angle' )
# plt.ylabel('count' )
# plt.title('Histogram of Angle Differences Yz Houses translated' )
# #plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# plt.axis([0, numer_of_bins, 0, maximum] )
# plt.grid(True )
# plt.show()
#
#
# def get_points_normals_zero (numpy_pointcloud, field_labels_list):
# normals = numpy_pointcloud.get_normals ()
# normals = np.absolute (normals )
#
# sqrt = np.sqrt (normals[:, 0]**2 + normals[:, 1]**2 + normals[:, 2]**2 )
#
# a = np.where (sqrt > 0.5, True, False )
#
# return numpy_pointcloud.points[a, :]
#
#
# # load clouds
# # numpy_pointcloud, corresponding_pointcloud = load_example_cloud ("Yz Houses" )
#
# numpy_pointcloud = input_output.load_ascii_file ("clouds/tmp/fail/normals_fixpoint_2.asc" )
# corresponding_pointcloud = input_output.load_ascii_file ("clouds/tmp/fail/normals_fixpoint_3.asc" )
#
# # numpy_pointcloud.points = get_points_normals_zero (numpy_pointcloud.points, numpy_cloud_field_labels )
# # corresponding_pointcloud.points = get_points_normals_zero (
# # corresponding_pointcloud.points, corresponding_cloud_field_labels )
#
# # translate
# corresponding_pointcloud.points[:, 0:3] += (0.314620971680, -0.019294738770, -0.035737037659 )
#
# # extract normals
# normals_numpy_cloud = numpy_pointcloud.get_normals ()
# normals_corresponding_cloud = corresponding_pointcloud.get_normals ()
#
# # normalize
# normals_numpy_cloud = normalize_vector_array (normals_numpy_cloud )
# normals_corresponding_cloud = normalize_vector_array (normals_corresponding_cloud )
#
# # build a kdtree and query it
# kdtree = scipy.spatial.cKDTree (numpy_pointcloud.points[:, 0:3] )
# distances, correspondences = kdtree.query (corresponding_pointcloud.points[:, 0:3], k=1 )
#
# # get the angle differences between the normal vectors
# angle_differences = einsum_angle_between (normals_numpy_cloud[correspondences, :],
# normals_corresponding_cloud ) * (180/np.pi)
#
# # plot
# plot_histogram (angle_differences, 180, 12000 )
#
# # corresponding_cloud = np.concatenate (
# # (corresponding_cloud, angle_differences.reshape (-1, 1 )), axis=1 )
# # input_output.save_ascii_file (corresponding_cloud,
# # corresponding_cloud_field_labels + ["AngleDifferences"],
# # "clouds/tmp/yz_houses_dim_angles.asc" )
# # einsum behavior
# numpy_cloud = np.array([[1, 0, 0],
# [1, 0, 0],
# [1, 0, 0]] )
#
# numpy_cloud_2 = np.array([[0, 0, 0],
# [1, 1, 0],
# [0, 1, 0]] )
#
# numpy_cloud_2 = normalize_vector_array (numpy_cloud_2 )
#
# print (einsum_angle_between (numpy_cloud, numpy_cloud_2 ))
# # einsum test
# numpy_cloud = np.array([[1, 0, 0],
# [1, 0, 0],
# [1, 0, 0],
# [1, 0, 0]] )
# numpy_cloud_2 = np.array([[1, 0, 0],
# [0, 13, 0],
# [12, 1, 0],
# [0, 1, 1]] )
#
# print (np.dot (numpy_cloud, numpy_cloud_2.T ))
# print (np.einsum('ij,ij->i', numpy_cloud, numpy_cloud_2 )) # dot product with each row in n and p
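#
# # For clarity: with arrays of equal shape, np.einsum('ij,ij->i', a, b ) equals both
# # np.diagonal (np.dot (a, b.T )) and np.sum (a * b, axis=1 ) -- the row-wise dot products --
# # but without building the full N x N matrix (a small sanity check on the arrays above):
# assert np.allclose (np.einsum('ij,ij->i', numpy_cloud, numpy_cloud_2 ),
#                     np.sum (numpy_cloud * numpy_cloud_2, axis=1 ))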
# # delete isolated points without neighbors in corresponding cloud
# from modules import conversions
# from modules import icp
# from modules import input_output
#
#
# #load ALS and DSM cloud
# als14_cloud, als14_field_labels = input_output.conditionalized_load (
# 'clouds/Regions/Everything/ALS14_Cloud_reduced_normals_cleared.asc' )
# dim_cloud, dim_field_labels = input_output.conditionalized_load (
# 'clouds/Regions/Everything/DSM_Cloud_reduced_normals.asc' )
#
# radius = 0.5
# als14_cloud, als14_field_labels, dim_cloud, dim_field_labels = conversions.mask_cloudpoints_without_correspondence (
# als14_cloud, als14_field_labels, dim_cloud, dim_field_labels, radius )
#
# print (icp.icp (als14_cloud, dim_cloud ))
#
# # input_output.save_ascii_file (als14_cloud, als14_field_labels, "clouds/tmp/als14_cloud_" + str(radius ) + ".asc")
# # input_output.save_ascii_file (dim_cloud, dim_field_labels, "clouds/tmp/dim_cloud_" + str(radius ) + ".asc")
# # join saved dictionaries
# from modules import input_output
#
#
# input_output.join_saved_dictionaries (["output_dict_1", "output_dict_2", "output_dict_3"], "output_dict")
# print (str (input_output.load_obj ("output_dict" )).replace (")), ", ")),\n" ))
# # get fields test
# def get_fields (numpy_cloud, field_labels_list, requested_fields ):
#
# # remove any spaces around the labels
# field_labels_list = [label.strip () for label in field_labels_list]
#
# if (requested_fields is not None
# and all(field in field_labels_list for field in requested_fields ) ):
# indices = []
# for field in requested_fields:
# indices.append (field_labels_list.index(field ))
# else:
# raise ValueError ("This Cloud is missing one of the requested fields: "
# + str(requested_fields )
# + ".\nSupplied Cloud fields are: "
# + str(field_labels_list ))
#
# return numpy_cloud[:, indices]
#
#
# numpy_cloud = np.concatenate ((numpy_cloud, numpy_cloud), axis=1 )
# field_labels_list = ["X", "Y", "Z", "A1", "A2", "A3"]
# requested_fields = ["Z", "A1", "A3"]
#
# print (get_fields (numpy_cloud, field_labels_list, requested_fields ))
# # parallelism test
# import math
# import time
# from multiprocessing import Pool
#
#
# def point_distance_cloud_consensus_parallel_wrapper (input):
# # translation is received as additional argument
# (tree_of_numpy_cloud, numpy_cloud, corresponding_cloud, translation, distance_threshold ) = input
#
# # consensus is started with translated corresponding_cloud
# (consensus_count, consensus_vector, consensus_time) = point_distance_cloud_consensus (
# tree_of_numpy_cloud, numpy_cloud, corresponding_cloud+translation, distance_threshold )
#
# # translation is returned alongside the computed values
# return (consensus_count, consensus_vector, consensus_time, translation)
#
#
# in loop:
# if (algorithm == 'distance'):
#
# arguments_list.append (
# [scipy_kdtree, numpy_cloud, corresponding_cloud, translation, distance_threshold] )
#
#
# out of loop:
# # go parallel
# with Pool(processes=None) as p:
# (results_list) = p.map (point_distance_cloud_consensus_parallel_wrapper, arguments_list )
# for (consensus_count, consensus_vector, consensus_time, translation) in results_list:
#
#
# def compute (translation ):
#
# #print (translation )
#
# count = 3
#
# for i in range (5000):
# isprime = True
#
# for x in range(2, int(math.sqrt(count ) + 1 )):
# if count % x == 0:
# isprime = False
# break
#
# # if isprime:
# # print (count )
#
# count += 1
#
# return translation, count, 5
#
#
# step = 0.15
# steps_number = 5
# min = -math.floor (steps_number / 2)
# max = math.ceil (steps_number / 2 )
#
#
# measure = time.time ()
# for x_iterator in range (min, max ):
# for y_iterator in range (min, max ):
# for z_iterator in range (min, max ):
#
# translation = [x_iterator * step,
# y_iterator * step,
# z_iterator * step]
#
# compute (translation )
#
# print ("Plain Time: " + str (time.time () - measure ))
#
# measure_whole = time.time ()
# to_do_list = []
# for x_iterator in range (min, max ):
# for y_iterator in range (min, max ):
# for z_iterator in range (min, max ):
#
# translation = [x_iterator * step,
# y_iterator * step,
# z_iterator * step]
#
# to_do_list.append (translation )
#
# # go parallel
# with Pool(processes=None) as p:
# results_list = p.map (compute, to_do_list )
#
# # for (consensus_count, consensus_vector, consensus_time) in results_list:
# # print (consensus_count )
# # print (consensus_vector )
# # print (consensus_time )
#
# print ("Parallel Complete Time: " + str (time.time () - measure_whole ))
# # save current transformations.reference_translations as dict
# from data import transformations
# from modules import input_output
#
#
#
# input_output.save_obj (transformations.reference_translations, "reference_translations_part_3_dict")
# # test data dictionaries
# from queue_alignment_algorithms import get_reference_data_paths, compare_results
# from modules import input_output
# from data import transformations
#
#
# def an_algorithm (ref_cloud_path, aligned_cloud_path, plot_title ):
#
# dictionary_line = {(ref_cloud_path, aligned_cloud_path):
# ((1337, 0, 0), (1337, 0, 0))}
#
# return dictionary_line
#
#
# def use_algorithm_on_dictionary (reference_dictionary_name, algorithm_function, results_save_name=None ):
# '''
# Uses a dictionary of reference cloud file_paths as keys and a list of corresponding aligned cloud file_paths as
# values
#
# Input:
# file_paths_dictionary: (string) Dictionary with reference_paths as keys and paths of aligned clouds as values
# algorithm_function (function): Function that returns dict {(reference path, aligned_path): (translation, mse)}
# results_save_name (string): Results will be saved as data/results_save_path.pkl. Values may be overwritten.
# '''
#
# # parse the reference values saved in a file
# reference_dictionary = input_output.load_obj (reference_dictionary_name )
# file_paths_dictionary = get_reference_data_paths (reference_dictionary )
#
# # before start, check if files exist
# for key in file_paths_dictionary:
# if (input_output.check_for_file (key ) is False):
# print ("File " + key + " was not found. Aborting.")
# return False
# for aligned_cloud_path in file_paths_dictionary[key]:
# if (input_output.check_for_file (aligned_cloud_path ) is False):
# print ("File " + aligned_cloud_path + " was not found. Aborting.")
# return False
#
# algorithm_results = {} # dictionary
#
# # create a list of tuples from reference and aligned cloud file paths
# for ref_cloud_path in file_paths_dictionary:
# for aligned_cloud_path in file_paths_dictionary[ref_cloud_path]: # multiple aligned clouds possible
#
# folder, reference_file_name = input_output.get_folder_and_file_name (ref_cloud_path)
# folder, aligned_file_name = input_output.get_folder_and_file_name (aligned_cloud_path)
# plot_title = folder + ' ' + aligned_file_name + ' to ' + reference_file_name
#
# # call the algorithm supplied by algorithm_function
# algorithm_results.update (algorithm_function (ref_cloud_path, aligned_cloud_path, plot_title ))
#
# if (results_save_name is not None ):
# input_output.save_obj (algorithm_results, results_save_name)
#
# # prints the values computed along with the ground truth in the dictionary
# compare_results (algorithm_results, reference_dictionary )
#
# return True
#
#
# print ("\n\nComputing Consensus for each cloud pair in reference_translations returns: "
# + str(use_algorithm_on_dictionary ("reference_translations_dict",
# an_algorithm,
# "test_results_dict" )))
#
# print (input_output.load_obj ("test_results_dict"))
# # find the row containing the maximum value of a column
# print (numpy_cloud[np.argmax(numpy_cloud[:, 2]), :])
# # find the row containing a certain subset of values and move it to the end of the array
# numpy_cloud = np.array([[1.1, 2.1, 3.1, 0],
# [1.2, 2.2, 3.2, 0],
# [171.3, 172.3, 3.3, 0],
# [1.4, 2.4, 3.4, 0],
# [0, 0, 3.5, 0],
# [11.6, 2.6, 3.4, 0]] )
#
# best_alignment = [171.3, 172.3, 3.3]
#
# print (numpy_cloud)
# print ()
# best_alignment_index = (numpy_cloud[:, :3] == best_alignment).all(axis=1).nonzero()[0][0]
# best_alignment_row = numpy_cloud[best_alignment_index, :].reshape (1, -1)
# numpy_cloud = np.delete (numpy_cloud, best_alignment_index, axis=0)
# numpy_cloud = np.concatenate ((numpy_cloud, best_alignment_row), axis=0)
#
# print (numpy_cloud)
# # angle speed test for loop and monolith and einsum
# from modules import input_output
# import time
# import numpy.linalg as la
# from scipy.spatial import distance as dist
#
#
# def get_normals (numpy_cloud, field_labels_list ):
#
# # remove any spaces around the labels
# field_labels_list = [label.strip () for label in field_labels_list]
#
# if ('Nx' in field_labels_list
# and 'Ny' in field_labels_list
# and 'Nz' in field_labels_list ):
# indices = []
# indices.append (field_labels_list.index('Nz' ))
# indices.append (field_labels_list.index('Ny' ))
# indices.append (field_labels_list.index('Nx' ))
# else:
#         raise ValueError ("This Cloud is missing one of the required fields: "
#                           "'Nx', 'Ny', 'Nz'. Compute Normals first.")
#
# return numpy_cloud[:, indices]
#
#
# def angle_between (vector_1, vector_2):
# """ Returns the angle in radians between vectors 'vector_1' and 'vector_2' """
#
# res = np.arccos(np.clip(np.dot(vector_1, vector_2), -1.0, 1.0))
#
# return res
#
#
# def alternative_angle_between (vector_array_1, vector_array_2, step=1000 ):
#
#     # prepare results vector with length equal to the number of points
# results = np.zeros ((vector_array_1.shape[0], 1 ))
#
# # np.dot (vector_array_1[i:], vector_array_2.T) computes a gigantic matrix. In order to save RAM space, it has to
# # be done in batches
# for i in range (0, vector_array_1.shape[0], step ):
# if (i + step > vector_array_1.shape[0]):
# results[i:] = np.arccos (
# np.diagonal (
# np.clip (
# np.dot (vector_array_1[i:, :],
# vector_array_2[i:, :].T ), -1, 1 ))).reshape (-1, 1)
# else:
# results[i:i+step] = np.arccos (
# np.diagonal (
# np.clip (
# np.dot (vector_array_1[i:i+step, :],
# vector_array_2[i:i+step, :].T ), -1, 1 ))).reshape (-1, 1)
#
# return results
#
#
# def alternative_angle_between_nan (vector_array_1, vector_array_2, step=1000 ):
#
#     # prepare results vector with length equal to the number of points
# results = np.zeros ((vector_array_1.shape[0], 1 ))
#
# # np.dot (vector_array_1[i:], vector_array_2.T) computes a gigantic matrix. In order to save RAM space, it has to
# # be done in batches
# for i in range (0, vector_array_1.shape[0], step ):
# # the last step, all values until the end of the array
# if (i + step > vector_array_1.shape[0]):
# results[i:] = np.arccos (
# np.diagonal (np.dot (vector_array_1[i:, :],
# vector_array_2[i:, :].T ))).reshape (-1, 1)
# # every other step, taking values in the range of step
# else:
# results[i:i+step] = np.arccos (
# np.diagonal (
# np.dot (vector_array_1[i:i+step, :],
# vector_array_2[i:i+step, :].T ))).reshape (-1, 1)
#
# # replace nan values with 90 degrees angle difference
# return np.where (np.isnan (results), 1.57079632679, results )
#
#
# def alternative_angle_between_noclip (vector_array_1, vector_array_2, step=1000 ):
#
#     # prepare results vector with length equal to the number of points
# results = np.zeros ((vector_array_1.shape[0], 1 ))
#
# # np.dot (vector_array_1[i:], vector_array_2.T) computes a gigantic matrix. In order to save RAM space, it has to
# # be done in batches
# for i in range (0, vector_array_1.shape[0], step ):
# if (i + step > vector_array_1.shape[0]):
# results[i:] = np.arccos (
# np.diagonal (np.dot (vector_array_1[i:, :],
# vector_array_2[i:, :].T ))).reshape (-1, 1)
# else:
# results[i:i+step] = np.arccos (
# np.diagonal (np.dot (vector_array_1[i:i+step, :],
# vector_array_2[i:i+step, :].T ))).reshape (-1, 1)
#
# return results
#
#
# def simple_loop_angle (vector_array_1, vector_array_2 ):
#
# results = np.zeros ((vector_array_1.shape[0], 1 ))
# for i in range (vector_array_1.shape[0]):
# results[i] = vector_array_1[i, :].dot (vector_array_2[i, :] )
#
# return np.arccos (np.clip (results, -1, 1 ))
#
#
# def einsum_angle_between (vector_array_1, vector_array_2 ):
#
# # diagonal of dot product
# diag = np.clip (np.einsum('ij,ij->i', vector_array_1, vector_array_2 ), -1, 1 )
#
# return np.arccos (diag )
#
#
# def load_example_cloud ():
#
# # # big cloud
# numpy_cloud, numpy_cloud_field_labels = input_output.conditionalized_load(
# 'clouds/Regions/Yz Houses/ALS16_Cloud_reduced_normals_cleared.asc' )
#
# corresponding_cloud, corresponding_cloud_field_labels = input_output.conditionalized_load (
# 'clouds/Regions/Yz Houses/DSM_Cloud_reduced_normals.asc' )
#
# return numpy_cloud, numpy_cloud_field_labels, corresponding_cloud, corresponding_cloud_field_labels
#
#
# # ### prepare ####
# numpy_cloud, numpy_cloud_field_labels, corresponding_cloud, corresponding_cloud_field_labels \
# = load_example_cloud ()
#
# normals_numpy_cloud = get_normals (numpy_cloud, numpy_cloud_field_labels )
# normals_corresponding_cloud = get_normals (corresponding_cloud, corresponding_cloud_field_labels )
#
#
# step = 58
# print ("Step: " + str(step ))
#
# # Step: 40
# # Loop Process Time: 2.312503254413605
# # Monolith Process Time: 0.15936983108520508
# # No Clip Monolith Process Time: 0.1318157744407654
# # NAN Monolith Process Time: 0.1287021803855896
#
# # Step: 50
# # Loop Process Time: 2.491855809688568
# # Monolith Process Time: 0.16239188432693483
# # No Clip Monolith Process Time: 0.1278723359107971
# # NAN Monolith Process Time: 0.12739877462387084
#
# # Step: 56
# # Loop Process Time: 2.857189098993937
# # Monolith Process Time: 0.16701097488403321
# # No Clip Monolith Process Time: 0.13585476875305175
# # NAN Monolith Process Time: 0.14023882548014324
#
# # Step: 58
# # Loop Process Time: 2.310372988382975
# # Monolith Process Time: 0.1322481155395508
# # No Clip Monolith Process Time: 0.10442533493041992
# # NAN Monolith Process Time: 0.10448430379231771
#
# # Step: 60
# # Loop Process Time: 2.739641170501709
# # Monolith Process Time: 0.16630157709121704
# # No Clip Monolith Process Time: 0.13103942155838014
# # NAN Monolith Process Time: 0.1315992569923401
#
# # Step: 62
# # Loop Process Time: 2.4043121496836344
# # Monolith Process Time: 0.1526663939158122
# # No Clip Monolith Process Time: 0.12144707043965658
# # NAN Monolith Process Time: 0.12466743787129721
#
#
# monolith_time = 0
# monolith_nc_time = 0
# monolith_nan_time = 0
# loop_time = 0
# simple_loop_time = 0
# einsum_time = 0
# times = 25
# for i in range (times):
#
# measure = time.time ()
# # slow looped process
# results_loop = normals_numpy_cloud.shape[0] * [None]
# for index, (vec1, vec2) in enumerate(
# zip (normals_numpy_cloud, normals_corresponding_cloud[:normals_numpy_cloud.shape[0], :] )):
# results_loop[index] = (angle_between (vec1, vec2 ) )
# loop_time += time.time () - measure
#
# measure = time.time ()
# results_monolith = alternative_angle_between (
# normals_numpy_cloud, normals_corresponding_cloud[:normals_numpy_cloud.shape[0], :], step )
# monolith_time += time.time () - measure
#
# measure = time.time ()
# results_nc_monolith = alternative_angle_between_noclip (
# normals_numpy_cloud, normals_corresponding_cloud[:normals_numpy_cloud.shape[0], :], step )
# monolith_nc_time += time.time () - measure
#
# measure = time.time ()
# results_nan_monolith = alternative_angle_between_nan (
# normals_numpy_cloud, normals_corresponding_cloud[:normals_numpy_cloud.shape[0], :], step )
# monolith_nan_time += time.time () - measure
#
# measure = time.time ()
# results_simple_loop = simple_loop_angle (
# normals_numpy_cloud, normals_corresponding_cloud[:normals_numpy_cloud.shape[0], :] )
# simple_loop_time += time.time () - measure
#
# measure = time.time ()
#     results_einsum = einsum_angle_between (
# normals_numpy_cloud, normals_corresponding_cloud[:normals_numpy_cloud.shape[0], :] )
# einsum_time += time.time () - measure
#
# #
# monolith_time = monolith_time / times
# monolith_nc_time = monolith_nc_time / times
# monolith_nan_time = monolith_nan_time / times
# loop_time = loop_time / times
# simple_loop_time = simple_loop_time / times
# einsum_time = einsum_time / times
#
#
# print ("\nStep: " + str(step ))
# print ("Loop Process Time: " + str(loop_time ))
# print ("Monolith Process Time: " + str(monolith_time ))
# print ("No Clip Monolith Process Time: " + str(monolith_nc_time ))
# print ("NAN Monolith Process Time: " + str(monolith_nan_time ))
# print ("Simple Loop Time: " + str(simple_loop_time ))
# print ("Einsum Time: " + str(einsum_time ))
# #
# print ("\n\nloop:\n" + str(results_loop[:10]))
# print ("monolith:\n" + str(results_monolith[:10].T))
# print ("noclip monolith:\n" + str(results_nc_monolith[:10].T))
# print ("NAN monolith:\n" + str(results_nan_monolith[:10].T))
# print ("Simple Loop:\n" + str(results_simple_loop[:10].T))
# print ("Einsum Result:\n" + str(results_einsum[:10].T))
# # how to append to a list
# list1 = [1, 2, 3]
# list2 = [4, 5, 6]
# list1.append (list2 )
#
# print (list1)
#
# list1 = [1, 2, 3]
# list1 = list1 + list2
#
# print (list1)
# # Speed test of array-wise normal vector angle_between computation
# from modules import normals
# import time
#
#
# def normalize_vector_array (vector_array ):
# norms = np.apply_along_axis(np.linalg.norm, 1, vector_array )
# return vector_array / norms.reshape (-1, 1 )
#
#
# def angle_between(vector_1, vector_2):
# """ Returns the angle in radians between vectors 'vector_1' and 'vector_2' """
#
# if (vector_1 is None or vector_2 is None or None in vector_1 or None in vector_2 ):
# return None
#
# vector_1 = normals.normalize_vector (vector_1 )
# vector_2 = normals.normalize_vector (vector_2 )
#
# return np.arccos(np.clip(np.dot(vector_1, vector_2), -1.0, 1.0))
#
#
# vector_array_1 = np.random.uniform (0, 1, size=(10000, 3 ))
# vector_array_2 = np.random.uniform (0, 1, size=(10000, 3 ))
#
# vector_array_1 = normalize_vector_array (vector_array_1 )
# vector_array_2 = normalize_vector_array (vector_array_2 )
#
# # Pure Numpy Process
# start = time.time()
# arccos = np.arccos (vector_array_1.dot (vector_array_2.T)[:, 0] )
# end1 = time.time() - start
#
# # Looped Numpy Process
# start = time.time()
# results = len (vector_array_1 ) * [None]
# for index, (vec1, vec2) in enumerate(zip (vector_array_1, vector_array_2 )):
# results[index] = (angle_between (vec1, vec2 ) )
# end2 = time.time() - start
#
# print ("Numpy Time = " + str (end1 ))
# print ("Standard Time = " + str (end2 ))
#
# print ("arccos: " + str (arccos ))
# print ("results: " + str (results ))
# # function as argument
# def something ():
# return "Something"
#
#
# def function_taking_function (some_python_function ):
# print (str(some_python_function ()))
#
#
# function_taking_function (something )
# # test normal calculation
# from modules import normals
# from modules import input_output
# import sys
# #
# # # ply files
# # # numpy_cloud_1 = input_output.load_ply_file ('clouds/laserscanning/', 'plane1.ply') # 3806 points
# # #numpy_cloud_2 = input_output.load_ply_file ('clouds/laserscanning/', 'plane2.ply') # 3806 points
# #
# # # las files
# # #numpy_cloud_1 = input_output.load_las_file ('clouds/laserscanning/plane1.las') # 3806 points
# # #numpy_cloud_2 = input_output.load_las_file ('clouds/laserscanning/plane2.las') # 3806 points
# #
# # simple plane
# # numpy_cloud_1 = np.array ([[-1, 0, 0], # +x
# # [2, 0, 0], # -x
# #                            [0, 2, 0],
# # [2, 0, 200]]) # +y
#
# numpy_cloud_1 = np.array ([[-1, 0, 0], # +x
# [2, 0, 0], # -x
# [0, 2, 0],
# [0, 3, 0],
# [0, 4, 0],
# [0, 5, 0],
# [-1, 0, 200], # +x
# [2, 0, 200], # -x
# [0, 2, 200],
# [0, 3, 200],
# [0, 4, 200],
# [0, 5, 200]]) # +y
#
# # numpy_cloud_1 = np.random.uniform (-10, 10, (300, 3))
# #
# # 1st cloud
# normal_vector, consensus_points, _, _, _ = \
# normals.ransac_plane_estimation (numpy_cloud_1, 0.1, fixed_point=numpy_cloud_1[0, :], w=0.8 )
# print ('\nRANSAC, Cloud 1:\nnormal_vector: ' + str(normal_vector ))
# print ('consensus_points:\n' + str(consensus_points ) + '\n')
# #
# normal_vector, sigma, mass_center, _ = normals.PCA (consensus_points )
# print ('\nPCA, Cloud 1:\nnormal_vector: ' + str(normal_vector ))
# print ('sigma: ' + str(sigma ))
# print ('mass_center: ' + str(mass_center ) + '\n')
# corresponding_cloud = np.array([[1.1, 0, 0],
# [2.2, 0, 0],
# [3.3, 0, 0],
# [4.4, 0, 0],
# [5.5, 0, 0],
# [6.6, 0, 0]] )
#
# consensus_count, consensus_vector = cloud_consensus (numpy_cloud, corresponding_cloud, 0.4 )
# print ("consensus_count: " + str(consensus_count ))
# print ("consensus_vector:\n" + str(consensus_vector ))
# print (vector_array_distance (numpy_cloud, corresponding_cloud ))
# # reshape arrays to concat them
#import math
#
#
# an_array = np.array ((1, 2, 3)).reshape (1, 3)
#
# print ("numpy_cloud.shape: " + str(numpy_cloud.shape ))
# print ("an_array.shape: " + str(an_array.shape ))
# print (
# np.concatenate ( (numpy_cloud, an_array, an_array), axis=0 )
# )
# # sort a list of tuples
# list = [('folder/folder/a', 'folder/folder/b'),
# ('folder/folder/b', 'folder/folder/a'),
# ('folder/folder/a', 'folder/folder/c'),
# ('folder/folder/c', 'folder/folder/a')]
#
# print (sorted (list))
# # clear up the wicked output of sklearn's kd_tree query_radius
# import sklearn.neighbors.kd_tree
#
#
# numpy_cloud = np.array([[1.1, 0, 0],
# [1.2, 0, 0],
# [1.3, 0, 0],
# [1.4, 0, 0],
# [1.5, 0, 0],
# [1.6, 0, 0]] )
#
# # build a kdtree
# tree = sklearn.neighbors.kd_tree.KDTree (numpy_cloud, leaf_size=40, metric='euclidean')
# query_radius = 0.3 # m
#
# for index, point in enumerate (numpy_cloud ):
#
# thing = tree.query_radius(point.reshape (1, -1), r=query_radius )
# thing = [value2 for value in thing for value2 in value]
#
# print (thing)
# import random
#
#
# numpy_cloud = np.concatenate ((numpy_cloud, numpy_cloud, numpy_cloud, numpy_cloud), axis=0 )
# sample_factor = 6
#
# # sample deterministic
# print ("a")
# print (numpy_cloud[::sample_factor])
#
# # sample random
# indices = random.sample(range(0, numpy_cloud.shape[0] ), int (numpy_cloud.shape[0] / sample_factor ))
# print ("\nint: " + str(int (numpy_cloud.shape[0] / sample_factor )))
# print (numpy_cloud[indices, :] )
# "{:.8f}".format (value)
# # delete everything that has more or equal to 20 in the 8th row _cleared:
# numpy_cloud = numpy_cloud[numpy_cloud[:, 7] < 20]
# cloud_altered = True
# def display_small_cloud (cloud ):
# fig = pyplot.figure()
# ax = Axes3D(fig)
#
# for i in range(0, cloud.size):
# ax.scatter(cloud[i][0], cloud[i][1], cloud[i][2])
#
# pyplot.show()
# # reduce and compute normals for a cloud specified by file_path
# import numpy as np
# import sklearn.neighbors # kdtree
# import normals
# #import math
# import input_output
# from os.path import splitext
# from conversions import reduce_cloud
# import psutil
#
# file_path = "clouds/Regions/Everything/DSM_Cloud_333165_59950 - Cloud.las"
# filename, file_extension = splitext(file_path )
#
# field_labels_list = ['X', 'Y', 'Z']
# previous_folder = ""
#
# # load the file, then reduce it
# if ("DSM_Cloud" in file_path):
#
# # Load DIM cloud
# numpy_cloud = input_output.load_las_file (file_path, dtype="dim" )
# numpy_cloud[:, 3:6] = numpy_cloud[:, 3:6] / 65535.0 # rgb short int to float
# field_labels_list.append ('Rf ' 'Gf ' 'Bf ' 'Classification ')
# else:
# # Load ALS cloud
# numpy_cloud = input_output.load_las_file (file_path, dtype="als")
# field_labels_list.append('Intensity '
# 'Number_of_Returns '
# 'Return_Number '
# 'Point_Source_ID '
# 'Classification ')
#
# print ("------------------------------------------------\ncloud successfully loaded!")
#
# # all clouds in one folder should get the same trafo
# if (len(file_path.split ('/')) == 1):
# current_folder = file_path
# else:
# current_folder = file_path.split ('/')[-2]
# if (current_folder != previous_folder):
# min_x_coordinate, min_y_coordinate = reduce_cloud (numpy_cloud, return_transformation=True )[1:]
# previous_folder = current_folder
#
# # reduce
# numpy_cloud[:, 0] = numpy_cloud[:, 0] - min_x_coordinate
# numpy_cloud[:, 1] = numpy_cloud[:, 1] - min_y_coordinate
#
# print ("------------------------------------------------\ncloud successfully reduced!")
# # compute normals
# # build a kdtree
# tree = sklearn.neighbors.kd_tree.KDTree (numpy_cloud, leaf_size=40, metric='euclidean')
#
# # set radius for neighbor search
# query_radius = 5.0 # m
# if ("DSM_Cloud" in file_path): # DIM clouds are roughly 6 times more dense than ALS clouds
# query_radius = query_radius / 6
#
# # kdtree radius search
# list_of_point_indices = tree.query_radius(numpy_cloud, r=query_radius ) # this floods memory
# additional_values = np.zeros ((numpy_cloud.shape[0], 4 ))
#
# # compute normals for each point
# for index, point_neighbor_indices in enumerate (list_of_point_indices ):
#
# if (psutil.virtual_memory().percent > 95.0):
#         print ("!!! Memory Usage too high: " + str(psutil.virtual_memory().percent) + "%. Breaking loop.")
#         break
#
# # you can't estimate a cloud with less than three neighbors
# if (len (point_neighbor_indices) < 3 ):
# continue
#
# # do a Principal Component Analysis with the plane points obtained by a RANSAC plane estimation
# normal_vector, sigma, mass_center = normals.PCA (
# normals.ransac_plane_estimation (numpy_cloud[point_neighbor_indices, :], # point neighbors
# threshold=0.3, # max point distance from the plane
# w=0.6, # probability for the point to be an inlier
# z=0.90) # desired probability that plane is found
# [1] ) # only use the second return value, the points
#
# # join the normal_vector and sigma value to a 4x1 array and write them to the corresponding position
# additional_values[index, :] = np.append (normal_vector, sigma)
#
#
# print ("------------------------------------------------\ncloud successfully norm norm!")
#
# # add the newly computed values to the cloud
# numpy_cloud = np.concatenate ((numpy_cloud, additional_values), axis=1)
# field_labels_list.append('Nx ' 'Ny ' 'Nz ' 'Sigma ' )
#
# print ("------------------------------------------------\nnorm norm added!")
#
# # save the cloud again
# input_output.save_ascii_file (numpy_cloud, field_labels_list, filename + "_reduced_normals.asc" )
#
# print ("Done.")
# # misc tests
# def something ():
# return (1, 2, 3, 4)
#
#
# print (something ()[1:])
#
# file_path = "This/is/a/very/long/path.las"
# #file_path = "Short/Path.las"
# #file_path = "Path.las"
# #file_path = "This / is //\ a_wierd/\\ path.las"
#
# print (file_path)
# print (len(file_path.split ('/')))
# print (file_path.split ('/')[-2])
# field_names_list = ["x", "y", "z", "i" ]
#
# field_names_list = ['{0} '.format(name) for name in field_names_list]
# str1 = ''.join(field_names_list)
# leading_line = "//" + str1
#
# print (leading_line)
# import numpy as np
# import random
#
#
# numpy_cloud = np.array([[1.1, 2.1, 3.1],
# [1.2, 2.2, 3.2],
# [1.3, 2.3, 3.3],
# [1.4, 2.4, 3.4],
# [1.5, 2.5, 3.5],
# [1.6, 2.6, 3.6]] )
#
# indices = random.sample(range(0, numpy_cloud.shape[0] ), 6 )
# print ('indices: ' + str (indices ))
#
# for idx in indices:
# print (numpy_cloud[idx, :] )
# numpy_cloud = np.array([[1.1, 2.1, 3.1],
# [1.2, 2.2, 3.2],
# [1.3, 2.3, 3.3],
# [1.4, 2.4, 3.4],
# [1.5, 2.5, 3.5],
# [1.6, 2.6, 3.6]] )
#
# cloud = [[1.1, 2.1, 3.1],
# [1.2, 2.2, 3.2],
# [1.3, 2.3, 3.3],
# [1.4, 2.4, 3.4],
# [1.5, 2.5, 3.5],
# [1.6, 2.6, 3.6]]
#
# cloud.append (numpy_cloud [2, :].tolist ())
#
#
# win_path = '/some/path/containing/a/Windows Directory'
# linux_path = '/some/path/containing/a/linux_directory'
# print (win_path.replace (' ', '\ ' ))
# import numpy as np
# import input_output
#
# min_x = 1.2
# max_x = 1.5
# min_y = 2.3
# max_y = 2.6
# numpy_cloud = np.array([[1.1, 2.1, 3.1],
# [1.2, 2.2, 3.2],
# [1.3, 2.3, 3.3],
# [1.4, 2.4, 3.4],
# [1.5, 2.5, 3.5],
# [1.6, 2.6, 3.6]] )
#
# subset_cloud = np.array([0, 0 ,0])
#
# # for point in numpy_cloud:
# # if (point[0] > min_x
# # and point[0] < max_x
# # and point[1] > min_y
# # and point[1] < max_y):
# # print ("found point " + str(point ))
# # subset_cloud.append (point)
# # print ("point: " + str (point ))
# # print ("point[0]: " + str (point[0] ))
#
#
# subset_cloud = [point for point in numpy_cloud if (point[0] > min_x
# and point[0] < max_x
# and point[1] > min_y
# and point[1] < max_y)]
#
# print ('subset_cloud.shape: ' + str (len (subset_cloud )))
# print ('subset_cloud: ' + str (subset_cloud ))
#
# input_output.save_ply_file(subset_cloud, '', 'test.ply')
# string = '/this/is/a/long/path/and/a/file.txt'
# print (string.rsplit ('/', 1 ) )
#
# from __future__ import print_function
# import numpy as np
# import pcl
#
# points_1 = np.array([[0, 0, 0],
# [1, 0, 0],
# [0, 1, 0],
# [1, 1, 0]], dtype=np.float32)
# points_2 = np.array([[0, 0, 0.2],
# [1, 0, 0],
# [0, 1, 0],
# [1.1, 1, 0.5]], dtype=np.float32)
#
# pc_1 = pcl.PointCloud(points_1)
# pc_2 = pcl.PointCloud(points_2)
#
# #
# kd = pc_1.make_kdtree_flann()
#
# print('pc_1:')
# print(points_1)
# print('\npc_2:')
# print(points_2)
# print('\n')
#
# # find the single closest points to each point in point cloud 2
# # (and the sqr distances)
# indices, sqr_distances = kd.nearest_k_search_for_cloud(pc_2, 1)
# for i in range(pc_1.size):
# print('index of the closest point in pc_1 to point %d in pc_2 is %d' % (i, indices[i, 0]))
# print('the squared distance between these two points is %f' % sqr_distances[i, 0])
# #################################################################
# print ("\nExample 1: Input Cloud")
#
# # define
# input_cloud = np.array([[1.1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
# #input_cloud = np.reshape (input_cloud, (-1, 1))
#
#
# # check
# print ('Size: ' + str(input_cloud.size ))
# print ('Shape: ' + str(input_cloud.shape ))
# print ('Cloud:\n' + str(input_cloud ))
# def normalize_vector (vector ):
# '''
#
# '''
# # check if vector is a matrix
# if (len (vector.shape ) > 1 ):
# print ("In normalize_vector: Vector is out of shape.")
# return vector
#
# vector_magnitude = 0
# for value in vector:
# vector_magnitude = vector_magnitude + np.float_power (value, 2 )
# vector_magnitude = np.sqrt (vector_magnitude )
#
# return vector / vector_magnitude
#
#
# vector = np.array ([40, 10, 0], float)
#
# print ('Vector: ' + str(vector ))
# print ('Vector Norm: ' + str(normalize_vector (vector )))
# #eigenvalues, eigenvectors = np.linalg.eig(input_cloud )
# eigenvalues = np.zeros (3 )
# eigenvectors = np.zeros ((3, 3 ))
#
# evals = np.array ([2, 3, 1] )
# evecs = np.array (([3,2,1], [6,5,4], [9,8,7] ))
#
# print ('Before:')
# print ('Values: ' + str(evals ))
# print ('Vectors: \n' + str(evecs ))
#
# # sort them
# indices = np.argsort (-evals )
# for loop_count, index in enumerate(indices ):
# eigenvalues[loop_count] = evals[index]
# eigenvectors[:, loop_count] = evecs[:, index]
#
# print ('After:')
# print ('Values: ' + str(eigenvalues ))
# print ('Vectors: \n' + str(eigenvectors ))
# # change
# input_cloud = np.concatenate ((input_cloud, input_cloud, input_cloud, input_cloud), axis = -1)
#
# print (type(input_cloud ))
# print (type (pcl.PointCloud ( )))
# print (type (pcl.PointCloud_PointXYZI ( )))
# print ('\n')
#
# input_cloud = np.subtract (input_cloud[:, 0:3], np.array([0.1, 0.5, 2]))
|
[
"numpy.array",
"random.seed",
"numpy.random.seed"
] |
[((747, 767), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (761, 767), True, 'import numpy as np\n'), ((771, 788), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (782, 788), False, 'import random\n'), ((825, 942), 'numpy.array', 'np.array', (['[[1.1, 2.1, 3.1], [1.2, 2.2, 3.2], [1.3, 2.3, 3.3], [1.4, 2.4, 3.4], [1.5, \n 2.5, 3.5], [1.6, 2.6, 3.6]]'], {}), '([[1.1, 2.1, 3.1], [1.2, 2.2, 3.2], [1.3, 2.3, 3.3], [1.4, 2.4, 3.4\n ], [1.5, 2.5, 3.5], [1.6, 2.6, 3.6]])\n', (833, 942), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Interpolated Model
------------------
Model interpolated from some surrogate model
"""
import numpy as np
import os
from scipy import interpolate
from sklearn.gaussian_process import GaussianProcessRegressor
from .model import model_base
class interpolated(model_base):
def __init__(self, name, param_names, bands, weight=1):
model_base.__init__(self, name, param_names, bands, weight)
fname = os.environ["EM_PE_INSTALL_DIR"] + "/Data/" + self.name + ".npz"
f = np.load(fname)
self.t_interp = f["arr_0"]
x = f["arr_1"]
lc_arr = f["arr_2"]
self.gp_dict = {}
for i in range(len(self.bands)):
band = self.bands[i]
y = lc_arr[i]
self.gp_dict[band] = []
for j in range(len(self.t_interp)):
gp = GaussianProcessRegressor()
gp.fit(x, y[:,j])
self.gp_dict[band].append(gp)
def evaluate(self, tvec_days, band):
x = np.empty((1, len(self.param_names)))
gp_list = self.gp_dict[band]
for i in range(len(self.param_names)):
x[0][i] = self.params[self.param_names[i]]
y = np.empty(len(self.t_interp))
for i in range(len(self.t_interp)):
gp = gp_list[i]
y[i] = gp.predict(x)
f = interpolate.interp1d(self.t_interp, y, fill_value="extrapolate")
return f(tvec_days), 0
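
# A minimal sketch of the .npz layout this class expects (hypothetical shapes and file name,
# inferred only from __init__ above): arr_0 holds the interpolation times, arr_1 the surrogate's
# training parameters, and arr_2 one light-curve array per band, indexed as [band][sample, time].
#
#     t_interp = np.linspace(0.1, 10.0, 50)            # arr_0: [n_times]
#     x = np.random.uniform(0, 1, size=(200, 3))       # arr_1: [n_samples, n_params]
#     lc_arr = np.random.normal(size=(2, 200, 50))     # arr_2: [n_bands, n_samples, n_times]
#     np.savez("my_model.npz", t_interp, x, lc_arr)    # positional args are saved as arr_0, arr_1, arr_2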
|
[
"scipy.interpolate.interp1d",
"numpy.load",
"sklearn.gaussian_process.GaussianProcessRegressor"
] |
[((520, 534), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (527, 534), True, 'import numpy as np\n'), ((1347, 1411), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.t_interp', 'y'], {'fill_value': '"""extrapolate"""'}), "(self.t_interp, y, fill_value='extrapolate')\n", (1367, 1411), False, 'from scipy import interpolate\n'), ((852, 878), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (876, 878), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n')]
|
import numpy as np
from scipy import linalg
class GaussianProcess:
def compute_rho_corr_func_point(self, a, b, this_rho):
"""Compute correlation between two points a and b."""
corr = np.prod(this_rho**(4 * (a - b)**2))
return corr
def compute_rho_corr_func(self, a, b, this_rho):
"""Compute rho correlation function between two vectors a and b.
Returns kernel matrix [len(a), len(b)]."""
corr_matrix = [self.compute_rho_corr_func_point(a[i], b[j], this_rho)
for i in range(a.shape[0]) for j in range(b.shape[0])]
return np.array(corr_matrix).reshape(a.shape[0], -1)
def __init__(self, x, y, cov_n, prec_f, rho, compute_lnlike=False):
"""Set up covariance matrix and pre-compute its Cholesky decomposition.
Parameters
----------
x: design points [N_data, N_dim_input]
y: design values [N_data, N_output]
cov_n: covariance of y [N_output*N_data, N_output*N_data]
prec_f: precision of the GP
        rho: GP correlation length [N_output, N_dim_input]
compute_lnlike: (optional, default to False) marginal likelihood
Returns
-------
None
"""
self.N_data = len(x)
self.N_dim_input = x.shape[1]
self.N_output = y.shape[1]
# Check dimensions
if self.N_data!=len(y):
raise TypeError("len(design points) %d must match len(design values) %d"%(self.N_data, len(y)))
if len(prec_f)!=self.N_output:
raise TypeError("len(prec_f) %d must match number of outputs %d"%(len(prec_f), self.N_output))
if cov_n.shape!=(self.N_output*self.N_data, self.N_output*self.N_data):
raise TypeError("Shape of data cov mat (%s,%s) must be (%s,%s)."%(
cov_n.shape[0], cov_n.shape[1], self.N_output*self.N_data, self.N_output*self.N_data))
if rho.shape!=(self.N_output, self.N_dim_input):
raise TypeError("Shape of correlation lengths (%d,%d) must be (%d,%d)"%(
                rho.shape[0], rho.shape[1], self.N_output, self.N_dim_input))
self.x = x
self.corr_rho = rho
self.prec_f = prec_f
self.y_flat = y.flatten(order='F')
# Correlation matrix
self.corrmat = np.zeros((self.N_output*self.N_data, self.N_output*self.N_data))
for i in range(self.N_output):
self.corrmat[i*self.N_data:(i+1)*self.N_data, i*self.N_data:(i+1)*self.N_data] = self.compute_rho_corr_func(x, x, self.corr_rho[i])/self.prec_f[i]
try:
self.cholesky_factor = linalg.cho_factor(self.corrmat + cov_n)
except:
print("Could not compute Cholesky decomposition")
return
self.Krig_basis = linalg.cho_solve(self.cholesky_factor, self.y_flat)
if compute_lnlike:
chi_squared = np.matmul(self.y_flat.T, self.Krig_basis)
ln_corrmat_det = 2 * np.sum(np.log(np.diag(self.cholesky_factor[0])))
self.lnlike = -.5 * chi_squared - .5 * ln_corrmat_det
def predict(self, x_new):
"""
Parameters: evaluation points [N_dim_input]
        Returns: (mean, covariance)
"""
if len(x_new)!=self.N_dim_input:
raise TypeError("Evaluation points %s needs to be shape %d"%(len(x_new), self.N_dim_input))
        # Correlation with design input [N_output, N_output*N_data]
corr_xnew_x = np.zeros((self.N_output, self.N_output*self.N_data))
for i in range(self.N_output):
corr_xnew_x[i,i*self.N_data:(i+1)*self.N_data] = [self.compute_rho_corr_func_point(x_new, self.x[j], self.corr_rho[i])
for j in range(self.N_data)]
corr_xnew_x/= self.prec_f[:,None]
# Mean prediction
eval_mean = np.dot(corr_xnew_x, self.Krig_basis)
# Variance
v = linalg.cho_solve(self.cholesky_factor, corr_xnew_x.T)
eval_covmat = np.diag(1./self.prec_f) - np.dot(corr_xnew_x, v)
return eval_mean, eval_covmat
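
# A minimal usage sketch (hypothetical values, chosen only to satisfy the dimension checks in
# __init__ above: N_data=5, N_dim_input=2, N_output=1):
#
#     x = np.random.uniform(0, 1, size=(5, 2))    # design points [N_data, N_dim_input]
#     y = np.sin(x[:, :1] + x[:, 1:])              # design values [N_data, N_output]
#     cov_n = 1e-6 * np.eye(5)                     # data covariance [N_output*N_data, N_output*N_data]
#     prec_f = np.array([1.0])                     # one GP precision per output
#     rho = np.full((1, 2), 0.9)                   # correlation lengths [N_output, N_dim_input]
#
#     gp = GaussianProcess(x, y, cov_n, prec_f, rho, compute_lnlike=True)
#     mean, covariance = gp.predict(np.array([0.5, 0.5]))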
|
[
"numpy.diag",
"scipy.linalg.cho_factor",
"scipy.linalg.cho_solve",
"numpy.zeros",
"numpy.array",
"numpy.matmul",
"numpy.dot",
"numpy.prod"
] |
[((204, 243), 'numpy.prod', 'np.prod', (['(this_rho ** (4 * (a - b) ** 2))'], {}), '(this_rho ** (4 * (a - b) ** 2))\n', (211, 243), True, 'import numpy as np\n'), ((2349, 2417), 'numpy.zeros', 'np.zeros', (['(self.N_output * self.N_data, self.N_output * self.N_data)'], {}), '((self.N_output * self.N_data, self.N_output * self.N_data))\n', (2357, 2417), True, 'import numpy as np\n'), ((2823, 2874), 'scipy.linalg.cho_solve', 'linalg.cho_solve', (['self.cholesky_factor', 'self.y_flat'], {}), '(self.cholesky_factor, self.y_flat)\n', (2839, 2874), False, 'from scipy import linalg\n'), ((3489, 3543), 'numpy.zeros', 'np.zeros', (['(self.N_output, self.N_output * self.N_data)'], {}), '((self.N_output, self.N_output * self.N_data))\n', (3497, 3543), True, 'import numpy as np\n'), ((3892, 3928), 'numpy.dot', 'np.dot', (['corr_xnew_x', 'self.Krig_basis'], {}), '(corr_xnew_x, self.Krig_basis)\n', (3898, 3928), True, 'import numpy as np\n'), ((3961, 4014), 'scipy.linalg.cho_solve', 'linalg.cho_solve', (['self.cholesky_factor', 'corr_xnew_x.T'], {}), '(self.cholesky_factor, corr_xnew_x.T)\n', (3977, 4014), False, 'from scipy import linalg\n'), ((2660, 2699), 'scipy.linalg.cho_factor', 'linalg.cho_factor', (['(self.corrmat + cov_n)'], {}), '(self.corrmat + cov_n)\n', (2677, 2699), False, 'from scipy import linalg\n'), ((2929, 2970), 'numpy.matmul', 'np.matmul', (['self.y_flat.T', 'self.Krig_basis'], {}), '(self.y_flat.T, self.Krig_basis)\n', (2938, 2970), True, 'import numpy as np\n'), ((4037, 4063), 'numpy.diag', 'np.diag', (['(1.0 / self.prec_f)'], {}), '(1.0 / self.prec_f)\n', (4044, 4063), True, 'import numpy as np\n'), ((4063, 4085), 'numpy.dot', 'np.dot', (['corr_xnew_x', 'v'], {}), '(corr_xnew_x, v)\n', (4069, 4085), True, 'import numpy as np\n'), ((599, 620), 'numpy.array', 'np.array', (['corr_matrix'], {}), '(corr_matrix)\n', (607, 620), True, 'import numpy as np\n'), ((3018, 3050), 'numpy.diag', 'np.diag', (['self.cholesky_factor[0]'], {}), '(self.cholesky_factor[0])\n', (3025, 3050), True, 'import numpy as np\n')]
|
import gym
import numpy as np
from gym import Wrapper
def make_non_absorbing(observation):
return np.concatenate([observation, [0.0]], -1)
class AbsorbingStatesWrapper(Wrapper):
def __init__(self, env):
super().__init__(env)
low = env.observation_space.low
high = env.observation_space.high
self._absorbing_state = np.concatenate([np.zeros_like(low), [1.0]], 0)
low = np.concatenate([low, [0]], 0)
high = np.concatenate([high, [1]], 0)
self.observation_space = gym.spaces.Box(
low=low, high=high, dtype=env.observation_space.dtype)
def reset(self, **kwargs):
self._done = False
self._absorbing = False
self._info = {}
return make_non_absorbing(self.env.reset(**kwargs))
def step(self, action):
if not self._done:
observation, reward, done, info = self.env.step(action)
observation = make_non_absorbing(observation)
self._done = done
self._info = info
truncated_done = 'TimeLimit.truncated' in info
return observation, reward, truncated_done, info
else:
if not self._absorbing:
self._absorbing = True
return self._absorbing_state, 0.0, False, self._info
else:
return self._absorbing_state, 0.0, True, self._info
if __name__ == '__main__':
env = gym.make('Hopper-v2')
env = AbsorbingStatesWrapper(env)
env.reset()
done = False
while not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
print(obs, done)
|
[
"gym.spaces.Box",
"numpy.zeros_like",
"gym.make",
"numpy.concatenate"
] |
[((104, 144), 'numpy.concatenate', 'np.concatenate', (['[observation, [0.0]]', '(-1)'], {}), '([observation, [0.0]], -1)\n', (118, 144), True, 'import numpy as np\n'), ((1434, 1455), 'gym.make', 'gym.make', (['"""Hopper-v2"""'], {}), "('Hopper-v2')\n", (1442, 1455), False, 'import gym\n'), ((421, 450), 'numpy.concatenate', 'np.concatenate', (['[low, [0]]', '(0)'], {}), '([low, [0]], 0)\n', (435, 450), True, 'import numpy as np\n'), ((466, 496), 'numpy.concatenate', 'np.concatenate', (['[high, [1]]', '(0)'], {}), '([high, [1]], 0)\n', (480, 496), True, 'import numpy as np\n'), ((531, 600), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'env.observation_space.dtype'}), '(low=low, high=high, dtype=env.observation_space.dtype)\n', (545, 600), False, 'import gym\n'), ((376, 394), 'numpy.zeros_like', 'np.zeros_like', (['low'], {}), '(low)\n', (389, 394), True, 'import numpy as np\n')]
|
# coding: utf-8
"""
gradient_check.py
This file checks whether backpropagation computes the gradients correctly and stores them at the correct addresses.
Use it to track down the cause when parameter updates do not work as expected.
"""
import os,sys
import numpy as np
import matplotlib.pyplot as plt
import networkgraph
from agent import Agent
from dataset.mnist import load_mnist
from common.multi_layer_net_extend import MultiLayerNetExtend
from common.optimizer import SGD
path = os.path.dirname(os.path.abspath(__file__))
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
n = 1
maxdeg = n-1
Gadj = np.ones([n,n]) - np.eye(n)
x_train_split = np.split(x_train, n)
t_train_split = np.split(t_train, n)
max_epochs = 101
each_train_size = x_train_split[0].shape[0]
batch_size = min(100, each_train_size)
Agent.n = n
Agent.maxdeg, Agent.AdjG_init = maxdeg, Gadj
Agent.train_size, Agent.batch_size = each_train_size, batch_size
weight_decay_lambda = 0
agents = [Agent(idx, x_train_split[idx], t_train_split[idx], x_test, t_test,
SGD(lr=lambda s:0.01), weight_decay_lambda) for idx in range(n)]
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(each_train_size / batch_size, 1)
epoch_cnt = 0
#####################
grad_numerical = agents[0].degub_numericalGrad()
grad_backprop = agents[0].debug_backpropGrad()
### Confirm that the numerical gradients and the gradients obtained by backpropagation agree.
for key in grad_numerical.keys():
diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
print(key + ":" + str(diff))
######################
# network = MultiLayerNetExtend(input_size=784, hidden_size_list=[50], output_size=10,
# weight_decay_lambda=0,
# use_dropout=False, dropout_ration=0.0, use_batchnorm=False)
# x_batch = x_train[:3]
# t_batch = t_train[:3]
# grad_numerical = network.numerical_gradient(x_batch,t_batch)
# grad_backprop = network.gradient(x_batch,t_batch)
# for key in grad_numerical.keys():
# diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
# print(key + ":" + str(diff))
#######################
|
[
"os.path.abspath",
"numpy.abs",
"common.optimizer.SGD",
"numpy.ones",
"numpy.split",
"dataset.mnist.load_mnist",
"numpy.eye"
] |
[((479, 505), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)'}), '(normalize=True)\n', (489, 505), False, 'from dataset.mnist import load_mnist\n'), ((581, 601), 'numpy.split', 'np.split', (['x_train', 'n'], {}), '(x_train, n)\n', (589, 601), True, 'import numpy as np\n'), ((619, 639), 'numpy.split', 'np.split', (['t_train', 'n'], {}), '(t_train, n)\n', (627, 639), True, 'import numpy as np\n'), ((410, 435), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (425, 435), False, 'import os, sys\n'), ((535, 550), 'numpy.ones', 'np.ones', (['[n, n]'], {}), '([n, n])\n', (542, 550), True, 'import numpy as np\n'), ((552, 561), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (558, 561), True, 'import numpy as np\n'), ((998, 1020), 'common.optimizer.SGD', 'SGD', ([], {'lr': '(lambda s: 0.01)'}), '(lr=lambda s: 0.01)\n', (1001, 1020), False, 'from common.optimizer import SGD\n'), ((1427, 1475), 'numpy.abs', 'np.abs', (['(grad_backprop[key] - grad_numerical[key])'], {}), '(grad_backprop[key] - grad_numerical[key])\n', (1433, 1475), True, 'import numpy as np\n')]
|
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
Author:<NAME>
"""
import numpy as np
import threading, random
import tensorflow as tf
class memory_pooling(object):
"""a class offer the APIs of experience replay in reinforcement learning
Example:
memorys = memory_pooling(maxlen=100)
for i in range(300):
state_current = [1, 2, 3, 4]
action = [1., 2.3, 4.6]
state_next = [2, 3, 5, 9]
r = 2.3
whether_end = False
memorys.put([state_current, action, state_next, r, whether_end])
pass
"""
def __init__(self, maxlen=500):
self.lock = threading.Lock()
self.memory = []
self.maxlen = maxlen
def put(self, memory):
"""put the a memory to memory list
Args:
"""
self.lock.acquire()
        ## put memory; this section must not be interrupted by other threads
if len(self.memory) >= self.maxlen:
# del_i = random.randint(0, len(self.memory) - 1)
# self.memory.pop(del_i) ## random del a memory
self.memory.pop(0) ## random del a memory
self.memory.append(memory) ## put the newest memory
else:
self.memory.append(memory)
self.lock.release()
def capacity_bigger_than(self, val):
"""judge whether the memory pooling capacity is bigger that a val
Args:
val: normally, is a int represents the batch size
Return:
bool var.
"""
if len(self.memory) >= val:
return True
else:
return False
def get(self, batch_size):
"""randomly get some memory from memory pooling
Args:
batch_size: an int
Return:
if success, return batch memories.
            otherwise, return None
"""
if len(self.memory) >= batch_size:
self.lock.acquire()
memorys = random.sample(self.memory, batch_size)
self.lock.release()
return memorys
else:
return None
pass
def get_history(self):
return np.array(self.memory)
class balance_memory_pooling():
"""this class will store different class of obj in a balance propotion"""
def __init__(self, max_capacity, n_class):
"""
Args:
max_capacity: max capacity
            n_class: the number of classes
"""
assert n_class >= 2
self.balance_memory = []
self.max_capacity = max_capacity
self.n_class = n_class
for i in range(n_class):
self.balance_memory.append([])
self.whether_max_capacity = False
def put(self, class_index, memory):
"""
Args:
class_index: the index of class in this memory
            memory: the memory you want to store
"""
assert class_index < len(self.balance_memory)
if self.__total_capacity() >= self.max_capacity:
self.__del_memory_of_max_len()
self.balance_memory[class_index].append(memory)
else:
self.balance_memory[class_index].append(memory)
def __total_capacity(self):
""" query the capacity """
l = 0
for memorys in self.balance_memory:
l += len(memorys)
return l
def __del_memory_of_max_len(self):
"""del a memory of max len class"""
l = []
for memorys in self.balance_memory:
l.append(len(memorys))
index = int(np.argmax(np.array(l)))
# del_i = random.randint(0,len(self.balance_memory[index])-1)
self.balance_memory[index].pop(0)
def get_propotion(self):
"""get propotion in different class"""
l = []
for memorys in self.balance_memory:
l.append(len(memorys))
propotion = np.array(l)/ self.__total_capacity()
return propotion
def get(self, batch_size):
"""random sample from all memory"""
m = []
for memorys in self.balance_memory:
m += memorys
return random.sample(m, batch_size)
    def capacity_bigger_than(self, val):
        """judge whether the memory pooling capacity is bigger than a given value
        Args:
            val: normally an int representing the batch size
Return:
bool var.
"""
if self.__total_capacity() >= val:
return True
else:
return False
def is_balance(self):
propotion = float(np.max(np.array(self.get_propotion())))
if propotion < max(0.6, (1/self.n_class+0.1)):
return True
else:
return False
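# A minimal usage sketch of balance_memory_pooling (illustrative, with made-up
# transition values; the helper name below is hypothetical and not part of the
# original module):
def _balance_memory_example():
    pool = balance_memory_pooling(max_capacity=100, n_class=2)
    for step in range(300):
        class_index = step % 2                            # alternate between the two classes
        memory = [[step], [0.1], [step + 1], 1.0, False]  # [state, action, next_state, reward, end]
        pool.put(class_index, memory)
    print(pool.get_propotion())                           # roughly [0.5, 0.5]
    if pool.capacity_bigger_than(32) and pool.is_balance():
        return pool.get(32)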
def normalize_rewards(rewards):
"""normalize the rewards
Args:
rewards: a list of reward.
Return:
rewards after normalization
"""
r = (np.array(rewards) - np.mean(rewards))/np.std(rewards)
return r
def replace(memorys, norm_rewards, reward_index=3):
    """replace the raw rewards in memorys with the rewards after normalization
    Args:
        memorys: a list of memories, where each element is [state_current, action, state_next, reward, end]
norm_rewards: a list of rewards after normalization.
reward_index: the raw reward position in memories' item, default is 3
Example:
## init a memorys pooling
memorys = memory_pooling(maxlen=100)
for i in range(300):
memorys.put([i,i,i,i,i])
## get 20 memorys
memorys_ = memorys.get(batch_size=20)
## calculate the norm_rewards and replace raw rewards with them.
raw_rewards = [m[3] for m in memorys_]
r = normalize_rewards(raw_rewards)
replace(memorys_, r)
        ## now the rewards in memorys_ are normalized
"""
for i, nr in enumerate(norm_rewards):
memorys[i][reward_index] = nr
def copy_a2b(vars_a, vars_b):
    """create copy ops which copy vars_a to vars_b
Args:
vars_a: a list of tensor vars
var_b: a list of tensor vars
Return:
        a grouped copy operation
"""
assert len(vars_a)==len(vars_b)
copy_ops = [var_b.assign(var_a) for var_a, var_b in zip(vars_a, vars_b)]
copy_ops = tf.group(*copy_ops)
return copy_ops
def soft_copy_a2b(vars_a, vars_b, tau=1e-3):
    """create soft copy ops which softly copy vars_a to vars_b:
vars_b = tau*vars_a + (1-tau)*vars_b
Args:
vars_a: a list of tensor vars
var_b: a list of tensor vars
Return:
        a grouped soft copy operation
"""
assert len(vars_a) == len(vars_b)
copy_ops = [var_b.assign(tau*var_a + (1-tau)*var_b) for var_a, var_b in zip(vars_a, vars_b)]
copy_ops = tf.group(*copy_ops)
return copy_ops
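# A minimal sketch of how copy_a2b / soft_copy_a2b are typically used to keep a
# target network in sync with an online network (the variable lists and helper
# name are hypothetical; in graph mode the returned ops are run in a session,
# in eager mode the assigns execute immediately):
def _target_sync_example(online_vars, target_vars, sess=None):
    hard_update = copy_a2b(online_vars, target_vars)       # target <- online
    soft_update = soft_copy_a2b(online_vars, target_vars)  # target <- tau*online + (1-tau)*target
    if sess is not None:
        sess.run(hard_update)
    return hard_update, soft_update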
class exploration_noise(object):
    """an exploration noise generator for continuous control"""
def __init__(self, theta, mu=0., sigma=0.4, x0=0, dt=1e-1, n_steps_annealing=40000, size=3):
self.theta = theta
self.sigma = sigma
self.n_steps_annealing = n_steps_annealing
self.sigma_step = - self.sigma / float(self.n_steps_annealing)
self.x0 = x0
self.mu = mu
self.dt = dt
self.size = size
def generate(self, step):
sigma = max(0, self.sigma_step * step + self.sigma)
x = self.x0 + self.theta * (self.mu - self.x0) * self.dt + sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
self.x0 = x
return x
if __name__ == '__main__':
n = exploration_noise(10.)
for i in range(1000):
print(n.generate(i))
|
[
"random.sample",
"numpy.std",
"threading.Lock",
"tensorflow.group",
"numpy.array",
"numpy.mean",
"numpy.random.normal",
"numpy.sqrt"
] |
[((6312, 6331), 'tensorflow.group', 'tf.group', (['*copy_ops'], {}), '(*copy_ops)\n', (6320, 6331), True, 'import tensorflow as tf\n'), ((6800, 6819), 'tensorflow.group', 'tf.group', (['*copy_ops'], {}), '(*copy_ops)\n', (6808, 6819), True, 'import tensorflow as tf\n'), ((699, 715), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (713, 715), False, 'import threading, random\n'), ((2202, 2223), 'numpy.array', 'np.array', (['self.memory'], {}), '(self.memory)\n', (2210, 2223), True, 'import numpy as np\n'), ((4170, 4198), 'random.sample', 'random.sample', (['m', 'batch_size'], {}), '(m, batch_size)\n', (4183, 4198), False, 'import threading, random\n'), ((4970, 4985), 'numpy.std', 'np.std', (['rewards'], {}), '(rewards)\n', (4976, 4985), True, 'import numpy as np\n'), ((2009, 2047), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (2022, 2047), False, 'import threading, random\n'), ((3931, 3942), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (3939, 3942), True, 'import numpy as np\n'), ((4932, 4949), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (4940, 4949), True, 'import numpy as np\n'), ((4952, 4968), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (4959, 4968), True, 'import numpy as np\n'), ((3612, 3623), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (3620, 3623), True, 'import numpy as np\n'), ((7473, 7505), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.size'}), '(size=self.size)\n', (7489, 7505), True, 'import numpy as np\n'), ((7454, 7470), 'numpy.sqrt', 'np.sqrt', (['self.dt'], {}), '(self.dt)\n', (7461, 7470), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (С) ABBYY (BIT Software), 1993 - 2019. All rights reserved.
"""
Script for evaluating the network's results (numerically) and producing visualizations
"""
import argparse
import logging
import os
import time
import numpy as np
from semantic_segmentation.data_generators import BatchGenerator
from semantic_segmentation.model_runner import ModelRunner
from semantic_segmentation.net import NetManager, NetConfig
argparser = argparse.ArgumentParser()
argparser.add_argument('--source', '-s', type=str, required=True,
help="path to data markup dir (contains Image and Markup subfolders)")
argparser.add_argument('--dest', '-d', type=str, required=False,
help="path to dir with results")
argparser.add_argument('--log_dir', '-l', type=str, required=True,
help="path to training logging dir with config and saved models")
argparser.add_argument('--model_path', type=str, default=None,
help="path to trained model (.h5) - either local, global or from log_dir; "
"if None, load last model from log_dir")
argparser.add_argument('--markup_type', '-mt', type=str, default="Barcode",
help="markup type for train and test")
argparser.add_argument('--batch_size', '-b', type=int, default=8,
help="batch size for train, test and evaluation")
argparser.add_argument('--n_workers', '-n', type=int, default=4,
help="number of preprocessing threads")
argparser.add_argument('--prepare_batch_size', '-pbs', type=int, default=3000,
help="number of preprocessed images before groupby")
argparser.add_argument('--max_image_side', type=int, default=None,
help="max size for image height and width "
"(if it is larger image will be downsized maintaining aspect ratio)")
argparser.add_argument('--visualize', '-viz', action='store_true',
help="if True draws images with gt, results, segmentation (and classification if enabled)")
argparser.add_argument('--min_detection_area', '-min_area', type=int, default=None,
help="found connected components with area less than this value will be filtered out")
def probability(string):
value = float(string)
if not (0 <= value <= 1):
raise argparse.ArgumentTypeError(f"{value} is not a probability")
return value
argparser.add_argument('--pixel_threshold', '-pixel_threshold', type=probability, default=0.5,
help="threshold to consider pixel as detected, should be between 0 and 1")
def main():
args = argparser.parse_args()
if not args.dest:
args.dest = os.path.join(args.log_dir, "results", "last_result")
else:
args.dest = os.path.join(args.log_dir, "results", args.dest)
os.makedirs(args.dest, exist_ok=True)
logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.getLogger().addHandler(logging.FileHandler(os.path.join(args.dest, 'log.txt'), 'w'))
net_manager = NetManager(args.log_dir)
net_config = net_manager.load_model(args.model_path)
net_config = NetConfig.from_others(net_config,
max_image_side=args.max_image_side,
min_pixels_for_detection=args.min_detection_area)
model = net_manager.get_keras_model()
input_image_shape = (1, net_config.get_max_side(), net_config.get_max_side(), 1)
    model.predict(np.zeros(input_image_shape))  # warm-up call to make sure the model is compiled
t = time.time()
model.predict(np.zeros(input_image_shape))
t = time.time() - t
logging.info(f"INFERENCE TIME: {t}, shape={input_image_shape}")
test_generator = BatchGenerator(
args.source,
batch_size=args.batch_size,
markup_type=args.markup_type,
net_config=net_config,
use_augmentation=False,
prepare_batch_size=args.prepare_batch_size,
yield_incomplete_batches=True,
n_workers=args.n_workers,
test=True
)
logging.info(f"Config: {net_config}")
logging.info(f"Predicting {args.source} --> {args.dest}")
model_runner = ModelRunner(net_config,
pixel_threshold=args.pixel_threshold)
metrics, _ = model_runner.run(model=model,
data_generator=test_generator.generate(add_metainfo=True),
n_images=test_generator.get_images_per_epoch(),
save_dir=args.dest,
save_visualizations=args.visualize)
with open(os.path.join(args.dest, "result.txt"), 'w') as f:
f.write("Evaluation {} images from {}\n\n".format(test_generator.get_images_per_epoch(), args.source))
for metric_name, metric_value in metrics.items():
f.write("{}: {}\n".format(metric_name, metric_value))
if __name__ == '__main__':
main()
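# Example invocation (the script filename is hypothetical; the options follow the
# argparse arguments defined above):
#   python evaluate.py -s /path/to/markup_dir -l /path/to/log_dir -d my_run --visualize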
|
[
"semantic_segmentation.net.NetManager",
"semantic_segmentation.model_runner.ModelRunner",
"os.makedirs",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.path.join",
"numpy.zeros",
"time.time",
"logging.info",
"semantic_segmentation.data_generators.BatchGenerator",
"semantic_segmentation.net.NetConfig.from_others",
"logging.getLogger",
"argparse.ArgumentTypeError"
] |
[((470, 495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (493, 495), False, 'import argparse\n'), ((2927, 2964), 'os.makedirs', 'os.makedirs', (['args.dest'], {'exist_ok': '(True)'}), '(args.dest, exist_ok=True)\n', (2938, 2964), False, 'import os\n'), ((2970, 3031), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (2989, 3031), False, 'import logging\n'), ((3148, 3172), 'semantic_segmentation.net.NetManager', 'NetManager', (['args.log_dir'], {}), '(args.log_dir)\n', (3158, 3172), False, 'from semantic_segmentation.net import NetManager, NetConfig\n'), ((3247, 3370), 'semantic_segmentation.net.NetConfig.from_others', 'NetConfig.from_others', (['net_config'], {'max_image_side': 'args.max_image_side', 'min_pixels_for_detection': 'args.min_detection_area'}), '(net_config, max_image_side=args.max_image_side,\n min_pixels_for_detection=args.min_detection_area)\n', (3268, 3370), False, 'from semantic_segmentation.net import NetManager, NetConfig\n'), ((3655, 3666), 'time.time', 'time.time', ([], {}), '()\n', (3664, 3666), False, 'import time\n'), ((3742, 3805), 'logging.info', 'logging.info', (['f"""INFERENCE TIME: {t}, shape={input_image_shape}"""'], {}), "(f'INFERENCE TIME: {t}, shape={input_image_shape}')\n", (3754, 3805), False, 'import logging\n'), ((3828, 4086), 'semantic_segmentation.data_generators.BatchGenerator', 'BatchGenerator', (['args.source'], {'batch_size': 'args.batch_size', 'markup_type': 'args.markup_type', 'net_config': 'net_config', 'use_augmentation': '(False)', 'prepare_batch_size': 'args.prepare_batch_size', 'yield_incomplete_batches': '(True)', 'n_workers': 'args.n_workers', 'test': '(True)'}), '(args.source, batch_size=args.batch_size, markup_type=args.\n markup_type, net_config=net_config, use_augmentation=False,\n prepare_batch_size=args.prepare_batch_size, yield_incomplete_batches=\n True, n_workers=args.n_workers, test=True)\n', (3842, 4086), False, 'from semantic_segmentation.data_generators import BatchGenerator\n'), ((4156, 4193), 'logging.info', 'logging.info', (['f"""Config: {net_config}"""'], {}), "(f'Config: {net_config}')\n", (4168, 4193), False, 'import logging\n'), ((4199, 4256), 'logging.info', 'logging.info', (['f"""Predicting {args.source} --> {args.dest}"""'], {}), "(f'Predicting {args.source} --> {args.dest}')\n", (4211, 4256), False, 'import logging\n'), ((4276, 4337), 'semantic_segmentation.model_runner.ModelRunner', 'ModelRunner', (['net_config'], {'pixel_threshold': 'args.pixel_threshold'}), '(net_config, pixel_threshold=args.pixel_threshold)\n', (4287, 4337), False, 'from semantic_segmentation.model_runner import ModelRunner\n'), ((2429, 2488), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""{value} is not a probability"""'], {}), "(f'{value} is not a probability')\n", (2455, 2488), False, 'import argparse\n'), ((2791, 2843), 'os.path.join', 'os.path.join', (['args.log_dir', '"""results"""', '"""last_result"""'], {}), "(args.log_dir, 'results', 'last_result')\n", (2803, 2843), False, 'import os\n'), ((2874, 2922), 'os.path.join', 'os.path.join', (['args.log_dir', '"""results"""', 'args.dest'], {}), "(args.log_dir, 'results', args.dest)\n", (2886, 2922), False, 'import os\n'), ((3590, 3617), 'numpy.zeros', 'np.zeros', (['input_image_shape'], {}), '(input_image_shape)\n', (3598, 3617), True, 'import numpy as np\n'), ((3685, 3712), 'numpy.zeros', 'np.zeros', (['input_image_shape'], {}), 
'(input_image_shape)\n', (3693, 3712), True, 'import numpy as np\n'), ((3722, 3733), 'time.time', 'time.time', ([], {}), '()\n', (3731, 3733), False, 'import time\n'), ((3036, 3055), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3053, 3055), False, 'import logging\n'), ((3087, 3121), 'os.path.join', 'os.path.join', (['args.dest', '"""log.txt"""'], {}), "(args.dest, 'log.txt')\n", (3099, 3121), False, 'import os\n'), ((4730, 4767), 'os.path.join', 'os.path.join', (['args.dest', '"""result.txt"""'], {}), "(args.dest, 'result.txt')\n", (4742, 4767), False, 'import os\n')]
|
import os
import pickle
import kenlm
import numpy as np
from s2search.text import fix_text, fix_author_text
from s2search.features import make_features, posthoc_score_adjust
class S2Ranker:
"""A class to encapsulate the Semantic Scholar search ranker.
Arguments:
data_dir {str} -- where the language models and lightgbm model live.
use_posthoc_correction {bool} -- whether to use posthoc correction
"""
def __init__(self, data_dir, use_posthoc_correction=True):
self.use_posthoc_correction = use_posthoc_correction
self.data_dir = data_dir
lm_title_abstracts = kenlm.Model(os.path.join(data_dir, 'titles_abstracts_lm.binary'))
lm_authors = kenlm.Model(os.path.join(data_dir, 'authors_lm.binary'))
lm_venues = kenlm.Model(os.path.join(data_dir, 'venues_lm.binary'))
self.lms = (lm_title_abstracts, lm_authors, lm_venues)
with open(os.path.join(data_dir, 'lightgbm_model.pickle'), 'rb') as f:
self.model = pickle.load(f)
def score(self, query, papers, **kws):
"""Score each pair of (query, paper) for all papers
Arguments:
query {str} -- plain text search query
papers {list of dicts} -- A list of candidate papers, each of which
is a dictionary.
Returns:
scores {np.array} -- an array of scores, one per paper in papers
"""
query = str(query)
X = np.array([
make_features(query, self.prepare_result(paper), self.lms)
for paper in papers
])
scores = self.model.predict(X, **kws)
if self.use_posthoc_correction:
scores = posthoc_score_adjust(scores, X, query)
return scores
@classmethod
def prepare_result(cls, paper):
"""Prepare the raw text result for featurization
Arguments:
paper {dict} -- A dictionary that has the required paper fields:
'title', 'abstract', 'authors', 'venues', 'year',
'n_citations', 'n_key_citations'
Returns:
out {dict} -- A dictionary where the paper fields have been pre-processed.
"""
out = {'paper_year': paper.get('year', np.nan)}
out['n_citations'] = paper.get('n_citations', 0)
# if n_key_citations aren't available, we can get a quick estimate of what they are from the n_citations
out['n_key_citations'] = paper.get('n_key_citations', int(-1.4 + np.log1p(out['n_citations'])))
if out['n_key_citations'] < 0:
out['n_key_citations'] = 0
out['paper_title_cleaned'] = fix_text(paper.get('title', ''))
out['paper_abstract_cleaned'] = fix_text(paper.get('abstract', ''))
out['paper_venue_cleaned'] = fix_text(paper.get('venue', ''))
out['author_name'] = [fix_author_text(i) for i in paper.get('authors', [])]
return out
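# A minimal usage sketch (the data directory and paper fields are hypothetical;
# score() expects a plain-text query and a list of paper dicts as described above):
def _s2_ranker_example():
    ranker = S2Ranker('/path/to/s2search_data')
    papers = [{'title': 'an example paper about search ranking',
               'abstract': 'a short example abstract',
               'authors': ['First Author', 'Second Author'],
               'venue': 'Example Venue', 'year': 2019, 'n_citations': 10}]
    return ranker.score('example query', papers)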
|
[
"s2search.text.fix_author_text",
"s2search.features.posthoc_score_adjust",
"pickle.load",
"os.path.join",
"numpy.log1p"
] |
[((641, 693), 'os.path.join', 'os.path.join', (['data_dir', '"""titles_abstracts_lm.binary"""'], {}), "(data_dir, 'titles_abstracts_lm.binary')\n", (653, 693), False, 'import os\n'), ((728, 771), 'os.path.join', 'os.path.join', (['data_dir', '"""authors_lm.binary"""'], {}), "(data_dir, 'authors_lm.binary')\n", (740, 771), False, 'import os\n'), ((805, 847), 'os.path.join', 'os.path.join', (['data_dir', '"""venues_lm.binary"""'], {}), "(data_dir, 'venues_lm.binary')\n", (817, 847), False, 'import os\n'), ((1017, 1031), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1028, 1031), False, 'import pickle\n'), ((1726, 1764), 's2search.features.posthoc_score_adjust', 'posthoc_score_adjust', (['scores', 'X', 'query'], {}), '(scores, X, query)\n', (1746, 1764), False, 'from s2search.features import make_features, posthoc_score_adjust\n'), ((2908, 2926), 's2search.text.fix_author_text', 'fix_author_text', (['i'], {}), '(i)\n', (2923, 2926), False, 'from s2search.text import fix_text, fix_author_text\n'), ((931, 978), 'os.path.join', 'os.path.join', (['data_dir', '"""lightgbm_model.pickle"""'], {}), "(data_dir, 'lightgbm_model.pickle')\n", (943, 978), False, 'import os\n'), ((2553, 2581), 'numpy.log1p', 'np.log1p', (["out['n_citations']"], {}), "(out['n_citations'])\n", (2561, 2581), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from utils.trapezoidal_generator import TrapezoidalGenerator
class TestTrapezoidalGenerator(unittest.TestCase):
epsilon = 0.001
tg = TrapezoidalGenerator(dq_max=1, ddq_max=10)
def test_generator(self):
self.tg.set_limits(2, 2)
q_init = 0
q_final = 5
real_coefs = np.array([1.0, 2.5])
t_1, tau = self.tg.generate_coefficients(q_init, q_final)
coefs = np.array([t_1, tau])
self.assertTrue(np.all(np.abs((real_coefs - coefs)) <= self.epsilon))
def test_generator_frequency(self):
self.tg.set_limits(2, 2)
self.tg.set_frequency(5)
q_init = 0
q_final = 5
real_coefs = np.array([1.0, 2.6])
t_1, tau = self.tg.generate_coefficients(q_init, q_final)
coefs = np.array([t_1, tau])
self.assertTrue(np.all(np.abs((real_coefs - coefs)) <= self.epsilon))
|
[
"utils.trapezoidal_generator.TrapezoidalGenerator",
"numpy.abs",
"numpy.array"
] |
[((179, 221), 'utils.trapezoidal_generator.TrapezoidalGenerator', 'TrapezoidalGenerator', ([], {'dq_max': '(1)', 'ddq_max': '(10)'}), '(dq_max=1, ddq_max=10)\n', (199, 221), False, 'from utils.trapezoidal_generator import TrapezoidalGenerator\n'), ((347, 367), 'numpy.array', 'np.array', (['[1.0, 2.5]'], {}), '([1.0, 2.5])\n', (355, 367), True, 'import numpy as np\n'), ((451, 471), 'numpy.array', 'np.array', (['[t_1, tau]'], {}), '([t_1, tau])\n', (459, 471), True, 'import numpy as np\n'), ((718, 738), 'numpy.array', 'np.array', (['[1.0, 2.6]'], {}), '([1.0, 2.6])\n', (726, 738), True, 'import numpy as np\n'), ((822, 842), 'numpy.array', 'np.array', (['[t_1, tau]'], {}), '([t_1, tau])\n', (830, 842), True, 'import numpy as np\n'), ((503, 529), 'numpy.abs', 'np.abs', (['(real_coefs - coefs)'], {}), '(real_coefs - coefs)\n', (509, 529), True, 'import numpy as np\n'), ((874, 900), 'numpy.abs', 'np.abs', (['(real_coefs - coefs)'], {}), '(real_coefs - coefs)\n', (880, 900), True, 'import numpy as np\n')]
|
"""
A class MinHashing that builds a minHash signature (in the form of a vector or a set) of a given length n
from a given set of integers (a set of hashed shingles).
"""
import numpy as np
import sympy
class MinHashing:
def __init__(self, seed=1337):
self.seed = seed
def _get_prime_above(self, n):
"""
Get smallest prime that is larger than n
        :param n: an int that the prime should be larger than
:return: a prime number larger than n
"""
return sympy.nextprime(n)
def get_signature_matrix_hash(self, shingles, signature_len=100):
"""
Creates signature matrix using hashes
:param shingles: List of shingle sets
:param signature_len: Length of each signature
:return: numpy array representing the signature matrix. Documents are arranged column-wise
"""
np.random.seed(self.seed) # set numpy seed
rows = set.union(*shingles) # Extract shingle id:s that exist in our documents
max_shingle_count = max(rows)
prime = self._get_prime_above(max_shingle_count)
        result = np.ones((signature_len, len(shingles)), dtype=int)*prime  # Initialize the signature matrix (np.int is deprecated; use builtin int)
"""Create hashes in the form h(r) = (a*r+b) % c"""
a = np.random.randint(0, np.iinfo(np.int32).max, signature_len)
b = np.random.randint(0, np.iinfo(np.int32).max, signature_len)
c = np.ones_like(a) * prime
# Iterate over each shingle (row in this case)
for r in rows:
# Calculate hash of row
r_hashes = (a * r + b) % c
# Iterate over each document
# TODO: Make more efficient using numpy operations
for doc in range(len(shingles)):
if r in shingles[doc]:
# Update matrix if document contains shingle with id r
result[:, doc] = np.where(result[:, doc] < r_hashes, result[:, doc], r_hashes)
return result
def get_signature_matrix_permutations(self, shingles, signature_len=100):
"""
Creates signature matrix using permutations.
:param shingles: List of shingle sets
:param signature_len: Length of each signature
:return: numpy array representing the signature matrix. Documents are arranged column-wise
"""
np.random.seed(self.seed)
        result = np.zeros((signature_len, len(shingles)), dtype=int)  # np.int is deprecated; builtin int is equivalent here
# permutations = [[1,3,7,6,2,5,4], [4,2,1,3,6,7,5], [3,4,7,6,1,2,5]]
for i in range(signature_len):
order = np.random.permutation(100)
# order = permutations[i]
handle_document = set(range(len(shingles)))
for idx, j in enumerate(order):
handle_document_temp = handle_document.copy()
for doc in handle_document_temp:
if j in shingles[doc]:
result[i, doc] = int(idx) + 1
handle_document.remove(doc)
return result
def main():
# Example usage of this class
min_hashing_object = MinHashing()
# example_shingles = [{0, 1, 5, 6}, {2, 3, 4}, {0, 5, 6}, {1, 2, 3, 4}]
# signature_matrix = min_hashing_object.get_signature_matrix_hash(example_shingles)
from DataLoader import DataLoader
data_loader = DataLoader("Data/bbc-text.csv")
docs = data_loader.get_documents(nr_docs=20, char_dim=500)
from Shingling import Shingling
shingling = Shingling(docs, 9)
shingling.docs_to_hashed_shingles()
shingle_sets = shingling.hashed_shingles
signature_matrix = min_hashing_object.get_signature_matrix_hash(shingle_sets, signature_len=100)
from LSH import LSH
lsh = LSH(signature_matrix)
# 20 docs and threshold 0.3 works
candidate_pairs = lsh.get_candidate_pairs(0.5, band_method=1)
print("Num pairs: ", len(candidate_pairs))
print(candidate_pairs)
return
if __name__ == "__main__":
main()
|
[
"LSH.LSH",
"numpy.random.seed",
"DataLoader.DataLoader",
"numpy.ones_like",
"numpy.iinfo",
"sympy.nextprime",
"Shingling.Shingling",
"numpy.where",
"numpy.random.permutation"
] |
[((3365, 3396), 'DataLoader.DataLoader', 'DataLoader', (['"""Data/bbc-text.csv"""'], {}), "('Data/bbc-text.csv')\n", (3375, 3396), False, 'from DataLoader import DataLoader\n'), ((3512, 3530), 'Shingling.Shingling', 'Shingling', (['docs', '(9)'], {}), '(docs, 9)\n', (3521, 3530), False, 'from Shingling import Shingling\n'), ((3753, 3774), 'LSH.LSH', 'LSH', (['signature_matrix'], {}), '(signature_matrix)\n', (3756, 3774), False, 'from LSH import LSH\n'), ((518, 536), 'sympy.nextprime', 'sympy.nextprime', (['n'], {}), '(n)\n', (533, 536), False, 'import sympy\n'), ((886, 911), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (900, 911), True, 'import numpy as np\n'), ((2371, 2396), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2385, 2396), True, 'import numpy as np\n'), ((1444, 1459), 'numpy.ones_like', 'np.ones_like', (['a'], {}), '(a)\n', (1456, 1459), True, 'import numpy as np\n'), ((2607, 2633), 'numpy.random.permutation', 'np.random.permutation', (['(100)'], {}), '(100)\n', (2628, 2633), True, 'import numpy as np\n'), ((1321, 1339), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1329, 1339), True, 'import numpy as np\n'), ((1393, 1411), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1401, 1411), True, 'import numpy as np\n'), ((1922, 1983), 'numpy.where', 'np.where', (['(result[:, doc] < r_hashes)', 'result[:, doc]', 'r_hashes'], {}), '(result[:, doc] < r_hashes, result[:, doc], r_hashes)\n', (1930, 1983), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import statistics
import tensorflow
from tensorflow import keras
from numpy import asarray
from numpy import savetxt
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import BatchNormalization,Activation,Dropout
from keras.layers import Conv1D, MaxPooling2D, Dense, Flatten
from keras.utils import normalize, to_categorical
import tensorflow as tf
import numpy as np
import tensorflow as tf
from keras.regularizers import l2
from tensorflow import keras
from tensorflow.keras import layers,models,activations
from tensorflow.keras.layers.experimental import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,f1_score,classification_report
from keras.datasets import reuters
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN, Activation, LSTM, Dropout
from keras import optimizers
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
data = pd.read_csv(r"D:\project\feature_vectors_syscalls_frequency_5_Cat.csv")
data1 = data.iloc[:,:-1]
rows, cols = (11598, 139)
data_norm = [[0]*cols]*rows
mu = data1.mean()
test1 = data.iloc[748,:-1]
std_devv = data1.std()
data_norm = (data1-mu)/std_devv
d_n = data_norm.to_numpy()
y = data.iloc[:,139]
y1 = y.to_numpy()
# feature selection
def select_features(X_train, y_train):
    # select the 98 best features according to the ANOVA F-test (f_classif)
fs = SelectKBest(score_func=f_classif, k=98)
# learn relationship from training data
fs.fit(X_train, y_train)
# transform train input data
X_train_fs = fs.transform(X_train)
return X_train_fs, fs
# feature selection
X_train_fs, fs = select_features(data_norm, y1)
X_train, X_test, y_train, y_test = train_test_split(X_train_fs, y1, test_size=0.3, shuffle= True)
train_X = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
test_X = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
model1 = Sequential()
model1.add(Conv1D(200,1,input_shape = (1,1,98),padding = 'Valid',activation= 'relu'))
model1.add(Dropout(0.3))
model1.add(Dense(100, activation='relu'))
model1.add(Flatten())
model1.add(Dense(6,activation = 'relu' ))
model1.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer= 'adam',
metrics=["accuracy"],
)
history = model1.fit(train_X, y_train, epochs=120, batch_size=128, validation_data=(test_X, y_test), verbose=2)
label = model1.predict(test_X)
label1 = model1.predict(train_X)
op = np.argmax(label,axis=1)
ops =np.argmax(label1,axis=1)
print(f1_score(y_test, op,average = 'micro'))
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
y_pred = model1.predict_classes(test_X)
print(classification_report(y_test, y_pred))
|
[
"matplotlib.pyplot.title",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.argmax",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"keras.layers.Dropout",
"matplotlib.pyplot.ylabel",
"keras.layers.Flatten",
"keras.layers.Conv1D",
"sklearn.metrics.classification_report",
"sklearn.metrics.f1_score",
"keras.layers.Dense",
"keras.models.Sequential",
"matplotlib.pyplot.xlabel",
"sklearn.feature_selection.SelectKBest"
] |
[((1379, 1451), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\project\\\\feature_vectors_syscalls_frequency_5_Cat.csv"""'], {}), "('D:\\\\project\\\\feature_vectors_syscalls_frequency_5_Cat.csv')\n", (1390, 1451), True, 'import pandas as pd\n'), ((2122, 2183), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train_fs', 'y1'], {'test_size': '(0.3)', 'shuffle': '(True)'}), '(X_train_fs, y1, test_size=0.3, shuffle=True)\n', (2138, 2183), False, 'from sklearn.model_selection import train_test_split\n'), ((2327, 2339), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2337, 2339), False, 'from keras.models import Sequential\n'), ((2894, 2918), 'numpy.argmax', 'np.argmax', (['label'], {'axis': '(1)'}), '(label, axis=1)\n', (2903, 2918), True, 'import numpy as np\n'), ((2924, 2949), 'numpy.argmax', 'np.argmax', (['label1'], {'axis': '(1)'}), '(label1, axis=1)\n', (2933, 2949), True, 'import numpy as np\n'), ((3000, 3037), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (3008, 3037), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3080), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (3047, 3080), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3109), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (3091, 3109), True, 'import matplotlib.pyplot as plt\n'), ((3111, 3133), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (3121, 3133), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3145, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3203), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (3166, 3203), True, 'import matplotlib.pyplot as plt\n'), ((3205, 3215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3213, 3215), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1852), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_classif', 'k': '(98)'}), '(score_func=f_classif, k=98)\n', (1824, 1852), False, 'from sklearn.feature_selection import SelectKBest\n'), ((2352, 2426), 'keras.layers.Conv1D', 'Conv1D', (['(200)', '(1)'], {'input_shape': '(1, 1, 98)', 'padding': '"""Valid"""', 'activation': '"""relu"""'}), "(200, 1, input_shape=(1, 1, 98), padding='Valid', activation='relu')\n", (2358, 2426), False, 'from keras.layers import Conv1D, MaxPooling2D, Dense, Flatten, Conv1D\n'), ((2439, 2451), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2446, 2451), False, 'from keras.layers import Dense, SimpleRNN, Activation, LSTM, Dropout\n'), ((2465, 2494), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (2470, 2494), False, 'from keras.layers import Dense, SimpleRNN, Activation, LSTM, Dropout\n'), ((2508, 2517), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2515, 2517), False, 'from keras.layers import Conv1D, MaxPooling2D, Dense, Flatten, Conv1D\n'), ((2531, 2558), 'keras.layers.Dense', 'Dense', (['(6)'], {'activation': '"""relu"""'}), "(6, activation='relu')\n", (2536, 2558), False, 'from keras.layers import Dense, SimpleRNN, Activation, LSTM, Dropout\n'), ((2959, 2996), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'op'], {'average': 
'"""micro"""'}), "(y_test, op, average='micro')\n", (2967, 2996), False, 'from sklearn.metrics import accuracy_score, f1_score, classification_report\n'), ((3264, 3301), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3285, 3301), False, 'from sklearn.metrics import accuracy_score, f1_score, classification_report\n'), ((2589, 2649), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2631, 2649), False, 'from tensorflow import keras\n')]
|
import warnings
import numpy as np
import nibabel as nib
from .base_extractor import BaseExtractor
from .utils import mask_data, label_timeseries
def _check_cifti(fname):
"""Verify that file is read as a cifti"""
img = nib.load(fname)
if not isinstance(img, nib.Cifti2Image):
raise ValueError(f'{fname} not an instance of Cifti2Image')
return img
def _read_dtseries(fname):
"""Safely read dtseries file"""
if not fname.endswith('.dtseries.nii'):
raise ValueError(f'{fname} must be a .dtseries.nii file')
return _check_cifti(fname)
def _read_dlabel(fname):
"""Safely read a dlabel file and return the array and labels"""
if not fname.endswith('.dlabel.nii'):
raise ValueError(f'{fname} must be a .dlabel.nii file')
img = _check_cifti(fname)
# actual numerical labels in data to compare with label table
vertex_labels = np.unique(img.get_fdata())
label_dict = img.header.get_axis(index=0).label[0]
labels = []
for k, v in label_dict.items():
if k in vertex_labels:
labels.append(v[0])
return img, labels
def _get_models(img):
"""Pull out all brain models from cifti file"""
brain_models = list(img.header.get_index_map(1).brain_models)
models = {}
for i, m in enumerate(brain_models):
struct = m.brain_structure
models[struct] = {'count': m.index_count, 'offset': m.index_offset,
'model_index': i, 'type': m.model_type}
if m.model_type == 'CIFTI_MODEL_TYPE_SURFACE':
models[struct]['n_indices'] = m.surface_number_of_vertices
models[struct]['indices'] = np.asarray(m.vertex_indices)
else:
models[struct]['n_indices'] = m.index_count
models[struct]['indices'] = np.arange(m.index_count)
# ensure sorted by offset
models = {k: v for k, v in sorted(models.items(),
key=lambda x: x[1]['offset'])}
return models
def _has_medwall(model):
"""Check if structure has medial wall, which is when the model count is
    equal to the number of vertices. Always false for non-surface models
"""
if ((model['type'] == 'CIFTI_MODEL_TYPE_SURFACE') and
(model['count'] == model['n_indices'])):
return True
else:
return False
def _load_and_align_ciftis(dlabel, dtseries):
"""Correctly align the dlabel with the dtseries data
If dlabel and dtseries have the same number of elements, then they are
already aligned, and are each plainly loaded. If not, then iterate through
the brain models in dtseries and check vertex alignment of surface models,
and the existence of volume models. This will happen if a) dlabel includes
medial wall vertices but dtseries does not or vice versa, or b) if dlabel
does not have all the brain models found in dtseries.
Parameters
----------
    dlabel : nibabel.Cifti2Image
        ROI/label file
    dtseries : nibabel.Cifti2Image
Functional data
Returns
-------
np.ndarray, np.ndarray
Aligned arrays for the dlabel and dtseries, respectively
Raises
------
ValueError
If dlabel and dtseries have different lengths but no medial wall has
been detected in either
"""
dlabel_data = dlabel.get_fdata().ravel()
dtseries_data = dtseries.get_fdata()
if dlabel.shape[1] == dtseries.shape[1]:
return dlabel_data, dtseries_data
else:
warnings.warn(f'dlabel has shape {dlabel.shape[1]} and dtseries has '
f'shape {dtseries.shape[1]}. Aligning files via '
'brain structures present in each file. Double check '
'results!')
dl_models = _get_models(dlabel)
dts_models = _get_models(dtseries)
dlabel_list = []
dtseries_list = []
for k, v in dts_models.items():
if k not in dl_models.keys():
# dlabel does not have the brain model
continue
if dl_models[k]['count'] == v['count']:
# both are the same so doesnt matter if medial wall or not
dts_idx = np.arange(v['count']) + v['offset']
dl_idx = dts_idx
elif _has_medwall(dl_models[k]) and not _has_medwall(v):
# use dtseries vertices to index dlabel
dl_idx = v['indices'] + dl_models[k]['offset']
dts_idx = np.arange(v['count']) + v['offset']
elif _has_medwall(v) and not _has_medwall(dl_models[k]):
# use dlabel vertices to index dtseries
dts_idx = dl_models[k]['indices'] + v['offset']
dl_idx = np.arange(dl_models[k]['count']) + dl_models[k]['offset']
else:
# no medial wall in both but also not equal
raise ValueError('Cannot align dlabel with dtseries.')
dlabel_list.append(dlabel_data[dl_idx])
dtseries_list.append(dtseries_data[:, dts_idx])
return np.hstack(dlabel_list), np.hstack(dtseries_list)
class CiftiExtractor(BaseExtractor):
def __init__(self, fname, roi_file, as_vertices=False, pre_clean=False,
verbose=False, **kwargs):
"""Cifti extraction class
Parameters
----------
fname : str
Functional dtseries.nii file
roi_file : str
dlabel.nii file that identifies regions of interests. Can be an
atlas/parcellation with multiple regions, or a binary mask
as_vertices : bool, optional
Extract the individual vertex timeseries from a region. Only
possible when roi_file is a binary mask (single region), by
default False
pre_clean : bool, optional
Denoise data (e.g., filtering, confound regression) before
timeseries extraction. Otherwise, denoising is done on the
extracted timeseries, which is consistent with nilearn and is more
computationally efficient. By default False
verbose : bool, optional
Print out extraction timestamp, by default False
**kwargs
Arguments to pass to nilearn.signal.clean other than
confounds_regressors
"""
self.fname = fname
self.dtseries = _read_dtseries(fname)
self.roi_file = roi_file
self.dlabel, self.labels = _read_dlabel(roi_file)
self.as_vertices = as_vertices
self.pre_clean = pre_clean
self.verbose = verbose
self._clean_kwargs = kwargs
self.dlabel_array, self.darray = _load_and_align_ciftis(self.dlabel,
self.dtseries)
self.regressor_names = None
self.regressor_array = None
def discard_scans(self, n_scans):
"""Discard first N scans from data and regressors, if available
Parameters
----------
n_scans : int
Number of initial scans to remove
"""
self.darray = self.darray[n_scans:, :]
if self.regressor_array is not None:
self.regressor_array = self.regressor_array[n_scans:, :]
def extract(self):
"""Extract timeseries"""
self.show_extract_msg(self.fname)
tseries = mask_data(self.darray, self.dlabel_array,
self.regressor_array, self.as_vertices,
self.pre_clean, **self._clean_kwargs)
self.timeseries = label_timeseries(tseries, self.labels,
self.as_vertices)
# remove extracted background signal if any
if '???' in self.timeseries.columns:
self.timeseries = self.timeseries.drop('???', axis=1)
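# A minimal usage sketch (file paths are hypothetical; fname must be a
# .dtseries.nii file and roi_file a .dlabel.nii file, as enforced above):
def _cifti_extractor_example():
    extractor = CiftiExtractor(fname='sub-01_task-rest.dtseries.nii',
                             roi_file='atlas.dlabel.nii')
    extractor.discard_scans(5)   # drop the first 5 volumes
    extractor.extract()          # populates extractor.timeseries (labelled DataFrame)
    return extractor.timeseries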
|
[
"nibabel.load",
"numpy.asarray",
"numpy.hstack",
"numpy.arange",
"warnings.warn"
] |
[((230, 245), 'nibabel.load', 'nib.load', (['fname'], {}), '(fname)\n', (238, 245), True, 'import nibabel as nib\n'), ((3515, 3701), 'warnings.warn', 'warnings.warn', (['f"""dlabel has shape {dlabel.shape[1]} and dtseries has shape {dtseries.shape[1]}. Aligning files via brain structures present in each file. Double check results!"""'], {}), "(\n f'dlabel has shape {dlabel.shape[1]} and dtseries has shape {dtseries.shape[1]}. Aligning files via brain structures present in each file. Double check results!'\n )\n", (3528, 3701), False, 'import warnings\n'), ((1662, 1690), 'numpy.asarray', 'np.asarray', (['m.vertex_indices'], {}), '(m.vertex_indices)\n', (1672, 1690), True, 'import numpy as np\n'), ((1801, 1825), 'numpy.arange', 'np.arange', (['m.index_count'], {}), '(m.index_count)\n', (1810, 1825), True, 'import numpy as np\n'), ((5104, 5126), 'numpy.hstack', 'np.hstack', (['dlabel_list'], {}), '(dlabel_list)\n', (5113, 5126), True, 'import numpy as np\n'), ((5128, 5152), 'numpy.hstack', 'np.hstack', (['dtseries_list'], {}), '(dtseries_list)\n', (5137, 5152), True, 'import numpy as np\n'), ((4223, 4244), 'numpy.arange', 'np.arange', (["v['count']"], {}), "(v['count'])\n", (4232, 4244), True, 'import numpy as np\n'), ((4506, 4527), 'numpy.arange', 'np.arange', (["v['count']"], {}), "(v['count'])\n", (4515, 4527), True, 'import numpy as np\n'), ((4756, 4788), 'numpy.arange', 'np.arange', (["dl_models[k]['count']"], {}), "(dl_models[k]['count'])\n", (4765, 4788), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn
import numpy as np
'''
Snake Neural Network
A simple neural network used to play the snake game
Current inputs:
1. Distance Left wall
2. Distance Top Wall
3. Distance Right Wall
    4. Distance Bottom Wall
5. Distance Apple
6. Angle Apple
7. left blocked
8. up blocked
9. right blocked
    10. down blocked
'''
class NeuralNetwork:
def __init__(self, training_data, max_steps):
x = np.array([i[0] for i in training_data])
X = x.reshape(-1, 10, 1)
y = [i[1] for i in training_data]
input_size = len(X[0])
output_size = len(y[0])
network = input_data(shape=[None, input_size, 1], name='input')
network = tflearn.fully_connected(network, 32)
network = tflearn.fully_connected(network, 32)
network = fully_connected(network, output_size, activation='softmax')
network = regression(network, name='targets')
self.model = tflearn.DNN(network, tensorboard_verbose=1)
def train_model(self, training_data):
shape_second_parameter = len(training_data[0][0])
x = np.array([i[0] for i in training_data])
X = x.reshape(-1, shape_second_parameter, 1)
y = [i[1] for i in training_data]
self.model.fit({'input': X}, {'targets': y}, n_epoch=10, batch_size=16, show_metric=True)
self.model.save('miniskake_trained.tflearn')
def predict(self, training_data):
prediction = self.model.predict(training_data)
return np.argmax(prediction[0])
class Generation:
def __init__(self, population_size):
self.population_size = population_size
def evaluate(self):
for i in range(self.population_size):
print(i)
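# A minimal usage sketch (the training data below is random and purely illustrative:
# each sample pairs 10 input features with a one-hot vector over 4 actions):
def _snake_network_example():
    training_data = [[np.random.rand(10).tolist(),
                      np.eye(4)[np.random.randint(4)].tolist()]
                     for _ in range(64)]
    net = NeuralNetwork(training_data, max_steps=200)
    net.train_model(training_data)
    sample = np.array(training_data[0][0]).reshape(-1, 10, 1)
    return net.predict(sample)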
|
[
"tflearn.layers.estimator.regression",
"tflearn.layers.core.input_data",
"tflearn.fully_connected",
"numpy.argmax",
"tflearn.layers.core.fully_connected",
"tflearn.DNN",
"numpy.array"
] |
[((599, 638), 'numpy.array', 'np.array', (['[i[0] for i in training_data]'], {}), '([i[0] for i in training_data])\n', (607, 638), True, 'import numpy as np\n'), ((796, 849), 'tflearn.layers.core.input_data', 'input_data', ([], {'shape': '[None, input_size, 1]', 'name': '"""input"""'}), "(shape=[None, input_size, 1], name='input')\n", (806, 849), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((868, 904), 'tflearn.fully_connected', 'tflearn.fully_connected', (['network', '(32)'], {}), '(network, 32)\n', (891, 904), False, 'import tflearn\n'), ((923, 959), 'tflearn.fully_connected', 'tflearn.fully_connected', (['network', '(32)'], {}), '(network, 32)\n', (946, 959), False, 'import tflearn\n'), ((978, 1037), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', 'output_size'], {'activation': '"""softmax"""'}), "(network, output_size, activation='softmax')\n", (993, 1037), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((1056, 1091), 'tflearn.layers.estimator.regression', 'regression', (['network'], {'name': '"""targets"""'}), "(network, name='targets')\n", (1066, 1091), False, 'from tflearn.layers.estimator import regression\n'), ((1114, 1157), 'tflearn.DNN', 'tflearn.DNN', (['network'], {'tensorboard_verbose': '(1)'}), '(network, tensorboard_verbose=1)\n', (1125, 1157), False, 'import tflearn\n'), ((1271, 1310), 'numpy.array', 'np.array', (['[i[0] for i in training_data]'], {}), '([i[0] for i in training_data])\n', (1279, 1310), True, 'import numpy as np\n'), ((1668, 1692), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (1677, 1692), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import sys
from subprocess import CalledProcessError
from subprocess import Popen
from subprocess import check_call
import numpy as np
from parsable import parsable
from treecat.config import make_config
from treecat.format import pickle_dump
from treecat.format import pickle_load
from treecat.testutil import tempdir
PYTHON = sys.executable
FILE = os.path.abspath(__file__)
parsable = parsable.Parsable()
def check_call_env(cmd, env):
ret = Popen(cmd, env=env).wait()
if ret:
raise CalledProcessError(returncode=ret, cmd=cmd)
def run_with_tool(cmd, tool, dirname):
profile_path = os.path.join(dirname, 'profile_train.prof')
env = os.environ.copy()
env['TREECAT_THREADS'] = '1'
if tool == 'timers':
env.setdefault('TREECAT_PROFILE', '1')
env.setdefault('TREECAT_LOG_LEVEL', '20')
check_call_env([PYTHON, '-O'] + cmd, env)
elif tool == 'time':
if platform.platform().startswith('Darwin'):
gnu_time = 'gtime'
else:
gnu_time = '/usr/bin/time'
check_call_env([gnu_time, '-v', PYTHON, '-O'] + cmd, env)
elif tool == 'snakeviz':
check_call_env([PYTHON, '-m', 'cProfile', '-o', profile_path] + cmd,
env)
check_call(['snakeviz', profile_path])
elif tool == 'line_profiler':
check_call_env(['kernprof', '-l', '-v', '-o', profile_path] + cmd, env)
elif tool == 'pdb':
check_call_env([PYTHON, '-m', 'pdb'] + cmd, env)
else:
raise ValueError('Unknown tool: {}'.format(tool))
@parsable
def train_files(dataset_path, config_path):
"""INTERNAL Train from pickled dataset, config."""
from treecat.training import train_ensemble
dataset = pickle_load(dataset_path)
table = dataset['table']
V = table.num_cols
K = V * (V - 1) // 2
tree_prior = np.zeros(K, dtype=np.float32)
config = pickle_load(config_path)
train_ensemble(table, tree_prior, config)
@parsable
def serve_files(model_path, config_path, num_samples):
"""INTERNAL Serve from pickled model, config."""
from treecat.serving import TreeCatServer
import numpy as np
model = pickle_load(model_path)
config = pickle_load(config_path)
model['config'] = config
server = TreeCatServer(model)
counts = np.ones(model['tree'].num_vertices, np.int8)
samples = server.sample(int(num_samples), counts)
server.logprob(samples)
server.median(counts, samples)
server.latent_correlation()
@parsable
def train(rows=100,
cols=10,
epochs=5,
clusters=32,
parallel=False,
tool='timers'):
"""Profile TreeCatTrainer on a random dataset.
Available tools: timers, time, snakeviz, line_profiler, pdb
"""
from treecat.generate import generate_dataset_file
config = make_config(
learning_init_epochs=epochs,
model_num_clusters=clusters,
model_ensemble_size=1,
learning_parallel=parallel)
dataset_path = generate_dataset_file(rows, cols)
with tempdir() as dirname:
config_path = os.path.join(dirname, 'config.pkz')
pickle_dump(config, config_path)
cmd = [FILE, 'train_files', dataset_path, config_path]
run_with_tool(cmd, tool, dirname)
@parsable
def serve(rows=100, cols=10, cats=4, tool='timers'):
"""Profile TreeCatServer on a random dataset.
Available tools: timers, time, snakeviz, line_profiler, pdb
"""
from treecat.generate import generate_model_file
config = make_config()
model_path = generate_model_file(rows, cols, cats)
with tempdir() as dirname:
config_path = os.path.join(dirname, 'config.pkz')
pickle_dump(config, config_path)
cmd = [FILE, 'serve_files', model_path, config_path, str(rows)]
run_with_tool(cmd, tool, dirname)
@parsable
def eval(rows=100, cols=10, cats=4, tool='timers'):
"""Profile treecat.validate.eval on a random dataset.
Available tools: timers, time, snakeviz, line_profiler, pdb
"""
from treecat.generate import generate_dataset_file
from treecat.validate import train
dataset_path = generate_dataset_file(rows, cols)
validate_py = os.path.join(os.path.dirname(FILE), 'validate.py')
with tempdir() as dirname:
param_csv_path = os.path.join(dirname, 'param.csv')
with open(param_csv_path, 'w') as f:
f.write('learning_init_epochs\n2')
train(dataset_path, param_csv_path, dirname, learning_init_epochs=2)
cmd = [
validate_py,
'eval',
dataset_path,
param_csv_path,
dirname,
os.path.join(dirname, 'tuning.pkz'),
'learning_init_epochs=2',
]
run_with_tool(cmd, tool, dirname)
if __name__ == '__main__':
parsable()
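# Example invocations (the module filename is hypothetical; parsable is assumed to
# expose the functions above as subcommands with key=value keyword arguments, as in
# the 'learning_init_epochs=2' argument passed to validate.py in eval() above):
#   python profile_treecat.py train rows=1000 cols=20 tool=snakeviz
#   python profile_treecat.py serve rows=100 cols=10 cats=4 tool=timers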
|
[
"treecat.testutil.tempdir",
"os.environ.copy",
"numpy.ones",
"subprocess.CalledProcessError",
"treecat.training.train_ensemble",
"os.path.join",
"subprocess.check_call",
"os.path.abspath",
"treecat.format.pickle_dump",
"os.path.dirname",
"treecat.generate.generate_dataset_file",
"subprocess.Popen",
"treecat.validate.train",
"parsable.parsable",
"treecat.config.make_config",
"treecat.format.pickle_load",
"numpy.zeros",
"treecat.generate.generate_model_file",
"parsable.parsable.Parsable",
"platform.platform",
"treecat.serving.TreeCatServer"
] |
[((489, 514), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (504, 514), False, 'import os\n'), ((527, 546), 'parsable.parsable.Parsable', 'parsable.Parsable', ([], {}), '()\n', (544, 546), False, 'from parsable import parsable\n'), ((746, 789), 'os.path.join', 'os.path.join', (['dirname', '"""profile_train.prof"""'], {}), "(dirname, 'profile_train.prof')\n", (758, 789), False, 'import os\n'), ((800, 817), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (815, 817), False, 'import os\n'), ((1868, 1893), 'treecat.format.pickle_load', 'pickle_load', (['dataset_path'], {}), '(dataset_path)\n', (1879, 1893), False, 'from treecat.format import pickle_load\n'), ((1988, 2017), 'numpy.zeros', 'np.zeros', (['K'], {'dtype': 'np.float32'}), '(K, dtype=np.float32)\n', (1996, 2017), True, 'import numpy as np\n'), ((2031, 2055), 'treecat.format.pickle_load', 'pickle_load', (['config_path'], {}), '(config_path)\n', (2042, 2055), False, 'from treecat.format import pickle_load\n'), ((2060, 2101), 'treecat.training.train_ensemble', 'train_ensemble', (['table', 'tree_prior', 'config'], {}), '(table, tree_prior, config)\n', (2074, 2101), False, 'from treecat.training import train_ensemble\n'), ((2303, 2326), 'treecat.format.pickle_load', 'pickle_load', (['model_path'], {}), '(model_path)\n', (2314, 2326), False, 'from treecat.format import pickle_load\n'), ((2340, 2364), 'treecat.format.pickle_load', 'pickle_load', (['config_path'], {}), '(config_path)\n', (2351, 2364), False, 'from treecat.format import pickle_load\n'), ((2407, 2427), 'treecat.serving.TreeCatServer', 'TreeCatServer', (['model'], {}), '(model)\n', (2420, 2427), False, 'from treecat.serving import TreeCatServer\n'), ((2441, 2485), 'numpy.ones', 'np.ones', (["model['tree'].num_vertices", 'np.int8'], {}), "(model['tree'].num_vertices, np.int8)\n", (2448, 2485), True, 'import numpy as np\n'), ((2972, 3096), 'treecat.config.make_config', 'make_config', ([], {'learning_init_epochs': 'epochs', 'model_num_clusters': 'clusters', 'model_ensemble_size': '(1)', 'learning_parallel': 'parallel'}), '(learning_init_epochs=epochs, model_num_clusters=clusters,\n model_ensemble_size=1, learning_parallel=parallel)\n', (2983, 3096), False, 'from treecat.config import make_config\n'), ((3145, 3178), 'treecat.generate.generate_dataset_file', 'generate_dataset_file', (['rows', 'cols'], {}), '(rows, cols)\n', (3166, 3178), False, 'from treecat.generate import generate_dataset_file\n'), ((3667, 3680), 'treecat.config.make_config', 'make_config', ([], {}), '()\n', (3678, 3680), False, 'from treecat.config import make_config\n'), ((3698, 3735), 'treecat.generate.generate_model_file', 'generate_model_file', (['rows', 'cols', 'cats'], {}), '(rows, cols, cats)\n', (3717, 3735), False, 'from treecat.generate import generate_model_file\n'), ((4287, 4320), 'treecat.generate.generate_dataset_file', 'generate_dataset_file', (['rows', 'cols'], {}), '(rows, cols)\n', (4308, 4320), False, 'from treecat.generate import generate_dataset_file\n'), ((4958, 4968), 'parsable.parsable', 'parsable', ([], {}), '()\n', (4966, 4968), False, 'from parsable import parsable\n'), ((642, 685), 'subprocess.CalledProcessError', 'CalledProcessError', ([], {'returncode': 'ret', 'cmd': 'cmd'}), '(returncode=ret, cmd=cmd)\n', (660, 685), False, 'from subprocess import CalledProcessError\n'), ((3188, 3197), 'treecat.testutil.tempdir', 'tempdir', ([], {}), '()\n', (3195, 3197), False, 'from treecat.testutil import tempdir\n'), ((3232, 3267), 'os.path.join', 
'os.path.join', (['dirname', '"""config.pkz"""'], {}), "(dirname, 'config.pkz')\n", (3244, 3267), False, 'import os\n'), ((3276, 3308), 'treecat.format.pickle_dump', 'pickle_dump', (['config', 'config_path'], {}), '(config, config_path)\n', (3287, 3308), False, 'from treecat.format import pickle_dump\n'), ((3745, 3754), 'treecat.testutil.tempdir', 'tempdir', ([], {}), '()\n', (3752, 3754), False, 'from treecat.testutil import tempdir\n'), ((3789, 3824), 'os.path.join', 'os.path.join', (['dirname', '"""config.pkz"""'], {}), "(dirname, 'config.pkz')\n", (3801, 3824), False, 'import os\n'), ((3833, 3865), 'treecat.format.pickle_dump', 'pickle_dump', (['config', 'config_path'], {}), '(config, config_path)\n', (3844, 3865), False, 'from treecat.format import pickle_dump\n'), ((4352, 4373), 'os.path.dirname', 'os.path.dirname', (['FILE'], {}), '(FILE)\n', (4367, 4373), False, 'import os\n'), ((4399, 4408), 'treecat.testutil.tempdir', 'tempdir', ([], {}), '()\n', (4406, 4408), False, 'from treecat.testutil import tempdir\n'), ((4446, 4480), 'os.path.join', 'os.path.join', (['dirname', '"""param.csv"""'], {}), "(dirname, 'param.csv')\n", (4458, 4480), False, 'import os\n'), ((4581, 4649), 'treecat.validate.train', 'train', (['dataset_path', 'param_csv_path', 'dirname'], {'learning_init_epochs': '(2)'}), '(dataset_path, param_csv_path, dirname, learning_init_epochs=2)\n', (4586, 4649), False, 'from treecat.validate import train\n'), ((589, 608), 'subprocess.Popen', 'Popen', (['cmd'], {'env': 'env'}), '(cmd, env=env)\n', (594, 608), False, 'from subprocess import Popen\n'), ((4798, 4833), 'os.path.join', 'os.path.join', (['dirname', '"""tuning.pkz"""'], {}), "(dirname, 'tuning.pkz')\n", (4810, 4833), False, 'import os\n'), ((1393, 1431), 'subprocess.check_call', 'check_call', (["['snakeviz', profile_path]"], {}), "(['snakeviz', profile_path])\n", (1403, 1431), False, 'from subprocess import check_call\n'), ((1059, 1078), 'platform.platform', 'platform.platform', ([], {}), '()\n', (1076, 1078), False, 'import platform\n')]
|
'''This provides visualization tools for Keras.'''
import subprocess
import warnings
import numpy as np
from PIL import Image
from bokeh.plotting import (cursession, figure, output_server, show)
from keras.callbacks import Callback
from keras.preprocessing.image import load_img
class BokehCallback(Callback):
def __init__(self, predit_funct=None):
Callback.__init__(self)
# output_notebook()
self.loss = np.array([])
self.psnrs = np.array([])
output_server("line")
self.imagew = 512
self.min_loss = 10000
self.predit_funct = predit_funct
self.p = figure()
self.p2 = figure()
self.x = np.array([])
self.y = np.array([])
self.bx = np.array([])
self.by = np.array([])
self.cx = np.array([])
self.epochNo = 0
self.p.line(self.x, self.y, name='line', color="tomato", line_width=2)
self.p.line(self.bx, self.by, name='batch_line', color="blue", line_width=2)
self.p2.line(self.cx, self.psnrs, name='psnr', color="green", line_width=2)
show(self.p)
# show(self.p2)
# self.p2 = figure(x_range=[0, self.imagew], y_range=[0, self.imagew])
# self.p2.image_rgba(name='image', image=[np.array((self.imagew, self.imagew), dtype='uint32')], x=0, y=0, dw=self.imagew, dh=self.imagew)
# show(self.p2)
self.psnr = 0
def on_batch_end(self, batch, logs={}):
self.loss = np.append(self.loss, logs['loss'])
# renderer = self.p.select(dict(name="batch_line"))
# ds = renderer[0].data_source
# ds.data['x'] = self.bx
# ds.data['y'] = self.by
# ds.push_notebook()
def on_epoch_end(self, epoch, logs={}):
epoch = self.epochNo
self.x = np.append(self.x, epoch)
self.y = np.append(self.y, logs['val_loss'])
self.bx = np.append(self.bx, epoch)
self.by = np.append(self.by, self.loss.mean())
self.loss = np.array([])
self.cx = np.append(self.cx, epoch)
renderer = self.p.select(dict(name="line"))
ds = renderer[0].data_source
ds.data['x'] = self.x
ds.data['y'] = self.y
cursession().store_objects(ds)
# ds.push_notebook()
renderer = self.p.select(dict(name="batch_line"))
ds = renderer[0].data_source
ds.data['x'] = self.bx
ds.data['y'] = self.by
# ds.push_notebook()
cursession().store_objects(ds)
# if logs['val_loss'] < self.min_loss:
if self.predit_funct:
self.psnr = self.predit_funct(self.model, epoch)
print("psnr: {}".format(self.psnr))
self.psnrs = np.append(self.psnrs, self.psnr)
renderer = self.p2.select(dict(name="psnr"))
ds = renderer[0].data_source
ds.data['x'] = self.x
ds.data['y'] = self.psnrs
cursession().store_objects(ds)
self.min_loss = min(self.min_loss, logs['val_loss'])
self.epochNo += 1
class ModelPsnrCheckpoint(Callback):
def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, predict_funct=None):
super(Callback, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.best = -np.Inf
self.predict_funct = predict_funct
def on_epoch_end(self, epoch, logs={}):
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = self.predict_funct(self.model, epoch)
print("psnr: {}".format(current))
if current > self.best:
if self.verbose > 0:
print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.monitor, self.best, current, filepath))
self.best = current
self.model.save_weights(filepath, overwrite=True)
else:
if self.verbose > 0:
print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
else:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, filepath))
self.model.save_weights(filepath, overwrite=True)
class ModelBestCheckpoint(Callback):
def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, best=np.Inf):
super(Callback, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.best = best
def on_epoch_end(self, epoch, logs={}):
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
else:
if current < self.best:
if self.verbose > 0:
print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.monitor, self.best, current, filepath))
self.best = current
self.model.save_weights(filepath, overwrite=True)
else:
if self.verbose > 0:
print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
else:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, filepath))
self.model.save_weights(filepath, overwrite=True)
class ModelBestDictCheckpoint(Callback):
def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, model_dict=None):
super(Callback, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.model_dict = model_dict
self.best = model_dict.get('best', np.Inf)
self.viewer = None
self.no = 0
def on_epoch_end(self, epoch, logs={}):
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
else:
if current < self.best:
if self.verbose > 0:
print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.monitor, self.best, current, filepath))
self.best = current
self.model.save_weights(filepath, overwrite=True)
self.model_dict['best'] = current
img = load_img('/home/robin/test-deblur-test.png')
img = predict_base_image_channels_1(1, img, self.model, 'RGB')
if self.viewer:
close(self.viewer)
self.viewer = show_img(img, self.no)
self.no += 1
# img.save('/home/robin/test-deblur.png', 'PNG')
else:
if self.verbose > 0:
print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
else:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, filepath))
self.model.save_weights(filepath, overwrite=True)
def predict_base_image_channels_1(channels, img, model, mode, shape=None):
wx = model.layers[-1].output_shape[2]
wy = model.layers[-1].output_shape[3]
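    # The image is processed tile by tile: (wx, wy) is the model's output window,
    # the input window is read from layers[0], and edge tiles are zero-padded to full size.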
if mode == 'YCbCr':
img = img.convert('YCbCr')
img_ar = np.asarray(img, dtype='float32')
img_ar = img_ar.transpose(2, 1, 0)
full_time = 0
for y in range(0, img.height, wy):
for x in range(0, img.width, wx):
valid_x = model.layers[0].input_shape[2]
if x + valid_x > img.width:
valid_x = img.width - x
valid_y = model.layers[0].input_shape[3]
if y + valid_y > img.height:
valid_y = img.height - y
valid_x2 = wx
if x + valid_x2 > img.width:
valid_x2 = img.width - x
valid_y2 = wy
if y + valid_y2 > img.height:
valid_y2 = img.height - y
if channels == 3:
cropped_input = np.zeros((channels, model.layers[0].input_shape[2], model.layers[0].input_shape[3]), dtype='float32')
cropped_input[:, :valid_x, :valid_y] = img_ar[:, x:x+valid_x, y:y+valid_y]
preds = model.predict(np.array([cropped_input]))
preds = np.clip(preds, 0, 255)
img_ar[:, x:x+valid_x2, y:y+valid_y2] = preds[0][:, :valid_x, :valid_y]
else:
for c in range(0, 1 if mode == 'YCbCr' else 3):
cropped_input = np.zeros((1, model.layers[0].input_shape[2], model.layers[0].input_shape[3]), dtype='float32')
cropped_input[0, :valid_x, :valid_y] = img_ar[c, x:x+valid_x, y:y+valid_y]
if mode == 'YCbCr':
preds = model.predict(cropped_input.reshape((1, 1, cropped_input.shape[1], cropped_input.shape[2])))
else:
p = cropped_input[0]
preds = model.predict(p.reshape((1, 1, p.shape[0], p.shape[1])))
preds = np.clip(preds, 0, 255)
img_ar[c, x:x+valid_x2, y:y+valid_y2] = preds[0][0, :valid_x, :valid_y]
if mode == 'YCbCr':
result = img_ar.transpose(2, 1, 0).astype("uint8")
result = Image.fromarray(result[:, :, :], "YCbCr")
result = result.convert("RGB")
else:
# result = array_to_img(img_ar, scale=False)
img_ar = img_ar.transpose(2, 1, 0)
result = Image.fromarray(img_ar.astype("uint8"), "RGB")
return result
def show_img(img, no=0):
name = '/home/robin/test-blur6-{}.png'.format(str(no))
img.save(name)
viewer = subprocess.Popen(['shotwell', name])
return viewer
def close(viewer):
viewer.terminate()
viewer.kill()
|
[
"subprocess.Popen",
"bokeh.plotting.figure",
"bokeh.plotting.output_server",
"numpy.asarray",
"bokeh.plotting.cursession",
"numpy.zeros",
"numpy.clip",
"numpy.append",
"keras.preprocessing.image.load_img",
"keras.callbacks.Callback.__init__",
"numpy.array",
"bokeh.plotting.show",
"PIL.Image.fromarray",
"warnings.warn"
] |
[((7962, 7994), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': '"""float32"""'}), "(img, dtype='float32')\n", (7972, 7994), True, 'import numpy as np\n'), ((10344, 10380), 'subprocess.Popen', 'subprocess.Popen', (["['shotwell', name]"], {}), "(['shotwell', name])\n", (10360, 10380), False, 'import subprocess\n'), ((366, 389), 'keras.callbacks.Callback.__init__', 'Callback.__init__', (['self'], {}), '(self)\n', (383, 389), False, 'from keras.callbacks import Callback\n'), ((438, 450), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (446, 450), True, 'import numpy as np\n'), ((472, 484), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (480, 484), True, 'import numpy as np\n'), ((493, 514), 'bokeh.plotting.output_server', 'output_server', (['"""line"""'], {}), "('line')\n", (506, 514), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((629, 637), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (635, 637), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((656, 664), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (662, 664), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((682, 694), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (690, 694), True, 'import numpy as np\n'), ((712, 724), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (720, 724), True, 'import numpy as np\n'), ((743, 755), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (751, 755), True, 'import numpy as np\n'), ((774, 786), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (782, 786), True, 'import numpy as np\n'), ((805, 817), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (813, 817), True, 'import numpy as np\n'), ((1099, 1111), 'bokeh.plotting.show', 'show', (['self.p'], {}), '(self.p)\n', (1103, 1111), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((1473, 1507), 'numpy.append', 'np.append', (['self.loss', "logs['loss']"], {}), "(self.loss, logs['loss'])\n", (1482, 1507), True, 'import numpy as np\n'), ((1794, 1818), 'numpy.append', 'np.append', (['self.x', 'epoch'], {}), '(self.x, epoch)\n', (1803, 1818), True, 'import numpy as np\n'), ((1836, 1871), 'numpy.append', 'np.append', (['self.y', "logs['val_loss']"], {}), "(self.y, logs['val_loss'])\n", (1845, 1871), True, 'import numpy as np\n'), ((1890, 1915), 'numpy.append', 'np.append', (['self.bx', 'epoch'], {}), '(self.bx, epoch)\n', (1899, 1915), True, 'import numpy as np\n'), ((1991, 2003), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1999, 2003), True, 'import numpy as np\n'), ((2022, 2047), 'numpy.append', 'np.append', (['self.cx', 'epoch'], {}), '(self.cx, epoch)\n', (2031, 2047), True, 'import numpy as np\n'), ((9957, 9998), 'PIL.Image.fromarray', 'Image.fromarray', (['result[:, :, :]', '"""YCbCr"""'], {}), "(result[:, :, :], 'YCbCr')\n", (9972, 9998), False, 'from PIL import Image\n'), ((2702, 2734), 'numpy.append', 'np.append', (['self.psnrs', 'self.psnr'], {}), '(self.psnrs, self.psnr)\n', (2711, 2734), True, 'import numpy as np\n'), ((2205, 2217), 'bokeh.plotting.cursession', 'cursession', ([], {}), '()\n', (2215, 2217), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((2459, 2471), 'bokeh.plotting.cursession', 'cursession', ([], {}), '()\n', (2469, 2471), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((4927, 5032), 'warnings.warn', 'warnings.warn', (["('Can save best model only with %s available, skipping.' 
% self.monitor)", 'RuntimeWarning'], {}), "('Can save best model only with %s available, skipping.' %\n self.monitor, RuntimeWarning)\n", (4940, 5032), False, 'import warnings\n'), ((6452, 6557), 'warnings.warn', 'warnings.warn', (["('Can save best model only with %s available, skipping.' % self.monitor)", 'RuntimeWarning'], {}), "('Can save best model only with %s available, skipping.' %\n self.monitor, RuntimeWarning)\n", (6465, 6557), False, 'import warnings\n'), ((8685, 8791), 'numpy.zeros', 'np.zeros', (['(channels, model.layers[0].input_shape[2], model.layers[0].input_shape[3])'], {'dtype': '"""float32"""'}), "((channels, model.layers[0].input_shape[2], model.layers[0].\n input_shape[3]), dtype='float32')\n", (8693, 8791), True, 'import numpy as np\n'), ((8967, 8989), 'numpy.clip', 'np.clip', (['preds', '(0)', '(255)'], {}), '(preds, 0, 255)\n', (8974, 8989), True, 'import numpy as np\n'), ((2917, 2929), 'bokeh.plotting.cursession', 'cursession', ([], {}), '()\n', (2927, 2929), False, 'from bokeh.plotting import cursession, figure, output_server, show\n'), ((7026, 7070), 'keras.preprocessing.image.load_img', 'load_img', (['"""/home/robin/test-deblur-test.png"""'], {}), "('/home/robin/test-deblur-test.png')\n", (7034, 7070), False, 'from keras.preprocessing.image import load_img\n'), ((8916, 8941), 'numpy.array', 'np.array', (['[cropped_input]'], {}), '([cropped_input])\n', (8924, 8941), True, 'import numpy as np\n'), ((9197, 9296), 'numpy.zeros', 'np.zeros', (['(1, model.layers[0].input_shape[2], model.layers[0].input_shape[3])'], {'dtype': '"""float32"""'}), "((1, model.layers[0].input_shape[2], model.layers[0].input_shape[3]\n ), dtype='float32')\n", (9205, 9296), True, 'import numpy as np\n'), ((9741, 9763), 'numpy.clip', 'np.clip', (['preds', '(0)', '(255)'], {}), '(preds, 0, 255)\n', (9748, 9763), True, 'import numpy as np\n')]
|
# run with
# `mpirun -n n_proc python3 multi_send-recv.py int_flag`
# int_flag > 0 - use non-blocking recv
# int_flag = 0 - use blocking recv
import sys
import numpy as np
from mpi4py import MPI
def overlap_map(N, n):
"""
Returns the structured array of id pairs and labels.
Labels characterize the overlap position of two regions
specified by an id pair
"""
# neighbors below
I = np.arange(N-n)
pairs = np.column_stack((I, I+n))
labels = np.full(N-n, 'bottom')
# neighbors on the right
I = np.arange(N).reshape(n, n)[:, :-1].flatten()
new_pairs = np.column_stack((I, I+1))
new_labels = np.full(N-n, 'right')
pairs = np.row_stack((pairs, new_pairs))
labels = np.r_[labels, new_labels]
# neighbors on the right, below
I = np.arange(N-n).reshape(n-1, n)[:, :-1].flatten()
new_pairs = np.column_stack((I, I+n+1))
new_labels = np.full(N-2*n+1, 'bot-right')
pairs = np.row_stack((pairs, new_pairs))
labels = np.r_[labels, new_labels]
# neighbors on the left, below
I = np.arange(N-n).reshape(n-1, n)[:, 1:].flatten()
new_pairs = np.column_stack((I, I+n-1))
new_labels = np.full(N-2*n+1, 'bot-left')
pairs = np.row_stack((pairs, new_pairs))
labels = np.r_[labels, new_labels]
    # labels are kept short ('bot-right') so they fit the 'U9' dtype; 'bottom-right' would need 'U12'
overlaps = np.rec.fromarrays([pairs, labels],
[('ids', 'u4', (2,)), ('pos', 'U9')])
return overlaps
## <imports~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
non_blocking_recv = (int(sys.argv[1]) > 0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if size > 9:
print('set no more than 9 processes!')
sys.exit(1)
overlaps = overlap_map(9, 3)
size_mask = (overlaps.ids >= size).sum(1).astype(bool)
overlaps = overlaps[~size_mask]
mask_in = (overlaps.ids[:, 0] == rank)
mask_out = (overlaps.ids[:, 1] == rank)
send_to = overlaps[mask_out].ids[:, 0]
recv_from = overlaps[mask_in].ids[:, 1]
pack_size = 1000
data = np.full(pack_size, rank, 'f8')
print(f'info++++>{rank}')
print('send to:', send_to)
print('recv from:', recv_from)
print(f'{rank}<++++info')
t = -MPI.Wtime()  # negate the start time; adding the end time below gives the elapsed time
# ----------- Measuring exec time from here --------------
for dest in send_to:
comm.Isend([data, MPI.DOUBLE], dest)
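# The Isend requests are not stored or waited on here; the send buffer `data` is never
# modified afterwards, so this is acceptable for this benchmark.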
recvbuf = np.empty((len(recv_from), pack_size))
reqs = [None] * len(recv_from)
if non_blocking_recv:
method = getattr(comm, 'Irecv')
if rank == 0: print('\n\nNON-BLOCKING')
else:
method = getattr(comm, 'Recv')
if rank == 0: print('\n\nBLOCKING')
for i, src in enumerate(recv_from):
reqs[i] = method(recvbuf[i], src)
## >>> This segment is only necessary for Irecv procedure
if non_blocking_recv:
for req in reqs:
req.wait()
## <<<
# ----------- Measuring exec time to here --------------
t += MPI.Wtime()
res = comm.reduce(t, op=MPI.MAX, root=0)
if rank == 0: print(f'EXEC TIME: {res}\n\n')
print()
# The next line prints execution time of every process calls it
# print(f'finished in {t:.3} s')
print(f'res---->{rank}')
print(recvbuf)
print(f'{rank}<----res')
MPI.Finalize()
|
[
"numpy.full",
"mpi4py.MPI.Finalize",
"mpi4py.MPI.Wtime",
"numpy.rec.fromarrays",
"numpy.arange",
"numpy.row_stack",
"numpy.column_stack",
"sys.exit"
] |
[((1974, 2004), 'numpy.full', 'np.full', (['pack_size', 'rank', '"""f8"""'], {}), "(pack_size, rank, 'f8')\n", (1981, 2004), True, 'import numpy as np\n'), ((2791, 2802), 'mpi4py.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (2800, 2802), False, 'from mpi4py import MPI\n'), ((3061, 3075), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (3073, 3075), False, 'from mpi4py import MPI\n'), ((413, 429), 'numpy.arange', 'np.arange', (['(N - n)'], {}), '(N - n)\n', (422, 429), True, 'import numpy as np\n'), ((440, 467), 'numpy.column_stack', 'np.column_stack', (['(I, I + n)'], {}), '((I, I + n))\n', (455, 467), True, 'import numpy as np\n'), ((479, 503), 'numpy.full', 'np.full', (['(N - n)', '"""bottom"""'], {}), "(N - n, 'bottom')\n", (486, 503), True, 'import numpy as np\n'), ((601, 628), 'numpy.column_stack', 'np.column_stack', (['(I, I + 1)'], {}), '((I, I + 1))\n', (616, 628), True, 'import numpy as np\n'), ((644, 667), 'numpy.full', 'np.full', (['(N - n)', '"""right"""'], {}), "(N - n, 'right')\n", (651, 667), True, 'import numpy as np\n'), ((678, 710), 'numpy.row_stack', 'np.row_stack', (['(pairs, new_pairs)'], {}), '((pairs, new_pairs))\n', (690, 710), True, 'import numpy as np\n'), ((860, 891), 'numpy.column_stack', 'np.column_stack', (['(I, I + n + 1)'], {}), '((I, I + n + 1))\n', (875, 891), True, 'import numpy as np\n'), ((905, 940), 'numpy.full', 'np.full', (['(N - 2 * n + 1)', '"""bot-right"""'], {}), "(N - 2 * n + 1, 'bot-right')\n", (912, 940), True, 'import numpy as np\n'), ((947, 979), 'numpy.row_stack', 'np.row_stack', (['(pairs, new_pairs)'], {}), '((pairs, new_pairs))\n', (959, 979), True, 'import numpy as np\n'), ((1127, 1158), 'numpy.column_stack', 'np.column_stack', (['(I, I + n - 1)'], {}), '((I, I + n - 1))\n', (1142, 1158), True, 'import numpy as np\n'), ((1172, 1206), 'numpy.full', 'np.full', (['(N - 2 * n + 1)', '"""bot-left"""'], {}), "(N - 2 * n + 1, 'bot-left')\n", (1179, 1206), True, 'import numpy as np\n'), ((1213, 1245), 'numpy.row_stack', 'np.row_stack', (['(pairs, new_pairs)'], {}), '((pairs, new_pairs))\n', (1225, 1245), True, 'import numpy as np\n'), ((1329, 1401), 'numpy.rec.fromarrays', 'np.rec.fromarrays', (['[pairs, labels]', "[('ids', 'u4', (2,)), ('pos', 'U9')]"], {}), "([pairs, labels], [('ids', 'u4', (2,)), ('pos', 'U9')])\n", (1346, 1401), True, 'import numpy as np\n'), ((1661, 1672), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1669, 1672), False, 'import sys\n'), ((2122, 2133), 'mpi4py.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (2131, 2133), False, 'from mpi4py import MPI\n'), ((540, 552), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (549, 552), True, 'import numpy as np\n'), ((795, 811), 'numpy.arange', 'np.arange', (['(N - n)'], {}), '(N - n)\n', (804, 811), True, 'import numpy as np\n'), ((1063, 1079), 'numpy.arange', 'np.arange', (['(N - n)'], {}), '(N - n)\n', (1072, 1079), True, 'import numpy as np\n')]
|
import numpy as np
class normalizer:
def __init__(self, size, eps=1e-2, default_clip_range=np.inf):
self.size = size
self.eps = eps
self.default_clip_range = default_clip_range
# some local information
self.local_sum = np.zeros(self.size, np.float32)
self.local_sumsq = np.zeros(self.size, np.float32)
self.local_count = np.zeros(1, np.float32)
# get the total sum sumsq and sum count
self.total_sum = np.zeros(self.size, np.float32)
self.total_sumsq = np.zeros(self.size, np.float32)
self.total_count = np.ones(1, np.float32)
# get the mean and std
self.mean = np.zeros(self.size, np.float32)
self.std = np.ones(self.size, np.float32)
# thread locker
# update the parameters of the normalizer
def update(self, v):
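        # v may have any leading shape; flatten it to (-1, size) and accumulate sums, squared sums and counts.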
v = v.reshape(-1, self.size)
self.local_sum += v.sum(axis=0)
self.local_sumsq += (np.square(v)).sum(axis=0)
self.local_count[0] += v.shape[0]
def recompute_stats(self):
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
# reset
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
# update the total stuff
self.total_sum += local_sum
self.total_sumsq += local_sumsq
self.total_count += local_count
# calculate the new mean and std
self.mean = self.total_sum / self.total_count
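        # eps**2 acts as a variance floor so the std computed below never becomes zero.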
self.std = np.sqrt(np.maximum(np.square(self.eps), (self.total_sumsq / self.total_count) - np.square(self.total_sum / self.total_count)))
# normalize the observation
def normalize(self, v, clip_range=None):
#print('now normalize', v)
if clip_range is None:
clip_range = self.default_clip_range
#print((v - self.mean) / (self.std))
return np.clip((v - self.mean) / (self.std), -clip_range, clip_range)
|
[
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.clip"
] |
[((264, 295), 'numpy.zeros', 'np.zeros', (['self.size', 'np.float32'], {}), '(self.size, np.float32)\n', (272, 295), True, 'import numpy as np\n'), ((323, 354), 'numpy.zeros', 'np.zeros', (['self.size', 'np.float32'], {}), '(self.size, np.float32)\n', (331, 354), True, 'import numpy as np\n'), ((382, 405), 'numpy.zeros', 'np.zeros', (['(1)', 'np.float32'], {}), '(1, np.float32)\n', (390, 405), True, 'import numpy as np\n'), ((479, 510), 'numpy.zeros', 'np.zeros', (['self.size', 'np.float32'], {}), '(self.size, np.float32)\n', (487, 510), True, 'import numpy as np\n'), ((538, 569), 'numpy.zeros', 'np.zeros', (['self.size', 'np.float32'], {}), '(self.size, np.float32)\n', (546, 569), True, 'import numpy as np\n'), ((597, 619), 'numpy.ones', 'np.ones', (['(1)', 'np.float32'], {}), '(1, np.float32)\n', (604, 619), True, 'import numpy as np\n'), ((671, 702), 'numpy.zeros', 'np.zeros', (['self.size', 'np.float32'], {}), '(self.size, np.float32)\n', (679, 702), True, 'import numpy as np\n'), ((722, 752), 'numpy.ones', 'np.ones', (['self.size', 'np.float32'], {}), '(self.size, np.float32)\n', (729, 752), True, 'import numpy as np\n'), ((1947, 2007), 'numpy.clip', 'np.clip', (['((v - self.mean) / self.std)', '(-clip_range)', 'clip_range'], {}), '((v - self.mean) / self.std, -clip_range, clip_range)\n', (1954, 2007), True, 'import numpy as np\n'), ((955, 967), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (964, 967), True, 'import numpy as np\n'), ((1587, 1606), 'numpy.square', 'np.square', (['self.eps'], {}), '(self.eps)\n', (1596, 1606), True, 'import numpy as np\n'), ((1648, 1692), 'numpy.square', 'np.square', (['(self.total_sum / self.total_count)'], {}), '(self.total_sum / self.total_count)\n', (1657, 1692), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import shutil
import os
# Suppress TensorFlow internal logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
import reporter as rpt
import time
# Tunable Variables
EPOCHS = 100
DATASET_FRACTION = 0.995 # Use 99.5% for training, 0.5% for testing
DIRECOTRY = "lpf_05_DNNSingle_Test_v2"
MODELS = f"{DIRECOTRY}/models"
REPORTS = f"{DIRECOTRY}/reports"
DATA = f"{DIRECOTRY}/reports/data"
PLOTS = f"{DIRECOTRY}/reports/plots"
# Configure Test Folder
if os.path.exists(DIRECOTRY):
shutil.rmtree(DIRECOTRY)
# Configure the reporting folders
os.mkdir(DIRECOTRY)
os.mkdir(MODELS)
os.mkdir(REPORTS)
os.mkdir(DATA)
os.mkdir(PLOTS)
print("Directory {} created".format(DIRECOTRY))
# Print some Tensorflow diagnostics stuff
print("\nTensorflow Version: {}\n".format(tf.__version__))
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
tf.debugging.set_log_device_placement(True)
path = "weights_dataset.csv"
column_names = ['estimated_power', 'actual_position', 'position_error', 'output_force', 'weight']
raw_dataset = pd.read_csv(path, names=column_names, low_memory=False)
dataset = raw_dataset.copy()
print("\nDataset Head:")
print(dataset.head())
print("\nTotal Rows of Data: {}\n".format(len(dataset.index)))
print("\nNaN Evaluation:")
print(dataset.isna().sum())
print("\n\nSplitting Dataset...")
train_dataset = dataset.sample(frac=DATASET_FRACTION, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# Diagnostics Plot
#sns.pairplot(train_dataset[['estimated_power', 'actual_position', 'position_error', 'output_force']], diag_kind='kde')
print("\n\nTransposing Dataset - Overall Statistics:")
print(train_dataset.describe().transpose())
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('weight')
test_labels = test_features.pop('weight')
# Normalize the data - Sample of how it looks
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))
# Print demo for normalized values
#first = np.array(train_features[:1])
#with np.printoptions(precision=2, suppress=True):
# print('\nFirst example:', first)
# print()
# print('\nNormalized:', normalizer(first).numpy())
# Normalize Output Force
outputforce = np.array(train_features['output_force'])
outputforce_normalizer = preprocessing.Normalization(input_shape=[1,])
outputforce_normalizer.adapt(outputforce)
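# adapt() estimates the mean/variance of output_force so the Normalization layer can standardize inputs.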
# Save the test results for later
test_results = {}
# determine the number of input features
n_features = train_features.shape[1]
print(f'Number of features: {n_features}')
################################
####### Detect Hardware ########
################################
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection
except ValueError:
tpu = None
gpus = tf.config.list_logical_devices("GPU")
# Select appropriate distribution strategy for hardware
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
elif len(gpus) > 0:
strategy = tf.distribute.MirroredStrategy(gpus) # this works for 1 to multiple GPUs
print('Running on ', len(gpus), ' GPU(s) ')
else:
strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
print('Running on CPU')
# How many accelerators do we have ?
print("Number of accelerators: ", strategy.num_replicas_in_sync)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
##### Single Variable DNN Regression #####
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
def single_dnn():
with strategy.scope():
# define model
model = keras.Sequential()
model.add(layers.Dense(64, activation='relu', kernel_initializer='he_normal', input_shape=(1,), name="layer1"))
model.add(layers.Dense(64, activation='relu', kernel_initializer='he_normal', name="layer2"))
model.add(layers.Dense(1))
# compile the model
model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss='mean_absolute_error')
print("\nSingle-Variable DNN Regression Model Details:\n")
print(model.summary())
# fit the model
history = model.fit(train_features['output_force'], train_labels, epochs=EPOCHS, verbose=1, validation_split=0.2) # batch_size=32
# Save the model for later
test_results['single_dnn_model'] = model.evaluate(test_features['output_force'], test_labels, verbose=0)
# Plot the model loss
rpt.plot_loss(history, f'{PLOTS}/SingleVarDNN_Training.png')
        # Export some rough prediction results to CSV
single_dnn_model_prediction = model.predict(test_features['output_force'])
rpt.csv_prediction_export(single_dnn_model_prediction, test_labels, f'{DATA}/SingleVarDNN_Predictions.csv')
# Plot the predictions
single_dnn_model_prediction = model.predict(test_features['output_force']).flatten()
rpt.plot_predict(single_dnn_model_prediction, test_labels, f'{PLOTS}/SingleVarDNN_Predictions.png')
# Plot the error
rpt.plot_predict_error(single_dnn_model_prediction, test_labels, f'{PLOTS}/SingleVarDNN_Error.png')
# Export the model
model.save(f'{MODELS}/single_dnn_model', overwrite=True, include_optimizer=False, save_format='tf')
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
##### Single Variable DNN Regression #####
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
def single_dnn_normalized():
with strategy.scope():
# define model
model = keras.Sequential()
model.add(layers.Dense(64, activation='relu', kernel_initializer='he_normal', input_shape=(1,), name="layer1"))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu', kernel_initializer='he_normal', name="layer2"))
#model.add(layers.Dense(1, activation='sigmoid'))
model.add(layers.Dense(1))
# compile the model
model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss='mean_absolute_error')
print("\nSingle-Variable DNN Regression Model Details:\n")
print(model.summary())
# configure early stopping
es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
# fit the model
history = model.fit(train_features['output_force'], train_labels, epochs=EPOCHS, verbose=1, validation_split=0.2, callbacks=[es]) # batch_size=32
# Save the model for later
test_results['single_dnn_norm_model'] = model.evaluate(test_features['output_force'], test_labels, verbose=0)
# Plot the model loss
rpt.plot_loss(history, f'{PLOTS}/SingleVarDNN_Norm_Training.png')
        # Export some rough prediction results to CSV
single_dnn_model_prediction = model.predict(test_features['output_force'])
rpt.csv_prediction_export(single_dnn_model_prediction, test_labels, f'{DATA}/SingleVarDNN_Norm_Predictions.csv')
# Plot the predictions
single_dnn_model_prediction = model.predict(test_features['output_force']).flatten()
rpt.plot_predict(single_dnn_model_prediction, test_labels, f'{PLOTS}/SingleVarDNN_Norm_Predictions.png')
# Plot the error
rpt.plot_predict_error(single_dnn_model_prediction, test_labels, f'{PLOTS}/SingleVarDNN_Norm_Error.png')
# Export the model
model.save(f'{MODELS}/single_dnn_normalized', overwrite=True, include_optimizer=False, save_format='tf')
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
##### Mutli Variable DNN Regression ######
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
def multi_dnn():
with strategy.scope():
# define model
model = keras.Sequential()
model.add(layers.Dense(64, activation='relu', kernel_initializer='he_normal', input_shape=(n_features,)))
model.add(layers.Dense(64, activation='relu', kernel_initializer='he_normal'))
model.add(layers.Dense(32, activation='relu', kernel_initializer='he_normal'))
model.add(layers.Dense(16, activation='relu', kernel_initializer='he_normal'))
model.add(layers.Dense(8, activation='relu', kernel_initializer='he_normal'))
model.add(layers.Dense(1))
# compile the model
model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss='mean_absolute_error')
print("\nMulti-Variable DNN Regression Model Details:\n")
print(model.summary())
# fit the model
history = model.fit(train_features, train_labels, epochs=EPOCHS, verbose=1, validation_split=0.2) # batch_size=32
# Save the model for later
test_results['multi_dnn_model'] = model.evaluate(test_features, test_labels, verbose=0)
# Plot the model loss
rpt.plot_loss(history, f'{PLOTS}/MultiVarDNN_Training.png')
# list all data in history
#print("History")
#print(history.history.keys())
        # Export some rough prediction results to CSV
multi_dnn_model_prediction = model.predict(test_features)
rpt.csv_prediction_export(multi_dnn_model_prediction, test_labels, f'{DATA}/MultiVarDNN_Predictions.csv')
# Plot the predictions
multi_dnn_model_prediction = model.predict(test_features).flatten()
rpt.plot_predict(multi_dnn_model_prediction, test_labels, f'{PLOTS}/MultiVarDNN_Predictions.png')
# Plot the error
rpt.plot_predict_error(multi_dnn_model_prediction, test_labels, f'{PLOTS}/MultiVarDNN_Error.png')
# Export the model
model.save(f'{MODELS}/multi_dnn_model', overwrite=True, include_optimizer=False, save_format='tf')#.save(f'{MODELS}/multi_dnn_model')
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
############## Test Reports ##############
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
def print_evaluation():
print("\nAll Training Results:\n")
results = pd.DataFrame(test_results, index=['Mean absolute error [Weight]']).T
print(results)
results.to_csv(f'{DATA}/Model_Evaluation.csv')
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
############## MAIN Section ##############
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
t0 = time.time()
#single_regression()
#mutli_regression()
#single_dnn()
single_dnn_normalized()
#multi_dnn()
print_evaluation()
t1 = time.time()
total_time = t1-t0
print("\nProgram Complete - Total Processing Time (s): {}".format(total_time))
|
[
"os.mkdir",
"tensorflow.distribute.get_strategy",
"pandas.read_csv",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"shutil.rmtree",
"tensorflow.keras.callbacks.EarlyStopping",
"pandas.DataFrame",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.config.list_logical_devices",
"os.path.exists",
"tensorflow.keras.optimizers.Adam",
"reporter.csv_prediction_export",
"tensorflow.distribute.TPUStrategy",
"tensorflow.debugging.set_log_device_placement",
"reporter.plot_loss",
"tensorflow.tpu.experimental.initialize_tpu_system",
"reporter.plot_predict_error",
"tensorflow.config.list_physical_devices",
"tensorflow.distribute.MirroredStrategy",
"time.time",
"tensorflow.keras.layers.experimental.preprocessing.Normalization",
"numpy.array",
"tensorflow.config.experimental_connect_to_cluster",
"reporter.plot_predict"
] |
[((692, 717), 'os.path.exists', 'os.path.exists', (['DIRECOTRY'], {}), '(DIRECOTRY)\n', (706, 717), False, 'import os\n'), ((781, 800), 'os.mkdir', 'os.mkdir', (['DIRECOTRY'], {}), '(DIRECOTRY)\n', (789, 800), False, 'import os\n'), ((801, 817), 'os.mkdir', 'os.mkdir', (['MODELS'], {}), '(MODELS)\n', (809, 817), False, 'import os\n'), ((819, 836), 'os.mkdir', 'os.mkdir', (['REPORTS'], {}), '(REPORTS)\n', (827, 836), False, 'import os\n'), ((838, 852), 'os.mkdir', 'os.mkdir', (['DATA'], {}), '(DATA)\n', (846, 852), False, 'import os\n'), ((855, 870), 'os.mkdir', 'os.mkdir', (['PLOTS'], {}), '(PLOTS)\n', (863, 870), False, 'import os\n'), ((1099, 1142), 'tensorflow.debugging.set_log_device_placement', 'tf.debugging.set_log_device_placement', (['(True)'], {}), '(True)\n', (1136, 1142), True, 'import tensorflow as tf\n'), ((1287, 1342), 'pandas.read_csv', 'pd.read_csv', (['path'], {'names': 'column_names', 'low_memory': '(False)'}), '(path, names=column_names, low_memory=False)\n', (1298, 1342), True, 'import pandas as pd\n'), ((2158, 2187), 'tensorflow.keras.layers.experimental.preprocessing.Normalization', 'preprocessing.Normalization', ([], {}), '()\n', (2185, 2187), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), ((2497, 2537), 'numpy.array', 'np.array', (["train_features['output_force']"], {}), "(train_features['output_force'])\n", (2505, 2537), True, 'import numpy as np\n'), ((2563, 2607), 'tensorflow.keras.layers.experimental.preprocessing.Normalization', 'preprocessing.Normalization', ([], {'input_shape': '[1]'}), '(input_shape=[1])\n', (2590, 2607), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), ((10420, 10431), 'time.time', 'time.time', ([], {}), '()\n', (10429, 10431), False, 'import time\n'), ((10552, 10563), 'time.time', 'time.time', ([], {}), '()\n', (10561, 10563), False, 'import time\n'), ((720, 744), 'shutil.rmtree', 'shutil.rmtree', (['DIRECOTRY'], {}), '(DIRECOTRY)\n', (733, 744), False, 'import shutil\n'), ((2205, 2229), 'numpy.array', 'np.array', (['train_features'], {}), '(train_features)\n', (2213, 2229), True, 'import numpy as np\n'), ((2940, 2991), 'tensorflow.distribute.cluster_resolver.TPUClusterResolver', 'tf.distribute.cluster_resolver.TPUClusterResolver', ([], {}), '()\n', (2989, 2991), True, 'import tensorflow as tf\n'), ((3158, 3204), 'tensorflow.config.experimental_connect_to_cluster', 'tf.config.experimental_connect_to_cluster', (['tpu'], {}), '(tpu)\n', (3199, 3204), True, 'import tensorflow as tf\n'), ((3207, 3253), 'tensorflow.tpu.experimental.initialize_tpu_system', 'tf.tpu.experimental.initialize_tpu_system', (['tpu'], {}), '(tpu)\n', (3248, 3253), True, 'import tensorflow as tf\n'), ((3267, 3297), 'tensorflow.distribute.TPUStrategy', 'tf.distribute.TPUStrategy', (['tpu'], {}), '(tpu)\n', (3292, 3297), True, 'import tensorflow as tf\n'), ((4795, 4855), 'reporter.plot_loss', 'rpt.plot_loss', (['history', 'f"""{PLOTS}/SingleVarDNN_Training.png"""'], {}), "(history, f'{PLOTS}/SingleVarDNN_Training.png')\n", (4808, 4855), True, 'import reporter as rpt\n'), ((4984, 5095), 'reporter.csv_prediction_export', 'rpt.csv_prediction_export', (['single_dnn_model_prediction', 'test_labels', 'f"""{DATA}/SingleVarDNN_Predictions.csv"""'], {}), "(single_dnn_model_prediction, test_labels,\n f'{DATA}/SingleVarDNN_Predictions.csv')\n", (5009, 5095), True, 'import reporter as rpt\n'), ((5206, 5309), 'reporter.plot_predict', 'rpt.plot_predict', (['single_dnn_model_prediction', 'test_labels', 
'f"""{PLOTS}/SingleVarDNN_Predictions.png"""'], {}), "(single_dnn_model_prediction, test_labels,\n f'{PLOTS}/SingleVarDNN_Predictions.png')\n", (5222, 5309), True, 'import reporter as rpt\n'), ((5327, 5430), 'reporter.plot_predict_error', 'rpt.plot_predict_error', (['single_dnn_model_prediction', 'test_labels', 'f"""{PLOTS}/SingleVarDNN_Error.png"""'], {}), "(single_dnn_model_prediction, test_labels,\n f'{PLOTS}/SingleVarDNN_Error.png')\n", (5349, 5430), True, 'import reporter as rpt\n'), ((6817, 6882), 'reporter.plot_loss', 'rpt.plot_loss', (['history', 'f"""{PLOTS}/SingleVarDNN_Norm_Training.png"""'], {}), "(history, f'{PLOTS}/SingleVarDNN_Norm_Training.png')\n", (6830, 6882), True, 'import reporter as rpt\n'), ((7011, 7127), 'reporter.csv_prediction_export', 'rpt.csv_prediction_export', (['single_dnn_model_prediction', 'test_labels', 'f"""{DATA}/SingleVarDNN_Norm_Predictions.csv"""'], {}), "(single_dnn_model_prediction, test_labels,\n f'{DATA}/SingleVarDNN_Norm_Predictions.csv')\n", (7036, 7127), True, 'import reporter as rpt\n'), ((7238, 7346), 'reporter.plot_predict', 'rpt.plot_predict', (['single_dnn_model_prediction', 'test_labels', 'f"""{PLOTS}/SingleVarDNN_Norm_Predictions.png"""'], {}), "(single_dnn_model_prediction, test_labels,\n f'{PLOTS}/SingleVarDNN_Norm_Predictions.png')\n", (7254, 7346), True, 'import reporter as rpt\n'), ((7364, 7472), 'reporter.plot_predict_error', 'rpt.plot_predict_error', (['single_dnn_model_prediction', 'test_labels', 'f"""{PLOTS}/SingleVarDNN_Norm_Error.png"""'], {}), "(single_dnn_model_prediction, test_labels,\n f'{PLOTS}/SingleVarDNN_Norm_Error.png')\n", (7386, 7472), True, 'import reporter as rpt\n'), ((8930, 8989), 'reporter.plot_loss', 'rpt.plot_loss', (['history', 'f"""{PLOTS}/MultiVarDNN_Training.png"""'], {}), "(history, f'{PLOTS}/MultiVarDNN_Training.png')\n", (8943, 8989), True, 'import reporter as rpt\n'), ((9193, 9302), 'reporter.csv_prediction_export', 'rpt.csv_prediction_export', (['multi_dnn_model_prediction', 'test_labels', 'f"""{DATA}/MultiVarDNN_Predictions.csv"""'], {}), "(multi_dnn_model_prediction, test_labels,\n f'{DATA}/MultiVarDNN_Predictions.csv')\n", (9218, 9302), True, 'import reporter as rpt\n'), ((9400, 9501), 'reporter.plot_predict', 'rpt.plot_predict', (['multi_dnn_model_prediction', 'test_labels', 'f"""{PLOTS}/MultiVarDNN_Predictions.png"""'], {}), "(multi_dnn_model_prediction, test_labels,\n f'{PLOTS}/MultiVarDNN_Predictions.png')\n", (9416, 9501), True, 'import reporter as rpt\n'), ((9521, 9622), 'reporter.plot_predict_error', 'rpt.plot_predict_error', (['multi_dnn_model_prediction', 'test_labels', 'f"""{PLOTS}/MultiVarDNN_Error.png"""'], {}), "(multi_dnn_model_prediction, test_labels,\n f'{PLOTS}/MultiVarDNN_Error.png')\n", (9543, 9622), True, 'import reporter as rpt\n'), ((1058, 1096), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1089, 1096), True, 'import tensorflow as tf\n'), ((3049, 3086), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (3079, 3086), True, 'import tensorflow as tf\n'), ((3374, 3410), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', (['gpus'], {}), '(gpus)\n', (3404, 3410), True, 'import tensorflow as tf\n'), ((3512, 3540), 'tensorflow.distribute.get_strategy', 'tf.distribute.get_strategy', ([], {}), '()\n', (3538, 3540), True, 'import tensorflow as tf\n'), ((4012, 4030), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', 
(4028, 4030), False, 'from tensorflow import keras\n'), ((5808, 5826), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (5824, 5826), False, 'from tensorflow import keras\n'), ((6418, 6479), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)'}), "(monitor='val_loss', patience=5)\n", (6447, 6479), False, 'from tensorflow import keras\n'), ((7896, 7914), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (7912, 7914), False, 'from tensorflow import keras\n'), ((10066, 10132), 'pandas.DataFrame', 'pd.DataFrame', (['test_results'], {'index': "['Mean absolute error [Weight]']"}), "(test_results, index=['Mean absolute error [Weight]'])\n", (10078, 10132), True, 'import pandas as pd\n'), ((4045, 4149), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'input_shape': '(1,)', 'name': '"""layer1"""'}), "(64, activation='relu', kernel_initializer='he_normal',\n input_shape=(1,), name='layer1')\n", (4057, 4149), False, 'from tensorflow.keras import layers\n'), ((4161, 4248), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'name': '"""layer2"""'}), "(64, activation='relu', kernel_initializer='he_normal', name=\n 'layer2')\n", (4173, 4248), False, 'from tensorflow.keras import layers\n'), ((4259, 4274), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4271, 4274), False, 'from tensorflow.keras import layers\n'), ((5841, 5945), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'input_shape': '(1,)', 'name': '"""layer1"""'}), "(64, activation='relu', kernel_initializer='he_normal',\n input_shape=(1,), name='layer1')\n", (5853, 5945), False, 'from tensorflow.keras import layers\n'), ((6001, 6088), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'name': '"""layer2"""'}), "(64, activation='relu', kernel_initializer='he_normal', name=\n 'layer2')\n", (6013, 6088), False, 'from tensorflow.keras import layers\n'), ((6153, 6168), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (6165, 6168), False, 'from tensorflow.keras import layers\n'), ((7933, 8031), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'input_shape': '(n_features,)'}), "(64, activation='relu', kernel_initializer='he_normal',\n input_shape=(n_features,))\n", (7945, 8031), False, 'from tensorflow.keras import layers\n'), ((8047, 8114), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(64, activation='relu', kernel_initializer='he_normal')\n", (8059, 8114), False, 'from tensorflow.keras import layers\n'), ((8134, 8201), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(32, activation='relu', kernel_initializer='he_normal')\n", (8146, 8201), False, 'from tensorflow.keras import layers\n'), ((8221, 8288), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(16)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(16, activation='relu', kernel_initializer='he_normal')\n", (8233, 8288), False, 'from 
tensorflow.keras import layers\n'), ((8308, 8374), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(8)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(8, activation='relu', kernel_initializer='he_normal')\n", (8320, 8374), False, 'from tensorflow.keras import layers\n'), ((8394, 8409), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (8406, 8409), False, 'from tensorflow.keras import layers\n'), ((4328, 4359), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (4352, 4359), True, 'import tensorflow as tf\n'), ((6227, 6258), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (6251, 6258), True, 'import tensorflow as tf\n'), ((8471, 8502), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (8495, 8502), True, 'import tensorflow as tf\n')]
|
# encoding=utf8
import numpy as np
from datasets import load_metric
# the code below refers to the https://github.com/Yale-LILY/FeTaQA/blob/main/end2end/train.py
def postprocess_text(preds, references_s, metric_name):
preds = [pred.strip() for pred in preds]
references_s = [[reference.strip() for reference in references] for references in references_s]
# rougeLSum expects newline after each sentence
if metric_name in ["sacrebleu"]:
# since hf sacrebleu only support references with same length, we have to pad them into the same length
ref_max_len = max([len(ref) for ref in references_s])
for ref in references_s:
for _ in range(ref_max_len - len(ref)):
ref.append(None) # see https://github.com/mjpost/sacrebleu/pull/132
print(ref)
elif metric_name == "bleu":
preds = [pred.split(' ') for pred in preds]
references_s = [[reference.split(' ') for reference in references] for references in references_s]
else:
pass
return preds, references_s
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
summary = {}
references_s = [item["final_sentences"] for item in golds]
assert len(preds) == len(references_s)
metric_list = []
if section in ['train', 'dev']:
metric_list = ['sacrebleu']
elif section == 'test':
metric_list = ["sacrebleu", "bleurt"] # TODO: add PARENT
for metric_name in metric_list:
metric = load_metric(metric_name)
processed_preds, processed_golds = postprocess_text(preds, references_s, metric_name)
if metric_name == "sacrebleu":
res = metric.compute(predictions=processed_preds, references=processed_golds)
summary[metric_name] = res["score"] * 0.01
elif metric_name == "bleurt":
# We refer to the realization in https://github.com/google-research/language/blob/13fd14e1b285002412252097586f8fe405ba8a24/language/totto/totto_bleurt_eval.py#L94-L131
multi_references = [[], [], []]
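                # ToTTo provides 2 or 3 references per example; the missing third reference is padded with '' so the three lists stay aligned.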
for references in processed_golds: # here "references" mean references for one prediction string.
if len(references) == 2:
multi_references[2].append('')
elif len(references) == 3:
multi_references[2].append(references[2])
else:
raise ValueError("The references num for each candidate should be 2 or 3 in ToTTo dataset.")
multi_references[0].append(references[0])
multi_references[1].append(references[1])
multi_bleurt_scores = []
for references in multi_references:
multi_bleurt_scores.append(metric.compute(predictions=processed_preds, references=references))
assert len(multi_references) == 3
avg_bleurt_scores = []
for i in range(len(processed_preds)):
                    # All examples have at least two references but some do not have three.
assert multi_references[0][i] and multi_references[1][i]
r2 = multi_references[2][i]
if r2:
# Take average over 3 references.
score_i = (multi_bleurt_scores[0][i] + multi_bleurt_scores[1][i] +
multi_bleurt_scores[2][i]) / 3
else:
# print("only two refs")
# Take average over two references.
score_i = (multi_bleurt_scores[0][i] + multi_bleurt_scores[1][i]) / 2
avg_bleurt_scores.append(score_i)
summary["bleurt"] = np.mean(avg_bleurt_scores)
else:
res = metric.compute(predictions=processed_preds, references=processed_golds)
summary[metric_name] = res[metric_name]
return summary
if __name__ == '__main__':
import json
with open("predictions_eval_3.179650238473768.json") as f:
test_data = json.load(f)
with open("dev_result.txt") as f:
preds = [line.strip() for line in f.readlines()]
evaluator = EvaluateTool(args=None)
score = evaluator.evaluate(preds, test_data, section="test")
print(score)
|
[
"numpy.mean",
"json.load",
"datasets.load_metric"
] |
[((4371, 4383), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4380, 4383), False, 'import json\n'), ((1656, 1680), 'datasets.load_metric', 'load_metric', (['metric_name'], {}), '(metric_name)\n', (1667, 1680), False, 'from datasets import load_metric\n'), ((4013, 4039), 'numpy.mean', 'np.mean', (['avg_bleurt_scores'], {}), '(avg_bleurt_scores)\n', (4020, 4039), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
from vectorbt import defaults
from vectorbt.records.drawdowns import Drawdowns
from tests.utils import isclose
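# one day expressed as a timedelta (86400000000000 ns)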
day_dt = np.timedelta64(86400000000000)
index = pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
])
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=index)
ret = ts.pct_change()
defaults.returns['year_freq'] = '252 days' # same as empyrical
factor_returns = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
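# factor_returns is a synthetic benchmark: each column is the corresponding asset's returns scaled by random noise.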
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.freq == day_dt
assert ret['a'].vbt.returns.freq == day_dt
assert ret.vbt.returns(freq='2D').freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).freq == day_dt * 4
def test_year_freq(self):
assert ret.vbt.returns.year_freq == pd.to_timedelta(defaults.returns['year_freq'])
assert ret['a'].vbt.returns.year_freq == pd.to_timedelta(defaults.returns['year_freq'])
assert ret['a'].vbt.returns(year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert ret.vbt.returns(year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a'])._obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts)._obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized_return(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_return(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_sharpe_ratio(self, test_risk_free):
res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_downside_risk(self, test_required_return):
res_a = empyrical.downside_risk(ret['a'], required_return=test_required_return)
res_b = empyrical.downside_risk(ret['b'], required_return=test_required_return)
res_c = empyrical.downside_risk(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.downside_risk(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.downside_risk(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_sortino_ratio(self, test_required_return):
res_a = empyrical.sortino_ratio(ret['a'], required_return=test_required_return)
res_b = empyrical.sortino_ratio(ret['b'], required_return=test_required_return)
res_c = empyrical.sortino_ratio(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.sortino_ratio(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sortino_ratio(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_information_ratio(self):
res_a = empyrical.excess_sharpe(ret['a'], factor_returns['a'])
res_b = empyrical.excess_sharpe(ret['b'], factor_returns['b'])
res_c = empyrical.excess_sharpe(ret['c'], factor_returns['c'])
assert isclose(ret['a'].vbt.returns.information_ratio(factor_returns['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.information_ratio(factor_returns),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_beta(self):
res_a = empyrical.beta(ret['a'], factor_returns['a'])
res_b = empyrical.beta(ret['b'], factor_returns['b'])
res_c = empyrical.beta(ret['c'], factor_returns['c'])
assert isclose(ret['a'].vbt.returns.beta(factor_returns['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.beta(factor_returns),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_alpha(self, test_risk_free):
res_a = empyrical.alpha(ret['a'], factor_returns['a'], risk_free=test_risk_free)
res_b = empyrical.alpha(ret['b'], factor_returns['b'], risk_free=test_risk_free)
res_c = empyrical.alpha(ret['c'], factor_returns['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.alpha(factor_returns['a'], risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.alpha(factor_returns, risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_tail_ratio(self):
res_a = empyrical.tail_ratio(ret['a'])
res_b = empyrical.tail_ratio(ret['b'])
res_c = empyrical.tail_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.tail_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.tail_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaNs here
res_a = empyrical.value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_conditional_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaNs here
res_a = empyrical.conditional_value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.conditional_value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.conditional_value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.conditional_value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.conditional_value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_capture(self):
res_a = empyrical.capture(ret['a'], factor_returns['a'])
res_b = empyrical.capture(ret['b'], factor_returns['b'])
res_c = empyrical.capture(ret['c'], factor_returns['c'])
assert isclose(ret['a'].vbt.returns.capture(factor_returns['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.capture(factor_returns),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_up_capture(self):
res_a = empyrical.up_capture(ret['a'], factor_returns['a'])
res_b = empyrical.up_capture(ret['b'], factor_returns['b'])
res_c = empyrical.up_capture(ret['c'], factor_returns['c'])
assert isclose(ret['a'].vbt.returns.up_capture(factor_returns['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.up_capture(factor_returns),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_down_capture(self):
res_a = empyrical.down_capture(ret['a'], factor_returns['a'])
res_b = empyrical.down_capture(ret['b'], factor_returns['b'])
res_c = empyrical.down_capture(ret['c'], factor_returns['c'])
assert isclose(ret['a'].vbt.returns.down_capture(factor_returns['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.down_capture(factor_returns),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
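    # Drawdown series have no direct empyrical counterpart, so they are compared
    # against hand-computed values.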
def test_drawdown(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.drawdown(),
pd.Series(
np.array([0., 0., 0., 0., 0.]),
index=ret['a'].index,
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.drawdown(),
pd.DataFrame(
np.array([
[0., 0., 0.],
[0., -0.2, 0.],
[0., -0.4, 0.],
[0., -0.6, -0.33333333],
[0., -0.8, -0.66666667]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03',
'2018-01-04',
'2018-01-05'
], dtype='datetime64[ns]', freq=None),
columns=ret.columns
)
)
def test_max_drawdown(self):
res_a = empyrical.max_drawdown(ret['a'])
res_b = empyrical.max_drawdown(ret['b'])
res_c = empyrical.max_drawdown(ret['c'])
assert isclose(ret['a'].vbt.returns.max_drawdown(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.max_drawdown(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
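    # The returns accessor also exposes a Drawdowns records object; check its type,
    # wrapper metadata, and that its max_drawdown matches the direct computation.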
def test_drawdowns(self):
assert type(ret['a'].vbt.returns.drawdowns) is Drawdowns
assert ret['a'].vbt.returns.drawdowns.wrapper.freq == ret['a'].vbt.returns.freq
assert ret['a'].vbt.returns.drawdowns.wrapper.ndim == ret['a'].ndim
assert ret.vbt.returns.drawdowns.wrapper.ndim == ret.ndim
assert isclose(ret['a'].vbt.returns.drawdowns.max_drawdown(), ret['a'].vbt.returns.max_drawdown())
pd.testing.assert_series_equal(
ret.vbt.returns.drawdowns.max_drawdown(),
ret.vbt.returns.max_drawdown()
)
|
[
"empyrical.tail_ratio",
"empyrical.excess_sharpe",
"empyrical.conditional_value_at_risk",
"numpy.isnan",
"pandas.DatetimeIndex",
"pytest.mark.parametrize",
"empyrical.value_at_risk",
"empyrical.beta",
"pandas.DataFrame",
"empyrical.omega_ratio",
"empyrical.downside_risk",
"empyrical.max_drawdown",
"empyrical.up_capture",
"empyrical.annual_return",
"pytest.raises",
"pandas.concat",
"empyrical.down_capture",
"empyrical.calmar_ratio",
"empyrical.alpha",
"datetime.datetime",
"pandas.to_timedelta",
"pandas.Series",
"pandas.DataFrame.vbt.returns.from_price",
"empyrical.sharpe_ratio",
"empyrical.annual_volatility",
"numpy.random.uniform",
"empyrical.sortino_ratio",
"empyrical.capture",
"empyrical.cum_returns_final",
"numpy.timedelta64",
"pandas.Series.vbt.returns.from_price",
"numpy.array",
"empyrical.cum_returns"
] |
[((223, 253), 'numpy.timedelta64', 'np.timedelta64', (['(86400000000000)'], {}), '(86400000000000)\n', (237, 253), True, 'import numpy as np\n'), ((419, 516), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3, 4, 5], 'b': [5, 4, 3, 2, 1], 'c': [1, 2, 3, 2, 1]}"], {'index': 'index'}), "({'a': [1, 2, 3, 4, 5], 'b': [5, 4, 3, 2, 1], 'c': [1, 2, 3, 2,\n 1]}, index=index)\n", (431, 516), True, 'import pandas as pd\n'), ((5891, 5945), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_alpha"""', '[1.0, 2.0, 3.0]'], {}), "('test_alpha', [1.0, 2.0, 3.0])\n", (5914, 5945), False, 'import pytest\n'), ((6933, 7040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_risk_free,test_required_return"""', '[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)]'], {}), "('test_risk_free,test_required_return', [(0.01, 0.1),\n (0.02, 0.2), (0.03, 0.3)])\n", (6956, 7040), False, 'import pytest\n'), ((7997, 8058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_risk_free"""', '[0.01, 0.02, 0.03]'], {}), "('test_risk_free', [0.01, 0.02, 0.03])\n", (8020, 8058), False, 'import pytest\n'), ((8635, 8702), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_required_return"""', '[0.01, 0.02, 0.03]'], {}), "('test_required_return', [0.01, 0.02, 0.03])\n", (8658, 8702), False, 'import pytest\n'), ((9351, 9418), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_required_return"""', '[0.01, 0.02, 0.03]'], {}), "('test_required_return', [0.01, 0.02, 0.03])\n", (9374, 9418), False, 'import pytest\n'), ((11041, 11102), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_risk_free"""', '[0.01, 0.02, 0.03]'], {}), "('test_risk_free', [0.01, 0.02, 0.03])\n", (11064, 11102), False, 'import pytest\n'), ((12131, 12189), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_cutoff"""', '[0.05, 0.06, 0.07]'], {}), "('test_cutoff', [0.05, 0.06, 0.07])\n", (12154, 12189), False, 'import pytest\n'), ((12811, 12869), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_cutoff"""', '[0.05, 0.06, 0.07]'], {}), "('test_cutoff', [0.05, 0.06, 0.07])\n", (12834, 12869), False, 'import pytest\n'), ((286, 306), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (294, 306), False, 'from datetime import datetime\n'), ((312, 332), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)'], {}), '(2018, 1, 2)\n', (320, 332), False, 'from datetime import datetime\n'), ((338, 358), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(3)'], {}), '(2018, 1, 3)\n', (346, 358), False, 'from datetime import datetime\n'), ((364, 384), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(4)'], {}), '(2018, 1, 4)\n', (372, 384), False, 'from datetime import datetime\n'), ((390, 410), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(5)'], {}), '(2018, 1, 5)\n', (398, 410), False, 'from datetime import datetime\n'), ((5104, 5141), 'empyrical.cum_returns_final', 'empyrical.cum_returns_final', (["ret['a']"], {}), "(ret['a'])\n", (5131, 5141), False, 'import empyrical\n'), ((5158, 5195), 'empyrical.cum_returns_final', 'empyrical.cum_returns_final', (["ret['b']"], {}), "(ret['b'])\n", (5185, 5195), False, 'import empyrical\n'), ((5212, 5249), 'empyrical.cum_returns_final', 'empyrical.cum_returns_final', (["ret['c']"], {}), "(ret['c'])\n", (5239, 5249), False, 'import empyrical\n'), ((5516, 5549), 'empyrical.annual_return', 'empyrical.annual_return', (["ret['a']"], {}), "(ret['a'])\n", (5539, 5549), False, 'import 
empyrical\n'), ((5566, 5599), 'empyrical.annual_return', 'empyrical.annual_return', (["ret['b']"], {}), "(ret['b'])\n", (5589, 5599), False, 'import empyrical\n'), ((5616, 5649), 'empyrical.annual_return', 'empyrical.annual_return', (["ret['c']"], {}), "(ret['c'])\n", (5639, 5649), False, 'import empyrical\n'), ((6036, 6091), 'empyrical.annual_volatility', 'empyrical.annual_volatility', (["ret['a']"], {'alpha': 'test_alpha'}), "(ret['a'], alpha=test_alpha)\n", (6063, 6091), False, 'import empyrical\n'), ((6108, 6163), 'empyrical.annual_volatility', 'empyrical.annual_volatility', (["ret['b']"], {'alpha': 'test_alpha'}), "(ret['b'], alpha=test_alpha)\n", (6135, 6163), False, 'import empyrical\n'), ((6180, 6235), 'empyrical.annual_volatility', 'empyrical.annual_volatility', (["ret['c']"], {'alpha': 'test_alpha'}), "(ret['c'], alpha=test_alpha)\n", (6207, 6235), False, 'import empyrical\n'), ((6571, 6603), 'empyrical.calmar_ratio', 'empyrical.calmar_ratio', (["ret['a']"], {}), "(ret['a'])\n", (6593, 6603), False, 'import empyrical\n'), ((6620, 6652), 'empyrical.calmar_ratio', 'empyrical.calmar_ratio', (["ret['b']"], {}), "(ret['b'])\n", (6642, 6652), False, 'import empyrical\n'), ((6669, 6701), 'empyrical.calmar_ratio', 'empyrical.calmar_ratio', (["ret['c']"], {}), "(ret['c'])\n", (6691, 6701), False, 'import empyrical\n'), ((7146, 7246), 'empyrical.omega_ratio', 'empyrical.omega_ratio', (["ret['a']"], {'risk_free': 'test_risk_free', 'required_return': 'test_required_return'}), "(ret['a'], risk_free=test_risk_free, required_return=\n test_required_return)\n", (7167, 7246), False, 'import empyrical\n'), ((7253, 7268), 'numpy.isnan', 'np.isnan', (['res_a'], {}), '(res_a)\n', (7261, 7268), True, 'import numpy as np\n'), ((7313, 7413), 'empyrical.omega_ratio', 'empyrical.omega_ratio', (["ret['b']"], {'risk_free': 'test_risk_free', 'required_return': 'test_required_return'}), "(ret['b'], risk_free=test_risk_free, required_return=\n test_required_return)\n", (7334, 7413), False, 'import empyrical\n'), ((7420, 7435), 'numpy.isnan', 'np.isnan', (['res_b'], {}), '(res_b)\n', (7428, 7435), True, 'import numpy as np\n'), ((7480, 7580), 'empyrical.omega_ratio', 'empyrical.omega_ratio', (["ret['c']"], {'risk_free': 'test_risk_free', 'required_return': 'test_required_return'}), "(ret['c'], risk_free=test_risk_free, required_return=\n test_required_return)\n", (7501, 7580), False, 'import empyrical\n'), ((7587, 7602), 'numpy.isnan', 'np.isnan', (['res_c'], {}), '(res_c)\n', (7595, 7602), True, 'import numpy as np\n'), ((8147, 8205), 'empyrical.sharpe_ratio', 'empyrical.sharpe_ratio', (["ret['a']"], {'risk_free': 'test_risk_free'}), "(ret['a'], risk_free=test_risk_free)\n", (8169, 8205), False, 'import empyrical\n'), ((8222, 8280), 'empyrical.sharpe_ratio', 'empyrical.sharpe_ratio', (["ret['b']"], {'risk_free': 'test_risk_free'}), "(ret['b'], risk_free=test_risk_free)\n", (8244, 8280), False, 'import empyrical\n'), ((8297, 8355), 'empyrical.sharpe_ratio', 'empyrical.sharpe_ratio', (["ret['c']"], {'risk_free': 'test_risk_free'}), "(ret['c'], risk_free=test_risk_free)\n", (8319, 8355), False, 'import empyrical\n'), ((8798, 8869), 'empyrical.downside_risk', 'empyrical.downside_risk', (["ret['a']"], {'required_return': 'test_required_return'}), "(ret['a'], required_return=test_required_return)\n", (8821, 8869), False, 'import empyrical\n'), ((8886, 8957), 'empyrical.downside_risk', 'empyrical.downside_risk', (["ret['b']"], {'required_return': 'test_required_return'}), "(ret['b'], 
required_return=test_required_return)\n", (8909, 8957), False, 'import empyrical\n'), ((8974, 9045), 'empyrical.downside_risk', 'empyrical.downside_risk', (["ret['c']"], {'required_return': 'test_required_return'}), "(ret['c'], required_return=test_required_return)\n", (8997, 9045), False, 'import empyrical\n'), ((9514, 9585), 'empyrical.sortino_ratio', 'empyrical.sortino_ratio', (["ret['a']"], {'required_return': 'test_required_return'}), "(ret['a'], required_return=test_required_return)\n", (9537, 9585), False, 'import empyrical\n'), ((9602, 9673), 'empyrical.sortino_ratio', 'empyrical.sortino_ratio', (["ret['b']"], {'required_return': 'test_required_return'}), "(ret['b'], required_return=test_required_return)\n", (9625, 9673), False, 'import empyrical\n'), ((9690, 9761), 'empyrical.sortino_ratio', 'empyrical.sortino_ratio', (["ret['c']"], {'required_return': 'test_required_return'}), "(ret['c'], required_return=test_required_return)\n", (9713, 9761), False, 'import empyrical\n'), ((10116, 10170), 'empyrical.excess_sharpe', 'empyrical.excess_sharpe', (["ret['a']", "factor_returns['a']"], {}), "(ret['a'], factor_returns['a'])\n", (10139, 10170), False, 'import empyrical\n'), ((10187, 10241), 'empyrical.excess_sharpe', 'empyrical.excess_sharpe', (["ret['b']", "factor_returns['b']"], {}), "(ret['b'], factor_returns['b'])\n", (10210, 10241), False, 'import empyrical\n'), ((10258, 10312), 'empyrical.excess_sharpe', 'empyrical.excess_sharpe', (["ret['c']", "factor_returns['c']"], {}), "(ret['c'], factor_returns['c'])\n", (10281, 10312), False, 'import empyrical\n'), ((10623, 10668), 'empyrical.beta', 'empyrical.beta', (["ret['a']", "factor_returns['a']"], {}), "(ret['a'], factor_returns['a'])\n", (10637, 10668), False, 'import empyrical\n'), ((10685, 10730), 'empyrical.beta', 'empyrical.beta', (["ret['b']", "factor_returns['b']"], {}), "(ret['b'], factor_returns['b'])\n", (10699, 10730), False, 'import empyrical\n'), ((10747, 10792), 'empyrical.beta', 'empyrical.beta', (["ret['c']", "factor_returns['c']"], {}), "(ret['c'], factor_returns['c'])\n", (10761, 10792), False, 'import empyrical\n'), ((11184, 11256), 'empyrical.alpha', 'empyrical.alpha', (["ret['a']", "factor_returns['a']"], {'risk_free': 'test_risk_free'}), "(ret['a'], factor_returns['a'], risk_free=test_risk_free)\n", (11199, 11256), False, 'import empyrical\n'), ((11273, 11345), 'empyrical.alpha', 'empyrical.alpha', (["ret['b']", "factor_returns['b']"], {'risk_free': 'test_risk_free'}), "(ret['b'], factor_returns['b'], risk_free=test_risk_free)\n", (11288, 11345), False, 'import empyrical\n'), ((11362, 11434), 'empyrical.alpha', 'empyrical.alpha', (["ret['c']", "factor_returns['c']"], {'risk_free': 'test_risk_free'}), "(ret['c'], factor_returns['c'], risk_free=test_risk_free)\n", (11377, 11434), False, 'import empyrical\n'), ((11779, 11809), 'empyrical.tail_ratio', 'empyrical.tail_ratio', (["ret['a']"], {}), "(ret['a'])\n", (11799, 11809), False, 'import empyrical\n'), ((11826, 11856), 'empyrical.tail_ratio', 'empyrical.tail_ratio', (["ret['b']"], {}), "(ret['b'])\n", (11846, 11856), False, 'import empyrical\n'), ((11873, 11903), 'empyrical.tail_ratio', 'empyrical.tail_ratio', (["ret['c']"], {}), "(ret['c'])\n", (11893, 11903), False, 'import empyrical\n'), ((12321, 12383), 'empyrical.value_at_risk', 'empyrical.value_at_risk', (["ret['a'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['a'].iloc[1:], cutoff=test_cutoff)\n", (12344, 12383), False, 'import empyrical\n'), ((12400, 12462), 'empyrical.value_at_risk', 
'empyrical.value_at_risk', (["ret['b'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['b'].iloc[1:], cutoff=test_cutoff)\n", (12423, 12462), False, 'import empyrical\n'), ((12479, 12541), 'empyrical.value_at_risk', 'empyrical.value_at_risk', (["ret['c'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['c'].iloc[1:], cutoff=test_cutoff)\n", (12502, 12541), False, 'import empyrical\n'), ((13013, 13087), 'empyrical.conditional_value_at_risk', 'empyrical.conditional_value_at_risk', (["ret['a'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['a'].iloc[1:], cutoff=test_cutoff)\n", (13048, 13087), False, 'import empyrical\n'), ((13104, 13178), 'empyrical.conditional_value_at_risk', 'empyrical.conditional_value_at_risk', (["ret['b'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['b'].iloc[1:], cutoff=test_cutoff)\n", (13139, 13178), False, 'import empyrical\n'), ((13195, 13269), 'empyrical.conditional_value_at_risk', 'empyrical.conditional_value_at_risk', (["ret['c'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['c'].iloc[1:], cutoff=test_cutoff)\n", (13230, 13269), False, 'import empyrical\n'), ((13602, 13650), 'empyrical.capture', 'empyrical.capture', (["ret['a']", "factor_returns['a']"], {}), "(ret['a'], factor_returns['a'])\n", (13619, 13650), False, 'import empyrical\n'), ((13667, 13715), 'empyrical.capture', 'empyrical.capture', (["ret['b']", "factor_returns['b']"], {}), "(ret['b'], factor_returns['b'])\n", (13684, 13715), False, 'import empyrical\n'), ((13732, 13780), 'empyrical.capture', 'empyrical.capture', (["ret['c']", "factor_returns['c']"], {}), "(ret['c'], factor_returns['c'])\n", (13749, 13780), False, 'import empyrical\n'), ((14077, 14128), 'empyrical.up_capture', 'empyrical.up_capture', (["ret['a']", "factor_returns['a']"], {}), "(ret['a'], factor_returns['a'])\n", (14097, 14128), False, 'import empyrical\n'), ((14145, 14196), 'empyrical.up_capture', 'empyrical.up_capture', (["ret['b']", "factor_returns['b']"], {}), "(ret['b'], factor_returns['b'])\n", (14165, 14196), False, 'import empyrical\n'), ((14213, 14264), 'empyrical.up_capture', 'empyrical.up_capture', (["ret['c']", "factor_returns['c']"], {}), "(ret['c'], factor_returns['c'])\n", (14233, 14264), False, 'import empyrical\n'), ((14569, 14622), 'empyrical.down_capture', 'empyrical.down_capture', (["ret['a']", "factor_returns['a']"], {}), "(ret['a'], factor_returns['a'])\n", (14591, 14622), False, 'import empyrical\n'), ((14639, 14692), 'empyrical.down_capture', 'empyrical.down_capture', (["ret['b']", "factor_returns['b']"], {}), "(ret['b'], factor_returns['b'])\n", (14661, 14692), False, 'import empyrical\n'), ((14709, 14762), 'empyrical.down_capture', 'empyrical.down_capture', (["ret['c']", "factor_returns['c']"], {}), "(ret['c'], factor_returns['c'])\n", (14731, 14762), False, 'import empyrical\n'), ((16026, 16058), 'empyrical.max_drawdown', 'empyrical.max_drawdown', (["ret['a']"], {}), "(ret['a'])\n", (16048, 16058), False, 'import empyrical\n'), ((16075, 16107), 'empyrical.max_drawdown', 'empyrical.max_drawdown', (["ret['b']"], {}), "(ret['b'])\n", (16097, 16107), False, 'import empyrical\n'), ((16124, 16156), 'empyrical.max_drawdown', 'empyrical.max_drawdown', (["ret['c']"], {}), "(ret['c'])\n", (16146, 16156), False, 'import empyrical\n'), ((667, 708), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)', 'ret.shape[0]'], {}), '(0.8, 1.2, ret.shape[0])\n', (684, 708), True, 'import numpy as np\n'), ((1475, 1521), 'pandas.to_timedelta', 'pd.to_timedelta', (["defaults.returns['year_freq']"], {}), 
"(defaults.returns['year_freq'])\n", (1490, 1521), True, 'import pandas as pd\n'), ((1571, 1617), 'pandas.to_timedelta', 'pd.to_timedelta', (["defaults.returns['year_freq']"], {}), "(defaults.returns['year_freq'])\n", (1586, 1617), True, 'import pandas as pd\n'), ((1689, 1716), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365 days"""'], {}), "('365 days')\n", (1704, 1716), True, 'import pandas as pd\n'), ((1783, 1810), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365 days"""'], {}), "('365 days')\n", (1798, 1810), True, 'import pandas as pd\n'), ((2003, 2027), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2016, 2027), False, 'import pytest\n'), ((2447, 2474), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365 days"""'], {}), "('365 days')\n", (2462, 2474), True, 'import pandas as pd\n'), ((2565, 2592), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365 days"""'], {}), "('365 days')\n", (2580, 2592), True, 'import pandas as pd\n'), ((5010, 5050), 'pandas.concat', 'pd.concat', (['[res_a, res_b, res_c]'], {'axis': '(1)'}), '([res_a, res_b, res_c], axis=1)\n', (5019, 5050), True, 'import pandas as pd\n'), ((5399, 5450), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (5408, 5450), True, 'import pandas as pd\n'), ((5823, 5874), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (5832, 5874), True, 'import pandas as pd\n'), ((6459, 6510), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (6468, 6510), True, 'import pandas as pd\n'), ((6865, 6916), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (6874, 6916), True, 'import pandas as pd\n'), ((7929, 7980), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (7938, 7980), True, 'import pandas as pd\n'), ((8567, 8618), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (8576, 8618), True, 'import pandas as pd\n'), ((9283, 9334), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (9292, 9334), True, 'import pandas as pd\n'), ((9999, 10050), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (10008, 10050), True, 'import pandas as pd\n'), ((10519, 10570), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (10528, 10570), True, 'import pandas as pd\n'), ((10973, 11024), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (10982, 11024), True, 'import pandas as pd\n'), ((11669, 11720), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (11678, 11720), True, 'import pandas as pd\n'), ((12063, 12114), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (12072, 12114), True, 'import pandas as pd\n'), ((12743, 12794), 'pandas.Series', 'pd.Series', 
(['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (12752, 12794), True, 'import pandas as pd\n'), ((13495, 13546), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (13504, 13546), True, 'import pandas as pd\n'), ((13967, 14018), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (13976, 14018), True, 'import pandas as pd\n'), ((14457, 14508), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (14466, 14508), True, 'import pandas as pd\n'), ((14959, 15010), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (14968, 15010), True, 'import pandas as pd\n'), ((16320, 16371), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (16329, 16371), True, 'import pandas as pd\n'), ((730, 771), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)', 'ret.shape[0]'], {}), '(0.8, 1.2, ret.shape[0])\n', (747, 771), True, 'import numpy as np\n'), ((797, 838), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)', 'ret.shape[0]'], {}), '(0.8, 1.2, ret.shape[0])\n', (814, 838), True, 'import numpy as np\n'), ((2184, 2225), 'pandas.Series.vbt.returns.from_price', 'pd.Series.vbt.returns.from_price', (["ts['a']"], {}), "(ts['a'])\n", (2216, 2225), True, 'import pandas as pd\n'), ((2292, 2331), 'pandas.DataFrame.vbt.returns.from_price', 'pd.DataFrame.vbt.returns.from_price', (['ts'], {}), '(ts)\n', (2327, 2331), True, 'import pandas as pd\n'), ((2370, 2433), 'pandas.Series.vbt.returns.from_price', 'pd.Series.vbt.returns.from_price', (["ts['a']"], {'year_freq': '"""365 days"""'}), "(ts['a'], year_freq='365 days')\n", (2402, 2433), True, 'import pandas as pd\n'), ((2490, 2551), 'pandas.DataFrame.vbt.returns.from_price', 'pd.DataFrame.vbt.returns.from_price', (['ts'], {'year_freq': '"""365 days"""'}), "(ts, year_freq='365 days')\n", (2525, 2551), True, 'import pandas as pd\n'), ((3150, 3177), 'numpy.array', 'np.array', (['[0.21, 0.21, 0.1]'], {}), '([0.21, 0.21, 0.1])\n', (3158, 3177), True, 'import numpy as np\n'), ((3560, 3632), 'numpy.array', 'np.array', (['[[0.21, -0.19, -0.01], [0.21, -0.19, -0.01], [0.1, -0.1, 0.1]]'], {}), '([[0.21, -0.19, -0.01], [0.21, -0.19, -0.01], [0.1, -0.1, 0.1]])\n', (3568, 3632), True, 'import numpy as np\n'), ((4122, 4137), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (4130, 4137), True, 'import numpy as np\n'), ((4409, 4437), 'numpy.array', 'np.array', (['[[4.0, -0.8, 0.0]]'], {}), '([[4.0, -0.8, 0.0]])\n', (4417, 4437), True, 'import numpy as np\n'), ((4638, 4669), 'empyrical.cum_returns', 'empyrical.cum_returns', (["ret['a']"], {}), "(ret['a'])\n", (4659, 4669), False, 'import empyrical\n'), ((4698, 4729), 'empyrical.cum_returns', 'empyrical.cum_returns', (["ret['b']"], {}), "(ret['b'])\n", (4719, 4729), False, 'import empyrical\n'), ((4758, 4789), 'empyrical.cum_returns', 'empyrical.cum_returns', (["ret['c']"], {}), "(ret['c'])\n", (4779, 4789), False, 'import empyrical\n'), ((15175, 15210), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0])\n', (15183, 15210), True, 'import numpy as np\n'), ((15425, 15545), 'numpy.array', 'np.array', (['[[0.0, 0.0, 
0.0], [0.0, -0.2, 0.0], [0.0, -0.4, 0.0], [0.0, -0.6, -\n 0.33333333], [0.0, -0.8, -0.66666667]]'], {}), '([[0.0, 0.0, 0.0], [0.0, -0.2, 0.0], [0.0, -0.4, 0.0], [0.0, -0.6, \n -0.33333333], [0.0, -0.8, -0.66666667]])\n', (15433, 15545), True, 'import numpy as np\n'), ((3201, 3300), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01', '2018-01-02', '2018-01-03']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""D"""'}), "(['2018-01-01', '2018-01-02', '2018-01-03'], dtype=\n 'datetime64[ns]', freq='D')\n", (3217, 3300), True, 'import pandas as pd\n'), ((3734, 3833), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01', '2018-01-02', '2018-01-03']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""D"""'}), "(['2018-01-01', '2018-01-02', '2018-01-03'], dtype=\n 'datetime64[ns]', freq='D')\n", (3750, 3833), True, 'import pandas as pd\n'), ((4160, 4229), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""252D"""'}), "(['2018-01-01'], dtype='datetime64[ns]', freq='252D')\n", (4176, 4229), True, 'import pandas as pd\n'), ((4459, 4528), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""252D"""'}), "(['2018-01-01'], dtype='datetime64[ns]', freq='252D')\n", (4475, 4528), True, 'import pandas as pd\n'), ((15673, 15800), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']"], {'dtype': '"""datetime64[ns]"""', 'freq': 'None'}), "(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05'], dtype='datetime64[ns]', freq=None)\n", (15689, 15800), True, 'import pandas as pd\n'), ((1180, 1200), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1189, 1200), True, 'import pandas as pd\n'), ((1357, 1379), 'numpy.timedelta64', 'np.timedelta64', (['(4)', '"""D"""'], {}), "(4, 'D')\n", (1371, 1379), True, 'import numpy as np\n'), ((2839, 2862), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)', '(0)'], {}), '(2018, 1, 1, 0)\n', (2847, 2862), False, 'from datetime import datetime\n'), ((2876, 2900), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)', '(12)'], {}), '(2018, 1, 1, 12)\n', (2884, 2900), False, 'from datetime import datetime\n'), ((2914, 2937), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)', '(0)'], {}), '(2018, 1, 2, 0)\n', (2922, 2937), False, 'from datetime import datetime\n'), ((2951, 2975), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)', '(12)'], {}), '(2018, 1, 2, 12)\n', (2959, 2975), False, 'from datetime import datetime\n'), ((2989, 3012), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(3)', '(0)'], {}), '(2018, 1, 3, 0)\n', (2997, 3012), False, 'from datetime import datetime\n'), ((1241, 1261), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1250, 1261), True, 'import pandas as pd\n'), ((1319, 1339), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1328, 1339), True, 'import pandas as pd\n'), ((2058, 2078), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2067, 2078), True, 'import pandas as pd\n')]
|
"""
********************************************
test_generator_moduL_formelfrage.py
@digitalfellowship - as of 07/2021
Author: <NAME>
********************************************
This module is used to build the formula question (Formelfrage) GUI
and to generate the formula questions in XML structure
"""
from tkinter import ttk
from tkinter import *
import sqlite3  # used for the SQLite database
import xml.etree.ElementTree as ET
from sympy import *
import os
import pandas as pd
from pandas.core.reshape.util import cartesian_product
import numpy as np
import re
from tkinter import messagebox
import zipfile
from collections import Counter
from operator import itemgetter
### Own files / modules
from Test_Generator_Module import test_generator_modul_datenbanken_anzeigen
from Test_Generator_Module import test_generator_modul_datenbanken_erstellen
from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung
from Test_Generator_Module import test_generator_modul_ilias_test_struktur
from Test_Generator_Module import test_generator_modul_ilias_import_test_datei
from Test_Generator_Module import test_generator_modul_test_einstellungen
from Test_Generator_Module import test_generator_modul_zeigerdiagramme
class Formelfrage:
    # Builds the Formelfrage GUI
    # Defines labels, buttons, etc.
    def __init__(self, app, formelfrage_tab, project_root_path):
        self.formelfrage_tab = formelfrage_tab
        # The project root path must be set here already, because it is needed
        # below for the test settings database path
        self.project_root_path = project_root_path
        ############## SET QUESTION_TYPE SPECIFIC NAMES FOR DATABASE AND WORKBOOK/SHEET
        # Name of the question type
self.ff_question_type_name = "formelfrage"
        # Names for the database and table
self.ff_database = "ilias_formelfrage_db.db"
self.ff_database_table = "formelfrage_table"
self.test_settings_database = "test_settings_profiles_db.db"
self.test_settings_database_table = "my_profiles_table"
self.test_settings_database_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", self.test_settings_database))
        # Names for the spreadsheet file and worksheet
self.ff_xlsx_workbook_name = "Formelfrage_DB_export_file"
self.ff_xlsx_worksheet_name = "Formelfrage - Database"
############## SET IMAGE VARIABLES
        # These variables must be set at program start so that they can be passed on to other functions
self.ff_description_img_name_1 = ""
self.ff_description_img_name_2 = ""
self.ff_description_img_name_3 = ""
self.ff_description_img_data_1 = ""
self.ff_description_img_data_2 = ""
self.ff_description_img_data_3 = ""
self.ff_description_img_path_1 = ""
self.ff_description_img_path_2 = ""
self.ff_description_img_path_3 = ""
############## DEFINE FORMELFRAGE PATHS
        # Paths of the project and of the FF module
self.project_root_path = project_root_path
self.formelfrage_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-Formelfrage"))
self.formelfrage_excel_vorlage = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_excel_vorlage", "ff_excel_vorlage.xlsx"))
self.formelfrage_files_path_pool_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe"))
        # Path to the database
self.database_formelfrage_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", self.ff_database))
        # Paths to the ILIAS test templates
self.formelfrage_test_qti_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
self.formelfrage_test_tst_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
        # Paths to the ILIAS test files (for uploading to ILIAS)
self.formelfrage_test_qti_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
self.formelfrage_test_tst_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
self.formelfrage_test_img_file_path = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
        # Paths to the ILIAS pool templates
self.formelfrage_pool_qti_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
self.formelfrage_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
        # Paths to the ILIAS pool files (for uploading to ILIAS)
        # The paths for qti.xml and qpl.xml are only determined at runtime.
        # They are therefore declared in "class Create_Formelfrage_Pool"
        ###################### CREATE DATABASE ENTRIES AND INDEX DICT ###################
        # Build a dictionary from two lists
self.ff_db_find_entries = []
self.ff_db_find_indexes = []
self.ff_db_column_names_list = []
connect = sqlite3.connect(self.database_formelfrage_path)
cursor = connect.execute('select * from ' + self.ff_database_table)
        # list(map(lambda x: x[0], ...)) reads the column names from the DB cursor description
self.ff_db_column_names_list = list(map(lambda x: x[0], cursor.description))
self.ff_db_column_names_string = ', :'.join(self.ff_db_column_names_list)
self.ff_db_column_names_string = ":" + self.ff_db_column_names_string
for i in range(len(self.ff_db_column_names_list)):
self.ff_db_find_indexes.append(i)
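        # Map each column name to its positional index so DB rows can be addressed by name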
self.ff_db_entry_to_index_dict = dict(zip((self.ff_db_column_names_list), (self.ff_db_find_indexes)))
connect.commit()
connect.close()
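        # Illustration (hypothetical column names): for a table with columns
        # ("question_title", "question_author", ...) the dict above would be
        # {"question_title": 0, "question_author": 1, ...}, so a row tuple fetched
        # from SQLite can be accessed by column name via row[dict["question_title"]].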
############## FRAMES
self.ff_frame_ilias_test_title = LabelFrame(self.formelfrage_tab, text="Testname & Autor", padx=5, pady=5)
self.ff_frame_ilias_test_title.grid(row=0, column=0, padx=10, pady=10, sticky="NW")
self.ff_frame = LabelFrame(self.formelfrage_tab, text="Formelfrage", padx=5, pady=5)
self.ff_frame.grid(row=1, column=0, padx=10, pady=10, sticky="NW")
self.ff_frame_question_attributes = LabelFrame(self.formelfrage_tab, text="Fragen Attribute", padx=5, pady=5)
self.ff_frame_question_attributes.grid(row=2, column=0, padx=10, pady=10, sticky="NE")
self.ff_frame_database = LabelFrame(self.formelfrage_tab, text="Formelfrage-Datenbank", padx=5, pady=5)
self.ff_frame_database.grid(row=2, column=0, padx=10, pady=10, sticky="NW")
self.ff_frame_create_formelfrage_test = LabelFrame(self.formelfrage_tab, text="FF-Test erstellen", padx=5, pady=5)
self.ff_frame_create_formelfrage_test.grid(row=2, column=0, padx=10, pady=120, sticky="NE")
self.ff_frame_test_settings = LabelFrame(self.formelfrage_tab, text="Test Einstellungen", padx=5, pady=5)
self.ff_frame_test_settings.grid(row=0, column=0, padx=10, pady=10, sticky="NE")
self.ff_frame_taxonomy_settings = LabelFrame(self.formelfrage_tab, text="Taxonomie Einstellungen", padx=5, pady=5)
self.ff_frame_taxonomy_settings.grid(row=0, column=1, padx=10, pady=10, sticky="NW")
self.ff_frame_question_description_functions = LabelFrame(self.formelfrage_tab, text="Fragentext Funktionen", padx=5, pady=5)
self.ff_frame_question_description_functions.grid(row=1, column=1, padx=10, pady=10, sticky="NW")
self.ff_frame_excel_import_export = LabelFrame(self.formelfrage_tab, text="Excel Import/Export", padx=5, pady=5)
self.ff_frame_excel_import_export.grid(row=2, column=1, padx=10, pady=10, sticky="NW")
self.ff_frame_calculate_value_range = LabelFrame(self.formelfrage_tab, text="Wertebereich berechnen", padx=5, pady=5)
self.ff_frame_calculate_value_range.grid(row=1, column=1, padx=10, pady=10, sticky="SW")
self.ff_frame_description_picture = LabelFrame(self.formelfrage_tab, text="Fragen-Text Bild", padx=5, pady=5)
self.ff_frame_description_picture.grid(row=1, column=2, padx=10, pady=10, sticky="NW")
        # To enable phasor diagrams, uncomment the grid() line below again
        # The phasor diagram module is only rudimentary and cannot be used freely
self.ff_frame_vector_diagram = LabelFrame(self.formelfrage_tab, text="Zeigerdiagramme", padx=5, pady=5)
#self.ff_frame_vector_diagram.grid(row=2, column=1, padx=10, pady=200, sticky="NW")
###################### "Testname & Autor" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
self.ff_ilias_test_title_label = Label(self.ff_frame_ilias_test_title, text="Name des Tests")
self.ff_ilias_test_title_label.grid(row=0, column=0, sticky=W)
self.ff_ilias_test_title_entry = Entry(self.ff_frame_ilias_test_title, width=60)
self.ff_ilias_test_title_entry.grid(row=0, column=1, sticky=W, padx=30)
self.ff_ilias_test_autor_label = Label(self.ff_frame_ilias_test_title, text="Autor")
self.ff_ilias_test_autor_label.grid(row=1, column=0, sticky=W)
self.ff_ilias_test_autor_entry = Entry(self.ff_frame_ilias_test_title, width=60)
self.ff_ilias_test_autor_entry.grid(row=1, column=1, sticky=W, padx=30)
###################### TEST SETTINGS
self.show_test_settings_formula_tab = Button(self.ff_frame_test_settings, text="Test Einstellungen",command=lambda: test_generator_modul_test_einstellungen.Test_Einstellungen_GUI.__init__(self, self.project_root_path, self.formelfrage_test_qti_file_path_output))
self.show_test_settings_formula_tab.grid(row=0, column=0, pady=0, sticky=NE)
######################################
###################### "Fragen-Text Bild" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
        # Add image 1
self.ff_var_use_image_1 = IntVar()
self.ff_check_use_image_1_in_description = Checkbutton(self.ff_frame_question_description_functions, text="Bild 1 hochladen?", variable=self.ff_var_use_image_1, onvalue=1, offvalue=0)
self.ff_check_use_image_1_in_description.deselect()
self.ff_check_use_image_1_in_description.grid(row=5, column=0, sticky=W, padx=90, pady=(10, 0))
        # Add image 2
self.ff_var_use_image_2 = IntVar()
self.ff_check_use_image_2_in_description = Checkbutton(self.ff_frame_question_description_functions, text="Bild 2 hochladen?", variable=self.ff_var_use_image_2, onvalue=1, offvalue=0)
self.ff_check_use_image_2_in_description.deselect()
self.ff_check_use_image_2_in_description.grid(row=6, column=0, sticky=W, padx=90)
        # Add image 3
self.ff_var_use_image_3 = IntVar()
self.ff_check_use_image_3_in_description = Checkbutton(self.ff_frame_question_description_functions, text="Bild 3 hochladen?", variable=self.ff_var_use_image_3, onvalue=1, offvalue=0)
self.ff_check_use_image_3_in_description.deselect()
self.ff_check_use_image_3_in_description.grid(row=7, column=0, sticky=W, padx=90)
        # Buttons - add image & remove image
self.ff_add_img_to_description_btn = Button(self.ff_frame_question_description_functions, text="Bild hinzufügen", command=lambda: ff_add_image_to_description_and_create_labels())
self.ff_add_img_to_description_btn.grid(row=8, column=0, sticky=W, padx = 10, pady=(20,0))
        # Add an image to the question text
def ff_add_image_to_description_and_create_labels():
            # Create labels
self.ff_question_description_img_1_filename_label = Label(self.ff_frame_description_picture, text=self.ff_description_img_name_1)
self.ff_question_description_img_2_filename_label = Label(self.ff_frame_description_picture, text=self.ff_description_img_name_2)
self.ff_question_description_img_3_filename_label = Label(self.ff_frame_description_picture, text=self.ff_description_img_name_3)
self.ff_description_img_name_1, self.ff_description_img_name_2, self.ff_description_img_name_3, self.ff_description_img_path_1, self.ff_description_img_path_2, self.ff_description_img_path_3, self.ff_question_description_img_1_filename_label, self.ff_question_description_img_2_filename_label, self.ff_question_description_img_3_filename_label = test_generator_modul_ilias_test_struktur.Additional_Funtions.add_image_to_description(
self,
self.project_root_path,
self.ff_var_use_image_1.get(),
self.ff_var_use_image_2.get(),
self.ff_var_use_image_3.get(),
self.ff_frame_description_picture,
self.ff_description_img_name_1,
self.ff_description_img_name_2,
self.ff_description_img_name_3,
self.ff_description_img_path_1,
self.ff_description_img_path_2,
self.ff_description_img_path_3,
)
self.ff_remove_img_from_description_btn = Button(self.ff_frame_question_description_functions, text="Bild entfernen", command=lambda: ff_add_image_to_description_and_delete_labels())
self.ff_remove_img_from_description_btn.grid(row=8, column=0, sticky=W, padx=120, pady=(20,0))
        # Remove an image from the question text
def ff_add_image_to_description_and_delete_labels():
self.ff_description_img_name_1, self.ff_description_img_name_2, self.ff_description_img_name_3 = test_generator_modul_ilias_test_struktur.Additional_Funtions.delete_image_from_description(
self, self.ff_var_use_image_1.get(),
self.ff_var_use_image_2.get(),
self.ff_var_use_image_3.get(),
self.ff_question_description_img_1_filename_label,
self.ff_question_description_img_2_filename_label,
self.ff_question_description_img_3_filename_label,
self.ff_description_img_name_1,
self.ff_description_img_name_2,
self.ff_description_img_name_3,
)
###################### "Taxonomie Einstellungen" - FRAME -------- LABELS / ENTRYS / BUTTONS ################
self.ff_taxonomy_settings_btn = Button(self.ff_frame_taxonomy_settings, text="Taxonomie Einstellungen",command=lambda: test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__(self))
self.ff_taxonomy_settings_btn.grid(row=3, column=0, columnspan = 2, padx=10, sticky="W")
self.ff_question_difficulty_label = Label(self.ff_frame_question_attributes, text="Schwierigkeit")
self.ff_question_difficulty_label.grid(row=0, column=0, pady=5, padx=5, sticky=W)
self.ff_question_difficulty_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_difficulty_entry.grid(row=0, column=1, pady=5, padx=5, sticky=W)
self.ff_question_category_label = Label(self.ff_frame_question_attributes, text="Fragenkategorie")
self.ff_question_category_label.grid(row=1, column=0, pady=5, padx=5, sticky=W)
self.ff_question_category_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_category_entry.grid(row=1, column=1, pady=5, padx=5, sticky=W)
self.ff_question_type_label = Label(self.ff_frame_question_attributes, text="Fragen-Typ")
self.ff_question_type_label.grid(row=0, column=2, pady=5, padx=5, sticky=W)
self.ff_question_type_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_type_entry.grid(row=0, column=3, pady=5, padx=5, sticky=W)
self.ff_question_type_entry.insert(0, "Formelfrage")
self.ff_question_pool_tag_label = Label(self.ff_frame_question_attributes, text="Pool-Tag")
self.ff_question_pool_tag_label.grid(row=1, column=2, pady=5, padx=5, sticky=W)
self.ff_question_pool_tag_entry = Entry(self.ff_frame_question_attributes, width=15)
self.ff_question_pool_tag_entry.grid(row=1, column=3, pady=5, padx=5, sticky=W)
###################### "Wertebereich berechnen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
        # Calculate the value range for the formula from input field: formula 1
self.ff_calculate_value_range_btn = Button(self.ff_frame_calculate_value_range, text="Wertebereich berechnen",command=lambda: Formelfrage.ff_calculate_value_range_function_in_GUI(self, "0"))
self.ff_calculate_value_range_btn.grid(row=0, column=0, padx=0, sticky=W)
        # Label for the input field
self.ff_calculate_value_range_id_label = Label(self.ff_frame_calculate_value_range, text="ID:")
self.ff_calculate_value_range_id_label.grid(row=0, column=0, pady=5, padx=70, sticky=E)
        # Input field for the ID
self.ff_calculate_value_range_id_entry = Entry(self.ff_frame_calculate_value_range, width=10)
self.ff_calculate_value_range_id_entry.grid(row=0, column=0, pady=5, padx=5, sticky=E)
        # Checkbox "Calculate value ranges for the question pool?"
self.ff_var_calculate_value_range_for_all_db_entries_check = IntVar()
self.ff_calculate_value_range_from_db_entries = Checkbutton(self.ff_frame_calculate_value_range, text="Wertebereiche für alle DB Einträge berechnen?", variable=self.ff_var_calculate_value_range_for_all_db_entries_check, onvalue=1, offvalue=0)
self.ff_calculate_value_range_from_db_entries.grid(row=1, column=0, sticky=W, pady=(10,0))
###################### "FF-Test erstellen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
        # Button to create the Formelfrage test
self.create_formelfrage_test_btn = Button(self.ff_frame_create_formelfrage_test, text="FF-Test erstellen", command=lambda: Create_Formelfrage_Test.__init__(self, self.ff_db_entry_to_index_dict))
self.create_formelfrage_test_btn.grid(row=0, column=0, sticky=W)
self.create_formelfrage_test_entry = Entry(self.ff_frame_create_formelfrage_test, width=15)
self.create_formelfrage_test_entry.grid(row=0, column=1, sticky=W, padx=0)
        # Checkbox "Use test settings?"
self.ff_create_test_settings_label = Label(self.ff_frame_create_formelfrage_test, text="Test-Einstellungen verwenden?")
self.ff_create_test_settings_label.grid(row=1, column=0, pady=5, padx=5, sticky=W)
self.ff_var_create_test_settings_check = IntVar()
self.ff_create_test_settings = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_create_test_settings_check, onvalue=1, offvalue=0, command=lambda: refresh_box_test_settings_profiles(self))
self.ff_create_test_settings.grid(row=1, column=1, sticky=W)
        # Combobox of profiles for the database
self.ff_profile_for_test_settings_value = []
        # Search the database for profiles
conn = sqlite3.connect(self.test_settings_database_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM " + self.test_settings_database_table)
profile_records = c.fetchall()
# Loop through Results
for profile_record in profile_records:
self.ff_profile_for_test_settings_value.append(profile_record[0])
conn.commit()
conn.close()
        ###
        # Used to process a combobox selection event
        # When test settings are activated, all profiles stored in the DB should be loaded
def ff_profile_selected(event):
self.var = event
self.ff_selected_profile_for_test_settings_box = ttk.Combobox(self.ff_frame_create_formelfrage_test, value=self.ff_profile_for_test_settings_value, width=8)
self.ff_selected_profile_for_test_settings_box.bind("<<ComboboxSelected>>", ff_profile_selected)
self.ff_selected_profile_for_test_settings_box.grid(row=1, column=1, sticky=W, padx=(22, 0))
        # When test settings are activated, all profiles stored in the DB should be loaded
        # Uses ff_profile_selected
def refresh_box_test_settings_profiles(self):
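            # Re-query the profiles table each time the checkbox is toggled so that
            # newly saved profiles appear in the combobox without restarting the GUI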
if self.ff_var_create_test_settings_check.get() == 1:
self.ff_selected_profile_for_test_settings_box.grid_forget()
                # Combobox of profiles for the database
self.ff_profile_for_test_settings_value = []
                # Search the database for profiles
conn = sqlite3.connect(self.test_settings_database_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM " + self.test_settings_database_table)
profile_records = c.fetchall()
# Loop through Results
for profile_record in profile_records:
self.ff_profile_for_test_settings_value.append(profile_record[0])
self.ff_selected_profile_for_test_settings_box = ttk.Combobox(self.ff_frame_create_formelfrage_test, value=self.ff_profile_for_test_settings_value, width=8)
self.ff_selected_profile_for_test_settings_box.bind("<<ComboboxSelected>>", ff_profile_selected)
self.ff_selected_profile_for_test_settings_box.grid(row=1, column=1, sticky=W, padx=(22, 0))
        # Checkbox "Use Latex for question text?"
self.ff_use_latex_on_text_label = Label(self.ff_frame_create_formelfrage_test, text="Latex für Fragentext nutzen?")
self.ff_use_latex_on_text_label.grid(row=2, column=0, sticky=W, padx=5)
self.ff_var_use_latex_on_text_check = IntVar()
self.ff_use_latex_on_text_check = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_use_latex_on_text_check, onvalue=1, offvalue=0)
self.ff_use_latex_on_text_check.deselect()
self.ff_use_latex_on_text_check.grid(row=2, column=1, sticky=W)
        # Checkbox "Generate all entries from the DB?"
self.ff_create_question_pool_all_label = Label(self.ff_frame_create_formelfrage_test, text="Alle Einträge aus der DB erzeugen?")
self.ff_create_question_pool_all_label.grid(row=4, column=0, pady=(10,0), padx=5, sticky=W)
self.ff_var_create_question_pool_all_check = IntVar()
self.ff_create_question_pool_all = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_create_question_pool_all_check, onvalue=1, offvalue=0)
self.ff_create_question_pool_all.grid(row=4, column=1, sticky=W, pady=(10,0))
        # Checkbox "Create multiple question pools separated by taxonomy?"
self.ff_create_multiple_question_pools_from_tax_label = Label(self.ff_frame_create_formelfrage_test, text="Mehrere Fragenpools (Taxonomie getrennt) erstellen?")
self.ff_create_multiple_question_pools_from_tax_label.grid(row=5, column=0, pady=(10,0), padx=5, sticky=W)
self.ff_var_create_multiple_question_pools_from_tax_check = IntVar()
self.ff_create_multiple_question_pools_from_tax = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_create_multiple_question_pools_from_tax_check, onvalue=1, offvalue=0)
self.ff_create_multiple_question_pools_from_tax.grid(row=5, column=1, sticky=W, pady=(10,0))
        # Checkbox "Keep taxonomy for separated pools?"
self.ff_remove_pool_tags_for_tax_label = Label(self.ff_frame_create_formelfrage_test, text=" ---> Taxonomie für getrennte Pools \"löschen\"?")
self.ff_remove_pool_tags_for_tax_label.grid(row=6, column=0, pady=(0,0), padx=5, sticky=W)
self.ff_var_remove_pool_tags_for_tax_check = IntVar()
self.ff_remove_pool_tags_for_tax = Checkbutton(self.ff_frame_create_formelfrage_test, text="", variable=self.ff_var_remove_pool_tags_for_tax_check, onvalue=1, offvalue=0)
self.ff_remove_pool_tags_for_tax.grid(row=6, column=1, sticky=W, pady=(0,0))
        # Button to create the Formelfrage question pool
self.create_formelfrage_pool_btn = Button(self.ff_frame_create_formelfrage_test, text="FF-Pool erstellen", command=lambda: Create_Formelfrage_Pool.__init__(self, self.ff_db_entry_to_index_dict, self.ff_var_create_question_pool_all_check.get(), self.ff_var_create_multiple_question_pools_from_tax_check.get()))
self.create_formelfrage_pool_btn.grid(row=3, column=0, sticky=W, pady=(30,0))
self.create_formelfrage_pool_entry = Entry(self.ff_frame_create_formelfrage_test, width=15)
self.create_formelfrage_pool_entry.grid(row=3, column=1, sticky=W, padx=0, pady=(30,0))
###################### "Formelfrage-Datenbank" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
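        # Shows all Formelfrage DB entries in a separate window (delegated to the
        # test_generator_modul_datenbanken_anzeigen module)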
self.ff_database_show_db_formelfrage_btn = Button(self.ff_frame_database, text="FF - Datenbank anzeigen", command=lambda: test_generator_modul_datenbanken_anzeigen.MainGUI.__init__(self, self.database_formelfrage_path, self.ff_database_table))
self.ff_database_show_db_formelfrage_btn.grid(row=0, column=0, sticky=W, pady=5)
self.ff_database_save_id_to_db_formelfrage_btn = Button(self.ff_frame_database, text="Speichern unter neuer ID", command=lambda: Formelfrage.ff_save_id_to_db(self, self.ff_database_table, self.ff_db_column_names_string))
self.ff_database_save_id_to_db_formelfrage_btn.grid(row=1, column=0, sticky=W, pady=5)
self.ff_database_delete_id_from_db_btn = Button(self.ff_frame_database, text="ID Löschen", command=lambda: Formelfrage.ff_delete_id_from_db(self))
self.ff_database_delete_id_from_db_btn.grid(row=6, column=0, sticky=W, pady=5)
self.ff_delete_box = Entry(self.ff_frame_database, width=10)
self.ff_delete_box.grid(row=6, column=0, padx=80, sticky=W)
self.ff_database_new_question_btn = Button(self.ff_frame_database, text="GUI Einträge leeren", command=lambda: Formelfrage.ff_clear_GUI(self))
self.ff_database_new_question_btn.grid(row=8, column=0, sticky=W, pady=5)
self.ff_database_edit_btn = Button(self.ff_frame_database, text="Aktuellen Eintrag editieren", command=lambda: Formelfrage.ff_edit_id_from_db(self))
self.ff_database_edit_btn.grid(row=3, column=0, sticky=W, pady=5)
self.ff_database_load_id_btn = Button(self.ff_frame_database, text="ID Laden", command=lambda: Formelfrage.ff_load_id_from_db(self, self.ff_db_entry_to_index_dict))
self.ff_database_load_id_btn.grid(row=4, column=0, sticky=W, pady=(15,0))
self.ff_load_box = Entry(self.ff_frame_database, width=10)
self.ff_load_box.grid(row=4, column=0, sticky=W, padx=80, pady=(15,0))
self.ff_hidden_edit_box_entry = Entry(self.ff_frame_database, width=10)
        # Checkbox - "Question text with highlighting?"
self.ff_highlight_question_text_label = Label(self.ff_frame_database, text="Fragentext mit Highlighting?")
self.ff_highlight_question_text_label.grid(row=5, column=0, pady=5, padx=5)
self.ff_var_highlight_question_text = IntVar()
self.ff_check_highlight_question_text = Checkbutton(self.ff_frame_database, text="", variable=self.ff_var_highlight_question_text, onvalue=1, offvalue=0)
self.ff_check_highlight_question_text.deselect()
self.ff_check_highlight_question_text.grid(row=5, column=0, sticky=E)
        # Checkbox - "Delete all DB entries?"
self.ff_delete_all_label = Label(self.ff_frame_database, text="Alle DB Einträge löschen?")
self.ff_delete_all_label.grid(row=7, column=0, pady=5, padx=5)
self.ff_var_delete_all = IntVar()
self.ff_check_delete_all = Checkbutton(self.ff_frame_database, text="", variable=self.ff_var_delete_all, onvalue=1, offvalue=0)
self.ff_check_delete_all.deselect()
self.ff_check_delete_all.grid(row=7, column=0, sticky=E)
###################### "Excel Import/Export" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
# excel_import_btn
self.ff_excel_import_to_db_formelfrage_btn = Button(self.ff_frame_excel_import_export, text="Excel-Datei importieren", command=lambda: test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db(self, self.ff_question_type_name, self.ff_db_entry_to_index_dict, self.formelfrage_tab))
self.ff_excel_import_to_db_formelfrage_btn.grid(row=0, column=1, sticky=W, pady=5, padx=10)
# excel_export_btn
self.ff_excel_export_to_xlsx_formelfrage_btn = Button(self.ff_frame_excel_import_export, text="Datenbank exportieren",command=lambda: test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx(self, self.project_root_path, self.ff_db_entry_to_index_dict, self.database_formelfrage_path, self.ff_database, self.ff_database_table, self.ff_xlsx_workbook_name, self.ff_xlsx_worksheet_name))
self.ff_excel_export_to_xlsx_formelfrage_btn.grid(row=1, column=1, sticky=W, pady=5, padx=10)
# ILIAS_testfile_import
self.ff_import_ilias_testfile_btn = Button(self.ff_frame_excel_import_export, text="ILIAS-Datei importieren",command=lambda: test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__(self, self.project_root_path))
self.ff_import_ilias_testfile_btn.grid(row=2, column=1, sticky=W, pady=(20,0), padx=10)
###################### "Fragentext Funktionen" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.add_latex_term_btn = Button(self.ff_frame_question_description_functions, text="Text \"Latex\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex(self, self.ff_question_description_main_entry))
self.add_latex_term_btn.grid(row=1, column=0, padx=10, sticky="W")
self.set_text_sub_btn = Button(self.ff_frame_question_description_functions, text="Text \"Tiefgestellt\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub(self, self.ff_question_description_main_entry))
self.set_text_sub_btn .grid(row=2, column=0, padx=10, pady=(10, 0), sticky="W")
self.set_text_sup_btn = Button(self.ff_frame_question_description_functions, text="Text \"Hochgestellt\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup(self, self.ff_question_description_main_entry))
self.set_text_sup_btn.grid(row=3, column=0, padx=10, sticky="W")
self.set_text_italic_btn = Button(self.ff_frame_question_description_functions, text="Text \"Kursiv\"", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic(self, self.ff_question_description_main_entry))
self.set_text_italic_btn.grid(row=4, column=0, padx=10, sticky="W")
self.set_postion_for_picture_1_btn = Button(self.ff_frame_question_description_functions, text="Pos. Bild 1", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1(self, self.ff_question_description_main_entry))
self.set_postion_for_picture_1_btn.grid(row=5, column=0, padx=10, pady=(10, 0), sticky="W")
self.set_postion_for_picture_2_btn = Button(self.ff_frame_question_description_functions, text="Pos. Bild 2", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2(self, self.ff_question_description_main_entry))
self.set_postion_for_picture_2_btn.grid(row=6, column=0, padx=10, sticky="W")
self.set_postion_for_picture_3_btn = Button(self.ff_frame_question_description_functions, text="Pos. Bild 3", command=lambda: test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3(self, self.ff_question_description_main_entry))
self.set_postion_for_picture_3_btn.grid(row=7, column=0, padx=10, sticky="W")
###################### "Zeigerdiagramme" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.ff_vector_diagram_type = ["Serienschaltung: RL", "Serienschaltung: RC", "Serienschaltung: RLC"]
self.ff_vector_diagram_type_box = ttk.Combobox(self.ff_frame_vector_diagram, value=self.ff_vector_diagram_type, width=20)
self.ff_vector_diagram_type_box.grid(row=0, column=0, sticky=W, pady=10)
self.ff_vector_diagram_U_label = Label(self.ff_frame_vector_diagram, text='Wert für U:')
self.ff_vector_diagram_U_label.grid(row=1, column=0, sticky=W)
self.ff_vector_diagram_U_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_U_entry.grid(row=1, column=0, sticky=W, padx=70)
self.ff_vector_diagram_R_label = Label(self.ff_frame_vector_diagram, text='Wert für R:')
self.ff_vector_diagram_R_label.grid(row=2, column=0, sticky=W)
self.ff_vector_diagram_R_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_R_entry.grid(row=2, column=0, sticky=W, padx=70)
self.ff_vector_diagram_L_label = Label(self.ff_frame_vector_diagram, text='Wert für L:')
self.ff_vector_diagram_L_label.grid(row=3, column=0, sticky=W)
self.ff_vector_diagram_L_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_L_entry.grid(row=3, column=0, sticky=W, padx=70)
self.ff_vector_diagram_C_label = Label(self.ff_frame_vector_diagram, text='Wert für C:')
self.ff_vector_diagram_C_label.grid(row=4, column=0, sticky=W)
self.ff_vector_diagram_C_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_C_entry.grid(row=4, column=0, sticky=W, padx=70)
self.ff_vector_diagram_freq_label = Label(self.ff_frame_vector_diagram, text='Wert für f:')
self.ff_vector_diagram_freq_label.grid(row=5, column=0, sticky=W)
self.ff_vector_diagram_freq_entry = Entry(self.ff_frame_vector_diagram, width=10)
self.ff_vector_diagram_freq_entry.grid(row=5, column=0, sticky=W, padx=70)
# Create the current/voltage diagram
self.ff_var_create_voltage_current_vector_diagram = IntVar()
self.ff_check_create_voltage_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Strom-/Spannungsdiagramm", variable=self.ff_var_create_voltage_current_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_voltage_vector_diagram.deselect()
self.ff_check_create_voltage_vector_diagram.grid(row=1, column=1, sticky=W)
# Impedance diagram
self.ff_var_create_impedance_vector_diagram = IntVar()
self.ff_check_create_impedance_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Impedanz-Diagramm ", variable=self.ff_var_create_impedance_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_impedance_vector_diagram.deselect()
self.ff_check_create_impedance_vector_diagram.grid(row=2, column=1, sticky=W)
# Admittance diagram
self.ff_var_create_admittance_vector_diagram = IntVar()
self.ff_check_create_admittance_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Admittanz-Diagramm ", variable=self.ff_var_create_admittance_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_admittance_vector_diagram.deselect()
self.ff_check_create_admittance_vector_diagram.grid(row=3, column=1, sticky=W)
# Power diagram
self.ff_var_create_power_vector_diagram = IntVar()
self.ff_check_create_power_vector_diagram = Checkbutton(self.ff_frame_vector_diagram, text="Leistungsdiagramm ", variable=self.ff_var_create_power_vector_diagram, onvalue=1, offvalue=0)
self.ff_check_create_power_vector_diagram.deselect()
self.ff_check_create_power_vector_diagram.grid(row=4, column=1, sticky=W)
self.ff_vector_diagram_btn = Button(self.ff_frame_vector_diagram, text="Zeigerdiagramm erzeugen", command=lambda: test_generator_modul_zeigerdiagramme.Zeigerdiagramme.__init__(self, self.ff_vector_diagram_type_box.get()))
self.ff_vector_diagram_btn.grid(row=10, column=0, padx=10, pady=(10, 0), sticky="W")
###################### "Formelfrage" - FRAME -------- LABELS / ENTRYS / BUTTONS ###################
self.ff_question_author_label = Label(self.ff_frame, text="Fragen-Autor")
self.ff_question_author_label.grid(row=0, column=0, sticky=W, pady=(10, 0), padx=10)
self.ff_question_author_entry = Entry(self.ff_frame, width=20)
self.ff_question_author_entry.grid(row=0, column=1, sticky=W, pady=(10, 0))
self.ff_question_title_label = Label(self.ff_frame, text="Fragen-Titel")
self.ff_question_title_label.grid(row=1, column=0, sticky=W, padx=10, pady=(10, 0))
self.ff_question_title_entry = Entry(self.ff_frame, width=60)
self.ff_question_title_entry.grid(row=1, column=1, sticky=W, pady=(10, 0))
self.ff_question_description_title_label = Label(self.ff_frame, text="Fragen-Beschreibung")
self.ff_question_description_title_label.grid(row=2, column=0, sticky=W, padx=10)
self.ff_question_description_title_entry = Entry(self.ff_frame, width=60)
self.ff_question_description_title_entry.grid(row=2, column=1, sticky=W)
self.ff_question_textfield_label = Label(self.ff_frame, text="Fragen-Text")
self.ff_question_textfield_label.grid(row=3, column=0, sticky=W, padx=10)
self.ff_bar = Scrollbar(self.ff_frame)
self.ff_question_description_main_entry = Text(self.ff_frame, height=6, width=65, font=('Helvetica', 9))
self.ff_bar.grid(row=3, column=2, sticky=W)
self.ff_question_description_main_entry.grid(row=3, column=1, pady=10, sticky=W)
self.ff_bar.config(command=self.ff_question_description_main_entry.yview)
self.ff_question_description_main_entry.config(yscrollcommand=self.ff_bar.set)
############## PROCESSING TIME
self.ff_processing_time_label = Label(self.ff_frame, text="Bearbeitungsdauer")
self.ff_processing_time_label.grid(row=4, column=0, sticky=W, pady=(5, 0), padx=10)
self.ff_processing_time_label = Label(self.ff_frame, text="Std:")
self.ff_processing_time_label.grid(row=4, column=1, sticky=W, pady=(5, 0))
self.ff_processing_time_label = Label(self.ff_frame, text="Min:")
self.ff_processing_time_label.grid(row=4, column=1, sticky=W, padx=70, pady=(5, 0))
self.ff_processing_time_label = Label(self.ff_frame, text="Sek:")
self.ff_processing_time_label.grid(row=4, column=1, sticky=W, padx=145, pady=(5, 0))
self.ff_processingtime_hours = list(range(24))
self.ff_processingtime_minutes = list(range(60))
self.ff_processingtime_seconds = list(range(60))
self.ff_proc_hours_box = ttk.Combobox(self.ff_frame, value=self.ff_processingtime_hours, width=2)
self.ff_proc_minutes_box = ttk.Combobox(self.ff_frame, value=self.ff_processingtime_minutes, width=2)
self.ff_proc_seconds_box = ttk.Combobox(self.ff_frame, value=self.ff_processingtime_seconds, width=2)
# Default the time per question to 23 h
self.ff_proc_hours_box.current(23)
self.ff_proc_minutes_box.current(0)
self.ff_proc_seconds_box.current(0)
def selected_hours(event):
self.selected_hours = self.ff_proc_hours_box.get()
def selected_minutes(event):
self.selected_minutes = self.ff_proc_minutes_box.get()
def selected_seconds(event):
self.selected_seconds = self.ff_proc_seconds_box.get()
self.ff_proc_hours_box.bind("<<ComboboxSelected>>", selected_hours)
self.ff_proc_minutes_box.bind("<<ComboboxSelected>>", selected_minutes)
self.ff_proc_seconds_box.bind("<<ComboboxSelected>>", selected_seconds)
self.ff_proc_hours_box.grid(row=4, column=1, sticky=W, padx=25, pady=(5, 0))
self.ff_proc_minutes_box.grid(row=4, column=1, sticky=W, padx=100, pady=(5, 0))
self.ff_proc_seconds_box.grid(row=4, column=1, sticky=W, padx=170, pady=(5, 0))
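# The three comboboxes share grid column 1; the increasing padx offsets (25/100/170) place each box right after its "Std:"/"Min:"/"Sek:" label (padx 0/70/145)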
########################### HEADINGS / LABELS FOR THE INPUT FIELD MATRIX ##############################
self.var_min_label = Label(self.ff_frame, text=' Min.')
self.var_min_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=60)
self.var_max_label = Label(self.ff_frame, text=' Max.')
self.var_max_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=100)
self.var_prec_label = Label(self.ff_frame, text=' Präz.')
self.var_prec_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=140)
self.var_divby_label = Label(self.ff_frame, text=' Teilbar\ndurch')
self.var_divby_label.grid(row=5, column=1, sticky=W, pady=(20, 0), padx=180)
self.variable1_label = Label(self.ff_frame, text='Variable 1')
self.variable2_label = Label(self.ff_frame, text='Variable 2')
self.variable3_label = Label(self.ff_frame, text='Variable 3')
self.variable4_label = Label(self.ff_frame, text='Variable 4')
self.variable5_label = Label(self.ff_frame, text='Variable 5')
self.variable6_label = Label(self.ff_frame, text='Variable 6')
self.variable7_label = Label(self.ff_frame, text='Variable 7')
self.variable8_label = Label(self.ff_frame, text='Variable 8')
self.variable9_label = Label(self.ff_frame, text='Variable 9')
self.variable10_label = Label(self.ff_frame, text='Variable 10')
self.variable11_label = Label(self.ff_frame, text='Variable 11')
self.variable12_label = Label(self.ff_frame, text='Variable 12')
self.variable13_label = Label(self.ff_frame, text='Variable 13')
self.variable14_label = Label(self.ff_frame, text='Variable 14')
self.variable15_label = Label(self.ff_frame, text='Variable 15')
# The label for Var1 is always visible. Var2-15 are shown/hidden depending on the selection
self.variable1_label.grid(row=6, column=0, sticky=W, padx=20)
########################### INPUT FIELDS / ENTRIES FOR THE INPUT FIELD MATRIX ##############################
self.var1_name_entry = Entry(self.ff_frame, width=6)
self.var1_min_entry = Entry(self.ff_frame, width=6)
self.var1_max_entry = Entry(self.ff_frame, width=6)
self.var1_prec_entry = Entry(self.ff_frame, width=6)
self.var1_divby_entry = Entry(self.ff_frame, width=6)
self.var2_name_entry = Entry(self.ff_frame, width=6)
self.var2_min_entry = Entry(self.ff_frame, width=6)
self.var2_max_entry = Entry(self.ff_frame, width=6)
self.var2_prec_entry = Entry(self.ff_frame, width=6)
self.var2_divby_entry = Entry(self.ff_frame, width=6)
self.var3_name_entry = Entry(self.ff_frame, width=6)
self.var3_min_entry = Entry(self.ff_frame, width=6)
self.var3_max_entry = Entry(self.ff_frame, width=6)
self.var3_prec_entry = Entry(self.ff_frame, width=6)
self.var3_divby_entry = Entry(self.ff_frame, width=6)
self.var4_name_entry = Entry(self.ff_frame, width=6)
self.var4_min_entry = Entry(self.ff_frame, width=6)
self.var4_max_entry = Entry(self.ff_frame, width=6)
self.var4_prec_entry = Entry(self.ff_frame, width=6)
self.var4_divby_entry = Entry(self.ff_frame, width=6)
self.var5_name_entry = Entry(self.ff_frame, width=6)
self.var5_min_entry = Entry(self.ff_frame, width=6)
self.var5_max_entry = Entry(self.ff_frame, width=6)
self.var5_prec_entry = Entry(self.ff_frame, width=6)
self.var5_divby_entry = Entry(self.ff_frame, width=6)
self.var6_name_entry = Entry(self.ff_frame, width=6)
self.var6_min_entry = Entry(self.ff_frame, width=6)
self.var6_max_entry = Entry(self.ff_frame, width=6)
self.var6_prec_entry = Entry(self.ff_frame, width=6)
self.var6_divby_entry = Entry(self.ff_frame, width=6)
self.var7_name_entry = Entry(self.ff_frame, width=6)
self.var7_min_entry = Entry(self.ff_frame, width=6)
self.var7_max_entry = Entry(self.ff_frame, width=6)
self.var7_prec_entry = Entry(self.ff_frame, width=6)
self.var7_divby_entry = Entry(self.ff_frame, width=6)
self.var8_name_entry = Entry(self.ff_frame, width=6)
self.var8_min_entry = Entry(self.ff_frame, width=6)
self.var8_max_entry = Entry(self.ff_frame, width=6)
self.var8_prec_entry = Entry(self.ff_frame, width=6)
self.var8_divby_entry = Entry(self.ff_frame, width=6)
self.var9_name_entry = Entry(self.ff_frame, width=6)
self.var9_min_entry = Entry(self.ff_frame, width=6)
self.var9_max_entry = Entry(self.ff_frame, width=6)
self.var9_prec_entry = Entry(self.ff_frame, width=6)
self.var9_divby_entry = Entry(self.ff_frame, width=6)
self.var10_name_entry = Entry(self.ff_frame, width=6)
self.var10_min_entry = Entry(self.ff_frame, width=6)
self.var10_max_entry = Entry(self.ff_frame, width=6)
self.var10_prec_entry = Entry(self.ff_frame, width=6)
self.var10_divby_entry = Entry(self.ff_frame, width=6)
self.var11_name_entry = Entry(self.ff_frame, width=6)
self.var11_min_entry = Entry(self.ff_frame, width=6)
self.var11_max_entry = Entry(self.ff_frame, width=6)
self.var11_prec_entry = Entry(self.ff_frame, width=6)
self.var11_divby_entry = Entry(self.ff_frame, width=6)
self.var12_name_entry = Entry(self.ff_frame, width=6)
self.var12_min_entry = Entry(self.ff_frame, width=6)
self.var12_max_entry = Entry(self.ff_frame, width=6)
self.var12_prec_entry = Entry(self.ff_frame, width=6)
self.var12_divby_entry = Entry(self.ff_frame, width=6)
self.var13_name_entry = Entry(self.ff_frame, width=6)
self.var13_min_entry = Entry(self.ff_frame, width=6)
self.var13_max_entry = Entry(self.ff_frame, width=6)
self.var13_prec_entry = Entry(self.ff_frame, width=6)
self.var13_divby_entry = Entry(self.ff_frame, width=6)
self.var14_name_entry = Entry(self.ff_frame, width=6)
self.var14_min_entry = Entry(self.ff_frame, width=6)
self.var14_max_entry = Entry(self.ff_frame, width=6)
self.var14_prec_entry = Entry(self.ff_frame, width=6)
self.var14_divby_entry = Entry(self.ff_frame, width=6)
self.var15_name_entry = Entry(self.ff_frame, width=6)
self.var15_min_entry = Entry(self.ff_frame, width=6)
self.var15_max_entry = Entry(self.ff_frame, width=6)
self.var15_prec_entry = Entry(self.ff_frame, width=6)
self.var15_divby_entry = Entry(self.ff_frame, width=6)
# Collect the variable entries in lists
# The lists allow a loop to be used when the widgets are placed on the GUI
self.var_label_list = [self.variable1_label, self.variable2_label, self.variable3_label, self.variable4_label, self.variable5_label, self.variable6_label, self.variable7_label,
self.variable8_label, self.variable9_label, self.variable10_label, self.variable11_label, self.variable12_label, self.variable13_label, self.variable14_label, self.variable15_label]
self.var_name_entry_list = [self.var1_name_entry, self.var2_name_entry, self.var3_name_entry, self.var4_name_entry, self.var5_name_entry, self.var6_name_entry, self.var7_name_entry,
self.var8_name_entry, self.var9_name_entry, self.var10_name_entry, self.var11_name_entry, self.var12_name_entry, self.var13_name_entry, self.var14_name_entry, self.var15_name_entry]
self.var_min_entry_list = [self.var1_min_entry, self.var2_min_entry, self.var3_min_entry, self.var4_min_entry, self.var5_min_entry, self.var6_min_entry, self.var7_min_entry,
self.var8_min_entry, self.var9_min_entry, self.var10_min_entry, self.var11_min_entry, self.var12_min_entry, self.var13_min_entry, self.var14_min_entry, self.var15_min_entry]
self.var_max_entry_list = [self.var1_max_entry, self.var2_max_entry, self.var3_max_entry, self.var4_max_entry, self.var5_max_entry, self.var6_max_entry, self.var7_max_entry,
self.var8_max_entry, self.var9_max_entry, self.var10_max_entry, self.var11_max_entry, self.var12_max_entry, self.var13_max_entry, self.var14_max_entry, self.var15_max_entry]
self.var_prec_entry_list = [self.var1_prec_entry, self.var2_prec_entry, self.var3_prec_entry, self.var4_prec_entry, self.var5_prec_entry, self.var6_prec_entry, self.var7_prec_entry,
self.var8_prec_entry, self.var9_prec_entry, self.var10_prec_entry, self.var11_prec_entry, self.var12_prec_entry, self.var13_prec_entry, self.var14_prec_entry, self.var15_prec_entry]
self.var_divby_entry_list = [self.var1_divby_entry, self.var2_divby_entry, self.var3_divby_entry, self.var4_divby_entry, self.var5_divby_entry, self.var6_divby_entry, self.var7_divby_entry,
self.var8_divby_entry, self.var9_divby_entry, self.var10_divby_entry, self.var11_divby_entry, self.var12_divby_entry, self.var13_divby_entry, self.var14_divby_entry, self.var15_divby_entry]
# The input fields for Var1 are always visible. Var2-15 are shown/hidden depending on the selection
self.var1_name_entry.grid(row=6, column=1, sticky=W)
self.var1_min_entry.grid(row=6, column=1, sticky=W, padx=60)
self.var1_max_entry.grid(row=6, column=1, sticky=W, padx=100)
self.var1_prec_entry.grid(row=6, column=1, sticky=W, padx=140)
self.var1_divby_entry.grid(row=6, column=1, sticky=W, padx=180)
########################### SHOW / HIDE THE INPUT FIELD MATRIX (VARIABLES) ##############################
# Hier werden durch die Funktion "ff_answer_selected" die Variable - Eingabefelder (je nach Wert) ein-/ausgeblendet
def ff_answer_selected(event): # "variable" need for comboBox Binding
self.selected_number_of_variables = int(self.ff_numbers_of_answers_box.get())
# Loop that places the entries on the GUI
# If, for example, 5 variables are selected, rows 1-5 are placed on the GUI
for i in range(self.selected_number_of_variables):
Formelfrage.ff_variable_show_or_remove(self, self.var_label_list[i], self.var_name_entry_list[i], self.var_min_entry_list[i], self.var_max_entry_list[i], self.var_prec_entry_list[i], self.var_divby_entry_list[i], str(i+7), "show")
# Loop that hides the remaining entries on the GUI
# If, for example, 5 variables are selected, rows 6-15 are hidden
for j in range(self.selected_number_of_variables, len(self.var_min_entry_list)):
Formelfrage.ff_variable_show_or_remove(self, self.var_label_list[j], self.var_name_entry_list[j], self.var_min_entry_list[j], self.var_max_entry_list[j], self.var_prec_entry_list[j], self.var_divby_entry_list[j], str(j+7), "remove")
self.ff_numbers_of_answers_box_label = Label(self.ff_frame, text="Anzahl der Variablen: ")
self.ff_numbers_of_answers_box_label.grid(row=5, column=0, sticky=W, padx=10, pady=(20, 0))
self.ff_numbers_of_answers_value = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"]
self.ff_numbers_of_answers_box = ttk.Combobox(self.ff_frame, value=self.ff_numbers_of_answers_value, width=3)
self.ff_numbers_of_answers_box.bind("<<ComboboxSelected>>", ff_answer_selected)
self.ff_numbers_of_answers_box.grid(row=5, column=1, sticky=W, pady=(20, 0))
self.ff_numbers_of_answers_box.current(0)
########################### UNIT SELECTION FOR VARIABLES ---- CURRENTLY INACTIVE ##############################
self.select_var_units = ["Unit", "H", "mH", "µH", "nH", "pH", "---", "F", "mF", "µF", "nF", "pF", "---", "MV", "kV", "V", "mV", "µV", "---"]
self.var1_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var1_unit_myCombo.current(0)
self.var2_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var2_unit_myCombo.current(0)
self.var3_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var3_unit_myCombo.current(0)
self.var4_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var4_unit_myCombo.current(0)
self.var5_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var5_unit_myCombo.current(0)
self.var6_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var6_unit_myCombo.current(0)
self.var7_unit_myCombo = ttk.Combobox(self.ff_frame, value=self.select_var_units, width=5)
self.var7_unit_myCombo.current(0)
########################### HEADINGS / LABELS FOR THE INPUT FIELD MATRIX ##############################
self.res_min_label = Label(self.ff_frame, text=' Min.')
self.res_max_label = Label(self.ff_frame, text=' Max.')
self.res_prec_label = Label(self.ff_frame, text=' Präz.')
self.res_tol_label = Label(self.ff_frame, text=' Tol.')
self.res_points_label = Label(self.ff_frame, text='Punkte')
self.res_formula_label = Label(self.ff_frame, text='Formel')
self.res_min_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=60)
self.res_max_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=100)
self.res_prec_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=140)
self.res_tol_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=180)
self.res_points_label.grid(row=40, column=1, sticky=W, pady=(10, 0), padx=220)
self.res_formula_label.grid(row=40, column=1, sticky=E, pady=(10, 0), padx=100)
self.result1_label = Label(self.ff_frame, text='Ergebnis 1')
self.result2_label = Label(self.ff_frame, text='Ergebnis 2')
self.result3_label = Label(self.ff_frame, text='Ergebnis 3')
self.result4_label = Label(self.ff_frame, text='Ergebnis 4')
self.result5_label = Label(self.ff_frame, text='Ergebnis 5')
self.result6_label = Label(self.ff_frame, text='Ergebnis 6')
self.result7_label = Label(self.ff_frame, text='Ergebnis 7')
self.result8_label = Label(self.ff_frame, text='Ergebnis 8')
self.result9_label = Label(self.ff_frame, text='Ergebnis 9')
self.result10_label = Label(self.ff_frame, text='Ergebnis 10')
# The label for Res1 is always visible. Res2-10 are shown/hidden depending on the selection
self.result1_label.grid(row=41, column=0, sticky=W, padx=20)
########################### INPUT FIELDS / ENTRIES FOR THE INPUT FIELD MATRIX ##############################
self.res1_name_entry = Entry(self.ff_frame, width=6)
self.res1_min_entry = Entry(self.ff_frame, width=6)
self.res1_max_entry = Entry(self.ff_frame, width=6)
self.res1_prec_entry = Entry(self.ff_frame, width=6)
self.res1_tol_entry = Entry(self.ff_frame, width=6)
self.res1_points_entry = Entry(self.ff_frame, width=6)
self.res1_formula_entry = Entry(self.ff_frame, width=30)
self.res2_name_entry = Entry(self.ff_frame, width=6)
self.res2_min_entry = Entry(self.ff_frame, width=6)
self.res2_max_entry = Entry(self.ff_frame, width=6)
self.res2_prec_entry = Entry(self.ff_frame, width=6)
self.res2_tol_entry = Entry(self.ff_frame, width=6)
self.res2_points_entry = Entry(self.ff_frame, width=6)
self.res2_formula_entry = Entry(self.ff_frame, width=30)
self.res3_name_entry = Entry(self.ff_frame, width=6)
self.res3_min_entry = Entry(self.ff_frame, width=6)
self.res3_max_entry = Entry(self.ff_frame, width=6)
self.res3_prec_entry = Entry(self.ff_frame, width=6)
self.res3_tol_entry = Entry(self.ff_frame, width=6)
self.res3_points_entry = Entry(self.ff_frame, width=6)
self.res3_formula_entry = Entry(self.ff_frame, width=30)
self.res4_name_entry = Entry(self.ff_frame, width=6)
self.res4_min_entry = Entry(self.ff_frame, width=6)
self.res4_max_entry = Entry(self.ff_frame, width=6)
self.res4_prec_entry = Entry(self.ff_frame, width=6)
self.res4_tol_entry = Entry(self.ff_frame, width=6)
self.res4_points_entry = Entry(self.ff_frame, width=6)
self.res4_formula_entry = Entry(self.ff_frame, width=30)
self.res5_name_entry = Entry(self.ff_frame, width=6)
self.res5_min_entry = Entry(self.ff_frame, width=6)
self.res5_max_entry = Entry(self.ff_frame, width=6)
self.res5_prec_entry = Entry(self.ff_frame, width=6)
self.res5_tol_entry = Entry(self.ff_frame, width=6)
self.res5_points_entry = Entry(self.ff_frame, width=6)
self.res5_formula_entry = Entry(self.ff_frame, width=30)
self.res6_name_entry = Entry(self.ff_frame, width=6)
self.res6_min_entry = Entry(self.ff_frame, width=6)
self.res6_max_entry = Entry(self.ff_frame, width=6)
self.res6_prec_entry = Entry(self.ff_frame, width=6)
self.res6_tol_entry = Entry(self.ff_frame, width=6)
self.res6_points_entry = Entry(self.ff_frame, width=6)
self.res6_formula_entry = Entry(self.ff_frame, width=30)
self.res7_name_entry = Entry(self.ff_frame, width=6)
self.res7_min_entry = Entry(self.ff_frame, width=6)
self.res7_max_entry = Entry(self.ff_frame, width=6)
self.res7_prec_entry = Entry(self.ff_frame, width=6)
self.res7_tol_entry = Entry(self.ff_frame, width=6)
self.res7_points_entry = Entry(self.ff_frame, width=6)
self.res7_formula_entry = Entry(self.ff_frame, width=30)
self.res8_name_entry = Entry(self.ff_frame, width=6)
self.res8_min_entry = Entry(self.ff_frame, width=6)
self.res8_max_entry = Entry(self.ff_frame, width=6)
self.res8_prec_entry = Entry(self.ff_frame, width=6)
self.res8_tol_entry = Entry(self.ff_frame, width=6)
self.res8_points_entry = Entry(self.ff_frame, width=6)
self.res8_formula_entry = Entry(self.ff_frame, width=30)
self.res9_name_entry = Entry(self.ff_frame, width=6)
self.res9_min_entry = Entry(self.ff_frame, width=6)
self.res9_max_entry = Entry(self.ff_frame, width=6)
self.res9_prec_entry = Entry(self.ff_frame, width=6)
self.res9_tol_entry = Entry(self.ff_frame, width=6)
self.res9_points_entry = Entry(self.ff_frame, width=6)
self.res9_formula_entry = Entry(self.ff_frame, width=30)
self.res10_name_entry = Entry(self.ff_frame, width=6)
self.res10_min_entry = Entry(self.ff_frame, width=6)
self.res10_max_entry = Entry(self.ff_frame, width=6)
self.res10_prec_entry = Entry(self.ff_frame, width=6)
self.res10_tol_entry = Entry(self.ff_frame, width=6)
self.res10_points_entry = Entry(self.ff_frame, width=6)
self.res10_formula_entry = Entry(self.ff_frame, width=30)
# The input fields for Res1 are always visible. Res2-10 are shown/hidden depending on the selection
self.res1_name_entry.grid(row=41, column=1, sticky=W)
self.res1_min_entry.grid(row=41, column=1, sticky=W, padx=60)
self.res1_max_entry.grid(row=41, column=1, sticky=W, padx=100)
self.res1_prec_entry.grid(row=41, column=1, sticky=W, padx=140)
self.res1_tol_entry.grid(row=41, column=1, sticky=W, padx=180)
self.res1_points_entry.grid(row=41, column=1, sticky=W, padx=220)
self.res1_formula_entry.grid(row=41, column=1, sticky=E, padx=20)
# Collect the result entries in lists
# The lists allow a loop to be used when the widgets are placed on the GUI
self.res_label_list = [self.result1_label, self.result2_label, self.result3_label, self.result4_label, self.result5_label,
self.result6_label, self.result7_label, self.result8_label, self.result9_label, self.result10_label]
self.res_name_entry_list = [self.res1_name_entry, self.res2_name_entry, self.res3_name_entry, self.res4_name_entry, self.res5_name_entry,
self.res6_name_entry, self.res7_name_entry, self.res8_name_entry, self.res9_name_entry, self.res10_name_entry]
self.res_min_entry_list = [self.res1_min_entry, self.res2_min_entry, self.res3_min_entry, self.res4_min_entry, self.res5_min_entry,
self.res6_min_entry, self.res7_min_entry, self.res8_min_entry, self.res9_min_entry, self.res10_min_entry]
self.res_max_entry_list = [self.res1_max_entry, self.res2_max_entry, self.res3_max_entry, self.res4_max_entry, self.res5_max_entry,
self.res6_max_entry, self.res7_max_entry, self.res8_max_entry, self.res9_max_entry, self.res10_max_entry]
self.res_prec_entry_list = [self.res1_prec_entry, self.res2_prec_entry, self.res3_prec_entry, self.res4_prec_entry, self.res5_prec_entry,
self.res6_prec_entry, self.res7_prec_entry, self.res8_prec_entry, self.res9_prec_entry, self.res10_prec_entry]
self.res_tol_entry_list = [self.res1_tol_entry, self.res2_tol_entry, self.res3_tol_entry, self.res4_tol_entry, self.res5_tol_entry,
self.res6_tol_entry, self.res7_tol_entry, self.res8_tol_entry, self.res9_tol_entry, self.res10_tol_entry]
self.res_points_entry_list = [self.res1_points_entry, self.res2_points_entry, self.res3_points_entry, self.res4_points_entry, self.res5_points_entry,
self.res6_points_entry, self.res7_points_entry, self.res8_points_entry, self.res9_points_entry, self.res10_points_entry]
self.res_formula_entry_list = [self.res1_formula_entry, self.res2_formula_entry, self.res3_formula_entry, self.res4_formula_entry, self.res5_formula_entry,
self.res6_formula_entry, self.res7_formula_entry, self.res8_formula_entry, self.res9_formula_entry, self.res10_formula_entry]
# These lists are used for the value range calculation
self.var_res_combined_min_entries_list = [self.var1_min_entry, self.var2_min_entry, self.var3_min_entry, self.var4_min_entry,
self.var5_min_entry, self.var6_min_entry, self.var7_min_entry,
self.var8_min_entry, self.var9_min_entry, self.var10_min_entry, self.var11_min_entry,
self.var12_min_entry, self.var13_min_entry, self.var14_min_entry,
self.var15_min_entry, self.res1_min_entry, self.res2_min_entry, self.res3_min_entry,
self.res4_min_entry, self.res5_min_entry, self.res6_min_entry, self.res7_min_entry,
self.res8_min_entry, self.res9_min_entry, self.res10_min_entry ]
self.var_res_combined_max_entries_list = [self.var1_max_entry, self.var2_max_entry, self.var3_max_entry, self.var4_max_entry,
self.var5_max_entry, self.var6_max_entry, self.var7_max_entry,
self.var8_max_entry, self.var9_max_entry, self.var10_max_entry, self.var11_max_entry,
self.var12_max_entry, self.var13_max_entry, self.var14_max_entry,
self.var15_max_entry, self.res1_max_entry, self.res2_max_entry, self.res3_max_entry, self.res4_max_entry,
self.res5_max_entry,
self.res6_max_entry, self.res7_max_entry, self.res8_max_entry, self.res9_max_entry,
self.res10_max_entry]
#############################
#################### UNITS FOR RESULTS ARE CURRENTLY DISABLED
# self.res1_unit_myCombo = ttk.Combobox(self.frame_formula, value=self.select_var_units, width=5)
# self.res1_unit_myCombo.current(0)
# self.res1_unit_myCombo.bind("<<ComboboxSelected>>", selected_var)
# #self.res1_unit_myCombo.grid(row=21, column=0, sticky=E, padx=10)
#
# self.res2_unit_myCombo = ttk.Combobox(self.frame_formula, value=self.select_var_units, width=5)
# self.res2_unit_myCombo.current(0)
# self.res2_unit_myCombo.bind("<<ComboboxSelected>>", selected_var)
#
# self.res3_unit_myCombo = ttk.Combobox(self.frame_formula, value=self.select_var_units, width=5)
# self.res3_unit_myCombo.current(0)
# self.res3_unit_myCombo.bind("<<ComboboxSelected>>", selected_var)
# Hier werden durch die Funktion "ff_result_selected" die Ergebnisse - Eingabefelder (je nach Wert) ein-/ausgeblendet
def ff_result_selected(event): # "variable" need for comboBox Binding
self.selected_number_of_results = int(self.ff_numbers_of_results_box.get())
# Loop that places the entries on the GUI
# If, for example, 5 results are selected, rows 1-5 are placed on the GUI
for i in range(self.selected_number_of_results):
#Formelfrage.ff_variable_show_or_remove(self, self.var_label_list[i], self.var_name_entry_list[i], self.var_min_entry_list[i], self.var_max_entry_list[i], self.var_prec_entry_list[i], self.var_divby_entry_list[i], str(i+7), "show")
Formelfrage.ff_result_show_or_remove(self, self.res_label_list[i], self.res_name_entry_list[i], self.res_min_entry_list[i], self.res_max_entry_list[i], self.res_prec_entry_list[i], self.res_tol_entry_list[i], self.res_points_entry_list[i], self.res_formula_entry_list[i], str(i+42), "show")
# Loop that hides the remaining entries on the GUI
# If, for example, 5 results are selected, rows 6-10 are hidden
for j in range(self.selected_number_of_results, len(self.res_min_entry_list)):
Formelfrage.ff_result_show_or_remove(self, self.res_label_list[j], self.res_name_entry_list[j], self.res_min_entry_list[j], self.res_max_entry_list[j], self.res_prec_entry_list[j], self.res_tol_entry_list[j], self.res_points_entry_list[j], self.res_formula_entry_list[j], str(j+42), "remove")
self.ff_numbers_of_results_box_label = Label(self.ff_frame, text="Anzahl der Ergebnisse: ")
self.ff_numbers_of_results_box_label.grid(row=40, column=0, sticky=W, padx=10, pady=(20, 0))
self.ff_numbers_of_results_value = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.ff_numbers_of_results_box = ttk.Combobox(self.ff_frame, value=self.ff_numbers_of_results_value, width=3)
self.ff_numbers_of_results_box.current(0)
self.ff_numbers_of_results_box.bind("<<ComboboxSelected>>", ff_result_selected)
self.ff_numbers_of_results_box.grid(row=40, column=1, sticky=W, pady=(20, 0))
# Used to show and hide input fields depending on the selected number of variables or results
def ff_variable_show_or_remove(self, var_label, var_name_entry, var_min_entry, var_max_entry, var_prec_entry, var_divby_entry, row_nr, var_status):
if var_status == "show":
var_label.grid(row=int(row_nr), column=0, sticky=W, padx=20)
var_name_entry.grid(row=int(row_nr), column=1, sticky=W)
var_min_entry.grid(row=int(row_nr), column=1, sticky=W, padx=60)
var_max_entry.grid(row=int(row_nr), column=1, sticky=W, padx=100)
var_prec_entry.grid(row=int(row_nr), column=1, sticky=W, padx=140)
var_divby_entry.grid(row=int(row_nr), column=1, sticky=W, padx=180)
#var_unit_myCombo.grid(row=int(row_nr), column=0, sticky=E, padx=10)
else:
var_label.grid_remove()
var_name_entry.grid_remove()
var_min_entry.grid_remove()
var_max_entry.grid_remove()
var_prec_entry.grid_remove()
var_divby_entry.grid_remove()
# var_unit_myCombo.grid_remove()
def ff_result_show_or_remove(self, res_label, res_name_entry, res_min_entry, res_max_entry, res_prec_entry, res_tol_entry, res_points_entry, res_formula_entry, row_nr, res_status):
if res_status == "show":
res_label.grid(row=int(row_nr), column=0, sticky=W, padx=20)
res_name_entry.grid(row=int(row_nr), column=1, sticky=W)
res_min_entry.grid(row=int(row_nr), column=1, sticky=W, padx=60)
res_max_entry.grid(row=int(row_nr), column=1, sticky=W, padx=100)
res_prec_entry.grid(row=int(row_nr), column=1, sticky=W, padx=140)
res_tol_entry.grid(row=int(row_nr), column=1, sticky=W, padx=180)
res_points_entry.grid(row=int(row_nr), column=1, sticky=W, padx=220)
res_formula_entry.grid(row=int(row_nr), column=1, sticky=E, padx=20)
#res_unit_myCombo.grid(row=int(row_nr), column=0, sticky=E, padx=10)
else:
res_label.grid_remove()
res_name_entry.grid_remove()
res_min_entry.grid_remove()
res_max_entry.grid_remove()
res_prec_entry.grid_remove()
res_tol_entry.grid_remove()
res_points_entry.grid_remove()
res_formula_entry.grid_remove()
#var_unit_myCombo.grid_remove()
# Used when processing units
# The dictionary defines which user-defined unit belongs to which user-defined ID
# The IDs were chosen arbitrarily; IDs < 100 were not processed correctly by ILIAS
def unit_table(self, selected_unit):
self.unit_to_ilias_code = { "H" : "125", "mH" : "126", "µH" : "127", "nH" : "128", "kH" : "129", "pH" : "130",
"F" : "131", "mF" : "132", "µF" : "133", "nF" : "134", "kF" : "135",
"W" : "136", "kW" : "137", "MW" : "138", "mW" : "149",
"V" : "139", "kV" : "140", "mV" : "141", "µV" : "142", "MV" : "143",
"A" : "144", "mA" : "145", "µA" : "146", "kA" : "147",
"Ohm" : "148", "kOhm" : "150", "mOhm" : "151"}
self.var_selected_unit = selected_unit
self.selected_unit = self.unit_to_ilias_code[self.var_selected_unit]
return self.selected_unit
# When the XML is imported into ILIAS, no double-escaped ampersands ('&amp;amp;') may be present
# They are replaced here by the correct entity '&amp;'
def ff_replace_character_in_xml_file(self, file_path_qti_xml):
# Afterwards every '&amp;amp;' is swapped back to '&amp;'
# XML cannot handle a raw '&' character, so '&' was escaped as '&amp;' when the texts were written into the XML
# Open the XML file for reading ('r' -> "read")
with open(file_path_qti_xml, 'r') as xml_file:
xml_str = xml_file.read()
xml_str = xml_str.replace('&amp;amp;', '&amp;')  # swap the double-escaped entity back to '&amp;'
# Write back to the XML file ('w' -> "write")
with open(file_path_qti_xml, 'w') as replaced_xml_file:
replaced_xml_file.write(xml_str)
print("...XML_DATEI_QTI -- \"&\"-ZEICHEN ÜBERARBEITUNG ABGESCHLOSSEN!")
# Calculate the value range (in acceptable time for up to 4 variables)
def ff_calculate_value_range_function_in_GUI(self, ids_in_entry_box):
self.all_entries_from_db_list = []
self.ff_test_entry_splitted = self.ff_calculate_value_range_id_entry.get().split(",")
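# If no IDs are entered and the "all DB entries" checkbox is not ticked, the value range is calculated from the values currently shown in the GUI; otherwise the selected (or all) DB records are loaded and processed one after another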
if self.ff_calculate_value_range_id_entry.get() == "" and self.ff_var_calculate_value_range_for_all_db_entries_check.get() == 0:
# Evaluate the formula if one is present in the entry field
if self.res1_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res1_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res1_min_entry, self.res1_max_entry, self.res1_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res2_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res2_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res2_min_entry, self.res2_max_entry, self.res2_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res3_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res3_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res3_min_entry, self.res3_max_entry, self.res3_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res4_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res4_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res4_min_entry, self.res4_max_entry, self.res4_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res5_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res5_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res5_min_entry, self.res5_max_entry, self.res5_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res6_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res6_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res6_min_entry, self.res6_max_entry, self.res6_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res7_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res7_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res7_min_entry, self.res7_max_entry, self.res7_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res8_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res8_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res8_min_entry, self.res8_max_entry, self.res8_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res9_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res9_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res9_min_entry, self.res9_max_entry, self.res9_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res10_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res10_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res10_min_entry, self.res10_max_entry, self.res10_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
else:
# Calculate individual IDs
if self.ff_calculate_value_range_id_entry.get() != "":
self.ff_test_entry_splitted = self.ff_calculate_value_range_id_entry.get()
# Calculate all IDs
if self.ff_var_calculate_value_range_for_all_db_entries_check.get() == 1:
# Calculate the value range for all DB entries - popup
# showinfo, showwarning, showerror, askquestion, askokcancel, askyesno
self.response_calculate_value_for_all_db_entries = messagebox.askquestion("Wertebereich für DB Einträge berechnen", "ACHTUNG!\n\nEs werden für ALLE DB Einträge die Min/Max-Werte überschrieben!\n\nFortfahren?")
if self.response_calculate_value_for_all_db_entries == "yes":
self.ff_test_entry_splitted = ids_in_entry_box.split(",")
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = c.fetchall()
for ff_db_record in ff_db_records:
self.all_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.string_temp = ','.join(map(str, self.all_entries_from_db_list))
self.ff_test_entry_splitted = self.string_temp.split(",")
# Eintrag mit ID "1" entspricht der Vorlage und soll nicht mit erstellt werden
#self.ff_test_entry_splitted.pop(0)
else:
print("Vorgang abgebrochen")
# Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
cursor = conn.cursor()
cursor.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = cursor.fetchall()
for i in range(len(self.ff_test_entry_splitted)):
for ff_db_record in ff_db_records:
if str(ff_db_record[len(ff_db_record) - 1]) == self.ff_test_entry_splitted[i]:
Formelfrage.ff_clear_var_res_entries(self)
self.var1_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var1_min']])
self.var1_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var1_max']])
self.var1_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var1_prec']])
self.var2_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var2_min']])
self.var2_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var2_max']])
self.var2_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var2_prec']])
self.var3_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var3_min']])
self.var3_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var3_max']])
self.var3_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var3_prec']])
self.var4_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var4_min']])
self.var4_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var4_max']])
self.var4_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var4_prec']])
self.var5_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var5_min']])
self.var5_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var5_max']])
self.var5_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var5_prec']])
self.var6_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var6_min']])
self.var6_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var6_max']])
self.var6_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var6_prec']])
self.var7_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var7_min']])
self.var7_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var7_max']])
self.var7_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var7_prec']])
self.var8_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var8_min']])
self.var8_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var8_max']])
self.var8_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var8_prec']])
self.var9_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var9_min']])
self.var9_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var9_max']])
self.var9_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var9_prec']])
self.var10_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var10_min']])
self.var10_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var10_max']])
self.var10_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var10_prec']])
self.var11_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var11_min']])
self.var11_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var11_max']])
self.var11_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var11_prec']])
self.var12_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var12_min']])
self.var12_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var12_max']])
self.var12_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var12_prec']])
self.var13_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var13_min']])
self.var13_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var13_max']])
self.var13_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var13_prec']])
self.var14_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var14_min']])
self.var14_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var14_max']])
self.var14_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var14_prec']])
self.var15_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var15_min']])
self.var15_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var15_max']])
self.var15_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['var15_prec']])
self.res1_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res1_formula']])
self.res1_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res1_min']])
self.res1_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res1_max']])
self.res1_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res1_prec']])
self.res2_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res2_formula']])
self.res2_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res2_min']])
self.res2_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res2_max']])
self.res2_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res2_prec']])
self.res3_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res3_formula']])
self.res3_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res3_min']])
self.res3_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res3_max']])
self.res3_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res3_prec']])
self.res4_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res4_formula']])
self.res4_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res4_min']])
self.res4_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res4_max']])
self.res4_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res4_prec']])
self.res5_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res5_formula']])
self.res5_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res5_min']])
self.res5_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res5_max']])
self.res5_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res5_prec']])
self.res6_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res6_formula']])
self.res6_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res6_min']])
self.res6_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res6_max']])
self.res6_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res6_prec']])
self.res7_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res7_formula']])
self.res7_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res7_min']])
self.res7_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res7_max']])
self.res7_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res7_prec']])
self.res8_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res8_formula']])
self.res8_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res8_min']])
self.res8_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res8_max']])
self.res8_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res8_prec']])
self.res9_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res9_formula']])
self.res9_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res9_min']])
self.res9_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res9_max']])
self.res9_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res9_prec']])
self.res10_formula_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res10_formula']])
self.res10_min_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res10_min']])
self.res10_max_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res10_max']])
self.res10_prec_entry.insert(0, ff_db_record[self.ff_db_entry_to_index_dict['res10_prec']])
#print("INSERTED")
# Evaluate the formula if one is present in the entry field
if self.res1_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res1_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res1_min_entry, self.res1_max_entry, self.res1_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res2_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res2_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res2_min_entry, self.res2_max_entry, self.res2_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res3_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res3_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res3_min_entry, self.res3_max_entry, self.res3_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res4_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res4_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res4_min_entry, self.res4_max_entry, self.res4_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res5_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res5_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res5_min_entry, self.res5_max_entry, self.res5_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res6_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res6_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res6_min_entry, self.res6_max_entry, self.res6_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res7_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res7_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res7_min_entry, self.res7_max_entry, self.res7_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res8_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res8_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res8_min_entry, self.res8_max_entry, self.res8_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res9_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res9_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res9_min_entry, self.res9_max_entry, self.res9_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.res10_formula_entry.get() != "":
Formelfrage.ff_calculate_value_range_from_formula_in_GUI(self, self.res10_formula_entry.get(), self.var_res_combined_min_entries_list, self.var_res_combined_max_entries_list, self.var_prec_entry_list, self.res10_min_entry, self.res10_max_entry, self.res10_prec_entry, self.res_min_entry_list, self.res_max_entry_list, self.ff_var_calculate_value_range_for_all_db_entries_check.get())
if self.ff_var_calculate_value_range_for_all_db_entries_check.get() == 1:
# Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
# sql_update_query = "UPDATE " + self.ff_database_table + " SET res1_min=?, res1_max=? WHERE id=?",( res_min_entry, res_max_entry, record_id)
for t in range(0, 10):
c.execute("UPDATE " + self.ff_database_table + " SET res" + str(t+1) + "_min=?, res" + str(t+1) + "_max=? WHERE oid=?", (self.var_res_combined_min_entries_list[t+15].get(), self.var_res_combined_max_entries_list[t+15].get(), self.ff_test_entry_splitted[i]))
conn.commit()
conn.close()
# Convert the entered formula into a NumPy-compatible formula
def ff_calculate_value_range_replace_formula_numpy(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, res_min_entries_list, res_max_entries_list):
self.formula = formula
self.formula_var_replaced = formula.replace('$', '_')
self.formula_var_replaced = self.formula_var_replaced.replace('^', '**')
self.np_variables_translator_dict = {"pi": "np.pi",
",": ".",
"^": "**",
"e": "*10**",
"sin": "np.sin",
"cos": "np.cos",
"tan": "np.tan",
"arcsin": "np.arcsin",
"arccos": "np.arccos",
"arctan": "np.arctan",
"sinh": "np.sinh",
"cosh": "np.cosh",
"tanh": "np.tanh",
"arcsinh": "np.arcsinh",
"arccosh": "np.arccosh",
"arctanh": "np.arctanh",
"sqrt": "np.sqrt",
"abs": "np.abs",
"ln": "np.ln",
"log": "np.log",
"_v1": " row['a'] ",
"_v2": " row['b'] ",
"_v3": " row['c'] ",
"_v4": " row['d'] ",
"_v5": " row['e'] ",
"_v6": " row['f'] ",
"_v7": " row['g'] ",
"_v8": " row['h'] ",
"_v9": " row['i'] ",
"_v10": " row['j'] ",
"_v11": " row['k'] ",
"_v12": " row['l'] ",
"_v13": " row['m'] ",
"_v14": " row['n'] ",
"_v15": " row['o'] "}
self.np_results_translator_dict = {
"_r1": " row['p'] ",
"_r2": " row['q'] ",
"_r3": " row['r'] ",
"_r4": " row['s'] ",
"_r5": " row['t'] ",
"_r6": " row['u'] ",
"_r7": " row['v'] ",
"_r8": " row['w'] ",
"_r9": " row['x'] ",
"_r10": " row['y'] "}
print("----------------------")
#print("Übernehme Formel aus Eingabefeld:")
print("---> ", self.formula, end="", flush=True)
#print("Prüfe auf Grenzen")
def replace_var(match):
return self.np_variables_translator_dict[match.group(0)]
def replace_res(match):
return self.np_results_translator_dict[match.group(0)]
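# Illustrative example (hypothetical input): "$v1*sin($v2)^2"
#   after the '$' -> '_' and '^' -> '**' replacements:  "_v1*sin(_v2)**2"
#   after the word-boundary substitutions below:        " row['a'] *np.sin( row['b'] )**2"
# The \b word boundaries prevent e.g. "_v1" from also matching inside "_v10"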
self.formula_var_replaced = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in self.np_variables_translator_dict),replace_var, self.formula_var_replaced)
#for key in self.np_variables_translator_dict.keys():
# self.formula_var_replaced = self.formula_var_replaced.replace(key, self.np_variables_translator_dict[key])
self.formula_res_replaced = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in self.np_results_translator_dict),replace_res, self.formula_var_replaced)
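# Using re.sub with \b word boundaries (instead of plain str.replace) avoids
# substring collisions, e.g. "_v1" no longer matches inside "_v15".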
print(" --- ", "NUMPY: ", self.formula_res_replaced)
#for key in self.np_results_translator_dict.keys():
# self.formula_res_replaced = self.formula_res_replaced.replace(key, self.np_results_translator_dict[key])
for i in range(len(var_res_combined_min_entries_list)):
if "$v" + (str(i+1)) in formula and var_res_combined_min_entries_list[i].get() != "" and var_res_combined_max_entries_list[i].get() != "":
self.formula = self.formula_var_replaced
for j in range(len(res_min_entries_list)):
if "$r" + (str(j+1)) in formula:
if res_min_entries_list[j].get() != "" and res_max_entries_list[j].get() != "":
#print("Grenzen verfügbar! --> Ersetze alle Symbole mit numpy-symoblik")
self.formula = self.formula_res_replaced
else:
self.formula = "NaN"
if "$r" + (str(i+1)) in formula and var_res_combined_min_entries_list[i].get() != "" and var_res_combined_max_entries_list[i].get() != "":
self.formula = self.formula_res_replaced
return self.formula
# Compute the value range of the formula
def ff_calculate_value_range_from_formula_in_GUI(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_prec_entries_list, res_min_entry, res_max_entry, res_prec_entry, res_min_entries_list, res_max_entries_list, calculate_value_range_for_pool_check):
def value_range_lower_upper_bounds(var_res_combined_min_entries_list, var_res_combined_max_entries_list, var_lower_bound_list, var_upper_bound_list):
for u in range(len(var_res_combined_min_entries_list)):
if var_res_combined_min_entries_list[u].get() != "":
if bool(re.search(r'\d', var_res_combined_min_entries_list[u].get())) and bool(re.search(r'\d', var_res_combined_max_entries_list[u].get())):
try:
var_lower_bound_list[u], var_upper_bound_list[u] = int(var_res_combined_min_entries_list[u].get()), int(var_res_combined_max_entries_list[u].get())
except ValueError:
var_lower_bound_list[u], var_upper_bound_list[u] = float(var_res_combined_min_entries_list[u].get()), float(var_res_combined_max_entries_list[u].get())
else:
var_lower_bound_list[u], var_upper_bound_list[u] = 0, 0
def min_max(col):
return pd.Series(index=['min', 'max'], data=[col.min(), col.max()])
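# min_max is applied column-wise later (df1.apply(min_max)); it returns a two-row
# Series so that .iloc[0] holds the column minimum and .iloc[1] the maximum.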
# Evaluate all formulas that do NOT contain $r (variables only)
self.var1_lower, self.var1_upper = 0, 0
self.var2_lower, self.var2_upper = 0, 0
self.var3_lower, self.var3_upper = 0, 0
self.var4_lower, self.var4_upper = 0, 0
self.var5_lower, self.var5_upper = 0, 0
self.var6_lower, self.var6_upper = 0, 0
self.var7_lower, self.var7_upper = 0, 0
self.var8_lower, self.var8_upper = 0, 0
self.var9_lower, self.var9_upper = 0, 0
self.var10_lower, self.var10_upper = 0, 0
self.var11_lower, self.var11_upper = 0, 0
self.var12_lower, self.var12_upper = 0, 0
self.var13_lower, self.var13_upper = 0, 0
self.var14_lower, self.var14_upper = 0, 0
self.var15_lower, self.var15_upper = 0, 0
self.res1_lower, self.res1_upper = 0, 0
self.res2_lower, self.res2_upper = 0, 0
self.res3_lower, self.res3_upper = 0, 0
self.res4_lower, self.res4_upper = 0, 0
self.res5_lower, self.res5_upper = 0, 0
self.res6_lower, self.res6_upper = 0, 0
self.res7_lower, self.res7_upper = 0, 0
self.res8_lower, self.res8_upper = 0, 0
self.res9_lower, self.res9_upper = 0, 0
self.res10_lower, self.res10_upper = 0, 0
self.new_list = []
self.new_list2 = []
self.set_nr_of_var_index = []
self.var_prec_entry_list_values = []
self.lower_list = [self.var1_lower, self.var2_lower, self.var3_lower, self.var4_lower, self.var5_lower,
self.var6_lower, self.var7_lower, self.var8_lower, self.var9_lower, self.var10_lower,
self.var11_lower, self.var12_lower, self.var13_lower, self.var14_lower, self.var15_lower,
self.res1_lower, self.res2_lower, self.res3_lower, self.res4_lower, self.res5_lower,
self.res6_lower, self.res7_lower, self.res8_lower, self.res9_lower, self.res10_lower]
self.upper_list = [self.var1_upper, self.var2_upper, self.var3_upper, self.var4_upper, self.var5_upper,
self.var6_upper, self.var7_upper, self.var8_upper, self.var9_upper, self.var10_upper,
self.var11_upper, self.var12_upper, self.var13_upper, self.var14_upper, self.var15_upper,
self.res1_upper, self.res2_upper, self.res3_upper, self.res4_upper, self.res5_upper,
self.res6_upper, self.res7_upper, self.res8_upper, self.res9_upper, self.res10_upper]
self.new_dict = {"row['a']": 'a',
"row['b']": 'b',
"row['c']": 'c',
"row['d']": 'd',
"row['e']": 'e',
"row['f']": 'f',
"row['g']": 'g',
"row['h']": 'h',
"row['i']": 'i',
"row['j']": 'j',
"row['k']": 'k',
"row['l']": 'l',
"row['m']": 'm',
"row['n']": 'n',
"row['o']": 'o',
"row['p']": 'p',
"row['q']": 'q',
"row['r']": 'r',
"row['s']": 's',
"row['t']": 't',
"row['u']": 'u',
"row['v']": 'v',
"row['w']": 'w',
"row['x']": 'x',
"row['y']": 'y' }
self.list_index_dict = {'a': 0,
'b': 1,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'g': 6,
'h': 7,
'i': 8,
'j': 9,
'k': 10,
'l': 11,
'm': 12,
'n': 13,
'o': 14,
'p': 15,
'q': 16,
'r': 17,
's': 18,
't': 19,
'u': 20,
'v': 21,
'w': 22,
'x': 23,
'y': 24,
}
values = []
# Number of values per range
N = 5
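# Note: the grid evaluated below contains N**k points, where k is the number of
# distinct variables/results appearing in the formula, so runtime grows quickly with k.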
# Replace the formula with NumPy expressions, e.g. 2^5 -> 2**5, $v1*2+$v3 -> row['a'] *2+ row['c']
self.formula_1_numpy_expression = Formelfrage.ff_calculate_value_range_replace_formula_numpy(self, formula, var_res_combined_min_entries_list, var_res_combined_max_entries_list, res_min_entries_list, res_max_entries_list)
if self.formula_1_numpy_expression is not None and self.formula_1_numpy_expression != "NaN":
# The new formula is split on spaces so that individual "row['a']" tokens can be replaced by 'a'
self.new_list = self.formula_1_numpy_expression.split(' ')
self.exp_as_func = eval('lambda row: ' + self.formula_1_numpy_expression)
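# Caution: eval() executes arbitrary Python. For a hypothetical formula, the string
# built here looks like "lambda row:  row['a'] *2+ row['c'] "; it should only be
# used with trusted formula input.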
# self.exp_as_func is not iterable, so it is wrapped in a list ("functions") for the loop below
functions = [self.exp_as_func]
value_range_lower_upper_bounds(var_res_combined_min_entries_list, var_res_combined_max_entries_list, self.lower_list, self.upper_list)
# Replace: "row['a']" -> 'a' and collect the letters as a new list
for i in range(len(self.new_list)):
if "row" in self.new_list[i]:
if self.new_dict[self.new_list[i]] not in self.new_list2:
self.new_list2.append(self.new_dict[self.new_list[i]])
self.set_nr_of_var_index = sorted(self.new_list2)
self.max_index_nr = self.list_index_dict[self.set_nr_of_var_index[-1]] + 1
# Evaluate the formula. "linspace" creates N values between two bounds, e.g. linspace(0,10,N) with N=11 --> 0,1,2,3,4,5,6,7,8,9,10
for p in range(len(self.set_nr_of_var_index)):
values.append(np.linspace(self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p]]], self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]], N))
df = pd.DataFrame(cartesian_product(values), index=self.set_nr_of_var_index).T
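# Assuming cartesian_product behaves like pandas' cartesian_product helper, df gets
# one column per variable letter (e.g. 'a', 'c') and N**k rows, one row per
# combination of sampled values; the formula is then evaluated on each row.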
if res_prec_entry.get() != "":
self.var_prec_highest_value = res_prec_entry.get()
else:
for i in range(len(var_prec_entries_list)):
self.var_prec_entry_list_values.append(var_prec_entries_list[i].get())
self.var_prec_highest_value = max(self.var_prec_entry_list_values)
#pd.options.display.float_format = '{:,.3f}'.format
for i, f in enumerate(functions):
df[f'f_{i + 1}'] = df.apply(f, axis=1)
df1 = df.apply(pd.to_numeric, errors='coerce')
#print(df1)
#print()
print(" --- ", "min: ", df1.apply(min_max).iloc[0]['f_1'], " max: ",df1.apply(min_max).iloc[1]['f_1'])
#print(df1.apply(min_max).iloc[0]['f_1'])
#print(df1.apply(min_max).iloc[1]['f_1'])
#print("////////////////////////")
self.res_min_calc_value = df1.apply(min_max).iloc[0]['f_1']
self.res_max_calc_value = df1.apply(min_max).iloc[1]['f_1']
#"{:.2f}".format(a_float)
res_min_entry.delete(0, END)
res_min_entry.insert(END, str("{:.2f}".format(self.res_min_calc_value)))
res_max_entry.delete(0, END)
res_max_entry.insert(END, str("{:.2f}".format(self.res_max_calc_value)))
# Check whether $r.. occurs in the formulas
for i in range(len(self.res_formula_entry_list)):
for j in range(1, 11):  # check results $r1 .. $r10
if "$r" + str(j) in str(self.res_formula_entry_list[i].get()):
print("$r" + str(j) + " found!", self.res_formula_entry_list[i].get())
if self.res_min_entry_list[j-1].get() != "" and self.res_max_entry_list[j-1].get() != "":
print("---", self.res_min_entry_list[j-1].get(), self.res_max_entry_list[j-1].get())
############# DATABASE FUNCTIONS
# Save question to the database (new question)
def ff_save_id_to_db(self, ff_database_table, column_names_string):
self.ff_database_table = ff_database_table
self.column_names_string = column_names_string
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
# format of duration P0Y0M0DT0H30M0S
self.ff_test_time = "P0Y0M0DT" + self.ff_proc_hours_box.get() + "H" + self.ff_proc_minutes_box.get() + "M" + self.ff_proc_seconds_box.get() + "S"
# Image 1
if self.ff_description_img_name_1 != "" and self.ff_description_img_name_1 != "EMPTY":
# read image data in byte format
with open(self.ff_description_img_path_1, 'rb') as image_file_1:
self.ff_description_img_data_1 = image_file_1.read()
else:
self.ff_description_img_name_1 = ""
self.ff_description_img_path_1 = ""
self.ff_description_img_data_1 = ""
# Image 2
if self.ff_description_img_name_2 != "" and self.ff_description_img_name_2 != "EMPTY":
# read image data in byte format
with open(self.ff_description_img_path_2, 'rb') as image_file_2:
self.ff_description_img_data_2 = image_file_2.read()
else:
self.ff_description_img_name_2 = ""
self.ff_description_img_path_2 = ""
self.ff_description_img_data_2 = ""
# Image 3
if self.ff_description_img_name_3 != "" and self.ff_description_img_name_3 != "EMPTY":
# read image data in byte format
with open(self.ff_description_img_path_3, 'rb') as image_file_3:
self.ff_description_img_data_3 = image_file_3.read()
else:
self.ff_description_img_name_3 = ""
self.ff_description_img_path_3 = ""
self.ff_description_img_data_3 = ""
########### Check whether the question title or question ID already exists in the database ####
c.execute("SELECT *, oid FROM " + self.ff_database_table)
db_records = c.fetchall()
self.db_records_fragen_titel_list = []
self.db_records_fragen_id_list = []
self.temp_list = []
self.temp2_list = []
self.temp_string = ""
for db_record in db_records:
self.db_records_fragen_titel_list.append(db_record[self.ff_db_entry_to_index_dict['question_title']])
self.temp_list = db_record[self.ff_db_entry_to_index_dict['question_title']].split(' ')
self.db_records_fragen_id_list.append(self.temp_list[0])
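# Assumption behind this check: the question ID is the first whitespace-separated
# token of the question title (temp_list[0]), e.g. a title like "FF123 Ohmsches Gesetz"
# would have the ID "FF123" (example values are illustrative only).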
print("\n")
if self.ff_question_title_entry.get() in self.db_records_fragen_titel_list:
print(" -----> ACHTUNG! Fragentitel: \"" + str(self.ff_question_title_entry.get()) + "\" befindet sich bereits in der Datenbank")
self.temp2_list = self.ff_question_title_entry.get().split(' ')
self.temp_string = self.temp2_list[0]
if self.temp_string in self.db_records_fragen_id_list:
print(" -----> ACHTUNG! Fragen-ID: \"" + str(self.temp_string) + "\" befindet sich bereits in der Datenbank")
print("\n")
#############
# Insert into Table
c.execute(
"INSERT INTO " + self.ff_database_table + " VALUES (" + self.ff_db_column_names_string + ")",
{
'question_difficulty': self.ff_question_difficulty_entry.get(),
'question_category': self.ff_question_category_entry.get(),
'question_type': self.ff_question_type_entry.get(),
'question_title': self.ff_question_title_entry.get(),
'question_description_title': self.ff_question_description_title_entry.get(),
# The first part, "1.0" means that the input should be read from line one, character zero (ie: the very first character).
# END is an imported constant which is set to the string "end". The END part means to read until the end of the text box is reached.
# The only issue with this is that it actually adds a newline to our input. "
# "So, in order to fix it we should change END to end-1c(Thanks <NAME>) The -1c deletes 1 character, while -2c would mean delete two characters, and so on."
'question_description_main': self.ff_question_description_main_entry.get("1.0", 'end-1c'),
'res1_formula': self.res1_formula_entry.get(),
'res2_formula': self.res2_formula_entry.get(),
'res3_formula': self.res3_formula_entry.get(),
'res4_formula': self.res4_formula_entry.get(),
'res5_formula': self.res5_formula_entry.get(),
'res6_formula': self.res6_formula_entry.get(),
'res7_formula': self.res7_formula_entry.get(),
'res8_formula': self.res8_formula_entry.get(),
'res9_formula': self.res9_formula_entry.get(),
'res10_formula': self.res10_formula_entry.get(),
'var1_name': self.var1_name_entry.get(),
'var1_min': self.var1_min_entry.get(),
'var1_max': self.var1_max_entry.get(),
'var1_prec': self.var1_prec_entry.get(),
'var1_divby': self.var1_divby_entry.get(),
'var1_unit': "",
'var2_name': self.var2_name_entry.get(),
'var2_min': self.var2_min_entry.get(),
'var2_max': self.var2_max_entry.get(),
'var2_prec': self.var2_prec_entry.get(),
'var2_divby': self.var2_divby_entry.get(),
'var2_unit': "",
'var3_name': self.var3_name_entry.get(),
'var3_min': self.var3_min_entry.get(),
'var3_max': self.var3_max_entry.get(),
'var3_prec': self.var3_prec_entry.get(),
'var3_divby': self.var3_divby_entry.get(),
'var3_unit': "",
'var4_name': self.var4_name_entry.get(),
'var4_min': self.var4_min_entry.get(),
'var4_max': self.var4_max_entry.get(),
'var4_prec': self.var4_prec_entry.get(),
'var4_divby': self.var4_divby_entry.get(),
'var4_unit': "",
'var5_name': self.var5_name_entry.get(),
'var5_min': self.var5_min_entry.get(),
'var5_max': self.var5_max_entry.get(),
'var5_prec': self.var5_prec_entry.get(),
'var5_divby': self.var5_divby_entry.get(),
'var5_unit': "",
'var6_name': self.var6_name_entry.get(),
'var6_min': self.var6_min_entry.get(),
'var6_max': self.var6_max_entry.get(),
'var6_prec': self.var6_prec_entry.get(),
'var6_divby': self.var6_divby_entry.get(),
'var6_unit': "",
'var7_name': self.var7_name_entry.get(),
'var7_min': self.var7_min_entry.get(),
'var7_max': self.var7_max_entry.get(),
'var7_prec': self.var7_prec_entry.get(),
'var7_divby': self.var7_divby_entry.get(),
'var7_unit': "",
'var8_name': self.var8_name_entry.get(),
'var8_min': self.var8_min_entry.get(),
'var8_max': self.var8_max_entry.get(),
'var8_prec': self.var8_prec_entry.get(),
'var8_divby': self.var8_divby_entry.get(),
'var8_unit': "",
'var9_name': self.var9_name_entry.get(),
'var9_min': self.var9_min_entry.get(),
'var9_max': self.var9_max_entry.get(),
'var9_prec': self.var9_prec_entry.get(),
'var9_divby': self.var9_divby_entry.get(),
'var9_unit': "",
'var10_name': self.var10_name_entry.get(),
'var10_min': self.var10_min_entry.get(),
'var10_max': self.var10_max_entry.get(),
'var10_prec': self.var10_prec_entry.get(),
'var10_divby': self.var10_divby_entry.get(),
'var10_unit': "",
'var11_name': self.var11_name_entry.get(),
'var11_min': self.var11_min_entry.get(),
'var11_max': self.var11_max_entry.get(),
'var11_prec': self.var11_prec_entry.get(),
'var11_divby': self.var11_divby_entry.get(),
'var11_unit': "",
'var12_name': self.var12_name_entry.get(),
'var12_min': self.var12_min_entry.get(),
'var12_max': self.var12_max_entry.get(),
'var12_prec': self.var12_prec_entry.get(),
'var12_divby': self.var12_divby_entry.get(),
'var12_unit': "",
'var13_name': self.var13_name_entry.get(),
'var13_min': self.var13_min_entry.get(),
'var13_max': self.var13_max_entry.get(),
'var13_prec': self.var13_prec_entry.get(),
'var13_divby': self.var13_divby_entry.get(),
'var13_unit': "",
'var14_name': self.var14_name_entry.get(),
'var14_min': self.var14_min_entry.get(),
'var14_max': self.var14_max_entry.get(),
'var14_prec': self.var14_prec_entry.get(),
'var14_divby': self.var14_divby_entry.get(),
'var14_unit': "",
'var15_name': self.var15_name_entry.get(),
'var15_min': self.var15_min_entry.get(),
'var15_max': self.var15_max_entry.get(),
'var15_prec': self.var15_prec_entry.get(),
'var15_divby': self.var15_divby_entry.get(),
'var15_unit': "",
'res1_name': self.res1_name_entry.get(),
'res1_min': self.res1_min_entry.get(),
'res1_max': self.res1_max_entry.get(),
'res1_prec': self.res1_prec_entry.get(),
'res1_tol': self.res1_tol_entry.get(),
'res1_points': self.res1_points_entry.get(),
'res1_unit': "",
'res2_name': self.res2_name_entry.get(),
'res2_min': self.res2_min_entry.get(),
'res2_max': self.res2_max_entry.get(),
'res2_prec': self.res2_prec_entry.get(),
'res2_tol': self.res2_tol_entry.get(),
'res2_points': self.res2_points_entry.get(),
'res2_unit': "",
'res3_name': self.res3_name_entry.get(),
'res3_min': self.res3_min_entry.get(),
'res3_max': self.res3_max_entry.get(),
'res3_prec': self.res3_prec_entry.get(),
'res3_tol': self.res3_tol_entry.get(),
'res3_points': self.res3_points_entry.get(),
'res3_unit': "",
'res4_name': self.res4_name_entry.get(),
'res4_min': self.res4_min_entry.get(),
'res4_max': self.res4_max_entry.get(),
'res4_prec': self.res4_prec_entry.get(),
'res4_tol': self.res4_tol_entry.get(),
'res4_points': self.res4_points_entry.get(),
'res4_unit': "",
'res5_name': self.res5_name_entry.get(),
'res5_min': self.res5_min_entry.get(),
'res5_max': self.res5_max_entry.get(),
'res5_prec': self.res5_prec_entry.get(),
'res5_tol': self.res5_tol_entry.get(),
'res5_points': self.res5_points_entry.get(),
'res5_unit': "",
'res6_name': self.res6_name_entry.get(),
'res6_min': self.res6_min_entry.get(),
'res6_max': self.res6_max_entry.get(),
'res6_prec': self.res6_prec_entry.get(),
'res6_tol': self.res6_tol_entry.get(),
'res6_points': self.res6_points_entry.get(),
'res6_unit': "",
'res7_name': self.res7_name_entry.get(),
'res7_min': self.res7_min_entry.get(),
'res7_max': self.res7_max_entry.get(),
'res7_prec': self.res7_prec_entry.get(),
'res7_tol': self.res7_tol_entry.get(),
'res7_points': self.res7_points_entry.get(),
'res7_unit': "",
'res8_name': self.res8_name_entry.get(),
'res8_min': self.res8_min_entry.get(),
'res8_max': self.res8_max_entry.get(),
'res8_prec': self.res8_prec_entry.get(),
'res8_tol': self.res8_tol_entry.get(),
'res8_points': self.res8_points_entry.get(),
'res8_unit': "",
'res9_name': self.res9_name_entry.get(),
'res9_min': self.res9_min_entry.get(),
'res9_max': self.res9_max_entry.get(),
'res9_prec': self.res9_prec_entry.get(),
'res9_tol': self.res9_tol_entry.get(),
'res9_points': self.res9_points_entry.get(),
'res9_unit': "",
'res10_name': self.res10_name_entry.get(),
'res10_min': self.res10_min_entry.get(),
'res10_max': self.res10_max_entry.get(),
'res10_prec': self.res10_prec_entry.get(),
'res10_tol': self.res10_tol_entry.get(),
'res10_points': self.res10_points_entry.get(),
'res10_unit': "",
'description_img_name_1': self.ff_description_img_name_1,
'description_img_data_1': self.ff_description_img_data_1,
'description_img_path_1': self.ff_description_img_path_1,
'description_img_name_2': self.ff_description_img_name_2,
'description_img_data_2': self.ff_description_img_data_2,
'description_img_path_2': self.ff_description_img_path_2,
'description_img_name_3': self.ff_description_img_name_3,
'description_img_data_3': self.ff_description_img_data_3,
'description_img_path_3': self.ff_description_img_path_3,
'test_time': self.ff_test_time,
'var_number': self.ff_numbers_of_answers_box.get(),
'res_number': self.ff_numbers_of_results_box.get(),
'question_pool_tag': self.ff_question_pool_tag_entry.get(),
'question_author': self.ff_question_author_entry.get()
})
conn.commit()
conn.close()
print("Neuer Eintrag in die Formelfrage-Datenbank --> Fragentitel: " + str(self.ff_question_title_entry.get()))
# Load a question from the database into the GUI
def ff_load_id_from_db(self, entry_to_index_dict):
self.ff_db_entry_to_index_dict = entry_to_index_dict
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
record_id = self.ff_load_box.get()
self.ff_hidden_edit_box_entry.delete(0, END)
self.ff_hidden_edit_box_entry.insert(0, self.ff_load_box.get())
c.execute("SELECT * FROM %s WHERE oid = %s " % (self.ff_database_table, str(record_id)))
ff_db_records = c.fetchall()
Formelfrage.ff_clear_GUI(self)
for ff_db_record in ff_db_records:
self.ff_question_difficulty_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_difficulty']] )
self.ff_question_category_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_category']])
self.ff_question_type_entry.delete(0, END)
self.ff_question_type_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_type']])
self.ff_question_title_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_title']])
self.ff_question_description_title_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_description_title']])
self.ff_question_description_main_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_description_main']])
self.res1_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_formula']])
self.res2_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_formula']])
self.res3_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_formula']])
self.res4_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_formula']])
self.res5_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_formula']])
self.res6_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_formula']])
self.res7_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_formula']])
self.res8_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_formula']])
self.res9_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_formula']])
self.res10_formula_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_formula']])
self.var1_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_name']])
self.var1_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_min']])
self.var1_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_max']])
self.var1_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_prec']])
self.var1_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var1_divby']])
self.var2_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_name']])
self.var2_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_min']])
self.var2_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_max']])
self.var2_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_prec']])
self.var2_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var2_divby']])
self.var3_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_name']])
self.var3_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_min']])
self.var3_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_max']])
self.var3_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_prec']])
self.var3_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var3_divby']])
self.var4_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_name']])
self.var4_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_min']])
self.var4_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_max']])
self.var4_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_prec']])
self.var4_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var4_divby']])
self.var5_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_name']])
self.var5_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_min']])
self.var5_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_max']])
self.var5_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_prec']])
self.var5_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var5_divby']])
self.var6_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_name']])
self.var6_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_min']])
self.var6_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_max']])
self.var6_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_prec']])
self.var6_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var6_divby']])
self.var7_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_name']])
self.var7_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_min']])
self.var7_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_max']])
self.var7_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_prec']])
self.var7_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var7_divby']])
self.var8_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_name']])
self.var8_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_min']])
self.var8_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_max']])
self.var8_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_prec']])
self.var8_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var8_divby']])
self.var9_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_name']])
self.var9_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_min']])
self.var9_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_max']])
self.var9_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_prec']])
self.var9_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var9_divby']])
self.var10_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_name']])
self.var10_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_min']])
self.var10_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_max']])
self.var10_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_prec']])
self.var10_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var10_divby']])
self.var11_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_name']])
self.var11_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_min']])
self.var11_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_max']])
self.var11_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_prec']])
self.var11_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var11_divby']])
self.var12_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_name']])
self.var12_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_min']])
self.var12_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_max']])
self.var12_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_prec']])
self.var12_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var12_divby']])
self.var13_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_name']])
self.var13_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_min']])
self.var13_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_max']])
self.var13_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_prec']])
self.var13_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var13_divby']])
self.var14_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_name']])
self.var14_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_min']])
self.var14_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_max']])
self.var14_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_prec']])
self.var14_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var14_divby']])
self.var15_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_name']])
self.var15_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_min']])
self.var15_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_max']])
self.var15_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_prec']])
self.var15_divby_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['var15_divby']])
self.res1_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_name']])
self.res1_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_min']])
self.res1_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_max']])
self.res1_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_prec']])
self.res1_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_tol']])
self.res1_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res1_points']])
self.res2_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_name']])
self.res2_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_min']])
self.res2_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_max']])
self.res2_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_prec']])
self.res2_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_tol']])
self.res2_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res2_points']])
self.res3_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_name']])
self.res3_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_min']])
self.res3_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_max']])
self.res3_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_prec']])
self.res3_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_tol']])
self.res3_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res3_points']])
self.res4_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_name']])
self.res4_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_min']])
self.res4_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_max']])
self.res4_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_prec']])
self.res4_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_tol']])
self.res4_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res4_points']])
self.res5_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_name']])
self.res5_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_min']])
self.res5_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_max']])
self.res5_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_prec']])
self.res5_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_tol']])
self.res5_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res5_points']])
self.res6_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_name']])
self.res6_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_min']])
self.res6_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_max']])
self.res6_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_prec']])
self.res6_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_tol']])
self.res6_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res6_points']])
self.res7_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_name']])
self.res7_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_min']])
self.res7_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_max']])
self.res7_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_prec']])
self.res7_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_tol']])
self.res7_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res7_points']])
self.res8_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_name']])
self.res8_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_min']])
self.res8_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_max']])
self.res8_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_prec']])
self.res8_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_tol']])
self.res8_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res8_points']])
self.res9_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_name']])
self.res9_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_min']])
self.res9_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_max']])
self.res9_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_prec']])
self.res9_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_tol']])
self.res9_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res9_points']])
self.res10_name_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_name']])
self.res10_min_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_min']])
self.res10_max_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_max']])
self.res10_prec_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_prec']])
self.res10_tol_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_tol']])
self.res10_points_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['res10_points']])
self.ff_description_img_name_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_1']]
self.ff_description_img_data_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_1']]
self.ff_description_img_path_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_1']]
self.ff_description_img_name_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_2']]
self.ff_description_img_data_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_2']]
self.ff_description_img_path_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_2']]
self.ff_description_img_name_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_3']]
self.ff_description_img_data_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_3']]
self.ff_description_img_path_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_3']]
self.ff_question_pool_tag_entry.insert(END, ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']])
conn.commit()
conn.close()
if self.ff_var_highlight_question_text.get() == 1:
print("Frage wird MIT Text-Formatierung geladen. --> Fragen-ID: " + str(self.ff_load_box.get()))
test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text(self, self.ff_question_description_main_entry)
else:
print("Frage wird OHNE Text-Formatierung geladen. --> Fragen-ID: " + str(self.ff_load_box.get()))
# Edit the currently loaded question
def ff_edit_id_from_db(self):
# Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
# Read the question ID from the "edit ID" entry field
# The entry field is not visible to the user
record_id = self.ff_hidden_edit_box_entry.get()
# Format of the test duration in the XML file: P0Y0M0DT0H30M0S
self.ff_test_time = "P0Y0M0DT" + self.ff_proc_hours_box.get() + "H" + self.ff_proc_minutes_box.get() + "M" + self.ff_proc_seconds_box.get() + "S"
# If an image name is present, read the image via its path
# Otherwise set it to ""
# Images are read as bytes: "rb" = read binary
# Question text image 1
if self.ff_description_img_name_1 != "" and self.ff_description_img_name_1 != "EMPTY":
with open(os.path.join(self.project_root_path, self.ff_description_img_path_1), 'rb') as description_image_file_1:
self.ff_description_img_data_1 = description_image_file_1.read()
else:
self.ff_description_img_name_1 = ""
self.ff_description_img_data_1 = ""
self.ff_description_img_path_1 = ""
# Question text image 2
if self.ff_description_img_name_2 != "" and self.ff_description_img_name_2 != "EMPTY":
with open( self.ff_description_img_path_2, 'rb') as description_image_file_2:
self.ff_description_img_data_2 = description_image_file_2.read()
else:
self.ff_description_img_name_2 = ""
self.ff_description_img_data_2 = ""
self.ff_description_img_path_2 = ""
# Question text image 3
if self.ff_description_img_name_3 != "" and self.ff_description_img_name_3 != "EMPTY":
with open( self.ff_description_img_path_3, 'rb') as description_image_file_3:
self.ff_description_img_data_3 = description_image_file_3.read()
else:
self.ff_description_img_name_3 = ""
self.ff_description_img_data_3 = ""
self.ff_description_img_path_3 = ""
self.edit_list = []
for i in range(len(self.ff_db_column_names_list)):
self.edit_list.append(self.ff_db_column_names_list[i] + " = :" + self.ff_db_column_names_list[i])
self.db_column_names_string_for_edit = ','.join(self.edit_list)
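# db_column_names_string_for_edit has the form "col1 = :col1,col2 = :col2,...",
# e.g. "question_difficulty = :question_difficulty,question_category = :question_category,...",
# matching the named placeholders in the dictionary passed to c.execute() below.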
print("''''''''''''''", self.db_column_names_string_for_edit)
c.execute("UPDATE " + self.ff_database_table + " SET " + self.db_column_names_string_for_edit + " WHERE oid = :oid",
{'question_difficulty': self.ff_question_difficulty_entry.get(),
'question_category': self.ff_question_category_entry.get(),
'question_type': self.ff_question_type_entry.get(),
'question_title': self.ff_question_title_entry.get(),
'question_description_title': self.ff_question_description_title_entry.get(),
'question_description_main': self.ff_question_description_main_entry.get("1.0", 'end-1c'),
'res1_formula': self.res1_formula_entry.get(),
'res2_formula': self.res2_formula_entry.get(),
'res3_formula': self.res3_formula_entry.get(),
'res4_formula': self.res4_formula_entry.get(),
'res5_formula': self.res5_formula_entry.get(),
'res6_formula': self.res6_formula_entry.get(),
'res7_formula': self.res7_formula_entry.get(),
'res8_formula': self.res8_formula_entry.get(),
'res9_formula': self.res9_formula_entry.get(),
'res10_formula': self.res10_formula_entry.get(),
'var1_name': self.var1_name_entry.get(),
'var1_min': self.var1_min_entry.get(),
'var1_max': self.var1_max_entry.get(),
'var1_prec': self.var1_prec_entry.get(),
'var1_divby': self.var1_divby_entry.get(),
'var1_unit': "",
'var2_name': self.var2_name_entry.get(),
'var2_min': self.var2_min_entry.get(),
'var2_max': self.var2_max_entry.get(),
'var2_prec': self.var2_prec_entry.get(),
'var2_divby': self.var2_divby_entry.get(),
'var2_unit': "",
'var3_name': self.var3_name_entry.get(),
'var3_min': self.var3_min_entry.get(),
'var3_max': self.var3_max_entry.get(),
'var3_prec': self.var3_prec_entry.get(),
'var3_divby': self.var3_divby_entry.get(),
'var3_unit': "",
'var4_name': self.var4_name_entry.get(),
'var4_min': self.var4_min_entry.get(),
'var4_max': self.var4_max_entry.get(),
'var4_prec': self.var4_prec_entry.get(),
'var4_divby': self.var4_divby_entry.get(),
'var4_unit': "",
'var5_name': self.var5_name_entry.get(),
'var5_min': self.var5_min_entry.get(),
'var5_max': self.var5_max_entry.get(),
'var5_prec': self.var5_prec_entry.get(),
'var5_divby': self.var5_divby_entry.get(),
'var5_unit': "",
'var6_name': self.var6_name_entry.get(),
'var6_min': self.var6_min_entry.get(),
'var6_max': self.var6_max_entry.get(),
'var6_prec': self.var6_prec_entry.get(),
'var6_divby': self.var6_divby_entry.get(),
'var6_unit': "",
'var7_name': self.var7_name_entry.get(),
'var7_min': self.var7_min_entry.get(),
'var7_max': self.var7_max_entry.get(),
'var7_prec': self.var7_prec_entry.get(),
'var7_divby': self.var7_divby_entry.get(),
'var7_unit': "",
'var8_name': self.var8_name_entry.get(),
'var8_min': self.var8_min_entry.get(),
'var8_max': self.var8_max_entry.get(),
'var8_prec': self.var8_prec_entry.get(),
'var8_divby': self.var8_divby_entry.get(),
'var8_unit': "",
'var9_name': self.var9_name_entry.get(),
'var9_min': self.var9_min_entry.get(),
'var9_max': self.var9_max_entry.get(),
'var9_prec': self.var9_prec_entry.get(),
'var9_divby': self.var9_divby_entry.get(),
'var9_unit': "",
'var10_name': self.var10_name_entry.get(),
'var10_min': self.var10_min_entry.get(),
'var10_max': self.var10_max_entry.get(),
'var10_prec': self.var10_prec_entry.get(),
'var10_divby': self.var10_divby_entry.get(),
'var10_unit': "",
'var11_name': self.var11_name_entry.get(),
'var11_min': self.var11_min_entry.get(),
'var11_max': self.var11_max_entry.get(),
'var11_prec': self.var11_prec_entry.get(),
'var11_divby': self.var11_divby_entry.get(),
'var11_unit': "",
'var12_name': self.var12_name_entry.get(),
'var12_min': self.var12_min_entry.get(),
'var12_max': self.var12_max_entry.get(),
'var12_prec': self.var12_prec_entry.get(),
'var12_divby': self.var12_divby_entry.get(),
'var12_unit': "",
'var13_name': self.var13_name_entry.get(),
'var13_min': self.var13_min_entry.get(),
'var13_max': self.var13_max_entry.get(),
'var13_prec': self.var13_prec_entry.get(),
'var13_divby': self.var13_divby_entry.get(),
'var13_unit': "",
'var14_name': self.var14_name_entry.get(),
'var14_min': self.var14_min_entry.get(),
'var14_max': self.var14_max_entry.get(),
'var14_prec': self.var14_prec_entry.get(),
'var14_divby': self.var14_divby_entry.get(),
'var14_unit': "",
'var15_name': self.var15_name_entry.get(),
'var15_min': self.var15_min_entry.get(),
'var15_max': self.var15_max_entry.get(),
'var15_prec': self.var15_prec_entry.get(),
'var15_divby': self.var15_divby_entry.get(),
'var15_unit': "",
'res1_name': self.res1_name_entry.get(),
'res1_min': self.res1_min_entry.get(),
'res1_max': self.res1_max_entry.get(),
'res1_prec': self.res1_prec_entry.get(),
'res1_tol': self.res1_tol_entry.get(),
'res1_points': self.res1_points_entry.get(),
'res1_unit': "",
'res2_name': self.res2_name_entry.get(),
'res2_min': self.res2_min_entry.get(),
'res2_max': self.res2_max_entry.get(),
'res2_prec': self.res2_prec_entry.get(),
'res2_tol': self.res2_tol_entry.get(),
'res2_points': self.res2_points_entry.get(),
'res2_unit': "",
'res3_name': self.res3_name_entry.get(),
'res3_min': self.res3_min_entry.get(),
'res3_max': self.res3_max_entry.get(),
'res3_prec': self.res3_prec_entry.get(),
'res3_tol': self.res3_tol_entry.get(),
'res3_points': self.res3_points_entry.get(),
'res3_unit': "",
'res4_name': self.res4_name_entry.get(),
'res4_min': self.res4_min_entry.get(),
'res4_max': self.res4_max_entry.get(),
'res4_prec': self.res4_prec_entry.get(),
'res4_tol': self.res4_tol_entry.get(),
'res4_points': self.res4_points_entry.get(),
'res4_unit': "",
'res5_name': self.res5_name_entry.get(),
'res5_min': self.res5_min_entry.get(),
'res5_max': self.res5_max_entry.get(),
'res5_prec': self.res5_prec_entry.get(),
'res5_tol': self.res5_tol_entry.get(),
'res5_points': self.res5_points_entry.get(),
'res5_unit': "",
'res6_name': self.res6_name_entry.get(),
'res6_min': self.res6_min_entry.get(),
'res6_max': self.res6_max_entry.get(),
'res6_prec': self.res6_prec_entry.get(),
'res6_tol': self.res6_tol_entry.get(),
'res6_points': self.res6_points_entry.get(),
'res6_unit': "",
'res7_name': self.res7_name_entry.get(),
'res7_min': self.res7_min_entry.get(),
'res7_max': self.res7_max_entry.get(),
'res7_prec': self.res7_prec_entry.get(),
'res7_tol': self.res7_tol_entry.get(),
'res7_points': self.res7_points_entry.get(),
'res7_unit': "",
'res8_name': self.res8_name_entry.get(),
'res8_min': self.res8_min_entry.get(),
'res8_max': self.res8_max_entry.get(),
'res8_prec': self.res8_prec_entry.get(),
'res8_tol': self.res8_tol_entry.get(),
'res8_points': self.res8_points_entry.get(),
'res8_unit': "",
'res9_name': self.res9_name_entry.get(),
'res9_min': self.res9_min_entry.get(),
'res9_max': self.res9_max_entry.get(),
'res9_prec': self.res9_prec_entry.get(),
'res9_tol': self.res9_tol_entry.get(),
'res9_points': self.res9_points_entry.get(),
'res9_unit': "",
'res10_name': self.res10_name_entry.get(),
'res10_min': self.res10_min_entry.get(),
'res10_max': self.res10_max_entry.get(),
'res10_prec': self.res10_prec_entry.get(),
'res10_tol': self.res10_tol_entry.get(),
'res10_points': self.res10_points_entry.get(),
'res10_unit': "",
'description_img_name_1': self.ff_description_img_name_1,
'description_img_data_1': self.ff_description_img_data_1,
'description_img_path_1': self.ff_description_img_path_1,
'description_img_name_2': self.ff_description_img_name_2,
'description_img_data_2': self.ff_description_img_data_2,
'description_img_path_2': self.ff_description_img_path_2,
'description_img_name_3': self.ff_description_img_name_3,
'description_img_data_3': self.ff_description_img_data_3,
'description_img_path_3': self.ff_description_img_path_3,
'test_time': self.ff_test_time,
'var_number': "",
'res_number': "",
'question_pool_tag': self.ff_question_pool_tag_entry.get(),
'question_author': self.ff_question_author_entry.get(),
'oid': record_id
})
conn.commit()
conn.close()
print("Frage mit ID: '" + record_id + "' editiert")
# Delete a question from the database
def ff_delete_id_from_db(self):
self.ff_delete_box_id = ""
self.ff_delete_box_id = self.ff_delete_box.get()
test_generator_modul_datenbanken_erstellen.Delete_Entry_from_Database.__init__(self, self.ff_delete_box_id, self.ff_question_type_name, self.ff_var_delete_all.get(), self.project_root_path, self.ff_db_entry_to_index_dict, self.database_formelfrage_path, self.ff_database, self.ff_database_table, "Formelfrage_DB_export_file.xlsx", "Formelfrage - Database")
# Used for the value-range calculation
# When processing several questions in a row, the entry fields must be empty
def ff_clear_var_res_entries(self):
self.res1_formula_entry.delete(0, END)
self.res2_formula_entry.delete(0, END)
self.res3_formula_entry.delete(0, END)
self.res4_formula_entry.delete(0, END)
self.res5_formula_entry.delete(0, END)
self.res6_formula_entry.delete(0, END)
self.res7_formula_entry.delete(0, END)
self.res8_formula_entry.delete(0, END)
self.res9_formula_entry.delete(0, END)
self.res10_formula_entry.delete(0, END)
self.var1_name_entry.delete(0, END)
self.var1_min_entry.delete(0, END)
self.var1_max_entry.delete(0, END)
self.var1_prec_entry.delete(0, END)
self.var1_divby_entry.delete(0, END)
self.var2_name_entry.delete(0, END)
self.var2_min_entry.delete(0, END)
self.var2_max_entry.delete(0, END)
self.var2_prec_entry.delete(0, END)
self.var2_divby_entry.delete(0, END)
self.var3_name_entry.delete(0, END)
self.var3_min_entry.delete(0, END)
self.var3_max_entry.delete(0, END)
self.var3_prec_entry.delete(0, END)
self.var3_divby_entry.delete(0, END)
self.var4_name_entry.delete(0, END)
self.var4_min_entry.delete(0, END)
self.var4_max_entry.delete(0, END)
self.var4_prec_entry.delete(0, END)
self.var4_divby_entry.delete(0, END)
self.var5_name_entry.delete(0, END)
self.var5_min_entry.delete(0, END)
self.var5_max_entry.delete(0, END)
self.var5_prec_entry.delete(0, END)
self.var5_divby_entry.delete(0, END)
self.var6_name_entry.delete(0, END)
self.var6_min_entry.delete(0, END)
self.var6_max_entry.delete(0, END)
self.var6_prec_entry.delete(0, END)
self.var6_divby_entry.delete(0, END)
self.var7_name_entry.delete(0, END)
self.var7_min_entry.delete(0, END)
self.var7_max_entry.delete(0, END)
self.var7_prec_entry.delete(0, END)
self.var7_divby_entry.delete(0, END)
self.var8_name_entry.delete(0, END)
self.var8_min_entry.delete(0, END)
self.var8_max_entry.delete(0, END)
self.var8_prec_entry.delete(0, END)
self.var8_divby_entry.delete(0, END)
self.var9_name_entry.delete(0, END)
self.var9_min_entry.delete(0, END)
self.var9_max_entry.delete(0, END)
self.var9_prec_entry.delete(0, END)
self.var9_divby_entry.delete(0, END)
self.var10_name_entry.delete(0, END)
self.var10_min_entry.delete(0, END)
self.var10_max_entry.delete(0, END)
self.var10_prec_entry.delete(0, END)
self.var10_divby_entry.delete(0, END)
self.var11_name_entry.delete(0, END)
self.var11_min_entry.delete(0, END)
self.var11_max_entry.delete(0, END)
self.var11_prec_entry.delete(0, END)
self.var11_divby_entry.delete(0, END)
self.var12_name_entry.delete(0, END)
self.var12_min_entry.delete(0, END)
self.var12_max_entry.delete(0, END)
self.var12_prec_entry.delete(0, END)
self.var12_divby_entry.delete(0, END)
self.var13_name_entry.delete(0, END)
self.var13_min_entry.delete(0, END)
self.var13_max_entry.delete(0, END)
self.var13_prec_entry.delete(0, END)
self.var13_divby_entry.delete(0, END)
self.var14_name_entry.delete(0, END)
self.var14_min_entry.delete(0, END)
self.var14_max_entry.delete(0, END)
self.var14_prec_entry.delete(0, END)
self.var14_divby_entry.delete(0, END)
self.var15_name_entry.delete(0, END)
self.var15_min_entry.delete(0, END)
self.var15_max_entry.delete(0, END)
self.var15_prec_entry.delete(0, END)
self.var15_divby_entry.delete(0, END)
self.res1_name_entry.delete(0, END)
self.res1_min_entry.delete(0, END)
self.res1_max_entry.delete(0, END)
self.res1_prec_entry.delete(0, END)
self.res1_tol_entry.delete(0, END)
self.res1_points_entry.delete(0, END)
self.res2_name_entry.delete(0, END)
self.res2_min_entry.delete(0, END)
self.res2_max_entry.delete(0, END)
self.res2_prec_entry.delete(0, END)
self.res2_tol_entry.delete(0, END)
self.res2_points_entry.delete(0, END)
self.res3_name_entry.delete(0, END)
self.res3_min_entry.delete(0, END)
self.res3_max_entry.delete(0, END)
self.res3_prec_entry.delete(0, END)
self.res3_tol_entry.delete(0, END)
self.res3_points_entry.delete(0, END)
self.res4_name_entry.delete(0, END)
self.res4_min_entry.delete(0, END)
self.res4_max_entry.delete(0, END)
self.res4_prec_entry.delete(0, END)
self.res4_tol_entry.delete(0, END)
self.res4_points_entry.delete(0, END)
self.res5_name_entry.delete(0, END)
self.res5_min_entry.delete(0, END)
self.res5_max_entry.delete(0, END)
self.res5_prec_entry.delete(0, END)
self.res5_tol_entry.delete(0, END)
self.res5_points_entry.delete(0, END)
self.res6_name_entry.delete(0, END)
self.res6_min_entry.delete(0, END)
self.res6_max_entry.delete(0, END)
self.res6_prec_entry.delete(0, END)
self.res6_tol_entry.delete(0, END)
self.res6_points_entry.delete(0, END)
self.res7_name_entry.delete(0, END)
self.res7_min_entry.delete(0, END)
self.res7_max_entry.delete(0, END)
self.res7_prec_entry.delete(0, END)
self.res7_tol_entry.delete(0, END)
self.res7_points_entry.delete(0, END)
self.res8_name_entry.delete(0, END)
self.res8_min_entry.delete(0, END)
self.res8_max_entry.delete(0, END)
self.res8_prec_entry.delete(0, END)
self.res8_tol_entry.delete(0, END)
self.res8_points_entry.delete(0, END)
self.res9_name_entry.delete(0, END)
self.res9_min_entry.delete(0, END)
self.res9_max_entry.delete(0, END)
self.res9_prec_entry.delete(0, END)
self.res9_tol_entry.delete(0, END)
self.res9_points_entry.delete(0, END)
self.res10_name_entry.delete(0, END)
self.res10_min_entry.delete(0, END)
self.res10_max_entry.delete(0, END)
self.res10_prec_entry.delete(0, END)
self.res10_tol_entry.delete(0, END)
self.res10_points_entry.delete(0, END)
# Clear all GUI entries
def ff_clear_GUI(self):
self.ff_question_difficulty_entry.delete(0, END)
self.ff_question_category_entry.delete(0, END)
#self.ff_question_type_entry.delete(0, END)
self.ff_question_title_entry.delete(0, END)
self.ff_question_description_title_entry.delete(0, END)
self.ff_question_description_main_entry.delete('1.0', 'end-1c')
self.res1_formula_entry.delete(0, END)
self.res2_formula_entry.delete(0, END)
self.res3_formula_entry.delete(0, END)
self.res4_formula_entry.delete(0, END)
self.res5_formula_entry.delete(0, END)
self.res6_formula_entry.delete(0, END)
self.res7_formula_entry.delete(0, END)
self.res8_formula_entry.delete(0, END)
self.res9_formula_entry.delete(0, END)
self.res10_formula_entry.delete(0, END)
self.var1_name_entry.delete(0, END)
self.var1_min_entry.delete(0, END)
self.var1_max_entry.delete(0, END)
self.var1_prec_entry.delete(0, END)
self.var1_divby_entry.delete(0, END)
self.var2_name_entry.delete(0, END)
self.var2_min_entry.delete(0, END)
self.var2_max_entry.delete(0, END)
self.var2_prec_entry.delete(0, END)
self.var2_divby_entry.delete(0, END)
self.var3_name_entry.delete(0, END)
self.var3_min_entry.delete(0, END)
self.var3_max_entry.delete(0, END)
self.var3_prec_entry.delete(0, END)
self.var3_divby_entry.delete(0, END)
self.var4_name_entry.delete(0, END)
self.var4_min_entry.delete(0, END)
self.var4_max_entry.delete(0, END)
self.var4_prec_entry.delete(0, END)
self.var4_divby_entry.delete(0, END)
self.var5_name_entry.delete(0, END)
self.var5_min_entry.delete(0, END)
self.var5_max_entry.delete(0, END)
self.var5_prec_entry.delete(0, END)
self.var5_divby_entry.delete(0, END)
self.var6_name_entry.delete(0, END)
self.var6_min_entry.delete(0, END)
self.var6_max_entry.delete(0, END)
self.var6_prec_entry.delete(0, END)
self.var6_divby_entry.delete(0, END)
self.var7_name_entry.delete(0, END)
self.var7_min_entry.delete(0, END)
self.var7_max_entry.delete(0, END)
self.var7_prec_entry.delete(0, END)
self.var7_divby_entry.delete(0, END)
self.var8_name_entry.delete(0, END)
self.var8_min_entry.delete(0, END)
self.var8_max_entry.delete(0, END)
self.var8_prec_entry.delete(0, END)
self.var8_divby_entry.delete(0, END)
self.var9_name_entry.delete(0, END)
self.var9_min_entry.delete(0, END)
self.var9_max_entry.delete(0, END)
self.var9_prec_entry.delete(0, END)
self.var9_divby_entry.delete(0, END)
self.var10_name_entry.delete(0, END)
self.var10_min_entry.delete(0, END)
self.var10_max_entry.delete(0, END)
self.var10_prec_entry.delete(0, END)
self.var10_divby_entry.delete(0, END)
self.var11_name_entry.delete(0, END)
self.var11_min_entry.delete(0, END)
self.var11_max_entry.delete(0, END)
self.var11_prec_entry.delete(0, END)
self.var11_divby_entry.delete(0, END)
self.var12_name_entry.delete(0, END)
self.var12_min_entry.delete(0, END)
self.var12_max_entry.delete(0, END)
self.var12_prec_entry.delete(0, END)
self.var12_divby_entry.delete(0, END)
self.var13_name_entry.delete(0, END)
self.var13_min_entry.delete(0, END)
self.var13_max_entry.delete(0, END)
self.var13_prec_entry.delete(0, END)
self.var13_divby_entry.delete(0, END)
self.var14_name_entry.delete(0, END)
self.var14_min_entry.delete(0, END)
self.var14_max_entry.delete(0, END)
self.var14_prec_entry.delete(0, END)
self.var14_divby_entry.delete(0, END)
self.var15_name_entry.delete(0, END)
self.var15_min_entry.delete(0, END)
self.var15_max_entry.delete(0, END)
self.var15_prec_entry.delete(0, END)
self.var15_divby_entry.delete(0, END)
self.res1_name_entry.delete(0, END)
self.res1_min_entry.delete(0, END)
self.res1_max_entry.delete(0, END)
self.res1_prec_entry.delete(0, END)
self.res1_tol_entry.delete(0, END)
self.res1_points_entry.delete(0, END)
self.res2_name_entry.delete(0, END)
self.res2_min_entry.delete(0, END)
self.res2_max_entry.delete(0, END)
self.res2_prec_entry.delete(0, END)
self.res2_tol_entry.delete(0, END)
self.res2_points_entry.delete(0, END)
self.res3_name_entry.delete(0, END)
self.res3_min_entry.delete(0, END)
self.res3_max_entry.delete(0, END)
self.res3_prec_entry.delete(0, END)
self.res3_tol_entry.delete(0, END)
self.res3_points_entry.delete(0, END)
self.res4_name_entry.delete(0, END)
self.res4_min_entry.delete(0, END)
self.res4_max_entry.delete(0, END)
self.res4_prec_entry.delete(0, END)
self.res4_tol_entry.delete(0, END)
self.res4_points_entry.delete(0, END)
self.res5_name_entry.delete(0, END)
self.res5_min_entry.delete(0, END)
self.res5_max_entry.delete(0, END)
self.res5_prec_entry.delete(0, END)
self.res5_tol_entry.delete(0, END)
self.res5_points_entry.delete(0, END)
self.res6_name_entry.delete(0, END)
self.res6_min_entry.delete(0, END)
self.res6_max_entry.delete(0, END)
self.res6_prec_entry.delete(0, END)
self.res6_tol_entry.delete(0, END)
self.res6_points_entry.delete(0, END)
self.res7_name_entry.delete(0, END)
self.res7_min_entry.delete(0, END)
self.res7_max_entry.delete(0, END)
self.res7_prec_entry.delete(0, END)
self.res7_tol_entry.delete(0, END)
self.res7_points_entry.delete(0, END)
self.res8_name_entry.delete(0, END)
self.res8_min_entry.delete(0, END)
self.res8_max_entry.delete(0, END)
self.res8_prec_entry.delete(0, END)
self.res8_tol_entry.delete(0, END)
self.res8_points_entry.delete(0, END)
self.res9_name_entry.delete(0, END)
self.res9_min_entry.delete(0, END)
self.res9_max_entry.delete(0, END)
self.res9_prec_entry.delete(0, END)
self.res9_tol_entry.delete(0, END)
self.res9_points_entry.delete(0, END)
self.res10_name_entry.delete(0, END)
self.res10_min_entry.delete(0, END)
self.res10_max_entry.delete(0, END)
self.res10_prec_entry.delete(0, END)
self.res10_tol_entry.delete(0, END)
self.res10_points_entry.delete(0, END)
self.ff_question_pool_tag_entry.delete(0, END)
class Create_Formelfrage_Questions(Formelfrage):
def __init__(self, db_entry_to_index_dict, ids_in_entry_box, question_type, pool_img_dir, ilias_id_pool_qpl_dir, xml_read_qti_template_path, xml_qti_output_file_path, xml_qpl_output_file_path, max_id_pool_qti_xml, max_id, taxonomy_file_question_pool):
# Specifies the number of question pools
# Usually only one pool is created; several pools are created only for the "split by taxonomy" option
#self.number_of_pools = 1
self.ff_db_entry_to_index_dict = db_entry_to_index_dict
self.ff_test_entry_splitted = ids_in_entry_box.split(",")
self.qti_file_path_output = xml_qti_output_file_path
self.formelfrage_pool_qpl_file_path_output = xml_qpl_output_file_path
self.ff_mytree = ET.parse(xml_read_qti_template_path)
self.ff_myroot = self.ff_mytree.getroot()
self.ff_question_type_test_or_pool = question_type
self.formelfrage_pool_img_file_path = pool_img_dir  # Only used when creating a question pool (the folder name is created at runtime)
self.all_entries_from_db_list = []
self.number_of_entrys = []
self.ff_collection_of_question_titles = []
self.question_pool_id_list = []
self.question_title_list = []
self.ff_number_of_questions_generated = 1
self.ilias_id_pool_qpl_dir = ilias_id_pool_qpl_dir
self.ff_file_max_id = max_id
self.taxonomy_file_question_pool = taxonomy_file_question_pool
self.ilias_id_pool_qti_xml = max_id_pool_qti_xml
print("\n")
if self.ff_question_type_test_or_pool == "question_test":
print("FORMELFRAGE: ILIAS-TEST WIRD ERSTELLT... ID: " + str(ids_in_entry_box))
else:
print("FORMELFRAGE: ILIAS-POOL WIRD ERSTELLT... ID: " + str(ids_in_entry_box))
# Connect to the Formelfrage (FF) database
connect_ff_db = sqlite3.connect(self.database_formelfrage_path)
cursor = connect_ff_db.cursor()
# Check whether all entries should be generated (checkbox is set)
if self.ff_var_create_question_pool_all_check.get() == 1 and self.ff_var_create_multiple_question_pools_from_tax_check.get() == 0:
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = c.fetchall()
for ff_db_record in ff_db_records:
self.all_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.string_temp = ','.join(map(str, self.all_entries_from_db_list))
self.ff_test_entry_splitted = self.string_temp.split(",")
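# Example: all_entries_from_db_list = [2, 5, 7] -> string_temp = "2,5,7" -> ff_test_entry_splitted = ["2", "5", "7"]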
# The entry with ID "1" is the template and should not be generated
#self.ff_test_entry_splitted.pop(0)
#print(self.ff_test_entry_splitted)
#print("Number of Pools: " + str(len(self.list_of_lists)))
#self.number_of_pools = len(self.list_of_lists)
# Read all database entries together with their "oid" (database ID)
# The database ID is created automatically (sequentially) for every new entry and cannot be changed
cursor.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = cursor.fetchall()
"""
for pool_number in range(self.number_of_pools):
self.string2_temp = ','.join(map(str, self.list_of_lists[pool_number]))
self.ff_test_entry_splitted = self.string2_temp.split(",")
print("%%%%%%")
print(self.ff_test_entry_splitted)
"""
for i in range(len(self.ff_test_entry_splitted)):
for ff_db_record in ff_db_records:
if str(ff_db_record[len(ff_db_record) - 1]) == self.ff_test_entry_splitted[i]:
for t in range(len(ff_db_record)):
if ff_db_record[self.ff_db_entry_to_index_dict['question_type']].lower() == self.ff_question_type_name.lower():
self.ff_question_difficulty = ff_db_record[self.ff_db_entry_to_index_dict['question_difficulty']]
self.ff_question_category = ff_db_record[self.ff_db_entry_to_index_dict['question_category']]
self.ff_question_type = ff_db_record[self.ff_db_entry_to_index_dict['question_type']]
self.ff_question_title = ff_db_record[self.ff_db_entry_to_index_dict['question_title']].replace('&', "&amp;")
self.ff_question_description_title = ff_db_record[self.ff_db_entry_to_index_dict['question_description_title']].replace('&', "&amp;")
self.ff_question_description_main = ff_db_record[self.ff_db_entry_to_index_dict['question_description_main']]
self.ff_res1_formula = ff_db_record[self.ff_db_entry_to_index_dict['res1_formula']]
self.ff_res2_formula = ff_db_record[self.ff_db_entry_to_index_dict['res2_formula']]
self.ff_res3_formula = ff_db_record[self.ff_db_entry_to_index_dict['res3_formula']]
self.ff_res4_formula = ff_db_record[self.ff_db_entry_to_index_dict['res4_formula']]
self.ff_res5_formula = ff_db_record[self.ff_db_entry_to_index_dict['res5_formula']]
self.ff_res6_formula = ff_db_record[self.ff_db_entry_to_index_dict['res6_formula']]
self.ff_res7_formula = ff_db_record[self.ff_db_entry_to_index_dict['res7_formula']]
self.ff_res8_formula = ff_db_record[self.ff_db_entry_to_index_dict['res8_formula']]
self.ff_res9_formula = ff_db_record[self.ff_db_entry_to_index_dict['res9_formula']]
self.ff_res10_formula = ff_db_record[self.ff_db_entry_to_index_dict['res10_formula']]
self.ff_var1_name = ff_db_record[self.ff_db_entry_to_index_dict['var1_name']]
self.ff_var1_min = ff_db_record[self.ff_db_entry_to_index_dict['var1_min']]
self.ff_var1_max = ff_db_record[self.ff_db_entry_to_index_dict['var1_max']]
self.ff_var1_prec = ff_db_record[self.ff_db_entry_to_index_dict['var1_prec']]
self.ff_var1_divby = ff_db_record[self.ff_db_entry_to_index_dict['var1_divby']]
self.ff_var1_unit = ff_db_record[self.ff_db_entry_to_index_dict['var1_unit']]
self.ff_var2_name = ff_db_record[self.ff_db_entry_to_index_dict['var2_name']]
self.ff_var2_min = ff_db_record[self.ff_db_entry_to_index_dict['var2_min']]
self.ff_var2_max = ff_db_record[self.ff_db_entry_to_index_dict['var2_max']]
self.ff_var2_prec = ff_db_record[self.ff_db_entry_to_index_dict['var2_prec']]
self.ff_var2_divby = ff_db_record[self.ff_db_entry_to_index_dict['var2_divby']]
self.ff_var2_unit = ff_db_record[self.ff_db_entry_to_index_dict['var2_unit']]
self.ff_var3_name = ff_db_record[self.ff_db_entry_to_index_dict['var3_name']]
self.ff_var3_min = ff_db_record[self.ff_db_entry_to_index_dict['var3_min']]
self.ff_var3_max = ff_db_record[self.ff_db_entry_to_index_dict['var3_max']]
self.ff_var3_prec = ff_db_record[self.ff_db_entry_to_index_dict['var3_prec']]
self.ff_var3_divby = ff_db_record[self.ff_db_entry_to_index_dict['var3_divby']]
self.ff_var3_unit = ff_db_record[self.ff_db_entry_to_index_dict['var3_unit']]
self.ff_var4_name = ff_db_record[self.ff_db_entry_to_index_dict['var4_name']]
self.ff_var4_min = ff_db_record[self.ff_db_entry_to_index_dict['var4_min']]
self.ff_var4_max = ff_db_record[self.ff_db_entry_to_index_dict['var4_max']]
self.ff_var4_prec = ff_db_record[self.ff_db_entry_to_index_dict['var4_prec']]
self.ff_var4_divby = ff_db_record[self.ff_db_entry_to_index_dict['var4_divby']]
self.ff_var4_unit = ff_db_record[self.ff_db_entry_to_index_dict['var4_unit']]
self.ff_var5_name = ff_db_record[self.ff_db_entry_to_index_dict['var5_name']]
self.ff_var5_min = ff_db_record[self.ff_db_entry_to_index_dict['var5_min']]
self.ff_var5_max = ff_db_record[self.ff_db_entry_to_index_dict['var5_max']]
self.ff_var5_prec = ff_db_record[self.ff_db_entry_to_index_dict['var5_prec']]
self.ff_var5_divby = ff_db_record[self.ff_db_entry_to_index_dict['var5_divby']]
self.ff_var5_unit = ff_db_record[self.ff_db_entry_to_index_dict['var5_unit']]
self.ff_var6_name = ff_db_record[self.ff_db_entry_to_index_dict['var6_name']]
self.ff_var6_min = ff_db_record[self.ff_db_entry_to_index_dict['var6_min']]
self.ff_var6_max = ff_db_record[self.ff_db_entry_to_index_dict['var6_max']]
self.ff_var6_prec = ff_db_record[self.ff_db_entry_to_index_dict['var6_prec']]
self.ff_var6_divby = ff_db_record[self.ff_db_entry_to_index_dict['var6_divby']]
self.ff_var6_unit = ff_db_record[self.ff_db_entry_to_index_dict['var6_unit']]
self.ff_var7_name = ff_db_record[self.ff_db_entry_to_index_dict['var7_name']]
self.ff_var7_min = ff_db_record[self.ff_db_entry_to_index_dict['var7_min']]
self.ff_var7_max = ff_db_record[self.ff_db_entry_to_index_dict['var7_max']]
self.ff_var7_prec = ff_db_record[self.ff_db_entry_to_index_dict['var7_prec']]
self.ff_var7_divby = ff_db_record[self.ff_db_entry_to_index_dict['var7_divby']]
self.ff_var7_unit = ff_db_record[self.ff_db_entry_to_index_dict['var7_unit']]
self.ff_var8_name = ff_db_record[self.ff_db_entry_to_index_dict['var8_name']]
self.ff_var8_min = ff_db_record[self.ff_db_entry_to_index_dict['var8_min']]
self.ff_var8_max = ff_db_record[self.ff_db_entry_to_index_dict['var8_max']]
self.ff_var8_prec = ff_db_record[self.ff_db_entry_to_index_dict['var8_prec']]
self.ff_var8_divby = ff_db_record[self.ff_db_entry_to_index_dict['var8_divby']]
self.ff_var8_unit = ff_db_record[self.ff_db_entry_to_index_dict['var8_unit']]
self.ff_var9_name = ff_db_record[self.ff_db_entry_to_index_dict['var9_name']]
self.ff_var9_min = ff_db_record[self.ff_db_entry_to_index_dict['var9_min']]
self.ff_var9_max = ff_db_record[self.ff_db_entry_to_index_dict['var9_max']]
self.ff_var9_prec = ff_db_record[self.ff_db_entry_to_index_dict['var9_prec']]
self.ff_var9_divby = ff_db_record[self.ff_db_entry_to_index_dict['var9_divby']]
self.ff_var9_unit = ff_db_record[self.ff_db_entry_to_index_dict['var9_unit']]
self.ff_var10_name = ff_db_record[self.ff_db_entry_to_index_dict['var10_name']]
self.ff_var10_min = ff_db_record[self.ff_db_entry_to_index_dict['var10_min']]
self.ff_var10_max = ff_db_record[self.ff_db_entry_to_index_dict['var10_max']]
self.ff_var10_prec = ff_db_record[self.ff_db_entry_to_index_dict['var10_prec']]
self.ff_var10_divby = ff_db_record[self.ff_db_entry_to_index_dict['var10_divby']]
self.ff_var10_unit = ff_db_record[self.ff_db_entry_to_index_dict['var10_unit']]
self.ff_var11_name = ff_db_record[self.ff_db_entry_to_index_dict['var11_name']]
self.ff_var11_min = ff_db_record[self.ff_db_entry_to_index_dict['var11_min']]
self.ff_var11_max = ff_db_record[self.ff_db_entry_to_index_dict['var11_max']]
self.ff_var11_prec = ff_db_record[self.ff_db_entry_to_index_dict['var11_prec']]
self.ff_var11_divby = ff_db_record[self.ff_db_entry_to_index_dict['var11_divby']]
self.ff_var11_unit = ff_db_record[self.ff_db_entry_to_index_dict['var11_unit']]
self.ff_var12_name = ff_db_record[self.ff_db_entry_to_index_dict['var12_name']]
self.ff_var12_min = ff_db_record[self.ff_db_entry_to_index_dict['var12_min']]
self.ff_var12_max = ff_db_record[self.ff_db_entry_to_index_dict['var12_max']]
self.ff_var12_prec = ff_db_record[self.ff_db_entry_to_index_dict['var12_prec']]
self.ff_var12_divby = ff_db_record[self.ff_db_entry_to_index_dict['var12_divby']]
self.ff_var12_unit = ff_db_record[self.ff_db_entry_to_index_dict['var12_unit']]
self.ff_var13_name = ff_db_record[self.ff_db_entry_to_index_dict['var13_name']]
self.ff_var13_min = ff_db_record[self.ff_db_entry_to_index_dict['var13_min']]
self.ff_var13_max = ff_db_record[self.ff_db_entry_to_index_dict['var13_max']]
self.ff_var13_prec = ff_db_record[self.ff_db_entry_to_index_dict['var13_prec']]
self.ff_var13_divby = ff_db_record[self.ff_db_entry_to_index_dict['var13_divby']]
self.ff_var13_unit = ff_db_record[self.ff_db_entry_to_index_dict['var13_unit']]
self.ff_var14_name = ff_db_record[self.ff_db_entry_to_index_dict['var14_name']]
self.ff_var14_min = ff_db_record[self.ff_db_entry_to_index_dict['var14_min']]
self.ff_var14_max = ff_db_record[self.ff_db_entry_to_index_dict['var14_max']]
self.ff_var14_prec = ff_db_record[self.ff_db_entry_to_index_dict['var14_prec']]
self.ff_var14_divby = ff_db_record[self.ff_db_entry_to_index_dict['var14_divby']]
self.ff_var14_unit = ff_db_record[self.ff_db_entry_to_index_dict['var14_unit']]
self.ff_var15_name = ff_db_record[self.ff_db_entry_to_index_dict['var15_name']]
self.ff_var15_min = ff_db_record[self.ff_db_entry_to_index_dict['var15_min']]
self.ff_var15_max = ff_db_record[self.ff_db_entry_to_index_dict['var15_max']]
self.ff_var15_prec = ff_db_record[self.ff_db_entry_to_index_dict['var15_prec']]
self.ff_var15_divby = ff_db_record[self.ff_db_entry_to_index_dict['var15_divby']]
self.ff_var15_unit = ff_db_record[self.ff_db_entry_to_index_dict['var15_unit']]
self.ff_res1_name = ff_db_record[self.ff_db_entry_to_index_dict['res1_name']]
self.ff_res1_min = ff_db_record[self.ff_db_entry_to_index_dict['res1_min']]
self.ff_res1_max = ff_db_record[self.ff_db_entry_to_index_dict['res1_max']]
self.ff_res1_prec = ff_db_record[self.ff_db_entry_to_index_dict['res1_prec']]
self.ff_res1_tol = ff_db_record[self.ff_db_entry_to_index_dict['res1_tol']]
self.ff_res1_points = ff_db_record[self.ff_db_entry_to_index_dict['res1_points']]
self.ff_res1_unit = ff_db_record[self.ff_db_entry_to_index_dict['res1_unit']]
self.ff_res2_name = ff_db_record[self.ff_db_entry_to_index_dict['res2_name']]
self.ff_res2_min = ff_db_record[self.ff_db_entry_to_index_dict['res2_min']]
self.ff_res2_max = ff_db_record[self.ff_db_entry_to_index_dict['res2_max']]
self.ff_res2_prec = ff_db_record[self.ff_db_entry_to_index_dict['res2_prec']]
self.ff_res2_tol = ff_db_record[self.ff_db_entry_to_index_dict['res2_tol']]
self.ff_res2_points = ff_db_record[self.ff_db_entry_to_index_dict['res2_points']]
self.ff_res2_unit = ff_db_record[self.ff_db_entry_to_index_dict['res2_unit']]
self.ff_res3_name = ff_db_record[self.ff_db_entry_to_index_dict['res3_name']]
self.ff_res3_min = ff_db_record[self.ff_db_entry_to_index_dict['res3_min']]
self.ff_res3_max = ff_db_record[self.ff_db_entry_to_index_dict['res3_max']]
self.ff_res3_prec = ff_db_record[self.ff_db_entry_to_index_dict['res3_prec']]
self.ff_res3_tol = ff_db_record[self.ff_db_entry_to_index_dict['res3_tol']]
self.ff_res3_points = ff_db_record[self.ff_db_entry_to_index_dict['res3_points']]
self.ff_res3_unit = ff_db_record[self.ff_db_entry_to_index_dict['res3_unit']]
self.ff_res4_name = ff_db_record[self.ff_db_entry_to_index_dict['res4_name']]
self.ff_res4_min = ff_db_record[self.ff_db_entry_to_index_dict['res4_min']]
self.ff_res4_max = ff_db_record[self.ff_db_entry_to_index_dict['res4_max']]
self.ff_res4_prec = ff_db_record[self.ff_db_entry_to_index_dict['res4_prec']]
self.ff_res4_tol = ff_db_record[self.ff_db_entry_to_index_dict['res4_tol']]
self.ff_res4_points = ff_db_record[self.ff_db_entry_to_index_dict['res4_points']]
self.ff_res4_unit = ff_db_record[self.ff_db_entry_to_index_dict['res4_unit']]
self.ff_res5_name = ff_db_record[self.ff_db_entry_to_index_dict['res5_name']]
self.ff_res5_min = ff_db_record[self.ff_db_entry_to_index_dict['res5_min']]
self.ff_res5_max = ff_db_record[self.ff_db_entry_to_index_dict['res5_max']]
self.ff_res5_prec = ff_db_record[self.ff_db_entry_to_index_dict['res5_prec']]
self.ff_res5_tol = ff_db_record[self.ff_db_entry_to_index_dict['res5_tol']]
self.ff_res5_points = ff_db_record[self.ff_db_entry_to_index_dict['res5_points']]
self.ff_res5_unit = ff_db_record[self.ff_db_entry_to_index_dict['res5_unit']]
self.ff_res6_name = ff_db_record[self.ff_db_entry_to_index_dict['res6_name']]
self.ff_res6_min = ff_db_record[self.ff_db_entry_to_index_dict['res6_min']]
self.ff_res6_max = ff_db_record[self.ff_db_entry_to_index_dict['res6_max']]
self.ff_res6_prec = ff_db_record[self.ff_db_entry_to_index_dict['res6_prec']]
self.ff_res6_tol = ff_db_record[self.ff_db_entry_to_index_dict['res6_tol']]
self.ff_res6_points = ff_db_record[self.ff_db_entry_to_index_dict['res6_points']]
self.ff_res6_unit = ff_db_record[self.ff_db_entry_to_index_dict['res6_unit']]
self.ff_res7_name = ff_db_record[self.ff_db_entry_to_index_dict['res7_name']]
self.ff_res7_min = ff_db_record[self.ff_db_entry_to_index_dict['res7_min']]
self.ff_res7_max = ff_db_record[self.ff_db_entry_to_index_dict['res7_max']]
self.ff_res7_prec = ff_db_record[self.ff_db_entry_to_index_dict['res7_prec']]
self.ff_res7_tol = ff_db_record[self.ff_db_entry_to_index_dict['res7_tol']]
self.ff_res7_points = ff_db_record[self.ff_db_entry_to_index_dict['res7_points']]
self.ff_res7_unit = ff_db_record[self.ff_db_entry_to_index_dict['res7_unit']]
self.ff_res8_name = ff_db_record[self.ff_db_entry_to_index_dict['res8_name']]
self.ff_res8_min = ff_db_record[self.ff_db_entry_to_index_dict['res8_min']]
self.ff_res8_max = ff_db_record[self.ff_db_entry_to_index_dict['res8_max']]
self.ff_res8_prec = ff_db_record[self.ff_db_entry_to_index_dict['res8_prec']]
self.ff_res8_tol = ff_db_record[self.ff_db_entry_to_index_dict['res8_tol']]
self.ff_res8_points = ff_db_record[self.ff_db_entry_to_index_dict['res8_points']]
self.ff_res8_unit = ff_db_record[self.ff_db_entry_to_index_dict['res8_unit']]
self.ff_res9_name = ff_db_record[self.ff_db_entry_to_index_dict['res9_name']]
self.ff_res9_min = ff_db_record[self.ff_db_entry_to_index_dict['res9_min']]
self.ff_res9_max = ff_db_record[self.ff_db_entry_to_index_dict['res9_max']]
self.ff_res9_prec = ff_db_record[self.ff_db_entry_to_index_dict['res9_prec']]
self.ff_res9_tol = ff_db_record[self.ff_db_entry_to_index_dict['res9_tol']]
self.ff_res9_points = ff_db_record[self.ff_db_entry_to_index_dict['res9_points']]
self.ff_res9_unit = ff_db_record[self.ff_db_entry_to_index_dict['res9_unit']]
self.ff_res10_name = ff_db_record[self.ff_db_entry_to_index_dict['res10_name']]
self.ff_res10_min = ff_db_record[self.ff_db_entry_to_index_dict['res10_min']]
self.ff_res10_max = ff_db_record[self.ff_db_entry_to_index_dict['res10_max']]
self.ff_res10_prec = ff_db_record[self.ff_db_entry_to_index_dict['res10_prec']]
self.ff_res10_tol = ff_db_record[self.ff_db_entry_to_index_dict['res10_tol']]
self.ff_res10_points = ff_db_record[self.ff_db_entry_to_index_dict['res10_points']]
self.ff_res10_unit = ff_db_record[self.ff_db_entry_to_index_dict['res10_unit']]
self.ff_description_img_name_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_1']]
self.ff_description_img_data_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_1']]
self.ff_description_img_path_1 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_1']]
self.ff_description_img_name_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_2']]
self.ff_description_img_data_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_2']]
self.ff_description_img_path_2 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_2']]
self.ff_description_img_name_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_name_3']]
self.ff_description_img_data_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_data_3']]
self.ff_description_img_path_3 = ff_db_record[self.ff_db_entry_to_index_dict['description_img_path_3']]
self.ff_test_time = ff_db_record[self.ff_db_entry_to_index_dict['test_time']]
self.ff_var_number = ff_db_record[self.ff_db_entry_to_index_dict['var_number']]
self.ff_res_number = ff_db_record[self.ff_db_entry_to_index_dict['res_number']]
self.ff_question_pool_tag = ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']]
self.ff_question_author = ff_db_record[self.ff_db_entry_to_index_dict['question_author']].replace('&', "&amp;")
Create_Formelfrage_Questions.ff_question_structure(self, i)
# Describes the XML question structure for Formelfrage questions
# Each question has a general part and a specific part
# The general part contains e.g. "ILIAS version", "question type" and "author"
# The specific part contains the variables/results etc.
def ff_question_structure(self, id_nr):
"""This function converts the SQL entries into the .xml file that can afterwards be imported into ILIAS"""
# VARIABLES
self.ff_response_counter = 0  # counts how many answers are used per question; incremented by 1 for every new answer
self.ff_question_description_main = test_generator_modul_taxonomie_und_textformatierung.Textformatierung.format_description_text_in_xml(self, self.ff_var_use_latex_on_text_check.get(), self.ff_question_description_main)
# Connection to the FF database
ff_connect = sqlite3.connect(self.database_formelfrage_path)
ff_cursor = ff_connect.cursor()
# Read all entries
ff_cursor.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = ff_cursor.fetchall()
for ff_db_record in ff_db_records:
# The questions are created here based on their IDs
if str(ff_db_record[len(ff_db_record)-1]) == self.ff_test_entry_splitted[id_nr]:
# Save the images used in the description
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.ff_description_img_name_1, self.ff_description_img_data_1, id_nr, self.ff_question_type_test_or_pool, self.formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.ff_description_img_name_2, self.ff_description_img_data_2, id_nr, self.ff_question_type_test_or_pool, self.formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)
test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images(self, self.ff_description_img_name_3, self.ff_description_img_data_3, id_nr, self.ff_question_type_test_or_pool, self.formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)
# Question structure layout for a "TEST"
if self.ff_question_type_test_or_pool == "question_test":
# Define the XML structure from the XML file; only needs to be created once
questestinterop = ET.Element('questestinterop')
assessment = ET.SubElement(questestinterop, 'assessment')
section = ET.SubElement(assessment, 'section')
item = ET.SubElement(section, 'item')
# Question structure layout for a "POOL"
else:
# Define the XML structure from the XML file; only needs to be created once
questestinterop = ET.Element('questestinterop')
item = ET.SubElement(questestinterop, 'item')
# Additional handling for the taxonomy settings
test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question(self,
id_nr,
self.number_of_entrys,
item,
self.formelfrage_pool_qpl_file_path_template,
self.formelfrage_pool_qpl_file_path_output
)
# Structure for the Formelfrage variables/solutions part
# Must be created/added anew for every question
qticomment = ET.SubElement(item, 'qticomment')
duration = ET.SubElement(item, 'duration')
itemmetadata = ET.SubElement(item, 'itemmetadata')
presentation = ET.SubElement(item, 'presentation')
flow = ET.SubElement(presentation, 'flow')
question_description_material = ET.SubElement(flow, 'material')
question_description_mattext = ET.SubElement(question_description_material, 'mattext')
qtimetadata = ET.SubElement(itemmetadata, 'qtimetadata')
### ------------------------------------------------------- Fill the XML entries with values
# Question title -- "item title" in the xml
item.set('title', self.ff_question_title)
# Question title description
qticomment.text = self.ff_question_description_title
# Test duration -- "duration" in the xml
# If no test time is entered, 1 hour is preselected
duration.text = self.ff_test_time
if duration.text == "":
duration.text = "P0Y0M0DT1H0M0S"
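# "P0Y0M0DT1H0M0S" is an ISO 8601 duration: 0 years, 0 months, 0 days, 1 hour, 0 minutes, 0 seconds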
# -----------------------------------------------------------------------ILIAS VERSION
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "ILIAS_VERSION"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "5.4.10 2020-03-04"
# -----------------------------------------------------------------------QUESTIONTYPE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "QUESTIONTYPE"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "assFormulaQuestion"
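# "assFormulaQuestion" is the ILIAS-internal type name for formula questions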
# -----------------------------------------------------------------------AUTHOR
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "AUTHOR"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = self.ff_question_author
# -----------------------------------------------------------------------POINTS
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "points"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = str(self.ff_res1_points)
# Insert the question title -- "presentation label" in the xml
presentation.set('label', self.ff_question_title)
# Insert the question text (format) -- "mattext_texttype" in the xml -- specifies the format of the text
question_description_mattext.set('texttype', "text/html")
# Insert the question text (content) -- "mattext_texttype" in the xml -- contains the actual question description
# If an image is included, it is embedded in the question description
question_description_mattext.text = test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main(
self, self.ff_description_img_name_1, self.ff_description_img_data_1,
self.ff_description_img_name_2, self.ff_description_img_data_2,
self.ff_description_img_name_3, self.ff_description_img_data_3,
self.ff_question_description_main, question_description_mattext, question_description_material, id_nr)
# ----------------------------------------------------------------------- Variable
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v1", self.ff_var1_min, self.ff_var1_max, self.ff_var1_prec, self.ff_var1_divby, self.ff_var1_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v2", self.ff_var2_min, self.ff_var2_max, self.ff_var2_prec, self.ff_var2_divby, self.ff_var2_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v3", self.ff_var3_min, self.ff_var3_max, self.ff_var3_prec, self.ff_var3_divby, self.ff_var3_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v4", self.ff_var4_min, self.ff_var4_max, self.ff_var4_prec, self.ff_var4_divby, self.ff_var4_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v5", self.ff_var5_min, self.ff_var5_max, self.ff_var5_prec, self.ff_var5_divby, self.ff_var5_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v6", self.ff_var6_min, self.ff_var6_max, self.ff_var6_prec, self.ff_var6_divby, self.ff_var6_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v7", self.ff_var7_min, self.ff_var7_max, self.ff_var7_prec, self.ff_var7_divby, self.ff_var7_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v8", self.ff_var8_min, self.ff_var8_max, self.ff_var8_prec, self.ff_var8_divby, self.ff_var8_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v9", self.ff_var9_min, self.ff_var9_max, self.ff_var9_prec, self.ff_var9_divby, self.ff_var9_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v10", self.ff_var10_min, self.ff_var10_max, self.ff_var10_prec, self.ff_var10_divby, self.ff_var10_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v11", self.ff_var11_min, self.ff_var11_max, self.ff_var11_prec, self.ff_var11_divby, self.ff_var11_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v12", self.ff_var12_min, self.ff_var12_max, self.ff_var12_prec, self.ff_var12_divby, self.ff_var12_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v13", self.ff_var13_min, self.ff_var13_max, self.ff_var13_prec, self.ff_var13_divby, self.ff_var13_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v14", self.ff_var14_min, self.ff_var14_max, self.ff_var14_prec, self.ff_var14_divby, self.ff_var14_unit)
Create_Formelfrage_Questions.ff_question_variables_structure(self, qtimetadata, "$v15", self.ff_var15_min, self.ff_var15_max, self.ff_var15_prec, self.ff_var15_divby, self.ff_var15_unit)
# ----------------------------------------------------------------------- Solution
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r1", self.ff_res1_formula, self.ff_res1_min, self.ff_res1_max, self.ff_res1_prec, self.ff_res1_tol, self.ff_res1_points, self.ff_res1_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r2", self.ff_res2_formula, self.ff_res2_min, self.ff_res2_max, self.ff_res2_prec, self.ff_res2_tol, self.ff_res2_points, self.ff_res2_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r3", self.ff_res3_formula, self.ff_res3_min, self.ff_res3_max, self.ff_res3_prec, self.ff_res3_tol, self.ff_res3_points, self.ff_res3_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r4", self.ff_res4_formula, self.ff_res4_min, self.ff_res4_max, self.ff_res4_prec, self.ff_res4_tol, self.ff_res4_points, self.ff_res4_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r5", self.ff_res5_formula, self.ff_res5_min, self.ff_res5_max, self.ff_res5_prec, self.ff_res5_tol, self.ff_res5_points, self.ff_res5_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r6", self.ff_res6_formula, self.ff_res6_min, self.ff_res6_max, self.ff_res6_prec, self.ff_res6_tol, self.ff_res6_points, self.ff_res6_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r7", self.ff_res7_formula, self.ff_res7_min, self.ff_res7_max, self.ff_res7_prec, self.ff_res7_tol, self.ff_res7_points, self.ff_res7_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r8", self.ff_res8_formula, self.ff_res8_min, self.ff_res8_max, self.ff_res8_prec, self.ff_res8_tol, self.ff_res8_points, self.ff_res8_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r9", self.ff_res9_formula, self.ff_res9_min, self.ff_res9_max, self.ff_res9_prec, self.ff_res9_tol, self.ff_res9_points, self.ff_res9_unit)
Create_Formelfrage_Questions.ff_question_results_structure(self, qtimetadata, "$r10", self.ff_res10_formula, self.ff_res10_min, self.ff_res10_max, self.ff_res10_prec, self.ff_res10_tol, self.ff_res10_points, self.ff_res10_unit)
# -----------------------------------------------------------------------ADDITIONAL_CONT_EDIT_MODE
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "additional_cont_edit_mode"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "default"
# -----------------------------------------------------------------------EXTERNAL_ID
qtimetadatafield = ET.SubElement(qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = "externalId"
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
fieldentry.text = "5ea15be69c1e96.43933468"
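# Note: this external ID is a fixed placeholder taken from the template; every generated question is written with the same value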
# If an ILIAS test is created, the XML contains a structure with several "branches"
# The last "branch" --> "len(self.ff_myroot[0]) - 1" (the last node) contains the actual questions
if self.ff_question_type_test_or_pool == "question_test":
self.ff_myroot[0][len(self.ff_myroot[0]) - 1].append(item)
# If an ILIAS pool is created, the XML contains no such structure
# The question can simply be appended
else:
self.ff_myroot.append(item)
self.ff_mytree.write(self.qti_file_path_output)
print(str(self.ff_number_of_questions_generated) + ".) Formelfrage Frage erstellt! ---> Titel: " + str(self.ff_question_title))
self.ff_number_of_questions_generated += 1
self.ff_collection_of_question_titles.append(self.ff_question_title)
ff_connect.commit()
ff_connect.close()
if self.ff_question_type_test_or_pool == "question_pool":
###### Adjust the "qpl" file: update the file name
self.qpl_file = os.path.normpath(os.path.join(self.formelfrage_files_path,"ff_ilias_pool_abgabe", self.ilias_id_pool_qpl_dir, self.ilias_id_pool_qti_xml))
self.mytree = ET.parse(self.qpl_file)
self.myroot = self.mytree.getroot()
for ident_id in self.myroot.iter('Identifier'):
ident_id.set('Entry', "il_0_qpl_" + str(self.ff_file_max_id+1))
self.mytree.write(self.qpl_file)
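# The "Entry" attribute of every <Identifier> element in the qpl file is rewritten to "il_0_qpl_<max id + 1>",
# i.e. the previous maximum pool ID incremented by one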
# Defines the XML structure for a variable
def ff_question_variables_structure(self, xml_qtimetadata, ff_var_name, ff_var_min, ff_var_max, ff_var_prec, ff_var_divby, ff_var_unit):
# <------------ INIT ----------->
self.ff_var_name = ff_var_name
self.ff_var_min = str(ff_var_min)
self.ff_var_max = str(ff_var_max)
self.ff_var_prec = str(ff_var_prec)
self.ff_var_divby = str(ff_var_divby)
self.ff_var_divby_length = len(str(self.ff_var_divby))
self.ff_var_unit = ff_var_unit
self.ff_var_unit_length = len(str(self.ff_var_unit))
# <------------ FORMELFRAGE VARIABLE STRUCTURE (in XML) ----------->
qtimetadatafield = ET.SubElement(xml_qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = ff_var_name
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
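# fieldentry.text below appears to be a PHP-serialized array (the format ILIAS uses internally):
# "a:6:{...}" = array with 6 entries, "s:N:" = string of length N, "i:" = integer, "d:" = double.
# Worked example (assumed values: precision "2", divby "1", min "1", max "10", no unit):
# a:6:{s:9:"precision";i:2;s:12:"intprecision";s:1:"1";s:8:"rangemin";d:1;s:8:"rangemax";d:10;s:4:"unit";s:0:"";s:9:"unitvalue";s:0:"";}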
# With units:
if self.ff_var_unit != "":
fieldentry.text = "a:6:{" \
"s:9:\"precision\";i:" + self.ff_var_prec + ";" \
"s:12:\"intprecision\";s:" + str(self.ff_var_divby_length) + ":\"" + self.ff_var_divby + "\";" \
"s:8:\"rangemin\";d:" + self.ff_var_min + ";" \
"s:8:\"rangemax\";d:" + self.ff_var_max + ";" \
"s:4:\"unit\";s:" + str(self.ff_var_unit_length) + ":\"" + self.ff_var_unit + "\";" \
"s:9:\"unitvalue\";s:" + str(len(Formelfrage.unit_table(self, self.ff_var_unit))) + ":\"" + Formelfrage.unit_table(self, self.ff_var_unit) + "\";" \
"}"
# Without units:
else:
fieldentry.text = "a:6:{" \
"s:9:\"precision\";i:" + self.ff_var_prec + ";" \
"s:12:\"intprecision\";s:" + str(self.ff_var_divby_length) + ":\"" + self.ff_var_divby + "\";" \
"s:8:\"rangemin\";d:" + self.ff_var_min + ";" \
"s:8:\"rangemax\";d:" + self.ff_var_max + ";" \
"s:4:\"unit\";s:0:\"\";" \
"s:9:\"unitvalue\";s:0:\"\";" \
"}"
# Defines the XML structure of a result
def ff_question_results_structure(self, xml_qtimetadata, ff_res_name, ff_res_formula, ff_res_min, ff_res_max, ff_res_prec, ff_res_tol, ff_res_points, ff_res_unit):
def replace_words_in_formula(formula):
self.replace_words_dict = {
"$V": "$v",
"$R": "$r",
"=": " ",
"SIN": "sin",
"SINH": "sinh",
"ARCSIN": "arcsin",
"ASIN": "asin",
"ARCSINH": "arcsinh",
"ASINH": "asinh",
"COS": "cos",
"COSH": "cosh",
"ARCCOS": "arccos",
"ACOS": "acos",
"ARCCOSH": "arccosh",
"ACOSH": "acosh",
"TAN": "tan",
"TANH": "tanh",
"ARCTAN": "arctan",
"ATAN": "atan",
"ARCTANH": "arctanh",
"ATANH": "atanh",
"SQRT": "sqrt",
"Wurzel": "sqrt",
"wurzel": "sqrt",
"ABS": "abs",
"LN": "ln",
"LOG": "log"
}
formula = ' '.join([self.replace_words_dict.get(i,i) for i in formula.split()])
return formula
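# Note: the replacement is token-based (the formula is split on whitespace) and only whole tokens
# that exactly match a dictionary key are replaced, e.g. "ABS ( $v1 )" -> "abs ( $v1 )".
# Compound tokens such as "$V1" or "SIN($v2)" do not match any key and are therefore left unchanged.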
# <------------ INIT ----------->
self.ff_res_name = ff_res_name
self.ff_res_formula = ff_res_formula
self.ff_res_formula_length = len(str(self.ff_res_formula))
self.ff_res_min = str(ff_res_min)
self.ff_res_min_length = len(str(self.ff_res_min))
self.ff_res_max = str(ff_res_max)
self.ff_res_max_length = len(str(self.ff_res_max))
self.ff_res_prec = str(ff_res_prec)
self.ff_res_tol = str(ff_res_tol)
self.ff_res_tol_length = len(str(self.ff_res_tol))
self.ff_res_points = str(ff_res_points)
self.ff_res_points_length = len(self.ff_res_points)
self.ff_res_unit = ff_res_unit
self.ff_res_unit_length = len(str(self.ff_res_unit))
# ILIAS cannot handle "$Vx" instead of "$vx" or "$Rx" instead of "$rx" (capital instead of lowercase letters for variables)
# No "=" may be used in the result formula! No error message appears, but the results
# of the ILIAS calculation are always set to "0"
self.ff_res_formula = replace_words_in_formula(self.ff_res_formula)
# <------------ FORMELFRAGE RESULT STRUCTURE (in XML) ----------->
# Here the structure of the result part (e.g. $r1) is written into the XML
# If the result part is used with units, the corresponding data must be entered in "resultunits"
# Notation of the serialized entry: "s:N" marks a string of length N (e.g. s:9:"precision"),
# "i:" an integer; "rangemin"/"rangemax" are stored as strings, so expressions such as "10*1000" are allowed
qtimetadatafield = ET.SubElement(xml_qtimetadata, 'qtimetadatafield')
fieldlabel = ET.SubElement(qtimetadatafield, 'fieldlabel')
fieldlabel.text = self.ff_res_name
fieldentry = ET.SubElement(qtimetadatafield, 'fieldentry')
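# Worked example of the serialized result entry without units (assumed values: prec "2",
# tol "5", min "0", max "100", points "3", formula "$v1*$v2"):
# a:10:{s:9:"precision";i:2;s:9:"tolerance";s:1:"5";s:8:"rangemin";s:1:"0";s:8:"rangemax";s:3:"100";s:6:"points";s:1:"3";s:7:"formula";s:7:"$v1*$v2";s:6:"rating";s:0:"";s:4:"unit";s:0:"";s:9:"unitvalue";s:0:"";s:11:"resultunits";a:0:{}}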
# With units:
if self.ff_res_unit != "":
fieldentry.text = "a:10:{" \
"s:9:\"precision\";i:" + self.ff_res_prec + ";" \
"s:9:\"tolerance\";s:" + str(self.ff_res_tol_length) + ":\"" + self.ff_res_tol + "\";" \
"s:8:\"rangemin\";s:" + str(self.ff_res_min_length) + ":\"" + self.ff_res_min + "\";" \
"s:8:\"rangemax\";s:" + str(self.ff_res_max_length) + ":\"" + self.ff_res_max + "\";" \
"s:6:\"points\";s:" + str(self.ff_res_points_length) + ":\"" + self.ff_res_points + "\";" \
"s:7:\"formula\";s:" + str(self.ff_res_formula_length) + ":\"" + self.ff_res_formula + "\";" \
"s:6:\"rating\";s:0:\"\";" \
"s:4:\"unit\";s:" + str(self.ff_res_unit_length) + ":\"" + self.ff_res_unit + "\";" \
"s:9:\"unitvalue\";s:" + str(len(Formelfrage.unit_table(self, self.ff_res_unit))) + ":\"" + Formelfrage.unit_table(self, self.ff_res_unit) + "\";" \
"s:11:\"resultunits\";a:27:{i:0;a:2:{s:4:\"unit\";s:1:\"H\";s:9:\"unitvalue\";s:3:\"125\";}" \
"i:1;a:2:{s:4:\"unit\";s:2:\"mH\";s:9:\"unitvalue\";s:3:\"126\";}" \
"i:2;a:2:{s:4:\"unit\";s:3:\"µH\";s:9:\"unitvalue\";s:3:\"127\";}" \
"i:3;a:2:{s:4:\"unit\";s:2:\"nH\";s:9:\"unitvalue\";s:3:\"128\";}" \
"i:4;a:2:{s:4:\"unit\";s:2:\"kH\";s:9:\"unitvalue\";s:3:\"129\";}" \
"i:5;a:2:{s:4:\"unit\";s:2:\"pH\";s:9:\"unitvalue\";s:3:\"130\";}" \
"i:6;a:2:{s:4:\"unit\";s:1:\"F\";s:9:\"unitvalue\";s:3:\"131\";}" \
"i:7;a:2:{s:4:\"unit\";s:2:\"mF\";s:9:\"unitvalue\";s:3:\"132\";}" \
"i:8;a:2:{s:4:\"unit\";s:3:\"µF\";s:9:\"unitvalue\";s:3:\"133\";}" \
"i:9;a:2:{s:4:\"unit\";s:2:\"nF\";s:9:\"unitvalue\";s:3:\"134\";}" \
"i:10;a:2:{s:4:\"unit\";s:2:\"pF\";s:9:\"unitvalue\";s:3:\"135\";}" \
"i:11;a:2:{s:4:\"unit\";s:1:\"W\";s:9:\"unitvalue\";s:3:\"136\";}" \
"i:12;a:2:{s:4:\"unit\";s:2:\"kW\";s:9:\"unitvalue\";s:3:\"137\";}" \
"i:13;a:2:{s:4:\"unit\";s:2:\"MW\";s:9:\"unitvalue\";s:3:\"138\";}" \
"i:14;a:2:{s:4:\"unit\";s:1:\"V\";s:9:\"unitvalue\";s:3:\"139\";}" \
"i:15;a:2:{s:4:\"unit\";s:2:\"kV\";s:9:\"unitvalue\";s:3:\"140\";}" \
"i:16;a:2:{s:4:\"unit\";s:2:\"mV\";s:9:\"unitvalue\";s:3:\"141\";}" \
"i:17;a:2:{s:4:\"unit\";s:3:\"µV\";s:9:\"unitvalue\";s:3:\"142\";}" \
"i:18;a:2:{s:4:\"unit\";s:2:\"MV\";s:9:\"unitvalue\";s:3:\"143\";}" \
"i:19;a:2:{s:4:\"unit\";s:1:\"A\";s:9:\"unitvalue\";s:3:\"144\";}" \
"i:20;a:2:{s:4:\"unit\";s:2:\"mA\";s:9:\"unitvalue\";s:3:\"145\";}" \
"i:21;a:2:{s:4:\"unit\";s:3:\"µA\";s:9:\"unitvalue\";s:3:\"146\";}" \
"i:22;a:2:{s:4:\"unit\";s:2:\"kA\";s:9:\"unitvalue\";s:3:\"147\";}" \
"i:23;a:2:{s:4:\"unit\";s:3:\"Ohm\";s:9:\"unitvalue\";s:3:\"148\";}" \
"i:24;a:2:{s:4:\"unit\";s:2:\"mW\";s:9:\"unitvalue\";s:3:\"149\";}" \
"i:25;a:2:{s:4:\"unit\";s:4:\"kOhm\";s:9:\"unitvalue\";s:3:\"150\";}" \
"i:26;a:2:{s:4:\"unit\";s:4:\"mOhm\";s:9:\"unitvalue\";s:3:\"151\";}}" \
"}"
# Without units:
else:
fieldentry.text = "a:10:{" \
"s:9:\"precision\";i:" + self.ff_res_prec + ";" \
"s:9:\"tolerance\";s:" + str(self.ff_res_tol_length) + ":\"" + self.ff_res_tol + "\";" \
"s:8:\"rangemin\";s:" + str(self.ff_res_min_length) + ":\"" + self.ff_res_min + "\";" \
"s:8:\"rangemax\";s:" + str(self.ff_res_max_length) + ":\"" + self.ff_res_max + "\";" \
"s:6:\"points\";s:" + str(self.ff_res_points_length) + ":\"" + self.ff_res_points + "\";" \
"s:7:\"formula\";s:" + str(self.ff_res_formula_length) + ":\"" + self.ff_res_formula + "\";" \
"s:6:\"rating\";s:0:\"\";" \
"s:4:\"unit\";s:0:\"\";" \
"s:9:\"unitvalue\";s:0:\"\";" \
"s:11:\"resultunits\";a:0:{}" \
"}"
# <------------ CREATE FORMELFRAGE TEST ----------->
# Creating a test is somewhat branched, because the same template is used for every question type
# Each question type sends its dictionary, paths etc. to "test_generator_modul_ilias_test_struktur.py"
# There the "base model" for a test is defined; afterwards "test_generator_modul_ilias_test_struktur.py"
# calls the respective question type with its question structure again
# ---> formelfrage.py(Create_FF_Test) -> ilias_test_struktur.py(Create_ILIAS_Test) -> formelfrage.py(Create_FF_Questions)
class Create_Formelfrage_Test(Formelfrage):
def __init__(self, entry_to_index_dict):
self.ff_db_entry_to_index_dict = entry_to_index_dict
test_generator_modul_ilias_test_struktur.Create_ILIAS_Test.__init__(self,
self.ff_db_entry_to_index_dict,
self.formelfrage_test_tst_file_path_template,
self.formelfrage_test_tst_file_path_output,
self.formelfrage_test_qti_file_path_template,
self.formelfrage_test_qti_file_path_output,
self.ff_ilias_test_title_entry.get(),
self.create_formelfrage_test_entry.get(),
self.ff_question_type_entry.get(),
)
if self.ff_var_create_test_settings_check.get() == 1:
test_generator_modul_test_einstellungen.Test_Einstellungen_GUI.create_settings(self, self.test_settings_database_path, self.test_settings_database_table, self.ff_selected_profile_for_test_settings_box.get())
self.excel_id_list =[]
self.excel_temp_list = []
for t in range(len(self.ff_collection_of_question_titles)):
self.excel_temp_list = self.ff_collection_of_question_titles[t].split(' ')
self.excel_id_list.append(self.excel_temp_list[0])
self.id_dublicates_counter = Counter(self.excel_id_list)
self.id_dublicates_results = [k for k, v in self.id_dublicates_counter.items() if v > 1]
self.titels_dublicates_counter = Counter(self.ff_collection_of_question_titles)
self.titles_dublicates_results = [k for k, v in self.titels_dublicates_counter.items() if v > 1]
dublicate_id_warning = ""
dublicate_title_warning = ""
if len(self.id_dublicates_results) >= 1 or len(self.titles_dublicates_results) >= 1:
dublicate_id_warning = "ACHTUNG!\nErstellter Fragentest enthält doppelte Fragen:" + "\n"
if len(self.id_dublicates_results) >= 1:
dublicate_id_warning += "\n\n" + "Fragen-ID" + "\n"
for i in range(len(self.id_dublicates_results)):
dublicate_id_warning += "---> " + str(self.id_dublicates_results[i]) + "\n"
if len(self.titles_dublicates_results) >= 1:
dublicate_title_warning = "Fragen-Titel" + "\n"
for i in range(len(self.titles_dublicates_results)):
dublicate_title_warning += "---> " + str(self.titles_dublicates_results[i]) + "\n"
messagebox.showinfo("Fragentest erstellen", "Fragentest wurde erstellt!" + "\n\n" + dublicate_id_warning + "\n\n" + dublicate_title_warning)
# <------------ CREATE FORMELFRAGE POOL ----------->
class Create_Formelfrage_Pool(Formelfrage):
def __init__(self, entry_to_index_dict, var_create_all_questions, var_create_multiple_question_pools_from_tax):
self.ff_entry_to_index_dict = entry_to_index_dict
self.ff_var_create_question_pool_all = var_create_all_questions
self.var_create_multiple_question_pools_from_tax = var_create_multiple_question_pools_from_tax
self.ff_pool_entry = self.create_formelfrage_pool_entry.get()
self.taxonomy_collection_no_dublicates = []
self.pool_number_list = []
self.taxonomy_number_list = []
self.directory_number_list = []
self.oid_number_list_temp = []
self.oid_number_list = []
# "Normally" only one question pool is created
# If several question pools "split by taxonomy" are to be created, "self.number_of_pools"
# is set to the number of taxonomies
self.number_of_pools = 1
# If "question pools split by taxonomy" == 1:
if self.ff_var_create_multiple_question_pools_from_tax_check.get() == 1:
self.tax_entries_from_db_list = []
self.oid_entries_from_db_list = []
self.tax_and_oid_entries_from_db_list = []
self.tax_and_oid_entries_from_db_list_sorted = []
self.ids_with_same_tax_list = []
self.list_of_lists = []
# Connect to the database
conn = sqlite3.connect(self.database_formelfrage_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM %s" % self.ff_database_table)
ff_db_records = c.fetchall()
# Take all entries from the DB
if self.ff_var_create_question_pool_all == 1:
for ff_db_record in ff_db_records:
self.oid_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.tax_entries_from_db_list.append(ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']])
#self.oid_entries_from_db_list.pop(0)
#self.tax_entries_from_db_list.pop(0)
# Take the IDs from the input field
else:
self.ff_pool_entry_list = []
self.ff_pool_entry_list = self.ff_pool_entry.split(',')
for ff_db_record in ff_db_records:
if str(ff_db_record[len(ff_db_record) - 1]) in self.ff_pool_entry_list:
self.oid_entries_from_db_list.append(int(ff_db_record[len(ff_db_record) - 1]))
self.tax_entries_from_db_list.append(ff_db_record[self.ff_db_entry_to_index_dict['question_pool_tag']])
# Merge the lists
for i in range(len(self.oid_entries_from_db_list)):
self.tax_and_oid_entries_from_db_list.append([self.oid_entries_from_db_list[i], self.tax_entries_from_db_list[i]])
#print(self.oid_entries_from_db_list)
#print(self.tax_entries_from_db_list)
# The list must be sorted (alphabetically); itemgetter(1) takes the value from field 1 of the inner lists
# Example format of "self.tax_and_oid_entries_from_db_list" = [[2, '1'], [3, '2'], [4, '2'], [5, '3'], [6, '3']]
# here: '1', '2', '2', '3', '3'
self.tax_and_oid_entries_from_db_list_sorted = sorted(self.tax_and_oid_entries_from_db_list, key=itemgetter(1))
# Taxonomies of the questions (without duplicate entries)
self.taxonomy_collection_no_dublicates = list(dict.fromkeys(self.tax_entries_from_db_list))
new_list = []
# Read the 1st field (tax_id)
# Example format of "self.tax_and_oid_entries_from_db_list" = [[2, '1'], [3, '2'], [4, '2'], [5, '3'], [6, '3']]
# The taxonomies are declared here as '1', '2', '3'
# tax_id in the example: self.id_temp = '1'
self.id_temp = self.tax_and_oid_entries_from_db_list_sorted[0][1]
#new_list.append(self.tax_and_oid_entries_from_db_list[0][0])
for k in range(len(self.tax_and_oid_entries_from_db_list_sorted)):
if self.tax_and_oid_entries_from_db_list_sorted[k][1] == self.id_temp:
new_list.append(self.tax_and_oid_entries_from_db_list_sorted[k][0])
else:
self.list_of_lists.append(new_list)
new_list = []
new_list.append(self.tax_and_oid_entries_from_db_list_sorted[k][0])
self.id_temp = self.tax_and_oid_entries_from_db_list_sorted[k][1]
# new_list is only added to list_of_lists when the taxonomies differ
# Since the last taxonomies can be identical, the append has to be executed manually once more
self.list_of_lists.append(new_list)
self.number_of_pools = len(self.list_of_lists)
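# Example: for the sorted list [[2, '1'], [3, '2'], [4, '2'], [5, '3'], [6, '3']] from above,
# list_of_lists becomes [[2], [3, 4], [5, 6]] and number_of_pools = 3 (one pool per taxonomy)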
# The __init__ is executed when the "ILIAS-Fragenpool erstellen" button is pressed
# XML files and directories with an ascending ID are created
for pool_number in range(self.number_of_pools):
if self.var_create_multiple_question_pools_from_tax == 1:
self.string_entry = ','.join(map(str, self.list_of_lists[pool_number]))
self.ff_pool_entry = self.string_entry
self.ilias_id_pool_img_dir, self.ilias_id_pool_qpl_dir, self.pool_qti_file_path_output, self.pool_qpl_file_path_output, self.ilias_id_pool_qti_xml, self.file_max_id, self.taxonomy_file_question_pool = test_generator_modul_ilias_test_struktur.Create_ILIAS_Pool.__init__(
self, self.project_root_path, self.formelfrage_files_path_pool_output,
self.formelfrage_files_path_pool_output, self.formelfrage_pool_qti_file_path_template,
self.ff_ilias_test_title_entry.get(), self.ff_pool_entry, self.ff_question_type_name,
self.database_formelfrage_path, self.ff_database_table, self.ff_db_entry_to_index_dict,
self.ff_var_create_question_pool_all)
# Determines the path to the specific Formelfrage pool directory that was created
# e.g.: ...ILIAS-Formelfrage\ff_ilias_pool_abgabe\1596569820__0__qpl_1115713
self.ff_specific_pool_dir_path = os.path.join(self.formelfrage_files_path_pool_output, self.ilias_id_pool_qpl_dir)
# Collect variables for the console output
self.pool_number_list.append(pool_number)
self.directory_number_list.append(self.ilias_id_pool_qpl_dir)
self.oid_number_list_temp = self.ff_pool_entry.split(',')
self.oid_number_list.append(len(self.oid_number_list_temp))
# Create the Formelfrage questions
Create_Formelfrage_Questions.__init__(self,
self.ff_db_entry_to_index_dict,
self.ff_pool_entry,
"question_pool",
self.ilias_id_pool_img_dir,
self.ilias_id_pool_qpl_dir,
self.formelfrage_pool_qti_file_path_template,
self.pool_qti_file_path_output,
self.pool_qpl_file_path_output,
self.ilias_id_pool_qti_xml,
self.file_max_id,
self.taxonomy_file_question_pool)
# In the generated XML file "&" must be replaced by "&amp;"
test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file(self, self.pool_qti_file_path_output)
# Taxonomies are not used for the created pools
if self.ff_var_remove_pool_tags_for_tax_check.get() == 0:
# Here the taxonomy of the question pool is edited / configured
test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool(self,
self.ff_pool_entry,
self.ff_var_create_question_pool_all,
self.database_formelfrage_path,
"formelfrage_table",
self.ff_entry_to_index_dict,
self.taxonomy_file_question_pool,
self.pool_qti_file_path_output,
pool_number,
self.number_of_pools
)
# Completed question pool has been stored
print("______________________________________________________________________")
print("FRAGENPOOL ABGESCHLOSSEN")
print(" ---> Erstellt im Ordner \"" + "ff_ilias_pool_abgabe\\" + self.ilias_id_pool_qpl_dir)
self.zip_output_path = os.path.join(self.ff_specific_pool_dir_path, self.ilias_id_pool_qpl_dir)
self.zip_output_path2 = os.path.join(self.ff_specific_pool_dir_path, "test")
# Create the zip archive
def zip(src, dst):
zf = zipfile.ZipFile("%s.zip" % (dst), "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src)-len(self.ilias_id_pool_qpl_dir):]
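# arcname keeps the pool directory (self.ilias_id_pool_qpl_dir) as the top-level folder
# inside the zip instead of the full absolute path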
#print('zipping %s as %s' % (os.path.join(dirname, filename), arcname))
zf.write(absname, arcname)
zf.close()
zip(os.path.join(self.formelfrage_files_path_pool_output, self.ilias_id_pool_qpl_dir), os.path.join(self.formelfrage_files_path_pool_output, self.ilias_id_pool_qpl_dir))
string_collection = ""
if self.var_create_multiple_question_pools_from_tax == 1:
for i in range(len(self.pool_number_list)):
string_collection += "Fragenpool: " + str(self.pool_number_list[i]+1) + "/" + str(len(self.pool_number_list)) + "\n" + \
"Abgelegt im Ordner: " + str(self.directory_number_list[i]) + "\n" + \
"Taxonomie: " + str(self.taxonomy_collection_no_dublicates[i]) + "\n" + \
"Anzahl der Fragen: " + str(self.oid_number_list[i]) + " \n" + \
"_____________________________________________________________" + "\n" + \
"\n"
self.excel_id_list =[]
self.excel_temp_list = []
for t in range(len(self.ff_collection_of_question_titles)):
self.excel_temp_list = self.ff_collection_of_question_titles[t].split(' ')
self.excel_id_list.append(self.excel_temp_list[0])
self.id_dublicates_counter = Counter(self.excel_id_list)
self.id_dublicates_results = [k for k, v in self.id_dublicates_counter.items() if v > 1]
self.titels_dublicates_counter = Counter(self.ff_collection_of_question_titles)
self.titles_dublicates_results = [k for k, v in self.titels_dublicates_counter.items() if v > 1]
dublicate_id_warning = ""
dublicate_title_warning = ""
if len(self.id_dublicates_results) >= 1 or len(self.titles_dublicates_results) >= 1:
dublicate_id_warning = "ACHTUNG!\nErstellter Fragenpool enthält doppelte Fragen:" + "\n"
if len(self.id_dublicates_results) >= 1:
dublicate_id_warning += "\n\n" + "Fragen-ID" + "\n"
for i in range(len(self.id_dublicates_results)):
dublicate_id_warning += "---> " + str(self.id_dublicates_results[i]) + "\n"
if len(self.titles_dublicates_results) >= 1:
dublicate_title_warning = "Fragen-Titel" + "\n"
for i in range(len(self.titles_dublicates_results)):
dublicate_title_warning += "---> " + str(self.titles_dublicates_results[i]) + "\n"
messagebox.showinfo("Fragenpool erstellen", "Fragenpool wurde erstellt!" + "\n\n" + dublicate_id_warning + "\n\n" + dublicate_title_warning + "\n\n"+ string_collection)
'ff_ilias_test_abgabe',\n '1604407426__0__tst_2040314', '1604407426__0__tst_2040314.xml')\n", (4410, 4531), False, 'import os\n'), ((4593, 4703), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_test_abgabe"""', '"""1604407426__0__tst_2040314"""', '"""objects"""'], {}), "(self.formelfrage_files_path, 'ff_ilias_test_abgabe',\n '1604407426__0__tst_2040314', 'objects')\n", (4605, 4703), False, 'import os\n'), ((4817, 4934), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_pool_qti_und_qpl_dateien_vorlage"""', '"""ilias_pool_vorlage__qti__.xml"""'], {}), "(self.formelfrage_files_path,\n 'ff_pool_qti_und_qpl_dateien_vorlage', 'ilias_pool_vorlage__qti__.xml')\n", (4829, 4934), False, 'import os\n'), ((5005, 5122), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_pool_qti_und_qpl_dateien_vorlage"""', '"""ilias_pool_vorlage__qpl__.xml"""'], {}), "(self.formelfrage_files_path,\n 'ff_pool_qti_und_qpl_dateien_vorlage', 'ilias_pool_vorlage__qpl__.xml')\n", (5017, 5122), False, 'import os\n'), ((80637, 80684), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (80652, 80684), False, 'import sqlite3\n'), ((145697, 145833), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.reallocate_text', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (145781, 145833), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((176635, 176682), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (176650, 176682), False, 'import sqlite3\n'), ((226376, 226399), 'xml.etree.ElementTree.parse', 'ET.parse', (['self.qpl_file'], {}), '(self.qpl_file)\n', (226384, 226399), True, 'import xml.etree.ElementTree as ET\n'), ((243283, 243330), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (243298, 243330), False, 'import sqlite3\n'), ((249285, 249371), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path_pool_output', 'self.ilias_id_pool_qpl_dir'], {}), '(self.formelfrage_files_path_pool_output, self.\n ilias_id_pool_qpl_dir)\n', (249297, 249371), False, 'import os\n'), ((250790, 250923), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.replace_character_in_xml_file', (['self', 'self.pool_qti_file_path_output'], {}), '(\n self, self.pool_qti_file_path_output)\n', (250880, 250923), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((252879, 252951), 'os.path.join', 'os.path.join', (['self.ff_specific_pool_dir_path', 'self.ilias_id_pool_qpl_dir'], {}), '(self.ff_specific_pool_dir_path, self.ilias_id_pool_qpl_dir)\n', (252891, 252951), False, 'import os\n'), ((252989, 253041), 'os.path.join', 'os.path.join', (['self.ff_specific_pool_dir_path', '"""test"""'], {}), "(self.ff_specific_pool_dir_path, 'test')\n", (253001, 253041), False, 'import os\n'), ((21389, 21438), 'sqlite3.connect', 'sqlite3.connect', (['self.test_settings_database_path'], {}), '(self.test_settings_database_path)\n', (21404, 21438), False, 'import sqlite3\n'), ((21863, 21975), 
'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.ff_frame_create_formelfrage_test'], {'value': 'self.ff_profile_for_test_settings_value', 'width': '(8)'}), '(self.ff_frame_create_formelfrage_test, value=self.\n ff_profile_for_test_settings_value, width=8)\n', (21875, 21975), False, 'from tkinter import ttk\n'), ((79437, 79604), 'tkinter.messagebox.askquestion', 'messagebox.askquestion', (['"""Wertebereich für DB Einträge berechnen"""', '"""ACHTUNG!\n\nEs werden für ALLE DB Einträge die Min/Max-Werte überschrieben!\n\nFortfahren?"""'], {}), '(\'Wertebereich für DB Einträge berechnen\',\n """ACHTUNG!\n\nEs werden für ALLE DB Einträge die Min/Max-Werte überschrieben!\n\nFortfahren?"""\n )\n', (79459, 79604), False, 'from tkinter import messagebox\n'), ((211549, 211829), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', (['self', 'self.ff_description_img_name_1', 'self.ff_description_img_data_1', 'id_nr', 'self.ff_question_type_test_or_pool', 'self.formelfrage_test_img_file_path', 'self.formelfrage_pool_img_file_path'], {}), '(\n self, self.ff_description_img_name_1, self.ff_description_img_data_1,\n id_nr, self.ff_question_type_test_or_pool, self.\n formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)\n', (211628, 211829), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((211833, 212113), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', (['self', 'self.ff_description_img_name_2', 'self.ff_description_img_data_2', 'id_nr', 'self.ff_question_type_test_or_pool', 'self.formelfrage_test_img_file_path', 'self.formelfrage_pool_img_file_path'], {}), '(\n self, self.ff_description_img_name_2, self.ff_description_img_data_2,\n id_nr, self.ff_question_type_test_or_pool, self.\n formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)\n', (211912, 212113), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((212117, 212397), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_dir_for_images', (['self', 'self.ff_description_img_name_3', 'self.ff_description_img_data_3', 'id_nr', 'self.ff_question_type_test_or_pool', 'self.formelfrage_test_img_file_path', 'self.formelfrage_pool_img_file_path'], {}), '(\n self, self.ff_description_img_name_3, self.ff_description_img_data_3,\n id_nr, self.ff_question_type_test_or_pool, self.\n formelfrage_test_img_file_path, self.formelfrage_pool_img_file_path)\n', (212196, 212397), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((214325, 214358), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""qticomment"""'], {}), "(item, 'qticomment')\n", (214338, 214358), True, 'import xml.etree.ElementTree as ET\n'), ((214387, 214418), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""duration"""'], {}), "(item, 'duration')\n", (214400, 214418), True, 'import xml.etree.ElementTree as ET\n'), ((214451, 214486), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""itemmetadata"""'], {}), "(item, 'itemmetadata')\n", (214464, 214486), True, 'import xml.etree.ElementTree as ET\n'), ((214519, 214554), 
'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['item', '"""presentation"""'], {}), "(item, 'presentation')\n", (214532, 214554), True, 'import xml.etree.ElementTree as ET\n'), ((214581, 214616), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['presentation', '"""flow"""'], {}), "(presentation, 'flow')\n", (214594, 214616), True, 'import xml.etree.ElementTree as ET\n'), ((214666, 214697), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['flow', '"""material"""'], {}), "(flow, 'material')\n", (214679, 214697), True, 'import xml.etree.ElementTree as ET\n'), ((214746, 214801), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['question_description_material', '"""mattext"""'], {}), "(question_description_material, 'mattext')\n", (214759, 214801), True, 'import xml.etree.ElementTree as ET\n'), ((214833, 214875), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['itemmetadata', '"""qtimetadata"""'], {}), "(itemmetadata, 'qtimetadata')\n", (214846, 214875), True, 'import xml.etree.ElementTree as ET\n'), ((215634, 215680), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (215647, 215680), True, 'import xml.etree.ElementTree as ET\n'), ((215711, 215756), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (215724, 215756), True, 'import xml.etree.ElementTree as ET\n'), ((215838, 215883), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (215851, 215883), True, 'import xml.etree.ElementTree as ET\n'), ((216078, 216124), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (216091, 216124), True, 'import xml.etree.ElementTree as ET\n'), ((216155, 216200), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (216168, 216200), True, 'import xml.etree.ElementTree as ET\n'), ((216281, 216326), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (216294, 216326), True, 'import xml.etree.ElementTree as ET\n'), ((216516, 216562), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (216529, 216562), True, 'import xml.etree.ElementTree as ET\n'), ((216593, 216638), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (216606, 216638), True, 'import xml.etree.ElementTree as ET\n'), ((216713, 216758), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (216726, 216758), True, 'import xml.etree.ElementTree as ET\n'), ((216951, 216997), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (216964, 216997), True, 'import xml.etree.ElementTree as ET\n'), ((217028, 217073), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (217041, 217073), True, 'import xml.etree.ElementTree as ET\n'), ((217148, 217193), 
'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (217161, 217193), True, 'import xml.etree.ElementTree as ET\n'), ((217846, 218262), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.add_picture_to_description_main', (['self', 'self.ff_description_img_name_1', 'self.ff_description_img_data_1', 'self.ff_description_img_name_2', 'self.ff_description_img_data_2', 'self.ff_description_img_name_3', 'self.ff_description_img_data_3', 'self.ff_question_description_main', 'question_description_mattext', 'question_description_material', 'id_nr'], {}), '(\n self, self.ff_description_img_name_1, self.ff_description_img_data_1,\n self.ff_description_img_name_2, self.ff_description_img_data_2, self.\n ff_description_img_name_3, self.ff_description_img_data_3, self.\n ff_question_description_main, question_description_mattext,\n question_description_material, id_nr)\n', (217938, 218262), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((224219, 224265), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (224232, 224265), True, 'import xml.etree.ElementTree as ET\n'), ((224296, 224341), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (224309, 224341), True, 'import xml.etree.ElementTree as ET\n'), ((224435, 224480), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (224448, 224480), True, 'import xml.etree.ElementTree as ET\n'), ((224664, 224710), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadata', '"""qtimetadatafield"""'], {}), "(qtimetadata, 'qtimetadatafield')\n", (224677, 224710), True, 'import xml.etree.ElementTree as ET\n'), ((224741, 224786), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldlabel"""'], {}), "(qtimetadatafield, 'fieldlabel')\n", (224754, 224786), True, 'import xml.etree.ElementTree as ET\n'), ((224865, 224910), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['qtimetadatafield', '"""fieldentry"""'], {}), "(qtimetadatafield, 'fieldentry')\n", (224878, 224910), True, 'import xml.etree.ElementTree as ET\n'), ((226225, 226351), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path', '"""ff_ilias_pool_abgabe"""', 'self.ilias_id_pool_qpl_dir', 'self.ilias_id_pool_qti_xml'], {}), "(self.formelfrage_files_path, 'ff_ilias_pool_abgabe', self.\n ilias_id_pool_qpl_dir, self.ilias_id_pool_qti_xml)\n", (226237, 226351), False, 'import os\n'), ((251163, 251516), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool', 'test_generator_modul_taxonomie_und_textformatierung.Taxonomie.create_taxonomy_for_pool', (['self', 'self.ff_pool_entry', 'self.ff_var_create_question_pool_all', 'self.database_formelfrage_path', '"""formelfrage_table"""', 'self.ff_entry_to_index_dict', 'self.taxonomy_file_question_pool', 'self.pool_qti_file_path_output', 'pool_number', 'self.number_of_pools'], {}), "(\n self, self.ff_pool_entry, self.ff_var_create_question_pool_all, self.\n database_formelfrage_path, 'formelfrage_table', self.\n ff_entry_to_index_dict, 
self.taxonomy_file_question_pool, self.\n pool_qti_file_path_output, pool_number, self.number_of_pools)\n", (251249, 251516), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((253134, 253192), 'zipfile.ZipFile', 'zipfile.ZipFile', (["('%s.zip' % dst)", '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "('%s.zip' % dst, 'w', zipfile.ZIP_DEFLATED)\n", (253149, 253192), False, 'import zipfile\n'), ((253222, 253242), 'os.path.abspath', 'os.path.abspath', (['src'], {}), '(src)\n', (253237, 253242), False, 'import os\n'), ((253291, 253303), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (253298, 253303), False, 'import os\n'), ((253719, 253805), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path_pool_output', 'self.ilias_id_pool_qpl_dir'], {}), '(self.formelfrage_files_path_pool_output, self.\n ilias_id_pool_qpl_dir)\n', (253731, 253805), False, 'import os\n'), ((253802, 253888), 'os.path.join', 'os.path.join', (['self.formelfrage_files_path_pool_output', 'self.ilias_id_pool_qpl_dir'], {}), '(self.formelfrage_files_path_pool_output, self.\n ilias_id_pool_qpl_dir)\n', (253814, 253888), False, 'import os\n'), ((10137, 10286), 'Test_Generator_Module.test_generator_modul_test_einstellungen.Test_Einstellungen_GUI.__init__', 'test_generator_modul_test_einstellungen.Test_Einstellungen_GUI.__init__', (['self', 'self.project_root_path', 'self.formelfrage_test_qti_file_path_output'], {}), '(self,\n self.project_root_path, self.formelfrage_test_qti_file_path_output)\n', (10208, 10286), False, 'from Test_Generator_Module import test_generator_modul_test_einstellungen\n'), ((15174, 15250), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__', 'test_generator_modul_taxonomie_und_textformatierung.Taxonomie.__init__', (['self'], {}), '(self)\n', (15244, 15250), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((25754, 25879), 'Test_Generator_Module.test_generator_modul_datenbanken_anzeigen.MainGUI.__init__', 'test_generator_modul_datenbanken_anzeigen.MainGUI.__init__', (['self', 'self.database_formelfrage_path', 'self.ff_database_table'], {}), '(self, self.\n database_formelfrage_path, self.ff_database_table)\n', (25812, 25879), False, 'from Test_Generator_Module import test_generator_modul_datenbanken_anzeigen\n'), ((29070, 29252), 'Test_Generator_Module.test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db', 'test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_import_to_db', (['self', 'self.ff_question_type_name', 'self.ff_db_entry_to_index_dict', 'self.formelfrage_tab'], {}), '(\n self, self.ff_question_type_name, self.ff_db_entry_to_index_dict, self.\n formelfrage_tab)\n', (29154, 29252), False, 'from Test_Generator_Module import test_generator_modul_datenbanken_erstellen\n'), ((29518, 29811), 'Test_Generator_Module.test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx', 'test_generator_modul_datenbanken_erstellen.Import_Export_Database.excel_export_to_xlsx', (['self', 'self.project_root_path', 'self.ff_db_entry_to_index_dict', 'self.database_formelfrage_path', 'self.ff_database', 'self.ff_database_table', 'self.ff_xlsx_workbook_name', 'self.ff_xlsx_worksheet_name'], {}), '(\n self, self.project_root_path, self.ff_db_entry_to_index_dict, self.\n database_formelfrage_path, self.ff_database, self.ff_database_table,\n self.ff_xlsx_workbook_name, self.ff_xlsx_worksheet_name)\n', 
(29604, 29811), False, 'from Test_Generator_Module import test_generator_modul_datenbanken_erstellen\n'), ((30073, 30186), 'Test_Generator_Module.test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__', 'test_generator_modul_ilias_import_test_datei.Import_ILIAS_Datei_in_DB.__init__', (['self', 'self.project_root_path'], {}), '(\n self, self.project_root_path)\n', (30151, 30186), False, 'from Test_Generator_Module import test_generator_modul_ilias_import_test_datei\n'), ((30529, 30660), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_latex', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (30608, 30660), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((30867, 30996), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sub', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (30944, 30996), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((31216, 31345), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_sup', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (31293, 31345), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((31547, 31679), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.text_italic', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (31627, 31679), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((31890, 32037), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_1', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (31985, 32037), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((32272, 32419), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_2', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (32367, 32419), False, 'from Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((32641, 32788), 'Test_Generator_Module.test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3', 'test_generator_modul_taxonomie_und_textformatierung.Textformatierung.set_position_for_picture_3', (['self', 'self.ff_question_description_main_entry'], {}), '(\n self, self.ff_question_description_main_entry)\n', (32736, 32788), False, 'from 
Test_Generator_Module import test_generator_modul_taxonomie_und_textformatierung\n'), ((79788, 79835), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (79803, 79835), False, 'import sqlite3\n'), ((110028, 110183), 'numpy.linspace', 'np.linspace', (['self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p]]]', 'self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]]', 'N'], {}), '(self.lower_list[self.list_index_dict[self.set_nr_of_var_index[p\n ]]], self.upper_list[self.list_index_dict[self.set_nr_of_var_index[p]]], N)\n', (110039, 110183), True, 'import numpy as np\n'), ((110215, 110240), 'pandas.core.reshape.util.cartesian_product', 'cartesian_product', (['values'], {}), '(values)\n', (110232, 110240), False, 'from pandas.core.reshape.util import cartesian_product\n'), ((146896, 146964), 'os.path.join', 'os.path.join', (['self.project_root_path', 'self.ff_description_img_path_1'], {}), '(self.project_root_path, self.ff_description_img_path_1)\n', (146908, 146964), False, 'import os\n'), ((212648, 212677), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""questestinterop"""'], {}), "('questestinterop')\n", (212658, 212677), True, 'import xml.etree.ElementTree as ET\n'), ((212712, 212756), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['questestinterop', '"""assessment"""'], {}), "(questestinterop, 'assessment')\n", (212725, 212756), True, 'import xml.etree.ElementTree as ET\n'), ((212788, 212824), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['assessment', '"""section"""'], {}), "(assessment, 'section')\n", (212801, 212824), True, 'import xml.etree.ElementTree as ET\n'), ((212853, 212883), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['section', '"""item"""'], {}), "(section, 'item')\n", (212866, 212883), True, 'import xml.etree.ElementTree as ET\n'), ((213094, 213123), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""questestinterop"""'], {}), "('questestinterop')\n", (213104, 213123), True, 'import xml.etree.ElementTree as ET\n'), ((213152, 213190), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['questestinterop', '"""item"""'], {}), "(questestinterop, 'item')\n", (213165, 213190), True, 'import xml.etree.ElementTree as ET\n'), ((213274, 213507), 'Test_Generator_Module.test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question', 'test_generator_modul_ilias_test_struktur.Additional_Funtions.set_taxonomy_for_question', (['self', 'id_nr', 'self.number_of_entrys', 'item', 'self.formelfrage_pool_qpl_file_path_template', 'self.formelfrage_pool_qpl_file_path_output'], {}), '(\n self, id_nr, self.number_of_entrys, item, self.\n formelfrage_pool_qpl_file_path_template, self.\n formelfrage_pool_qpl_file_path_output)\n', (213360, 213507), False, 'from Test_Generator_Module import test_generator_modul_ilias_test_struktur\n'), ((245297, 245310), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (245307, 245310), False, 'from operator import itemgetter\n'), ((100262, 100274), 're.escape', 're.escape', (['s'], {}), '(s)\n', (100271, 100274), False, 'import re\n'), ((100614, 100626), 're.escape', 're.escape', (['s'], {}), '(s)\n', (100623, 100626), False, 'import re\n'), ((96046, 96093), 'sqlite3.connect', 'sqlite3.connect', (['self.database_formelfrage_path'], {}), '(self.database_formelfrage_path)\n', (96061, 96093), False, 'import sqlite3\n'), ((253400, 253431), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), 
'(dirname, filename)\n', (253412, 253431), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 01:11:07 2020
@author: liorr
"""
import numpy as np
import warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import pickle
from typing import List
import tensorflow as tf
class nn_Model:
def __init__(self,controller):
self.controller = controller
    def cab_driver_distance(self, vectors):
        # similarity = exp(-||v1 - v2||_1): exponential of the negative
        # Manhattan ("cab driver") distance between the two branch outputs
        return tf.keras.backend.exp(-tf.keras.backend.sum(
            tf.keras.backend.abs(vectors[0] - vectors[-1]), axis=1, keepdims=True))
def output_shape(self,shapes):
shape1, shape2 = shapes
return (shape1[0],1)
@property
def lstm_net_builder(self):
"""
build the lstm component of the architecture
Returns
-------
lstm network.
"""
        bi, lstm_hidden_units, lstm_dropout_rate = (self.config.get("bidirectional"),
            self.config.get("lstm_hidden_units"), self.config.get("lstm_dropout_rate"))
        if bi:
            lstm_layers = [tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(h_u, return_sequences=True),
                                                          merge_mode='concat') for h_u in lstm_hidden_units]
        else:
            lstm_layers = [tf.keras.layers.LSTM(h_u, return_sequences=True) for h_u in lstm_hidden_units]
        if lstm_dropout_rate:
            lstm_layers.append(tf.keras.layers.Dropout(lstm_dropout_rate))
        return tf.keras.Sequential(lstm_layers, name="Siamese-lstm")
@property
def cnn_net_builder(self):
"""
method to build the one channeled cnn component
Returns
-------
regular cnn net
"""
return tf.keras.Sequential([tf.keras.layers.Conv1D(filters=self.config.get("regular_cnn").get("filters")
,kernel_size=self.config.get("regular_cnn").get("kernel") ,activation='relu',padding="same"),
tf.keras.layers.Dropout(self.config.get("regular_cnn").get("dropout")),
],name ="cnn-Siamese")
@property
def attention_builder(self):
"""
method to build the attention mechanisem of the architecture
Returns
-------
attention.
"""
return tf.keras.Sequential([
tf.keras.layersDense(1, activation='tanh'),
tf.keras.layers.Flatten(),
tf.keras.layers.Activation('softmax'),
tf.keras.layers.RepeatVector(self.comfig.get("lstm_hidden_units")[-1]*2),
tf.keras.layers.Permute([2, 1])
],name="Attention")
@property
def multi_cnn_builder(self)->list:
"""
        method to build the multi-channel cnn
Returns
-------
channels : list
list of multi-cnn models.
"""
channels = []
for i,size in enumerate(self.config.get("multi_cnn").get("kernels")):
channels.append(tf.keras.Sequential([tf.keras.layers.Conv1D(filters=self.config.get("multi_cnn").get("filters"),
kernel_size=size,
activation='relu'),
tf.keras.layers.Dropout(self.config.get("multi_cnn").get("dropout")),
tf.keras.layers.AveragePooling1D(),
tf.keras.layers.Flatten()],name ="channel_{}".format(i)))
return channels
@property
def load_nn_model(self):
"""
load pretrained model(s)
Returns
-------
None.
"""
self.score_model = tf.keras.models.load_model(filepath=self.score_path)
self.penalty_model =tf.keras.models.load_model(filepath=self.penalty_path) if self.penalty_path else None
def train_model(self,config:dict,data,build:bool=True)->dict:
"""
train model
Parameters
----------
        config : dict
            configuration dictionary
        data : dict
            nested dictionary of train/dev numpy arrays to train on.
        build : bool
            the desired functionality (train or re-train a model);
            True iff a new model should be built and trained.
Returns
-------
results.
"""
self.config = config
if build:
self.shape = data["bad"]["train"]["X1"].shape
self.score_model = self.build_model
if self.config.get("penalty"):
self.penalty_model = self.build_model
else:
self.score_path, self.penalty_path = config.get("score_path"),config.get("penalty_path")
if not self.score_path:
self.controller.progress_updater("you must provide path to the score (reward) network")
return
self.load_nn_model
        score_hist = self.score_model.fit(
            [data["bad"]["train"]["X1"], data["bad"]["train"]["X2"]], data["bad"]["train"]["Y"],
            batch_size=self.config.get("batch_size", 64), epochs=self.config.get("epochs", 25),
            validation_data=([data["bad"]["dev"]["X1"], data["bad"]["dev"]["X2"]], data["bad"]["dev"]["Y"]),
            callbacks=[SaveBestModel(save_format=None, filepath=self.config.get("model_path", "score_net.h5"),
                                     monitor="val_loss", patient=self.config.get("patient"))])
score_hist = score_hist.history
if self.config.get("penalty"):
            penalty_hist = self.penalty_model.fit(
                [data["penelty"]["train"]["X1"], data["penelty"]["train"]["X2"]], data["penelty"]["train"]["Y"],
                batch_size=self.config.get("batch_size", 64), epochs=self.config.get("epochs", 25),
                validation_data=([data["penelty"]["dev"]["X1"], data["penelty"]["dev"]["X2"]], data["penelty"]["dev"]["Y"]),
                # note: by default this saves to the same path as the score network
                callbacks=[SaveBestModel(save_format=None, filepath=self.config.get("model_path", "score_net.h5"),
                                         monitor="val_loss", patient=self.config.get("patient"))])
score_hist["pen"] = penalty_hist.history
return score_hist
@property
def build_model(self):
"""
build and compile model
Returns
-------
compiled model
"""
        use_lstm, use_multi_cnn, use_regular_cnn, use_attention = (self.config.get("lstm_hidden_units"),
            self.config.get("multi_cnn"), self.config.get("regular_cnn"), self.config.get("use_attention"))
if use_lstm and use_multi_cnn:
self.controller.progress_updater("you cannot have multi cnn and lstm in the same model")
return
if use_attention and use_multi_cnn:
self.controller.progress_updater("you cannot have multi cnn and attention in the same model")
return
if not use_lstm and use_regular_cnn:
self.controller.progress_updater("you cannot have regular cnn without lstm")
return
left_input , right_input = tf.keras.layers.Input(self.shape), tf.keras.layers.Input(self.shape)
if use_lstm:
lstm_net = self.lstm_net_builder
if use_regular_cnn:
cnn_net = self.cnn_net_builder
combined_l = tf.keras.layers.concatenate([left_input,cnn_net(left_input)],name="combined_left")
combined_r = tf.keras.layers.concatenate([right_input,cnn_net(right_input)],name="combined_right")
res_l,res_r = lstm_net(combined_l), lstm_net(combined_r)
else:
res_l,res_r = lstm_net(left_input),lstm_net(right_input)
if use_attention:
attention = self.attention_builder
            res_r = tf.keras.layers.Lambda(lambda x: tf.keras.backend.sum(x, axis=1))(tf.keras.layers.multiply([attention(res_r), res_r]))
            res_l = tf.keras.layers.Lambda(lambda x: tf.keras.backend.sum(x, axis=1))(tf.keras.layers.multiply([attention(res_l), res_l]))
if use_multi_cnn:
channels = self.multi_cnn_builder
            res_r, res_l = (tf.keras.layers.concatenate([x(right_input) for x in channels]),
                            tf.keras.layers.concatenate([x(left_input) for x in channels]))
similarity=tf.keras.layers.Lambda(function=self.cab_driver_distance,output_shape=self.output_shape)([res_r, res_l])
similarity = tf.keras.layers.Dense(1)(similarity)
model = tf.keras.models.Model([right_input,left_input],similarity)
        model.compile(loss=self.config.get("loss", "mse"), optimizer='adam')
return model
def eval_summary(self,data:list,
score_path:str="models/second_expiriment.h5",
penalty_path:str="models/penalty_expiriment.h5")->float:
"""
method to evaluate a given summary
Parameters
----------
        data : list
            the two network inputs [X1, X2] to score.
        score_path : str, optional
            path for the score network. The default is "models/second_expiriment.h5".
        penalty_path : str, optional
            path for the penalty network. The default is "models/penalty_expiriment.h5".
Returns
-------
float
the score.
"""
self.score_path, self.penalty_path = score_path, penalty_path
self.load_nn_model
self.score = self.score_model.predict(data).flatten()
if self.penalty_model:
self.score -= self.penalty_model.predict(data).flatten()
return self.score
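
    # ------------------------------------------------------------------
    # Nested data layout expected by train_model (inferred from the fit
    # calls above; the "bad"/"penelty" keys are the ones the code already
    # uses). This sketch is documentation only and is not executed:
    #
    #   data = {"bad":     {"train": {"X1": ..., "X2": ..., "Y": ...},
    #                       "dev":   {"X1": ..., "X2": ..., "Y": ...}},
    #           "penelty": {"train": {...}, "dev": {...}}}  # only if config["penalty"]
    # ------------------------------------------------------------------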
class SaveBestModel(tf.keras.callbacks.Callback):
"""
monitor to save the best model according to val_loss or val_acc
Parameters
----------
    filepath : str
        the desired model path or name.
    monitor : str, optional
        metric to monitor; can be "val_loss" or "val_acc". The default is 'val_loss'.
    save_format : str, optional
        the desired format for the saved model. The default is 'tf'.
    patient : int, optional
        number of epochs without improvement after which training is stopped (optional early stopping).
Returns
-------
None.
"""
    def __init__(self, filepath:str, monitor:str='val_loss', save_format:str='tf', patient:int=None):
        super(SaveBestModel, self).__init__()
        if monitor not in {'val_loss', 'val_acc'}:
            warnings.warn("""ModelCheckpoint monitor must be "val_loss" or
                          "val_acc" but got: {} so monitoring val_loss""".
                          format(monitor), RuntimeWarning)
            monitor = 'val_loss'
        self.monitor = monitor
        self.filepath = filepath
        self.op, self.best = (np.less, np.inf) if self.monitor == 'val_loss' else (np.greater, -np.inf)
        self.num_epochs = 0  # epochs since the last improvement/save
        self.save_format = save_format
        self.patient = patient
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('{} is not available, skipping'.format(self.monitor), RuntimeWarning)
        elif self.op(current, self.best):
            print('\nEpoch {}: {} improved from {} to {}, saving the model'.format(epoch + 1, self.monitor, self.best, current))
            self.best = current
            self.num_epochs = 0
            self.model.save(self.filepath, overwrite=True, save_format=self.save_format)
        else:
            self.num_epochs += 1
            print("\nEpoch {}: {} did not improve from {}, this is the {} epoch without any improvement.".format(epoch + 1, self.monitor, self.best, self.num_epochs))
            if self.patient and self.patient == self.num_epochs:
                self.model.stop_training = True
                print('\nstopping the training, no improvement for {} epochs'.format(self.patient))
def on_train_end(self, logs=None):
print('this is the end my only friend the end')
class Terminate(tf.keras.callbacks.Callback):
"""Callback that terminates training when:
1.NaN loss is encountered.
2.val_loss <=0
3.val_acc >=0.99
"""
    def on_epoch_end(self, epoch, logs=None):
        def halt(msg):
            print(msg)
            self.model.stop_training = True
        logs = logs or {}
        loss, val_acc, val_loss = logs.get('loss'), logs.get('val_acc'), logs.get('val_loss')
        if loss is not None and (np.isnan(loss) or np.isinf(loss)):
            halt('Epoch {}: Invalid loss, terminating training'.format(epoch))
        if val_loss is not None and (val_loss <= 0 or np.isnan(val_loss)):
            halt('val_loss is at minimum, terminating training')
        if val_acc is not None and val_acc >= 0.99:
            halt('val_acc is at maximum, terminating training')
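
# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original module):
# it shows the config keys read by lstm_net_builder/build_model and how the
# callbacks above would be wired into fit(). The controller stand-in, the toy
# input shape and the file names are placeholders for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _PrintController:
        """Minimal stand-in for the GUI controller that nn_Model expects."""
        def progress_updater(self, msg):
            print(msg)

    _config = {"bidirectional": True,        # wrap the LSTM layers in Bidirectional
               "lstm_hidden_units": [16],    # one LSTM layer with 16 units
               "lstm_dropout_rate": 0.1,
               "regular_cnn": None,          # skip the single-channel CNN branch
               "multi_cnn": None,            # skip the multi-channel CNN branch
               "use_attention": False,
               "loss": "mse"}
    _model = nn_Model(_PrintController())
    _model.config = _config
    _model.shape = (10, 4)                   # (timesteps, features) of one input branch
    _net = _model.build_model                # property: returns a compiled Keras model
    _net.summary()

    # Training would attach the callbacks defined above, e.g.:
    _callbacks = [SaveBestModel(filepath="score_net_demo.h5", monitor="val_loss",
                                save_format="h5", patient=3),
                  Terminate()]
    # _net.fit([x1, x2], y, validation_data=([x1_dev, x2_dev], y_dev),
    #          callbacks=_callbacks)         # arrays not provided in this sketch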
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 16:12:22 2021
@author: aschauer
"""
import os
import logging
from collections import defaultdict
from matplotlib.transforms import Affine2D
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.lines as mlines
from matplotlib.legend_handler import HandlerTuple
import sqlalchemy as sa
from cv_results_database import get_cv_results_as_df
import plotting_utils as pu
import scoring_utils as scu
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
sns.set_color_codes(palette='deep')
sc = get_cv_results_as_df()
sc = sc.loc[sc['gap_type']=='LOWO']
sc = sc.rename(columns={'bias': 'BIAS'})
class HandlerTupleHorizontal(HandlerTuple):
"""
https://stackoverflow.com/a/59068881
"""
def __init__(self, **kwargs):
HandlerTuple.__init__(self, **kwargs)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
# How many lines are there.
numlines = len(orig_handle)
handler_map = legend.get_legend_handler_map()
# divide the horizontal space where the lines will go
# into equal parts based on the number of lines
# width_x = (width / numlines)
width_x = width
leglines = []
for i, handle in enumerate(orig_handle):
handler = legend.get_legend_handler(handler_map, handle)
legline = handler.create_artists(legend, handle,
xdescent,
height,
(2 * i + 1) * width_x,
2 * height,
fontsize, trans)
leglines.extend(legline)
return leglines
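
# Illustrative sketch (an assumption, not used by the original script): the
# handler above is registered through a legend's handler_map so that a tuple
# of artists shares a single legend entry. The helper is only defined here,
# never called.
def _handler_tuple_horizontal_demo():
    fig, ax = plt.subplots()
    line_a, = ax.plot([0, 1], [0, 1], 'b-')
    line_b, = ax.plot([0, 1], [1, 0], 'r--')
    ax.legend([(line_a, line_b)], ['both lines'],
              handler_map={tuple: HandlerTupleHorizontal()})
    return fig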
def scatterplot(methods_used,
metrics_used,
xaxs_value,
filename,
dpi=300,
no_legend=False,
legend_kw=None,
score_df=sc):
fig, axes = plt.subplots(len(metrics_used),len(methods_used),
figsize=[len(methods_used)*2,len(metrics_used)*2.1],
sharex=True, sharey=False)
x_labels = {'gap_stn_altitude': 'Altitude [m]',
'HSavg_true': 'HSavg [cm]',
'gap_winter': 'gap winter'}
ylabels = {'dHS1_abs_diff': 'dHS1\nabs. error [days]',
'HSmax_abs_diff': 'HSmax\nabs. error [cm]',
'HSavg_abs_diff': 'HSavg\nabs. error [cm]',
'RMSE': 'RMSE',
'MAAPE': 'MAAPE'}
#different markers and colors for different station grids:
markers={'full': "s",
'only_target_stations': "^"}
colors={'full': 'tab:orange',
'only_target_stations': "b"}
if legend_kw is None:
legend_kw = {}
default_legend_kwargs={
'bbox_to_anchor':[0.99, 0],
'loc': 1,
'ncol': 1,
'bbox_transform':fig.transFigure,
'fontsize': 11,
'frameon': False}
for key, value in default_legend_kwargs.items():
legend_kw.setdefault(key, value)
for station_grid in ['full','only_target_stations']:
color = colors[station_grid]
marker = markers[station_grid]
for row, metric in enumerate(metrics_used):
for column, method in enumerate(methods_used):
score_df.loc[(score_df['fill_method']==method) & (score_df['station_grid']==station_grid)].plot(xaxs_value,
metric,
kind='scatter',
ax=axes[row,column],
color=color,
marker=marker,
alpha=0.4,
label=station_grid)
axes[row,column].get_legend().remove()
# y_labels
if column == 0:
try:
axes[row,column].set_ylabel(ylabels[metric], fontsize=13)
except KeyError:
axes[row,column].set_ylabel(metric, fontsize=13)
else:
axes[row,column].set_ylabel(None)
axes[row,column].tick_params(labelleft=False)
# x_labels
if row == len(metrics_used)-1:
axes[row,column].set_xlabel(x_labels[xaxs_value],
fontsize=13)
# titles
if row == 0:
if legend_kw['bbox_to_anchor']=='below_titles':
axes[row,column].set_title(f'{pu.METHOD_NAMES[method]}\n', fontsize=13)
else:
axes[row,column].set_title(pu.METHOD_NAMES[method], fontsize=13)
# adapt y-lim for both station grids in every row
for row, metric in enumerate(metrics_used):
ygmin = 0.; ygmax = 0.
for ax in axes[row,:]:
            # Get global minimum and maximum y values across all axes
ymin, ymax = ax.get_ylim()
ygmin = min(ygmin,ymin)
ygmax = max(ygmax,ymax)
[ax.set_ylim((ygmin,ygmax)) for ax in axes[row,:]]
plt.tight_layout()
if no_legend:
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
else:
handles, labels = axes[-1,-1].get_legend_handles_labels()
if legend_kw['bbox_to_anchor']=='top_right_axis':
top = axes.flatten()[0].get_position().ymax
right = axes.flatten()[-1].get_position().xmax
legend_kw['bbox_to_anchor'] = [right, top]
legend_kw['borderaxespad'] = 0
legend_kw['edgecolor'] = 'black'
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 1
if legend_kw['bbox_to_anchor']=='below_titles':
legend_kw['loc'] = 'upper center'
legend_kw['bbox_to_anchor'] = (0.515, 0.95)
legend_kw['borderaxespad'] = 0.
legend_kw['ncol'] = 2
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 0
legend_kw['columnspacing'] = 2
legend_kw['handletextpad'] = 0.2
leg = fig.legend(handles, ['dense station network', 'only evaluation stations'],
**legend_kw)
for l in leg.legendHandles:
l.set_alpha(1)
plt.tight_layout()
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
return None
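
# Illustrative call sketch (an assumption, not executed here): the method keys
# are placeholders for fill_method identifiers present in the score table and
# in pu.METHOD_NAMES; metric and x-axis names follow the columns used above.
#
#   scatterplot(methods_used=['method_a', 'method_b'],
#               metrics_used=['HSavg_abs_diff', 'HSmax_abs_diff', 'dHS1_abs_diff'],
#               xaxs_value='HSavg_true',
#               filename='scatter_abs_error_vs_HSavg.png',
#               legend_kw={'bbox_to_anchor': 'below_titles'})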
def scatterplot_true_vs_pred(
methods_used,
climate_metrics,
filename=None,
dpi=300,
no_legend=False,
legend_kw=None,
equal_xy_axes=False,
fitlines=False,
print_score_values=False,
score_df=sc,
panel_height=2.25,
panel_width=2.1,
sharex=False,
sharey=False,
individual_panel_labels=False,
global_x_label='measured [cm or days]',
global_y_label='modeled [cm or days]',
markersize=5
):
fig, axes = plt.subplots(len(climate_metrics),len(methods_used),
figsize=[len(methods_used)*panel_width,len(climate_metrics)*panel_height],
sharex=sharex, sharey=sharey)
#different markers and colors for different station grids:
markers={'full': "s",
'only_target_stations': "^"}
colors={'full': 'tab:orange',
'only_target_stations': "b"}
units = defaultdict(lambda: '',
{'HSavg': ' [cm]',
'HSmax': ' [cm]',
'dHS1': ' [days]'})
if legend_kw is None:
legend_kw = {}
default_legend_kwargs={
'bbox_to_anchor':[0.99, 0],
'loc': 1,
'ncol': 1,
'bbox_transform':fig.transFigure,
'fontsize': 11,
'frameon': False}
for key, value in default_legend_kwargs.items():
legend_kw.setdefault(key, value)
score_annotations = {}
for station_grid in ['full','only_target_stations']:
color = colors[station_grid]
marker = markers[station_grid]
for row, metric in enumerate(climate_metrics):
for column, method in enumerate(methods_used):
score_df.loc[(score_df['fill_method']==method) & (score_df['station_grid']==station_grid)].plot(
f'{metric}_true',
f'{metric}_pred',
kind='scatter',
ax=axes[row,column],
color=color,
marker=marker,
s=markersize,
alpha=0.4,
label=station_grid)
axes[row,column].get_legend().remove()
if fitlines:
try:
linestyles = {'full':'--', 'only_target_stations':':'}
plot_data = score_df.loc[(score_df['fill_method']==method) & (score_df['station_grid']==station_grid)].dropna()
true = plot_data[f'{metric}_true']
pred = plot_data[f'{metric}_pred']
# linear fit to the scatterplot:
#obtain m (slope) and b(intercept) of linear regression line
m, b = np.polyfit(true, pred, 1)
# new x-vector
x_fitline = np.linspace(true.min(), true.max())
#add linear regression line to scatterplot
axes[row,column].plot(
x_fitline,
m*x_fitline+b,
linestyle=linestyles[station_grid],
color='k',
lw=1)
score_annotations[f"{method}{metric}{station_grid}r2"] = r2_score(true, pred)
score_annotations[f"{method}{metric}{station_grid}rmse"] = np.sqrt(mean_squared_error(true, pred))
score_annotations[f"{method}{metric}{station_grid}bias"] = scu._bias_score(true, pred)
except TypeError:
# only nans are in y_pred (for some stations/years for IDS)
pass
# y-labels
if column == 0:
if individual_panel_labels:
axes[row,column].set_ylabel(f'{metric} modeled{units[metric]}', fontsize=11)
else:
axes[row,column].set_ylabel(f'{metric}', fontsize=13)
axes[row,column].yaxis.set_label_coords(-0.4,0.5)
else:
axes[row,column].set_ylabel(None)
axes[row,column].tick_params(labelleft=False)
if individual_panel_labels:
axes[row,column].set_xlabel(f'{metric} measured{units[metric]}',
fontsize=11)
else:
axes[row,column].set_xlabel(None)
# titles
if row == 0:
axes[row,column].set_title(pu.METHOD_NAMES[method], fontsize=13)
if legend_kw['bbox_to_anchor']=='below_titles':
axes[row,column].set_title(f'{pu.METHOD_NAMES[method]}\n', fontsize=13)
if print_score_values:
# Annotations with mixed colors: extremely hacky...
for score_metric in ['rmse','r2','bias']:
for row, metric in enumerate(climate_metrics):
for column, method in enumerate(methods_used):
score_printed={
'rmse':'RMSE:',
'r2':'$r^2$:',
'bias':'BIAS:'}
float_format = {
'rmse':'1f',
'r2':'2f',
'bias':'2f'}
fontheight=0.095
y_pos = {'rmse':0.01+fontheight,
'r2':0.01+2*fontheight,
'bias':0.01}
score_dense = score_annotations[f'{method}{metric}full{score_metric}']
score_sparse = score_annotations[f"{method}{metric}only_target_stations{score_metric}"]
plt.rcParams.update({
"text.usetex": True})
fs = 11.5
x_pos = {'full':0.80, 'only_target_stations':0.99}
widths = {
'2f':{'negative':0.20,
'below_ten':0.17,
'above_ten':0.23},
'1f':{'negative':0.14,
'below_ten':0.13,
'above_ten':0.17}}
above_ten_add = 0.23
below_ten_add = 0.17
negative_add = 0.20
offset=0
axes[row,column].text(
0.99,
y_pos[score_metric],
f"{score_sparse:.{float_format[score_metric]}}",
ha='right',
va='bottom',
color=colors['only_target_stations'],
fontsize=fs,
transform=axes[row,column].transAxes
)
if score_sparse < 0:
offset += widths[float_format[score_metric]]['negative']
elif score_sparse < 10:
offset += widths[float_format[score_metric]]['below_ten']
else:
offset += widths[float_format[score_metric]]['above_ten']
axes[row,column].text(
0.99-offset,
y_pos[score_metric],
r"$\mid$",
ha='right',
va='bottom',
fontsize=fs,
transform=axes[row,column].transAxes
)
offset += 0.045
axes[row,column].text(
0.99-offset,
y_pos[score_metric],
f"{score_dense:.{float_format[score_metric]}}",
ha='right',
va='bottom',
color=colors['full'],
fontsize=fs,
transform=axes[row,column].transAxes
)
if score_dense < 0:
offset += widths[float_format[score_metric]]['negative']
elif score_dense < 10:
offset += widths[float_format[score_metric]]['below_ten']
else:
offset += widths[float_format[score_metric]]['above_ten']
offset += 0.005
axes[row,column].text(
0.99-offset,
y_pos[score_metric],
score_printed[score_metric],
ha='right',
va='bottom',
fontsize=fs,
transform=axes[row,column].transAxes
)
plt.rcParams.update({
"text.usetex": False})
# adapt y-lim for both station grids in every row
for row, metric in enumerate(climate_metrics):
ygmin = 0.; ygmax = 0.
xgmin = 0.; xgmax = 0.
for ax in axes[row,:]:
            # Get global minimum and maximum y values across all axes
ymin, ymax = ax.get_ylim()
ygmin = min(ygmin,ymin)
ygmax = max(ygmax,ymax)
xmin, xmax = ax.get_xlim()
xgmin = min(xgmin,xmin)
xgmax = max(xgmax,xmax)
[ax.set_ylim((ygmin,ygmax)) for ax in axes[row,:]]
if equal_xy_axes:
gmin = min(xgmin,ygmin)
gmax = max(xgmax,ygmax)
for ax in axes[row,:]:
ax.set_ylim((gmin,gmax))
ax.set_xlim((gmin,gmax))
ax.set_aspect(1, adjustable='box')
# draw x=y line:
for ax in axes.flatten():
ax.axline([0, 0], [1, 1], color='k',lw=0.9)
if not individual_panel_labels:
fig.text(0.5, 0.0, global_x_label, ha='center', va='center')
fig.text(0.04, 0.5, global_y_label, ha='center', va='center', rotation='vertical')
plt.tight_layout()
if no_legend:
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
else:
plt.show()
else:
handles, labels = axes[-1,-1].get_legend_handles_labels()
if fitlines:
custom_handles = []
for station_grid in ['full','only_target_stations']:
custom_handles.append(mlines.Line2D([], [], ls=linestyles[station_grid], color='k'))
handles = [(handles[0],custom_handles[0]),(handles[1],custom_handles[1])]
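            # Each legend entry is now a tuple pairing the scatter handle with the
            # matching fit-line handle; the HandlerTuple handler_map set a few lines
            # below renders such a tuple as a single combined legend key.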
if legend_kw['bbox_to_anchor']=='top_right_axis':
top = axes.flatten()[0].get_position().ymax
right = axes.flatten()[-1].get_position().xmax
legend_kw['bbox_to_anchor'] = [right, top]
legend_kw['borderaxespad'] = 0
legend_kw['edgecolor'] = 'black'
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 1
if legend_kw['bbox_to_anchor']=='below_titles':
legend_kw['loc'] = 'upper center'
legend_kw['bbox_to_anchor'] = (0.515, 0.955)
legend_kw['borderaxespad'] = 0.
legend_kw['ncol'] = 2
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 0
legend_kw['columnspacing'] = 2
legend_kw['handletextpad'] = 0.2
if fitlines:
legend_kw['handler_map'] = {tuple: HandlerTuple()}
leg = fig.legend(handles, ['dense station network', 'only evaluation stations'],
**legend_kw)
for l in leg.legendHandles:
l.set_alpha(1)
plt.tight_layout()
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
return None
# Boxplots
def evaluation_boxplot(methods_used,
metrics_used,
filename,
dpi=300,
legend_axis=-1,
boxstyle='whisker-box',
showfliers=False,
score_df=sc):
plot_func = {'whisker-box': sns.boxplot,
'letter-value': sns.boxenplot,
'violin': sns.violinplot}
assert boxstyle in plot_func.keys()
ylabels = {'dHS1_abs_diff': 'dHS1 abs. error [days]',
'HSmax_abs_diff': 'HSmax abs. error [cm]',
'HSavg_abs_diff': 'HSavg abs. error [cm]',
'RMSE': 'RMSE [cm]',
'MAAPE': 'MAAPE'}
plot_data = score_df.loc[score_df['fill_method'].isin(methods_used)].copy()
plot_data.replace(to_replace={'fill_method':pu.METHOD_NAMES}, inplace=True)
fig, axes = plt.subplots(1,len(metrics_used),
figsize=[(len(methods_used)*len(metrics_used))*0.7,10*0.65],
sharey=False)
if len(metrics_used) == 1:
axes = np.array([axes])
for ax, metric in zip(axes.flat, metrics_used):
plot_func[boxstyle](
data = plot_data,
x = 'fill_method',
y = metric,
order=[pu.METHOD_NAMES[m] for m in methods_used],
hue = 'station_grid',
hue_order = ['full', 'only_target_stations'],
palette=['C1', 'C0'],
# sym='',
showfliers=showfliers,
flierprops={'marker':'x'},
medianprops={'color':'yellow'},
ax=ax)
try:
ax.set_ylabel(ylabels[metric])
except KeyError:
ax.set_ylabel(metric)
ax.set_xlabel(None)
# ax.set_xticklabels(ax.get_xticklabels(), rotation = 55, ha='right')
ax.grid(axis='y',zorder=-1)
ax.set_axisbelow(True)
ax.get_legend().remove()
handles, labels = axes[legend_axis].get_legend_handles_labels()
# When creating the legend, only use the first two elements
# to effectively remove the last two.
l = axes[legend_axis].legend(handles, ['dense station network', 'only evaluation stations'],
bbox_to_anchor=(0.99, 0.99),
loc=1,
borderaxespad=0.,
frameon=False)
plt.tight_layout()
fig.subplots_adjust(bottom=0.2)
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
return None
def scatter_and_boxplot_subgrid(
methods_used,
metrics_used,
xaxs_value,
filename,
dpi=300,
no_legend=False,
legend_kw=None,
score_df=sc):
# methods_used= ['Inverse distance squared',
# 'GIDS',
# 'matiu vertical weighted',
# 'Elastic Net Regression',
# 'RandomForest_V3.5',
# 'SWE2HS_SLFTI']
# metrics_used = ['HSavg_abs_diff', 'HSmax_abs_diff', 'dHS1_abs_diff']
# xaxs_value = 'HSavg_true'
# filename=None
# legend_kw={
# 'bbox_to_anchor':'below_titles',
# 'frameon': True}
# no_legend=False
# Figure setup
fig = plt.figure(figsize=[len(methods_used)*2.2,len(metrics_used)*2.4])
outer_gs = gridspec.GridSpec(len(metrics_used), len(methods_used), figure=fig)
axs = []
inner_gs = []
for g in outer_gs:
inner = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=g, width_ratios=[3,1],wspace=0.)
inner_gs.append(inner)
inner_axs = [fig.add_subplot(ax) for ax in inner]
axs.append(inner_axs)
x_labels = {'gap_stn_altitude': 'Altitude [m]',
'HSavg_true': 'HSavg [cm]',
'gap_winter': 'gap winter'}
ylabels = {'dHS1_abs_diff': 'dHS1\nabs. error [days]',
'HSmax_abs_diff': 'HSmax\nabs. error [cm]',
'HSavg_abs_diff': 'HSavg\nabs. error [cm]',
'RMSE': 'RMSE',
'MAAPE': 'MAAPE'}
#different markers and colors for different station grids:
markers={'full': "s",
'only_target_stations': "^"}
colors={'full': 'tab:orange',
'only_target_stations': "b"}
linestyles={'full': '-',
'only_target_stations': "--"}
if legend_kw is None:
legend_kw = {}
default_legend_kwargs={
'bbox_to_anchor':[0.99, 0],
'loc': 1,
'ncol': 1,
'bbox_transform':fig.transFigure,
'fontsize': 11,
'frameon': False}
for key, value in default_legend_kwargs.items():
legend_kw.setdefault(key, value)
for station_grid in ['full','only_target_stations']:
color = colors[station_grid]
marker = markers[station_grid]
for row, metric in enumerate(metrics_used):
for column, method in enumerate(methods_used):
outer_ax = axs[(row*len(methods_used))+column]
plot_data = score_df.loc[(score_df['fill_method']==method) & (score_df['station_grid']==station_grid)].copy()
plot_data.plot(
xaxs_value,
metric,
kind='scatter',
ax=outer_ax[0],
color=color,
marker=marker,
alpha=0.4,
label=station_grid)
# binning based on xaxs_val:
bins = np.arange(0,140,20)
labels = np.arange(10,130,20)
# bins = np.arange(0,130,10)
# labels = np.arange(5,125,10)
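                # `labels` are the mid-points of the 20-unit-wide bins, so the black
                # median curve below is plotted at the centre of each bin.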
plot_data['binned_xval'] = pd.cut(plot_data[xaxs_value], bins,labels=labels)
median_bins = plot_data.groupby('binned_xval').median()
outer_ax[0].plot(
median_bins.index,
median_bins[metric],
color='k',
marker=marker,
ls=linestyles[station_grid],
label=station_grid)
if station_grid == 'only_target_stations':
sns.boxplot(
data = score_df.loc[(score_df['fill_method']==method)],
x = 'fill_method',
y = metric,
hue = 'station_grid',
hue_order = ['full', 'only_target_stations'],
palette=['C1', 'C0'],
# sym='',
# showfliers=False,
flierprops={'marker':'x'},
medianprops={'color':'yellow'},
ax=outer_ax[1])
for ax in outer_ax:
ax.get_legend().remove()
# y_labels
if column == 0:
try:
outer_ax[0].set_ylabel(ylabels[metric], fontsize=13)
except KeyError:
outer_ax[0].set_ylabel(metric, fontsize=13)
else:
outer_ax[0].set_ylabel(None)
outer_ax[0].tick_params(labelleft=False)
# x_labels
if row == len(metrics_used)-1:
outer_ax[0].set_xlabel(x_labels[xaxs_value],
fontsize=13)
else:
outer_ax[0].set_xlabel(None)
outer_ax[0].tick_params(labelbottom=False)
# titles
if row == 0:
if legend_kw['bbox_to_anchor']=='below_titles':
outer_ax[0].set_title(f'{pu.METHOD_NAMES[method]}\n', fontsize=13)
else:
outer_ax[0].set_title(pu.METHOD_NAMES[method], fontsize=13)
outer_ax[1].set(xticks=[], yticks=[])
outer_ax[1].set_ylabel(None)
outer_ax[1].set_xlabel(None)
# adapt y-lim for both station grids in every row
for row, metric in enumerate(metrics_used):
ygmin = 0.; ygmax = 0.
row_axs = []
for outer_ax in axs[row*len(methods_used):row*len(methods_used)+len(methods_used)]:
for ax in outer_ax:
                #Get global minimum and maximum y values across all axes
ymin, ymax = ax.get_ylim()
ygmin = min(ygmin,ymin)
ygmax = max(ygmax,ymax)
row_axs.append(ax)
[ax.set_ylim((ygmin,ygmax)) for ax in row_axs]
# if metric =='r2_score':
# [ax.set_ylim((-1.5,1.1)) for ax in row_axs]
plt.tight_layout()
if no_legend:
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
else:
handles, labels = axs[0][0].get_legend_handles_labels()
custom_handles = []
for station_grid in ['full','only_target_stations']:
custom_handles.append(mlines.Line2D([], [],
color=colors[station_grid],
marker=markers[station_grid],
mfc=colors[station_grid],
mec=colors[station_grid],
ls='')
)
if legend_kw['bbox_to_anchor']=='below_titles':
legend_kw['loc'] = 'upper center'
if len(metrics_used)==3:
legend_kw['bbox_to_anchor'] = (0.515, 0.955)
elif len(metrics_used)==2:
legend_kw['bbox_to_anchor'] = (0.515, 0.935)
else:
legend_kw['bbox_to_anchor'] = (0.515, 0.975)
legend_kw['borderaxespad'] = 0.
legend_kw['ncol'] = 2
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 0
legend_kw['columnspacing'] = 4
legend_kw['handletextpad'] = 1.8
leg = fig.legend([(handles[0],custom_handles[0]),(handles[1],custom_handles[1])],
['dense station network', 'only evaluation stations'],
handler_map={tuple: HandlerTupleHorizontal()},
**legend_kw, )
for l in leg.legendHandles:
l.set_alpha(1)
plt.tight_layout()
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
return None
def scatterboxbins(
methods_used,
metrics_used,
xaxs_value,
filename,
dpi=300,
no_legend=False,
legend_kw=None,
showfliers=False,
score_df=sc):
fig, axs = plt.subplots(len(metrics_used),len(methods_used),
figsize=[len(methods_used)*2,len(metrics_used)*2.1],
sharex=True, sharey=False)
x_labels = {'gap_stn_altitude': 'Altitude [m]',
'HSavg_true': 'HSavg [cm]',
'gap_winter': 'gap winter'}
ylabels = {'dHS1_abs_diff': 'dHS1\nabs. error [days]',
'HSmax_abs_diff': 'HSmax\nabs. error [cm]',
'HSavg_abs_diff': 'HSavg\nabs. error [cm]',
'RMSE': 'RMSE',
'MAAPE': 'MAAPE'}
#different markers and colors for different station grids:
markers={'full': "s",
'only_target_stations': "^"}
colors={'full': 'tab:orange',
'only_target_stations': "b"}
if legend_kw is None:
legend_kw = {}
default_legend_kwargs={
'bbox_to_anchor':[0.99, 0],
'loc': 1,
'ncol': 1,
'bbox_transform':fig.transFigure,
'fontsize': 11,
'frameon': False}
for key, value in default_legend_kwargs.items():
legend_kw.setdefault(key, value)
for row, metric in enumerate(metrics_used):
for column, method in enumerate(methods_used):
plt_data = score_df.loc[(score_df['fill_method']==method)].copy()
# binning based on xaxs_val:
if xaxs_value == 'HSavg_true':
bins = np.arange(0,140,20)
labels = np.arange(10,130,20)
if xaxs_value == 'gap_stn_altitude':
bins = np.linspace(200,2000,5)
labels = None
plt_data['binned_xval'] = pd.cut(plt_data[xaxs_value], bins,labels=labels)
sns.boxplot(
data = plt_data,
x = 'binned_xval',
y = metric,
hue = 'station_grid',
hue_order = ['full', 'only_target_stations'],
palette=['C1', 'C0'],
# sym='',
showfliers=showfliers,
flierprops={'marker':'d',
'markersize':2},
medianprops={'color':'yellow'},
ax=axs[row,column])
axs[row,column].get_legend().remove()
axs[row,column].yaxis.grid(True)
# y_labels
if column == 0:
try:
axs[row,column].set_ylabel(ylabels[metric], fontsize=13)
except KeyError:
axs[row,column].set_ylabel(metric, fontsize=13)
else:
axs[row,column].set_ylabel(None)
axs[row,column].tick_params(labelleft=False)
# x_labels
if row == len(metrics_used)-1:
axs[row,column].set_xlabel(x_labels[xaxs_value],
fontsize=13)
else:
axs[row,column].set_xlabel(None)
# titles
if row == 0:
if legend_kw['bbox_to_anchor']=='below_titles':
axs[row,column].set_title(f'{pu.METHOD_NAMES[method]}\n', fontsize=13)
else:
axs[row,column].set_title(pu.METHOD_NAMES[method], fontsize=13)
# adapt y-lim for both station grids in every row
for row, metric in enumerate(metrics_used):
ygmin = 0.; ygmax = 0.
for ax in axs[row,:]:
            #Get global minimum and maximum y values across all axes
ymin, ymax = ax.get_ylim()
ygmin = min(ygmin,ymin)
ygmax = max(ygmax,ymax)
[ax.set_ylim((ygmin,ygmax)) for ax in axs[row,:]]
    # somehow changing ylim seems to move the grid to the front...
[ax.set_axisbelow(True) for ax in axs.flatten()]
plt.tight_layout()
if no_legend:
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
else:
handles, labels = axs[-1,-1].get_legend_handles_labels()
if legend_kw['bbox_to_anchor']=='below_titles':
legend_kw['loc'] = 'upper center'
if len(metrics_used)==3:
legend_kw['bbox_to_anchor'] = (0.515, 0.96)
elif len(metrics_used)==2:
legend_kw['bbox_to_anchor'] = (0.515, 0.94)
else:
legend_kw['bbox_to_anchor'] = (0.515, 0.975)
legend_kw['ncol'] = 2
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 0
legend_kw['columnspacing'] = 2
leg = fig.legend(handles, ['dense station network', 'only evaluation stations'],
**legend_kw)
for l in leg.legendHandles:
l.set_alpha(1)
plt.tight_layout()
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
else:
plt.show()
return None
# %% ########### Standard Methods
def main(used_methods):
plot_output = '../results/cross_validation/score_box_and_scatterplots/'
if not os.path.isdir(plot_output):
os.makedirs(plot_output)
logger.info(("generate boxplots and scatterplots for scores and save to\n"
f"{os.path.abspath(plot_output)}"))
# list of methods
all_methods = sc['fill_method'].unique()
climate_metrics = ['HSavg_diff', 'HSmax_diff', 'dHS1_diff']
climate_metrics_abs_diff = ['HSavg_abs_diff', 'HSmax_abs_diff', 'dHS1_abs_diff']
metrics_used = ['RMSE', 'MAAPE', 'BIAS']
scatterplot(used_methods,
['RMSE', 'MAAPE'],
'HSavg_true',
f'{plot_output}fig03_scatterplots_HSavg_vs_RMSE_MAAPE_used_methods.png',
legend_kw={
'bbox_to_anchor':'top_right_axis',
'frameon': True})
scatterplot(used_methods,
climate_metrics,
'HSavg_true',
f'{plot_output}scatterplots_HSavg_vs_climate_metrics_used_methods.png',
legend_kw={
'bbox_to_anchor':'top_right_axis',
'frameon': True})
scatterplot(used_methods,
['RMSE', 'MAAPE'],
'gap_stn_altitude',
f'{plot_output}scatterplots_stationaltitude_vs_RMSE_MAAPE_used_methods.png')
scatterplot(used_methods,
metrics_used=climate_metrics,
xaxs_value='HSavg_true',
filename=f'{plot_output}scatterplots_HSavg_vs_climate_metrics_used_methods.png')
scatterplot(used_methods,
metrics_used=climate_metrics,
xaxs_value='HSavg_true',
filename=f'{plot_output}scatterplots_stationaltitude_vs_climate_metrics_used_methods.png')
evaluation_boxplot(used_methods,
climate_metrics,
f'{plot_output}boxplots_climate_metrics_standar_methods.png')
evaluation_boxplot(used_methods,
climate_metrics_abs_diff,
f'{plot_output}fig06_boxplots_climate_metrics_abs_diff_used_methods.png',
legend_axis=-1)
evaluation_boxplot(['Elastic Net Regression',
'RandomForest_V3.5',
'SWE2HS_SLFTI'],
['HSavg_diff'],
f'{plot_output}boxplots_HSavg_diff_BIAS_ela_rand_swe2hs_for_bias_estimation.png')
evaluation_boxplot(used_methods,
['RMSE', 'MAAPE'],
f'{plot_output}fig02_boxplots_RMSE_MAAPE_used_methods.png'
)
scatterplot(all_methods,
['RMSE', 'MAAPE'],
'HSavg_true',
f'{plot_output}scatterplots_HSavg_vs_RMSE_MAAPE_all_methods.png')
scatterplot(all_methods,
['HSavg_pred','HSavg_true'],
'HSavg_true',
f'{plot_output}scatterplots_HSavg_true_vs_HSavg_pred_all_methods.png')
scatterplot_true_vs_pred(
used_methods,
['HSavg','HSmax','dHS1'],
f'{plot_output}fig05_scatterplots_true_vs_pred_climate_metrics_used_methods.png',
legend_kw={'bbox_to_anchor':'top_right_axis',
'frameon':True})
scatterplot_true_vs_pred(
all_methods,
['HSavg','HSmax','dHS1'],
f'{plot_output}scatterplots_true_vs_pred_climate_metrics_all_methods.png')
scatterplot(all_methods,
metrics_used=climate_metrics,
xaxs_value='HSavg_true',
filename=f'{plot_output}scatterplots_HSavg_vs_climate_metrics_all_methods.png')
scatterplot(all_methods,
metrics_used=['RMSE','MAAPE','BIAS'],
xaxs_value='gap_winter',
filename=f'{plot_output}scatterplots_gap_winter_vs_RMSE_MAAPE_BIAS_all_methods.png')
evaluation_boxplot(all_methods,
['RMSE', 'MAAPE', 'BIAS'],
filename=f'{plot_output}boxplots_all_methods.png')
evaluation_boxplot(all_methods,
climate_metrics,
f'{plot_output}boxplots_climate_metrics_all_methods.png')
scatterplot(used_methods,
['HSavg_abs_diff', 'HSmax_abs_diff', 'dHS1_abs_diff'],
'HSavg_true',
filename=f'{plot_output}scatter_climate_metrics_abs_diff_vs_HSavg.png',
legend_kw={
'bbox_to_anchor':'top_right_axis',
'frameon': True})
|
[
"cv_results_database.get_cv_results_as_df",
"numpy.polyfit",
"sklearn.metrics.r2_score",
"collections.defaultdict",
"numpy.arange",
"matplotlib.legend_handler.HandlerTuple.__init__",
"matplotlib.pyplot.tight_layout",
"matplotlib.legend_handler.HandlerTuple",
"os.path.abspath",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.show",
"seaborn.set_color_codes",
"scoring_utils._bias_score",
"pandas.cut",
"seaborn.boxplot",
"os.makedirs",
"os.path.isdir",
"numpy.array",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"logging.getLogger"
] |
[((604, 631), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (621, 631), False, 'import logging\n'), ((663, 698), 'seaborn.set_color_codes', 'sns.set_color_codes', ([], {'palette': '"""deep"""'}), "(palette='deep')\n", (682, 698), True, 'import seaborn as sns\n'), ((705, 727), 'cv_results_database.get_cv_results_as_df', 'get_cv_results_as_df', ([], {}), '()\n', (725, 727), False, 'from cv_results_database import get_cv_results_as_df\n'), ((5981, 5999), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5997, 5999), True, 'import matplotlib.pyplot as plt\n'), ((8470, 8555), 'collections.defaultdict', 'defaultdict', (["(lambda : '')", "{'HSavg': ' [cm]', 'HSmax': ' [cm]', 'dHS1': ' [days]'}"], {}), "(lambda : '', {'HSavg': ' [cm]', 'HSmax': ' [cm]', 'dHS1':\n ' [days]'})\n", (8481, 8555), False, 'from collections import defaultdict\n'), ((18079, 18097), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18095, 18097), True, 'import matplotlib.pyplot as plt\n'), ((22459, 22477), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22475, 22477), True, 'import matplotlib.pyplot as plt\n'), ((22579, 22593), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (22588, 22593), True, 'import matplotlib.pyplot as plt\n'), ((28841, 28859), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (28857, 28859), True, 'import matplotlib.pyplot as plt\n'), ((34902, 34920), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34918, 34920), True, 'import matplotlib.pyplot as plt\n'), ((950, 987), 'matplotlib.legend_handler.HandlerTuple.__init__', 'HandlerTuple.__init__', (['self'], {}), '(self, **kwargs)\n', (971, 987), False, 'from matplotlib.legend_handler import HandlerTuple\n'), ((7280, 7298), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7296, 7298), True, 'import matplotlib.pyplot as plt\n'), ((19773, 19791), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19789, 19791), True, 'import matplotlib.pyplot as plt\n'), ((21117, 21133), 'numpy.array', 'np.array', (['[axes]'], {}), '([axes])\n', (21125, 21133), True, 'import numpy as np\n'), ((23475, 23566), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(2)'], {'subplot_spec': 'g', 'width_ratios': '[3, 1]', 'wspace': '(0.0)'}), '(1, 2, subplot_spec=g, width_ratios=[3, 1],\n wspace=0.0)\n', (23507, 23566), False, 'from matplotlib import gridspec\n'), ((30645, 30663), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30661, 30663), True, 'import matplotlib.pyplot as plt\n'), ((35923, 35941), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (35939, 35941), True, 'import matplotlib.pyplot as plt\n'), ((36270, 36296), 'os.path.isdir', 'os.path.isdir', (['plot_output'], {}), '(plot_output)\n', (36283, 36296), False, 'import os\n'), ((36306, 36330), 'os.makedirs', 'os.makedirs', (['plot_output'], {}), '(plot_output)\n', (36317, 36330), False, 'import os\n'), ((6127, 6141), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6136, 6141), True, 'import matplotlib.pyplot as plt\n'), ((6168, 6178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6176, 6178), True, 'import matplotlib.pyplot as plt\n'), ((7408, 7422), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7417, 7422), True, 'import matplotlib.pyplot as plt\n'), ((7449, 7459), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7457, 7459), True, 'import matplotlib.pyplot as plt\n'), ((18239, 18249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18247, 18249), True, 'import matplotlib.pyplot as plt\n'), ((19901, 19915), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19910, 19915), True, 'import matplotlib.pyplot as plt\n'), ((19942, 19952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19950, 19952), True, 'import matplotlib.pyplot as plt\n'), ((28987, 29001), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (28996, 29001), True, 'import matplotlib.pyplot as plt\n'), ((29028, 29038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29036, 29038), True, 'import matplotlib.pyplot as plt\n'), ((30773, 30787), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (30782, 30787), True, 'import matplotlib.pyplot as plt\n'), ((30814, 30824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30822, 30824), True, 'import matplotlib.pyplot as plt\n'), ((32757, 32806), 'pandas.cut', 'pd.cut', (['plt_data[xaxs_value]', 'bins'], {'labels': 'labels'}), '(plt_data[xaxs_value], bins, labels=labels)\n', (32763, 32806), True, 'import pandas as pd\n'), ((32818, 33091), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'plt_data', 'x': '"""binned_xval"""', 'y': 'metric', 'hue': '"""station_grid"""', 'hue_order': "['full', 'only_target_stations']", 'palette': "['C1', 'C0']", 'showfliers': 'showfliers', 'flierprops': "{'marker': 'd', 'markersize': 2}", 'medianprops': "{'color': 'yellow'}", 'ax': 'axs[row, column]'}), "(data=plt_data, x='binned_xval', y=metric, hue='station_grid',\n hue_order=['full', 'only_target_stations'], palette=['C1', 'C0'],\n showfliers=showfliers, flierprops={'marker': 'd', 'markersize': 2},\n medianprops={'color': 'yellow'}, ax=axs[row, column])\n", (32829, 33091), True, 'import seaborn as sns\n'), ((35048, 35062), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (35057, 35062), True, 'import matplotlib.pyplot as plt\n'), ((35089, 35099), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (35097, 35099), True, 'import matplotlib.pyplot as plt\n'), ((36051, 36065), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (36060, 36065), True, 'import matplotlib.pyplot as plt\n'), ((36092, 36102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36100, 36102), True, 'import matplotlib.pyplot as plt\n'), ((19540, 19554), 'matplotlib.legend_handler.HandlerTuple', 'HandlerTuple', ([], {}), '()\n', (19552, 19554), False, 'from matplotlib.legend_handler import HandlerTuple\n'), ((25551, 25572), 'numpy.arange', 'np.arange', (['(0)', '(140)', '(20)'], {}), '(0, 140, 20)\n', (25560, 25572), True, 'import numpy as np\n'), ((25596, 25618), 'numpy.arange', 'np.arange', (['(10)', '(130)', '(20)'], {}), '(10, 130, 20)\n', (25605, 25618), True, 'import numpy as np\n'), ((25752, 25802), 'pandas.cut', 'pd.cut', (['plot_data[xaxs_value]', 'bins'], {'labels': 'labels'}), '(plot_data[xaxs_value], bins, labels=labels)\n', (25758, 25802), True, 'import pandas as pd\n'), ((29241, 29384), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': 'colors[station_grid]', 'marker': 'markers[station_grid]', 'mfc': 'colors[station_grid]', 'mec': 'colors[station_grid]', 'ls': '""""""'}), "([], [], color=colors[station_grid], marker=markers[\n station_grid], mfc=colors[station_grid], mec=colors[station_grid], ls='')\n", (29254, 29384), True, 'import 
matplotlib.lines as mlines\n'), ((32526, 32547), 'numpy.arange', 'np.arange', (['(0)', '(140)', '(20)'], {}), '(0, 140, 20)\n', (32535, 32547), True, 'import numpy as np\n'), ((32571, 32593), 'numpy.arange', 'np.arange', (['(10)', '(130)', '(20)'], {}), '(10, 130, 20)\n', (32580, 32593), True, 'import numpy as np\n'), ((32664, 32689), 'numpy.linspace', 'np.linspace', (['(200)', '(2000)', '(5)'], {}), '(200, 2000, 5)\n', (32675, 32689), True, 'import numpy as np\n'), ((36430, 36458), 'os.path.abspath', 'os.path.abspath', (['plot_output'], {}), '(plot_output)\n', (36445, 36458), False, 'import os\n'), ((13448, 13490), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True}"], {}), "({'text.usetex': True})\n", (13467, 13490), True, 'import matplotlib.pyplot as plt\n'), ((16863, 16906), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': False}"], {}), "({'text.usetex': False})\n", (16882, 16906), True, 'import matplotlib.pyplot as plt\n'), ((18491, 18552), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'ls': 'linestyles[station_grid]', 'color': '"""k"""'}), "([], [], ls=linestyles[station_grid], color='k')\n", (18504, 18552), True, 'import matplotlib.lines as mlines\n'), ((26256, 26524), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': "score_df.loc[score_df['fill_method'] == method]", 'x': '"""fill_method"""', 'y': 'metric', 'hue': '"""station_grid"""', 'hue_order': "['full', 'only_target_stations']", 'palette': "['C1', 'C0']", 'flierprops': "{'marker': 'x'}", 'medianprops': "{'color': 'yellow'}", 'ax': 'outer_ax[1]'}), "(data=score_df.loc[score_df['fill_method'] == method], x=\n 'fill_method', y=metric, hue='station_grid', hue_order=['full',\n 'only_target_stations'], palette=['C1', 'C0'], flierprops={'marker':\n 'x'}, medianprops={'color': 'yellow'}, ax=outer_ax[1])\n", (26267, 26524), True, 'import seaborn as sns\n'), ((10324, 10349), 'numpy.polyfit', 'np.polyfit', (['true', 'pred', '(1)'], {}), '(true, pred, 1)\n', (10334, 10349), True, 'import numpy as np\n'), ((10877, 10897), 'sklearn.metrics.r2_score', 'r2_score', (['true', 'pred'], {}), '(true, pred)\n', (10885, 10897), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((11104, 11131), 'scoring_utils._bias_score', 'scu._bias_score', (['true', 'pred'], {}), '(true, pred)\n', (11119, 11131), True, 'import scoring_utils as scu\n'), ((10989, 11019), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['true', 'pred'], {}), '(true, pred)\n', (11007, 11019), False, 'from sklearn.metrics import r2_score, mean_squared_error\n')]
|
from argparse import ArgumentParser
from os import path
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import Axes3D, proj3d
import numpy
from plotypus.preprocessing import Fourier
from matplotlib import rc
rc('font', **{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
from plotypus.utils import make_sure_path_exists
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
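# FancyArrowPatch itself is strictly 2D, so Arrow3D stores the 3D end points and,
# at draw time, projects them into 2D display space with proj3d.proj_transform
# before delegating to the ordinary 2D drawing code.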
def get_args():
parser = ArgumentParser("fourier_space.py")
parser.add_argument("-i", "--input", type=str,
help="Table file containing time, magnitude, and (optional) error "
"in its columns.")
parser.add_argument("-o", "--output", type=str,
default=".",
help="Directory to output demo plots.")
parser.add_argument("-n", "--name", type=str,
help="Name to use as prefix in all output files.")
parser.add_argument("-t", "--type", type=str,
default="png",
help="File type to output plots in. Default is png.")
parser.add_argument("-p", "--period", type=float,
help="Period to phase observations by.")
parser.add_argument("--use-cols", type=int, nargs="+",
default=(0, 1, 2),
help="Columns to read time, magnigude, and (optional) error from, "
"respectively. "
"Defaults to 0, 1, 2.")
parser.add_argument("--temporal",
action="store_true",
help="Enable 2D temporal plot")
parser.add_argument("--2d", dest="two_dee",
action="store_true",
help="Enable 2D Fourier space plot")
parser.add_argument("--3d-flat", dest="three_dee_flat",
action="store_true",
help="Enable flat 3D Fourier space plot")
parser.add_argument("--3d-rotate", dest="three_dee_rotate",
action="store_true",
help="Enable rotating 3D Fourier space plot")
parser.add_argument("--3d-plane", dest="three_dee_plane",
action="store_true",
help="Enable rotating 3D Fourier space plane-fit plot")
args = parser.parse_args()
return args
def plot_temporal(x, y, x_label, y_label, filename,
color='b', size=10, marker='.'):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x, y,
color=color, s=size, marker=marker)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
fig.savefig(filename)
plt.close(fig)
def plot2d(x, y, x_label, y_label, filename,
color='b', size=10, marker='.'):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x, y,
color=color, s=size, marker=marker)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_xticks((-1.0, -0.5, 0.0, 0.5, 1.0))
ax.set_yticks((-1.0, -0.5, 0.0, 0.5, 1.0))
fig.savefig(filename)
plt.close(fig)
def plot3d(x, y, z, x_label, y_label, z_label, filename,
color='b', size=10, marker='.'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal', 'datalim')
ax.scatter(x, y, z,
color=color, s=size, marker=marker)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
ax.set_xticks((-1.0, -0.5, 0.0, 0.5, 1.0))
ax.set_yticks((-1.0, -0.5, 0.0, 0.5, 1.0))
fig.savefig(filename)
plt.close(fig)
def plot3drotate(x, y, z, x_label, y_label, z_label, file_prefix, file_type,
color='b', size=10, marker='.'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal', 'datalim')
ax.scatter(x, y, z,
color=color, s=size, marker=marker)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
ax.set_xticks((-1.0, -0.5, 0.0, 0.5, 1.0))
ax.set_yticks((-1.0, -0.5, 0.0, 0.5, 1.0))
for i in range(360):
ax.view_init(azim=i)
fig.savefig("{0}-{1:03d}.{2}".format(file_prefix, i, file_type))
plt.close(fig)
def plot3dplane(x, y, z, A_0, a, b,
x_label, y_label, z_label, file_prefix, file_type,
color='b', size=10, marker='.'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal', 'datalim')
xx, yy = numpy.meshgrid(numpy.arange(-1, 1, 0.01),
numpy.arange(-1, 1, 0.01))
zz = A_0 + a*xx + b*yy
mean_x, mean_y, mean_z = x.mean(), y.mean(), z.mean()
mean_vec = numpy.array([mean_x, mean_y, mean_z])
a_vec = numpy.array([1.0, 0.0, a])+mean_vec
b_vec = numpy.array([0.0, 1.0, b])+mean_vec
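    # The two arrows below start at the centroid of the data and point one unit
    # along the sin- and cos-axis respectively, rising by the fitted slopes a and
    # b in the magnitude direction, i.e. they span the fitted plane.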
ax.scatter(x, y, z,
color=color, s=size, marker=marker)
ax.plot_surface(xx, yy, zz,
color="#555555", alpha=0.2)
mean_x, mean_y, mean_z = x.mean(), y.mean(), z.mean()
for vec in [a_vec, b_vec]:
arrow = Arrow3D([mean_x, vec[0]],
[mean_y, vec[1]],
[mean_z, vec[2]],
mutation_scale=20,
lw=3, arrowstyle="-|>", color="r")
ax.add_artist(arrow)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
ax.set_xticks((-1.0, -0.5, 0.0, 0.5, 1.0))
ax.set_yticks((-1.0, -0.5, 0.0, 0.5, 1.0))
for i in range(360):
ax.view_init(azim=i)
fig.savefig("{0}-{1:03d}.{2}".format(file_prefix, i, file_type))
plt.close(fig)
def main():
args = get_args()
make_sure_path_exists(args.output)
phase, mag, *err = numpy.loadtxt(args.input, usecols=args.use_cols,
unpack=True)
design_matrix = Fourier.design_matrix(phase/args.period, 1)
coeffs, *_ = numpy.linalg.lstsq(design_matrix, mag)
A_0, a, b = coeffs
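    # With a degree-1 Fourier design matrix (columns: constant, sin, cos) the
    # least-squares fit is the plane m = A_0 + a*sin(omega*t) + b*cos(omega*t);
    # A_0, a and b are reused below to draw that plane through the observations.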
if args.temporal:
plot_temporal((phase/args.period)%1.0, mag,
r"$t$", r"$y(t)$",
path.join(args.output,
args.name + "-temporal-space." + args.type))
if args.two_dee:
plot2d(design_matrix[:,1], design_matrix[:,2],
r"$\sin(1 \omega t)$", r"$\cos(1 \omega t)$",
path.join(args.output,
args.name + "-2D-fourier-space." + args.type))
if args.three_dee_flat:
plot3d(design_matrix[:,1], design_matrix[:,2], mag,
r"$\sin(1 \omega t)$", r"$\cos(1 \omega t)$", r"$m$",
path.join(args.output,
args.name + "-3D-fourier-space." + args.type))
if args.three_dee_rotate:
plot3drotate(design_matrix[:,1], design_matrix[:,2], mag,
r"$\sin(1 \omega t)$", r"$\cos(1 \omega t)$", r"$m$",
path.join(args.output,
args.name + "-3D-fourier-space"), args.type)
if args.three_dee_plane:
plot3dplane(design_matrix[:,1], design_matrix[:,2], mag,
A_0, a, b,
r"$\sin(1 \omega t)$", r"$\cos(1 \omega t)$", r"$m$",
path.join(args.output,
args.name + "-3D-fourier-space-plane"), args.type)
return 0
if __name__ == "__main__":
exit(main())
|
[
"matplotlib.rc",
"mpl_toolkits.mplot3d.proj3d.proj_transform",
"argparse.ArgumentParser",
"numpy.linalg.lstsq",
"matplotlib.patches.FancyArrowPatch.draw",
"matplotlib.pyplot.close",
"plotypus.utils.make_sure_path_exists",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.array",
"numpy.loadtxt",
"numpy.arange",
"plotypus.preprocessing.Fourier.design_matrix",
"os.path.join",
"matplotlib.patches.FancyArrowPatch.__init__"
] |
[((74, 95), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (88, 95), False, 'import matplotlib\n'), ((307, 374), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n", (309, 374), False, 'from matplotlib import rc\n'), ((372, 395), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (374, 395), False, 'from matplotlib import rc\n'), ((916, 950), 'argparse.ArgumentParser', 'ArgumentParser', (['"""fourier_space.py"""'], {}), "('fourier_space.py')\n", (930, 950), False, 'from argparse import ArgumentParser\n'), ((2642, 2654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2652, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2859), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2854, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2961, 2973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2971, 2973), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3273), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3268, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3387, 3399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3397, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3785), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3780, 3785), True, 'import matplotlib.pyplot as plt\n'), ((3925, 3937), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3935, 3937), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4424), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4419, 4424), True, 'import matplotlib.pyplot as plt\n'), ((4589, 4601), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4599, 4601), True, 'import matplotlib.pyplot as plt\n'), ((4900, 4937), 'numpy.array', 'numpy.array', (['[mean_x, mean_y, mean_z]'], {}), '([mean_x, mean_y, mean_z])\n', (4911, 4937), False, 'import numpy\n'), ((5858, 5872), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5867, 5872), True, 'import matplotlib.pyplot as plt\n'), ((5914, 5948), 'plotypus.utils.make_sure_path_exists', 'make_sure_path_exists', (['args.output'], {}), '(args.output)\n', (5935, 5948), False, 'from plotypus.utils import make_sure_path_exists\n'), ((5973, 6034), 'numpy.loadtxt', 'numpy.loadtxt', (['args.input'], {'usecols': 'args.use_cols', 'unpack': '(True)'}), '(args.input, usecols=args.use_cols, unpack=True)\n', (5986, 6034), False, 'import numpy\n'), ((6093, 6138), 'plotypus.preprocessing.Fourier.design_matrix', 'Fourier.design_matrix', (['(phase / args.period)', '(1)'], {}), '(phase / args.period, 1)\n', (6114, 6138), False, 'from plotypus.preprocessing import Fourier\n'), ((6154, 6192), 'numpy.linalg.lstsq', 'numpy.linalg.lstsq', (['design_matrix', 'mag'], {}), '(design_matrix, mag)\n', (6172, 6192), False, 'import numpy\n'), ((540, 603), 'matplotlib.patches.FancyArrowPatch.__init__', 'FancyArrowPatch.__init__', (['self', '(0, 0)', '(0, 0)', '*args'], {}), '(self, (0, 0), (0, 0), *args, **kwargs)\n', (564, 603), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((730, 781), 'mpl_toolkits.mplot3d.proj3d.proj_transform', 'proj3d.proj_transform', (['xs3d', 'ys3d', 'zs3d', 'renderer.M'], {}), '(xs3d, ys3d, zs3d, renderer.M)\n', (751, 781), False, 'from mpl_toolkits.mplot3d import Axes3D, proj3d\n'), ((849, 885), 'matplotlib.patches.FancyArrowPatch.draw', 'FancyArrowPatch.draw', (['self', 'renderer'], {}), '(self, 
renderer)\n', (869, 885), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((4717, 4742), 'numpy.arange', 'numpy.arange', (['(-1)', '(1)', '(0.01)'], {}), '(-1, 1, 0.01)\n', (4729, 4742), False, 'import numpy\n'), ((4772, 4797), 'numpy.arange', 'numpy.arange', (['(-1)', '(1)', '(0.01)'], {}), '(-1, 1, 0.01)\n', (4784, 4797), False, 'import numpy\n'), ((4951, 4977), 'numpy.array', 'numpy.array', (['[1.0, 0.0, a]'], {}), '([1.0, 0.0, a])\n', (4962, 4977), False, 'import numpy\n'), ((4999, 5025), 'numpy.array', 'numpy.array', (['[0.0, 1.0, b]'], {}), '([0.0, 1.0, b])\n', (5010, 5025), False, 'import numpy\n'), ((6354, 6420), 'os.path.join', 'path.join', (['args.output', "(args.name + '-temporal-space.' + args.type)"], {}), "(args.output, args.name + '-temporal-space.' + args.type)\n", (6363, 6420), False, 'from os import path\n'), ((6607, 6675), 'os.path.join', 'path.join', (['args.output', "(args.name + '-2D-fourier-space.' + args.type)"], {}), "(args.output, args.name + '-2D-fourier-space.' + args.type)\n", (6616, 6675), False, 'from os import path\n'), ((6875, 6943), 'os.path.join', 'path.join', (['args.output', "(args.name + '-3D-fourier-space.' + args.type)"], {}), "(args.output, args.name + '-3D-fourier-space.' + args.type)\n", (6884, 6943), False, 'from os import path\n'), ((7163, 7218), 'os.path.join', 'path.join', (['args.output', "(args.name + '-3D-fourier-space')"], {}), "(args.output, args.name + '-3D-fourier-space')\n", (7172, 7218), False, 'from os import path\n'), ((7482, 7543), 'os.path.join', 'path.join', (['args.output', "(args.name + '-3D-fourier-space-plane')"], {}), "(args.output, args.name + '-3D-fourier-space-plane')\n", (7491, 7543), False, 'from os import path\n')]
|
import logging
import numpy as np
import itertools
logger = logging.getLogger(__name__)
# #######################################
# ############ set_action ###############
# #######################################
def ctrl_set_action(sim, action):
"""
For torque actuators it copies the action into mujoco ctrl field.
For position actuators it sets the target relative to the current qpos.
"""
if sim.model.nmocap > 0:
_, action = np.split(action, (sim.model.nmocap * 7, ))
if sim.data.ctrl is not None:
for i in range(action.shape[0]):
if sim.model.actuator_biastype[i] == 0:
sim.data.ctrl[i] = action[i]
else:
idx = sim.model.jnt_qposadr[sim.model.actuator_trnid[i, 0]]
sim.data.ctrl[i] = sim.data.qpos[idx] + action[i]
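# Illustration (hypothetical numbers): for a position actuator (biastype != 0)
# whose joint currently sits at qpos = 0.3, an action of +0.1 yields ctrl = 0.4,
# i.e. actions are interpreted as offsets from the current joint position rather
# than as absolute targets; torque actuators (biastype == 0) get the raw action.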
# #######################################
# ############ get_reward ###############
# #######################################
def zero_get_reward(sim):
return 0.0
def gps_dist(sim, obj0, obj1):
obj0 = sim.data.get_site_xpos(obj0)
obj1 = sim.data.get_site_xpos(obj1)
diff = np.sum(np.square(obj0 - obj1))
return diff + 0.3 * np.log(diff + 1e-4)
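# The logarithmic term keeps decreasing as the squared distance approaches zero
# (down to roughly 0.3 * log(1e-4) ~ -2.8), presumably to sharpen the gradient of
# this distance measure close to the target compared to the plain squared distance.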
def l2_dist(sim, obj0, obj1):
obj0 = sim.data.get_site_xpos(obj0)
obj1 = sim.data.get_site_xpos(obj1)
return np.sqrt(np.mean(np.square(obj0 - obj1)))
# #######################################
# ########### get_diverged ##############
# #######################################
def false_get_diverged(sim):
return False, 0.0
def simple_get_diverged(sim):
if sim.data.qpos is not None and \
(np.max(np.abs(sim.data.qpos)) > 1000.0 or
np.max(np.abs(sim.data.qvel)) > 100.0):
return True, -20.0
return False, 0.0
# #######################################
# ########### get_info ##############
# #######################################
def empty_get_info(sim):
return {}
# #######################################
# ############## get_obs ################
# #######################################
def flatten_get_obs(sim):
if sim.data.qpos is None:
return np.zeros(0)
return np.concatenate([sim.data.qpos, sim.data.qvel])
def image_get_obs(sim):
return sim.render(100, 100, camera_name="rgb")
# Helpers
def get_body_geom_ids(model, body_name):
""" Returns geom_ids in the body. """
body_id = model.body_name2id(body_name)
geom_ids = []
for geom_id in range(model.ngeom):
if model.geom_bodyid[geom_id] == body_id:
geom_ids.append(geom_id)
return geom_ids
def change_geom_alpha(model, body_name_prefix, new_alpha):
''' Changes the visual transparency (alpha) of an object'''
for body_name in model.body_names:
if body_name.startswith(body_name_prefix):
for geom_id in get_body_geom_ids(model, body_name):
model.geom_rgba[geom_id, 3] = new_alpha
def joint_qpos_idxs(sim, joint_name):
''' Gets indexes for the specified joint's qpos values'''
addr = sim.model.get_joint_qpos_addr(joint_name)
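    # get_joint_qpos_addr returns a (start, end) tuple for joints whose qpos spans
    # several entries (e.g. free or ball joints) and a plain int for single-dof
    # joints; both cases are normalised to a list of indices below.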
if isinstance(addr, tuple):
return list(range(addr[0], addr[1]))
else:
return [addr]
def qpos_idxs_from_joint_prefix(sim, prefix):
''' Gets indexes for the qpos values of all joints matching the prefix'''
qpos_idxs_list = [joint_qpos_idxs(sim, name)
for name in sim.model.joint_names
if name.startswith(prefix)]
return list(itertools.chain.from_iterable(qpos_idxs_list))
def joint_qvel_idxs(sim, joint_name):
''' Gets indexes for the specified joint's qvel values'''
addr = sim.model.get_joint_qvel_addr(joint_name)
if isinstance(addr, tuple):
return list(range(addr[0], addr[1]))
else:
return [addr]
def qvel_idxs_from_joint_prefix(sim, prefix):
''' Gets indexes for the qvel values of all joints matching the prefix'''
qvel_idxs_list = [joint_qvel_idxs(sim, name)
for name in sim.model.joint_names
if name.startswith(prefix)]
return list(itertools.chain.from_iterable(qvel_idxs_list))
def body_names_from_joint_prefix(sim, prefix):
''' Returns a list of body names that contain joints matching the given prefix'''
return [sim.model.body_id2name(sim.model.jnt_bodyid[sim.model.joint_name2id(name)])
for name in sim.model.joint_names
if name.startswith(prefix)]
|
[
"numpy.abs",
"numpy.log",
"numpy.square",
"numpy.zeros",
"logging.getLogger",
"numpy.split",
"itertools.chain.from_iterable",
"numpy.concatenate"
] |
[((61, 88), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (78, 88), False, 'import logging\n'), ((2162, 2208), 'numpy.concatenate', 'np.concatenate', (['[sim.data.qpos, sim.data.qvel]'], {}), '([sim.data.qpos, sim.data.qvel])\n', (2176, 2208), True, 'import numpy as np\n'), ((463, 504), 'numpy.split', 'np.split', (['action', '(sim.model.nmocap * 7,)'], {}), '(action, (sim.model.nmocap * 7,))\n', (471, 504), True, 'import numpy as np\n'), ((1140, 1162), 'numpy.square', 'np.square', (['(obj0 - obj1)'], {}), '(obj0 - obj1)\n', (1149, 1162), True, 'import numpy as np\n'), ((2139, 2150), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2147, 2150), True, 'import numpy as np\n'), ((3486, 3531), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['qpos_idxs_list'], {}), '(qpos_idxs_list)\n', (3515, 3531), False, 'import itertools\n'), ((4094, 4139), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['qvel_idxs_list'], {}), '(qvel_idxs_list)\n', (4123, 4139), False, 'import itertools\n'), ((1188, 1209), 'numpy.log', 'np.log', (['(diff + 0.0001)'], {}), '(diff + 0.0001)\n', (1194, 1209), True, 'import numpy as np\n'), ((1347, 1369), 'numpy.square', 'np.square', (['(obj0 - obj1)'], {}), '(obj0 - obj1)\n', (1356, 1369), True, 'import numpy as np\n'), ((1641, 1662), 'numpy.abs', 'np.abs', (['sim.data.qpos'], {}), '(sim.data.qpos)\n', (1647, 1662), True, 'import numpy as np\n'), ((1693, 1714), 'numpy.abs', 'np.abs', (['sim.data.qvel'], {}), '(sim.data.qvel)\n', (1699, 1714), True, 'import numpy as np\n')]
|
import data.shaders.shader_program as sp
import data.tools.maths as m
import numpy
class skybox_shader(sp.ShaderProgram):
VERTEX_FILE = "data\\shaders\\skybox_vertex_shader.txt"
FRAGMENT_FILE = "data\\shaders\\skybox_fragment_shader.txt"
ROTATION_SPEED = 1.0
current_rotation = 0.0
def __init__(self):
super(skybox_shader, self).__init__(self.VERTEX_FILE, self.FRAGMENT_FILE)
def get_all_uniform_locations(self):
self.location_projection_matrix = super(skybox_shader, self).get_uniform_location("projection_matrix")
self.location_view_matrix = super(skybox_shader, self).get_uniform_location("view_matrix")
self.location_fog_color = super(skybox_shader, self).get_uniform_location("fog_color")
self.location_cube_map = super(skybox_shader, self).get_uniform_location("cube_map")
self.location_cube_map_2 = super(skybox_shader, self).get_uniform_location("cube_map_2")
self.location_blend_factor = super(skybox_shader, self).get_uniform_location("blend_factor")
def bind_all_attributes(self):
super(skybox_shader, self).bind_attribute(0, "position")
def load_fog_color(self, r, g, b):
super(skybox_shader, self).load_3d_vector(self.location_fog_color, (r, g, b))
def load_projection_matrix(self, matrix):
super(skybox_shader, self).load_matrix(self.location_projection_matrix, matrix)
def load_blend_factor(self, blend_factor):
super(skybox_shader, self).load_float(self.location_blend_factor, blend_factor)
def connect_texture_units(self):
super(skybox_shader, self).load_int(self.location_cube_map, 0)
super(skybox_shader, self).load_int(self.location_cube_map_2, 1)
def load_view_matrix(self, camera, clock):
matrix = m.Maths().create_view_matrix(camera)
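        # Zero the translation part of the view matrix so the skybox stays centred
        # on the camera (only the camera's rotation applies), then add a slow
        # continuous rotation about the vertical (y) axis.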
matrix[3][0] = 0.0
matrix[3][1] = 0.0
matrix[3][2] = 0.0
self.current_rotation += self.ROTATION_SPEED / clock.get_time()
matrix = m.Maths().rotate(numpy.radians(self.current_rotation), (0, 1, 0), matrix, matrix)
super(skybox_shader, self).load_matrix(self.location_view_matrix, matrix)
|
[
"numpy.radians",
"data.tools.maths.Maths"
] |
[((1890, 1926), 'numpy.radians', 'numpy.radians', (['self.current_rotation'], {}), '(self.current_rotation)\n', (1903, 1926), False, 'import numpy\n'), ((1691, 1700), 'data.tools.maths.Maths', 'm.Maths', ([], {}), '()\n', (1698, 1700), True, 'import data.tools.maths as m\n'), ((1873, 1882), 'data.tools.maths.Maths', 'm.Maths', ([], {}), '()\n', (1880, 1882), True, 'import data.tools.maths as m\n')]
|
import numpy as np
import matplotlib.pyplot as plt
plt.figure()
data = np.loadtxt("temperaturas.dat")
plt.show(data)
plt.savefig("calor.png")
|
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] |
[((57, 69), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (67, 69), True, 'import matplotlib.pyplot as plt\n'), ((77, 107), 'numpy.loadtxt', 'np.loadtxt', (['"""temperaturas.dat"""'], {}), "('temperaturas.dat')\n", (87, 107), True, 'import numpy as np\n'), ((108, 122), 'matplotlib.pyplot.show', 'plt.show', (['data'], {}), '(data)\n', (116, 122), True, 'import matplotlib.pyplot as plt\n'), ((123, 147), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""calor.png"""'], {}), "('calor.png')\n", (134, 147), True, 'import matplotlib.pyplot as plt\n')]
|
"""
primitive plotting for ex2
"""
import numpy as np
import matplotlib.pyplot as plt
xs = {}
xs["n,g"] = np.loadtxt("ex2/rp082209.tot")
xs["n,n"] = np.loadtxt("ex2/rp082208.tot")
xs["n,2n"] = np.loadtxt("ex2/rp082207.tot")
exp = {}
exp["n,g"] = np.loadtxt("ng.exp")
fig, ax = plt.subplots()
ax.plot(xs["n,g"][:, 0], xs["n,g"][:, 1], label="n,g")
ax.errorbar(exp["n,g"][:, 0], exp["n,g"][:, 1],
yerr=exp["n,g"][:, 2], fmt="o", label="exp")
ax.set_xlabel("Energy [MeV]")
ax.set_ylabel("Cross section [mb]")
ax.legend()
ax.set_yscale("log")
ax.set_xlim(0, 20)
plt.show()
|
[
"matplotlib.pyplot.subplots",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] |
[((108, 138), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2/rp082209.tot"""'], {}), "('ex2/rp082209.tot')\n", (118, 138), True, 'import numpy as np\n'), ((151, 181), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2/rp082208.tot"""'], {}), "('ex2/rp082208.tot')\n", (161, 181), True, 'import numpy as np\n'), ((195, 225), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2/rp082207.tot"""'], {}), "('ex2/rp082207.tot')\n", (205, 225), True, 'import numpy as np\n'), ((249, 269), 'numpy.loadtxt', 'np.loadtxt', (['"""ng.exp"""'], {}), "('ng.exp')\n", (259, 269), True, 'import numpy as np\n'), ((281, 295), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (293, 295), True, 'import matplotlib.pyplot as plt\n'), ((578, 588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (586, 588), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as patches
import pylab
import glob
from scipy import interpolate
import math
#Let us begin with the discrete values
epsilon = np.arange(12, dtype= 'f')
epsilon[0] = 3.0
epsilon[1] = 8.0
epsilon[2] = 15.0
epsilon[3] = 30.0
epsilon[4] = 38.0
epsilon[5] = 40.0
epsilon[6] = 45.0
epsilon[7] = 50.0
epsilon[8] = 55.0
epsilon[9] = 60.0
epsilon[10] = 100.0
epsilon[11] = 200.0
#mobilities
#PIC
mu_PIC = np.arange(12, dtype= 'f')
mu_PIC[0] = 6.1
mu_PIC[1] = 6.0
mu_PIC[2] = 5.9
mu_PIC[3] = 5.6
mu_PIC[4] = 5.6
mu_PIC[5] = 6.0
mu_PIC[6] = 5.7
mu_PIC[7] = 5.6
mu_PIC[8] = 5.6
mu_PIC[9] = 5.6
mu_PIC[10] = 5.8
mu_PIC[11] = 5.67
#Effective
mu_EFF = np.arange(12, dtype= 'f')
mu_EFF[0] = 5.2
mu_EFF[1] = 4.92
mu_EFF[2] = 4.9
mu_EFF[3] = 4.2
mu_EFF[4] = 4.1
mu_EFF[5] = 4.7
mu_EFF[6] = 4.9
mu_EFF[7] = 4.4
mu_EFF[8] = 4.4
mu_EFF[9] = 4.4
mu_EFF[10] = 5.1
mu_EFF[11] = 5.0
#classical
mu_CLA = np.arange(12, dtype= 'f')
mu_CLA[0] = 0.2
mu_CLA[1] = 0.2
mu_CLA[2] = 0.2
mu_CLA[3] = 0.2
mu_CLA[4] = 0.19
mu_CLA[5] = 0.21
mu_CLA[6] = 0.20
mu_CLA[7] = 0.22
mu_CLA[8] = 0.22
mu_CLA[9] = 0.22
mu_CLA[10] = 0.22
mu_CLA[11] = 0.22
#saturation
mu_SAT = np.arange(12, dtype= 'f')
mu_SAT[0] = 3.1
mu_SAT[1] = 3.1
mu_SAT[2] = 3.2
mu_SAT[3] = 3.3
mu_SAT[4] = 3.4
mu_SAT[5] = 3.5
mu_SAT[6] = 3.6
mu_SAT[7] = 3.75
mu_SAT[8] = 3.8
mu_SAT[9] = 3.75
mu_SAT[10] = 3.75
mu_SAT[11] = 3.75
#plot
plt.close()
fig= plt.figure()
ax = plt.subplot(111)
ax.plot(epsilon,mu_PIC,label='$\mu_{pic}$',marker='o',linestyle='--',color='b')
ax.plot(epsilon,mu_EFF,label='$\mu_{eff}$',marker='o',linestyle='--',color='r')
ax.plot(epsilon,mu_SAT,label='$\mu_{eff}^{sat}$',marker='o',linestyle='--',color='g')
ax.plot(epsilon,mu_CLA,label='$\mu_{cla}$',marker='o',linestyle='--',color='m')
#error bands around the value lines
plt.fill_between(epsilon,mu_PIC-0.5,mu_PIC+0.5,alpha=0.15,facecolor='b')
plt.fill_between(epsilon,mu_EFF-0.5,mu_EFF+0.5,alpha=0.1,facecolor='r')
plt.fill_between(epsilon,mu_SAT-0.1,mu_SAT+0.1,alpha=0.15,facecolor='g')
plt.fill_between(epsilon,mu_CLA-0.1,mu_CLA+0.1,alpha=0.1,facecolor='m')
plt.grid()
plt.xlabel('$\epsilon^*$ [eV]')
plt.ylabel("Mean electron mobility [$m^2 (Vs)^{-1}$]")
plt.xlim([0,203])
plt.ylim([0,7])
#regimes with the arrows
plt.annotate(s='', xy=(0.00,1.6), xytext=(38.0,1.6), arrowprops=dict(arrowstyle='<->'))
plt.annotate(s='', xy=(38.5,1.6), xytext=(48.5,1.6), arrowprops=dict(arrowstyle='<->'))
plt.annotate(s='', xy=(49.0,1.6), xytext=(200,1.6), arrowprops=dict(arrowstyle='<->'))
plt.text(19.0,1.7,r'I',fontsize=15)
plt.text(41.0,1.7,r'II',fontsize=15)
plt.text(100.0,1.7,r'III',fontsize=15)
#rectangle patch
#currentAxis = plt.gca()
#currentAxis.add_patch(patches.Rectangle( (38.5,0.0), 10.0, 280., facecolor='0.7', edgecolor="none", alpha=0.3))
#legend outside the box
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=2, fancybox=False, shadow=False)
#ax.legend(loc='lower center',ncol=2)
plt.savefig('parametric_mobs_eps_complete.png')
plt.show()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((232, 256), 'numpy.arange', 'np.arange', (['(12)'], {'dtype': '"""f"""'}), "(12, dtype='f')\n", (241, 256), True, 'import numpy as np\n'), ((503, 527), 'numpy.arange', 'np.arange', (['(12)'], {'dtype': '"""f"""'}), "(12, dtype='f')\n", (512, 527), True, 'import numpy as np\n'), ((745, 769), 'numpy.arange', 'np.arange', (['(12)'], {'dtype': '"""f"""'}), "(12, dtype='f')\n", (754, 769), True, 'import numpy as np\n'), ((987, 1011), 'numpy.arange', 'np.arange', (['(12)'], {'dtype': '"""f"""'}), "(12, dtype='f')\n", (996, 1011), True, 'import numpy as np\n'), ((1237, 1261), 'numpy.arange', 'np.arange', (['(12)'], {'dtype': '"""f"""'}), "(12, dtype='f')\n", (1246, 1261), True, 'import numpy as np\n'), ((1468, 1479), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1477, 1479), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1498), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1496, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1520), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1515, 1520), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1978), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['epsilon', '(mu_PIC - 0.5)', '(mu_PIC + 0.5)'], {'alpha': '(0.15)', 'facecolor': '"""b"""'}), "(epsilon, mu_PIC - 0.5, mu_PIC + 0.5, alpha=0.15, facecolor='b'\n )\n", (1909, 1978), True, 'import matplotlib.pyplot as plt\n'), ((1966, 2045), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['epsilon', '(mu_EFF - 0.5)', '(mu_EFF + 0.5)'], {'alpha': '(0.1)', 'facecolor': '"""r"""'}), "(epsilon, mu_EFF - 0.5, mu_EFF + 0.5, alpha=0.1, facecolor='r')\n", (1982, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2123), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['epsilon', '(mu_SAT - 0.1)', '(mu_SAT + 0.1)'], {'alpha': '(0.15)', 'facecolor': '"""g"""'}), "(epsilon, mu_SAT - 0.1, mu_SAT + 0.1, alpha=0.15, facecolor='g'\n )\n", (2054, 2123), True, 'import matplotlib.pyplot as plt\n'), ((2111, 2190), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['epsilon', '(mu_CLA - 0.1)', '(mu_CLA + 0.1)'], {'alpha': '(0.1)', 'facecolor': '"""m"""'}), "(epsilon, mu_CLA - 0.1, mu_CLA + 0.1, alpha=0.1, facecolor='m')\n", (2127, 2190), True, 'import matplotlib.pyplot as plt\n'), ((2184, 2194), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2192, 2194), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2227), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\epsilon^*$ [eV]"""'], {}), "('$\\\\epsilon^*$ [eV]')\n", (2205, 2227), True, 'import matplotlib.pyplot as plt\n'), ((2227, 2281), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean electron mobility [$m^2 (Vs)^{-1}$]"""'], {}), "('Mean electron mobility [$m^2 (Vs)^{-1}$]')\n", (2237, 2281), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2300), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 203]'], {}), '([0, 203])\n', (2290, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2316), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 7]'], {}), '([0, 7])\n', (2308, 2316), True, 'import matplotlib.pyplot as plt\n'), ((2606, 2643), 'matplotlib.pyplot.text', 'plt.text', (['(19.0)', '(1.7)', '"""I"""'], {'fontsize': '(15)'}), "(19.0, 1.7, 'I', fontsize=15)\n", (2614, 2643), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2680), 'matplotlib.pyplot.text', 'plt.text', (['(41.0)', '(1.7)', '"""II"""'], {'fontsize': '(15)'}), "(41.0, 1.7, 'II', fontsize=15)\n", (2650, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2719), 
'matplotlib.pyplot.text', 'plt.text', (['(100.0)', '(1.7)', '"""III"""'], {'fontsize': '(15)'}), "(100.0, 1.7, 'III', fontsize=15)\n", (2687, 2719), True, 'import matplotlib.pyplot as plt\n'), ((3033, 3080), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""parametric_mobs_eps_complete.png"""'], {}), "('parametric_mobs_eps_complete.png')\n", (3044, 3080), True, 'import matplotlib.pyplot as plt\n'), ((3081, 3091), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3089, 3091), True, 'import matplotlib.pyplot as plt\n')]
|
"""Automatic-differentiation-based initialization routines."""
import itertools
import numpy as np
from probnum import problems, randprocs, randvars
from ._interface import InitializationRoutine
# pylint: disable="import-outside-toplevel"
try:
import jax
from jax.config import config
from jax.experimental.jet import jet
import jax.numpy as jnp
config.update("jax_enable_x64", True)
JAX_IS_AVAILABLE = True
except ImportError as JAX_IMPORT_ERROR:
JAX_IS_AVAILABLE = False
JAX_IMPORT_ERROR_MSG = (
"Cannot perform Jax-based initialization without the optional "
"dependencies jax and jaxlib. "
"Try installing them via `pip install jax jaxlib`."
)
class _AutoDiffBase(InitializationRoutine):
def __init__(self):
if not JAX_IS_AVAILABLE:
raise ImportError(JAX_IMPORT_ERROR_MSG) from JAX_IMPORT_ERROR
super().__init__(is_exact=True, requires_jax=True)
def __call__(
self,
*,
ivp: problems.InitialValueProblem,
prior_process: randprocs.markov.MarkovProcess,
) -> randvars.RandomVariable:
num_derivatives = prior_process.transition.num_derivatives
f, y0 = self._make_autonomous(ivp=ivp)
mean_matrix = self._compute_ode_derivatives(
f=f, y0=y0, num_derivatives=num_derivatives
)
mean = mean_matrix.reshape((-1,), order="F")
zeros = jnp.zeros((mean.shape[0], mean.shape[0]))
return randvars.Normal(
mean=np.asarray(mean),
cov=np.asarray(zeros),
cov_cholesky=np.asarray(zeros),
)
def _compute_ode_derivatives(self, *, f, y0, num_derivatives):
gen = self._initial_derivative_generator(f=f, y0=y0)
mean_matrix = jnp.stack(
[next(gen)(y0)[:-1] for _ in range(num_derivatives + 1)]
)
return mean_matrix
def _make_autonomous(self, *, ivp):
"""Preprocess the ODE.
Turn the ODE into a format that is more convenient to handle with automatic
differentiation. This has no effect on the ODE itself. It is purely internal.
"""
y0_autonomous = jnp.concatenate([ivp.y0, jnp.array([ivp.t0])])
def f_autonomous(y):
x, t = y[:-1], y[-1]
fx = ivp.f(t, x)
return jnp.concatenate([fx, jnp.array([1.0])])
return f_autonomous, y0_autonomous
def _initial_derivative_generator(self, *, f, y0):
"""Generate the inital derivatives recursively."""
def fwd_deriv(f, f0):
def df(x):
return self._jvp_or_vjp(fun=f, primals=x, tangents=f0(x))
return df
yield lambda x: y0
g = f
f0 = f
while True:
yield g
g = fwd_deriv(g, f0)
def _jvp_or_vjp(self, *, fun, primals, tangents):
raise NotImplementedError
class ForwardModeJVP(_AutoDiffBase):
"""Initialization via Jacobian-vector-product-based automatic differentiation."""
def _jvp_or_vjp(self, *, fun, primals, tangents):
_, y = jax.jvp(fun, (primals,), (tangents,))
return y
class ForwardMode(_AutoDiffBase):
"""Initialization via forward-mode automatic differentiation."""
def _jvp_or_vjp(self, *, fun, primals, tangents):
return jax.jacfwd(fun)(primals) @ tangents
class ReverseMode(_AutoDiffBase):
"""Initialization via reverse-mode automatic differentiation."""
def _jvp_or_vjp(self, *, fun, primals, tangents):
return jax.jacrev(fun)(primals) @ tangents
class TaylorMode(_AutoDiffBase):
"""Initialize a probabilistic ODE solver with Taylor-mode automatic differentiation.
This requires JAX. For an explanation of what happens ``under the hood``, see [1]_.
References
----------
.. [1] <NAME>. and <NAME>.,
Stable implementation of probabilistic ODE solvers,
*arXiv:2012.10106*, 2020.
Examples
--------
>>> import sys, pytest
>>> if not sys.platform.startswith('linux'):
... pytest.skip()
>>> import numpy as np
>>> from probnum.randvars import Normal
>>> from probnum.problems.zoo.diffeq import threebody_jax, vanderpol_jax
>>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess
Compute the initial values of the restricted three-body problem as follows
>>> ivp = threebody_jax()
>>> print(ivp.y0)
[ 0.994 0. 0. -2.00158511]
Construct the prior process.
>>> prior_process = IntegratedWienerProcess(
... initarg=ivp.t0, wiener_process_dimension=4, num_derivatives=3
... )
Initialize with Taylor-mode autodiff.
>>> taylor_init = TaylorMode()
>>> improved_initrv = taylor_init(ivp=ivp, prior_process=prior_process)
Print the results.
>>> print(prior_process.transition.proj2coord(0) @ improved_initrv.mean)
[ 0.994 0. 0. -2.00158511]
>>> print(improved_initrv.mean)
[ 9.94000000e-01 0.00000000e+00 -3.15543023e+02 0.00000000e+00
0.00000000e+00 -2.00158511e+00 0.00000000e+00 9.99720945e+04
0.00000000e+00 -3.15543023e+02 0.00000000e+00 6.39028111e+07
-2.00158511e+00 0.00000000e+00 9.99720945e+04 0.00000000e+00]
Compute the initial values of the van-der-Pol oscillator as follows.
First, set up the IVP and prior process.
>>> ivp = vanderpol_jax()
>>> print(ivp.y0)
[2. 0.]
>>> prior_process = IntegratedWienerProcess(
... initarg=ivp.t0, wiener_process_dimension=2, num_derivatives=3
... )
>>> taylor_init = TaylorMode()
>>> improved_initrv = taylor_init(ivp=ivp, prior_process=prior_process)
Print the results.
>>> print(prior_process.transition.proj2coord(0) @ improved_initrv.mean)
[2. 0.]
>>> print(improved_initrv.mean)
[ 2. 0. -2. 60. 0. -2. 60. -1798.]
>>> print(improved_initrv.std)
[0. 0. 0. 0. 0. 0. 0. 0.]
"""
def _compute_ode_derivatives(self, *, f, y0, num_derivatives):
# Compute the ODE derivatives by computing an nth-order Taylor
# approximation of the function g(t) = f(x(t))
taylor_coefficients = self._taylor_approximation(
f=f, y0=y0, order=num_derivatives
)
# The `f` parameter is an autonomous ODE vector field that
# used to be a non-autonomous ODE vector field.
# Therefore, we eliminate the final column of the result,
# which would correspond to the `t`-part in f(t, y(t)).
return taylor_coefficients[:, :-1]
def _taylor_approximation(self, *, f, y0, order):
"""Compute an `n`th order Taylor approximation of f at y0."""
taylor_coefficient_gen = self._taylor_coefficient_generator(f=f, y0=y0)
# Get the 'order'th entry of the coefficient-generator via itertools.islice
# The result is a tuple of length 'order+1', each entry of which
# corresponds to a derivative / Taylor coefficient.
derivatives = next(itertools.islice(taylor_coefficient_gen, order, None))
# The shape of this array is (order+1, ode_dim+1).
# 'order+1' since a 0th order approximation has 1 coefficient (f(x0)),
# a 1st order approximation has 2 coefficients (f(x0), df(x0)), etc.
        # 'ode_dim+1' since we transformed the ODE into an autonomous ODE.
derivatives_as_array = jnp.stack(derivatives, axis=0)
return derivatives_as_array
@staticmethod
def _taylor_coefficient_generator(*, f, y0):
"""Generate Taylor coefficients.
        Generate Taylor-series coefficients of the ODE solution `x(t)` by generating
        Taylor-series coefficients of `g(t)=f(x(t))` with ``jax.experimental.jet()``.
"""
# This is the 0th Taylor coefficient of x(t) at t=t0.
x_primals = y0
yield (x_primals,)
# This contains the higher-order, unnormalised
# Taylor coefficients of x(t) at t=t0.
# We know them because of the ODE.
x_series = (f(y0),)
while True:
yield (x_primals,) + x_series
# jet() computes a Taylor approximation of g(t) := f(x(t))
# The output is the zeroth Taylor approximation g(t_0) ('primals')
# as well its higher-order Taylor coefficients ('series')
g_primals, g_series = jet(fun=f, primals=(x_primals,), series=(x_series,))
# For ODEs \dot y(t) = f(y(t)),
# The nth Taylor coefficient of y is the
# (n-1)th Taylor coefficient of g(t) = f(y(t)).
# This way, by augmenting x0 with the Taylor series
# approximating g(t) = f(y(t)), we increase the order
# of the approximation by 1.
x_series = (g_primals, *g_series)
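# Illustrative sketch (an assumption of intended usage, not part of the public API;
# requires jax/jaxlib so that `jet` is importable): for an autonomous vector field
# `f` and initial value `y0`, the generator above yields progressively longer
# coefficient tuples,
#
#     gen = TaylorMode._taylor_coefficient_generator(f=f, y0=y0)
#     next(gen)   # (y0,)
#     next(gen)   # (y0, f(y0))
#     next(gen)   # (y0, f(y0), ...)  -- one more jet()-propagated coefficient per call
#
# which _taylor_approximation() then slices with itertools.islice() and stacks.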
|
[
"jax.config.config.update",
"jax.jvp",
"jax.numpy.array",
"jax.jacrev",
"numpy.asarray",
"itertools.islice",
"jax.numpy.zeros",
"jax.jacfwd",
"jax.numpy.stack",
"jax.experimental.jet.jet"
] |
[((372, 409), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (385, 409), False, 'from jax.config import config\n'), ((1434, 1475), 'jax.numpy.zeros', 'jnp.zeros', (['(mean.shape[0], mean.shape[0])'], {}), '((mean.shape[0], mean.shape[0]))\n', (1443, 1475), True, 'import jax.numpy as jnp\n'), ((3102, 3139), 'jax.jvp', 'jax.jvp', (['fun', '(primals,)', '(tangents,)'], {}), '(fun, (primals,), (tangents,))\n', (3109, 3139), False, 'import jax\n'), ((7432, 7462), 'jax.numpy.stack', 'jnp.stack', (['derivatives'], {'axis': '(0)'}), '(derivatives, axis=0)\n', (7441, 7462), True, 'import jax.numpy as jnp\n'), ((7056, 7109), 'itertools.islice', 'itertools.islice', (['taylor_coefficient_gen', 'order', 'None'], {}), '(taylor_coefficient_gen, order, None)\n', (7072, 7109), False, 'import itertools\n'), ((8397, 8449), 'jax.experimental.jet.jet', 'jet', ([], {'fun': 'f', 'primals': '(x_primals,)', 'series': '(x_series,)'}), '(fun=f, primals=(x_primals,), series=(x_series,))\n', (8400, 8449), False, 'from jax.experimental.jet import jet\n'), ((1525, 1541), 'numpy.asarray', 'np.asarray', (['mean'], {}), '(mean)\n', (1535, 1541), True, 'import numpy as np\n'), ((1559, 1576), 'numpy.asarray', 'np.asarray', (['zeros'], {}), '(zeros)\n', (1569, 1576), True, 'import numpy as np\n'), ((1603, 1620), 'numpy.asarray', 'np.asarray', (['zeros'], {}), '(zeros)\n', (1613, 1620), True, 'import numpy as np\n'), ((2204, 2223), 'jax.numpy.array', 'jnp.array', (['[ivp.t0]'], {}), '([ivp.t0])\n', (2213, 2223), True, 'import jax.numpy as jnp\n'), ((3332, 3347), 'jax.jacfwd', 'jax.jacfwd', (['fun'], {}), '(fun)\n', (3342, 3347), False, 'import jax\n'), ((3543, 3558), 'jax.jacrev', 'jax.jacrev', (['fun'], {}), '(fun)\n', (3553, 3558), False, 'import jax\n'), ((2358, 2374), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (2367, 2374), True, 'import jax.numpy as jnp\n')]
|
from .util import cos_sim, dot_score
from .faiss_index import FaissBinaryIndex
import logging
import sys
import torch
import faiss
import numpy as np
from typing import Dict, List
logger = logging.getLogger(__name__)
#Parent class for any dense model
class DenseRetrievalBinaryCodeSearch:
def __init__(self, model, batch_size: int = 128, corpus_chunk_size: int = 50000, **kwargs):
self.model = model
self.batch_size = batch_size
self.corpus_chunk_size = corpus_chunk_size
self.show_progress_bar = True
self.faiss_index = None
self.results = {}
self.mapping = {}
self.rev_mapping = {}
def _create_mapping_ids(self, corpus_ids):
if not all(isinstance(doc_id, int) for doc_id in corpus_ids):
for idx in range(len(corpus_ids)):
self.mapping[corpus_ids[idx]] = idx
self.rev_mapping[idx] = corpus_ids[idx]
def index(self, corpus: Dict[str, Dict[str, str]], hash_num_bits: int = 768, output_dir: str = None):
logger.info("Encoding Corpus in batches... Warning: This might take a while!")
corpus_ids = list(corpus.keys())
self._create_mapping_ids(corpus_ids)
corpus = [corpus[cid] for cid in corpus_ids]
itr = range(0, len(corpus), self.corpus_chunk_size)
for batch_num, corpus_start_idx in enumerate(itr):
logger.info("Encoding Batch {}/{}...".format(batch_num+1, len(itr)))
corpus_end_idx = min(corpus_start_idx + self.corpus_chunk_size, len(corpus))
#Encode chunk of corpus
sub_corpus_embeddings = self.model.encode_corpus(
corpus[corpus_start_idx:corpus_end_idx],
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size)
if not batch_num:
corpus_embeddings = sub_corpus_embeddings
else:
corpus_embeddings = np.vstack([corpus_embeddings, sub_corpus_embeddings])
#Index chunk of corpus into faiss
logger.info("Indexing Passages into Faiss...")
base_index = faiss.IndexBinaryHash(corpus_embeddings.shape[1] * 8, hash_num_bits)
faiss_ids = [self.mapping.get(corpus_id) for corpus_id in corpus_ids]
self.faiss_index = FaissBinaryIndex.build(faiss_ids, corpus_embeddings, base_index)
logger.info("Faiss indexing completed! {} Documents Indexed...".format(len(self.faiss_index._passage_ids)))
del sub_corpus_embeddings, corpus_embeddings
def search(self,
corpus: Dict[str, Dict[str, str]],
queries: Dict[str, str],
top_k: int,
score_function = None,
rerank: bool = True,
binary_k: int = 1000,
index: bool = True, **kwargs) -> Dict[str, Dict[str, float]]:
## Used for Indexing
if index: self.index(corpus, **kwargs)
logger.info("Encoding Queries...")
query_ids = list(queries.keys())
queries = [queries[qid] for qid in queries]
query_embeddings = self.model.encode_queries(
queries, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size)
faiss_scores, faiss_doc_ids = self.faiss_index.search(query_embeddings, top_k, binary_k=binary_k, rerank=rerank)
for idx in range(len(query_ids)):
scores = [float(score) for score in faiss_scores[idx]]
if len(self.rev_mapping) != 0:
doc_ids = [self.rev_mapping[doc_id] for doc_id in faiss_doc_ids[idx]]
else:
doc_ids = [str(doc_id) for doc_id in faiss_doc_ids[idx]]
self.results[query_ids[idx]] = dict(zip(doc_ids, scores))
return self.results
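# Minimal usage sketch (illustrative only): `model` is assumed to expose
# encode_queries()/encode_corpus() returning binary-packed uint8 embeddings that
# FaissBinaryIndex can consume.
#
#     retriever = DenseRetrievalBinaryCodeSearch(model, batch_size=64)
#     corpus = {"doc1": {"title": "...", "text": "..."}}
#     queries = {"q1": "example query"}
#     results = retriever.search(corpus, queries, top_k=10, binary_k=1000, rerank=True)
#     # results == {"q1": {"doc1": score, ...}}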
|
[
"numpy.vstack",
"faiss.IndexBinaryHash",
"logging.getLogger"
] |
[((190, 217), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (207, 217), False, 'import logging\n'), ((2196, 2264), 'faiss.IndexBinaryHash', 'faiss.IndexBinaryHash', (['(corpus_embeddings.shape[1] * 8)', 'hash_num_bits'], {}), '(corpus_embeddings.shape[1] * 8, hash_num_bits)\n', (2217, 2264), False, 'import faiss\n'), ((2005, 2058), 'numpy.vstack', 'np.vstack', (['[corpus_embeddings, sub_corpus_embeddings]'], {}), '([corpus_embeddings, sub_corpus_embeddings])\n', (2014, 2058), True, 'import numpy as np\n')]
|
#-*- coding: utf-8 -*-
# Implementation target
# https://github.com/jcjohnson/densecap/tree/master/eval
# Reference
# https://sites.google.com/site/hyunguk1986/personal-study/-ap-map-recall-precision
from metric.meteor import Meteor
import numpy as np
from ..utils.cython_bbox import bbox_overlaps
from ..datasets.visual_genome_loader import visual_genome
# Usage : call evaluate_caption(vg, gt_caption, gt_region, pred_caption, pred_region)
# requirement : pytorch, java 1.8.0(for Meteor), cython, numpy ...
# Dataset : Visual Genome dataset
class Caption_Evaluator():
def __init__(self, base_metric='meteor', final_metric='mAP', thr_ious=None, thr_scores=None):
self.base_metric = base_metric
self.final_metric = final_metric
        if base_metric == 'meteor':  # currently only Meteor is supported
self.Evaluator = Meteor()
        if thr_ious is None:
            self.thr_ious = [0.3, 0.4, 0.5, 0.6, 0.7]  # IoU thresholds considered when computing mAP
        else:
            self.thr_ious = thr_ious
        if thr_scores is None:
            self.thr_scores = [0, 0.05, 0.1, 0.15, 0.2, 0.25]  # score thresholds considered when computing mAP
        else:
            self.thr_scores = thr_scores
    # Compares the model predictions against the ground truth and evaluates a score (default: Meteor / mAP)
    # vg : dataset class, used for index-to-word conversion
    # gt_caption : list of ground-truth captions (composed of word indices) [gt#, max_len]
    # gt_region : list of ground-truth caption regions (bounding box per caption) [gt#, 4(x1, y1, x2, y2)]
    # pred_caption : list of predicted captions (composed of word indices) [pred#, max_len]
    # pred_region : list of predicted caption regions (bounding box per caption) [pred#, 4(x1, y1, x2, y2)]
def evaluate_caption(self, vg, gt_caption, gt_region, pred_caption, pred_region):
IoUs = self.__get_IoUs(gt_region, pred_region)
if self.final_metric == 'mAP':
score = self.__get_mAP(vg, gt_caption, pred_caption, IoUs)
else:
raise Exception('None Score :', self.final_metric)
return score
    # Compute the IoU between ground-truth and predicted regions
def __get_IoUs(self, gt_region, pred_region):
# overlaps (pred#, gt#)
overlaps = bbox_overlaps(
np.ascontiguousarray(pred_region, dtype=np.float),
np.ascontiguousarray(gt_region, dtype=np.float))
        #return IoUs  # ndarray (pred#, gt#)  # only the maximum values are real, the rest are 0
return overlaps
    # Compute mAP from the two caption lists
def __get_mAP(self, vg, gt_caption, pred_caption, IoUs):
# precision_list = {'0.3_0':0., '0.3_0.05':0., ... } # [mAP]
AP_dict = {str(score)+'_'+str(iou):0. for score in self.thr_scores for iou in self.thr_ious}
gt_caption = gt_caption.astype(int) # np.float32 -> int
pred_caption = pred_caption.numpy().astype(int)
gt_cnt = len(gt_caption)
gt_t_caps = [vg.untokenize_single_sentence(cap) for cap in gt_caption]
pred_t_caps = [vg.untokenize_single_sentence(cap) for cap in pred_caption]
pred_t_caps2 = []
for cap in pred_t_caps:
if not len(cap) == 0:
pred_t_caps2.append(cap)
gt_num = len(gt_t_caps)
pred_num = len(pred_t_caps2)
        scores = np.zeros((pred_num, gt_num))  # cache the computed scores
        for idx in range(pred_num):  # iterate over predictions; IoUs holds the IoU against each gt
for t_idx in range(gt_num):
result, _ = self.Evaluator.compute_score({0:[gt_t_caps[t_idx]]}, {0:[pred_t_caps2[idx]]})
scores[idx, t_idx] = result
        # Compute AP for each threshold combination
for scr_thr in self.thr_scores:
for iou_thr in self.thr_ious:
correct_cnt = 0
precision_list = []
                for idx in range(pred_num):  # iterate over predictions
for t_idx in range(gt_num):
if IoUs[idx][t_idx] < iou_thr:
continue
if scores[idx, t_idx] >= scr_thr:
correct_cnt += 1
precision_list.append(correct_cnt / (idx+1))
break
AP_dict[str(scr_thr)+'_'+str(iou_thr)] = np.average(precision_list) if len(precision_list) > 0 else 0
sum_AP = 0.
        for key in AP_dict.keys():  # sum the APs over all threshold combinations
sum_AP += AP_dict[key]
        mAP = sum_AP / len(AP_dict.keys())  # average over all combinations to obtain the final mAP
#print '******', mAP, '******' ## test
return mAP # double
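# Usage sketch (illustrative; shapes follow the comments above): `vg` must provide
# untokenize_single_sentence(), gt_caption/gt_region are numpy arrays and
# pred_caption is a torch tensor (it is converted with .numpy() internally).
#
#     evaluator = Caption_Evaluator(base_metric='meteor', final_metric='mAP')
#     mAP = evaluator.evaluate_caption(vg, gt_caption, gt_region, pred_caption, pred_region)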
|
[
"numpy.average",
"numpy.ascontiguousarray",
"metric.meteor.Meteor",
"numpy.zeros"
] |
[((3240, 3268), 'numpy.zeros', 'np.zeros', (['(pred_num, gt_num)'], {}), '((pred_num, gt_num))\n', (3248, 3268), True, 'import numpy as np\n'), ((846, 854), 'metric.meteor.Meteor', 'Meteor', ([], {}), '()\n', (852, 854), False, 'from metric.meteor import Meteor\n'), ((2178, 2227), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pred_region'], {'dtype': 'np.float'}), '(pred_region, dtype=np.float)\n', (2198, 2227), True, 'import numpy as np\n'), ((2242, 2289), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['gt_region'], {'dtype': 'np.float'}), '(gt_region, dtype=np.float)\n', (2262, 2289), True, 'import numpy as np\n'), ((4250, 4276), 'numpy.average', 'np.average', (['precision_list'], {}), '(precision_list)\n', (4260, 4276), True, 'import numpy as np\n')]
|
# 2020.06.05
# active learning: query by committee
# modified from Xiou
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from scipy.stats import entropy
import time
class QBC():
def __init__(self, learners, init=0.01, n_increment=200, n_iter=40, percent=0.05):
self.init = init
self.n_increment = n_increment
self.n_learner = len(learners)
self.n_iter = n_iter
self.num_class = 3
self.learners = learners
self.percent = percent
self.trained = False
self.acc_t = []
self.acc_v = []
def metric(self, prob):
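        # Normalised Shannon entropy of the committee-averaged class probabilities
        # (base = number of classes, so values lie in [0, 1]); larger values mean
        # the committee is more uncertain about the sample.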
return entropy(prob, base=self.num_class, axis=1)
def fit(self, x, y, xv=None, yv=None):
self.trained = True
self.num_class = np.unique(y).shape[0]
#x, xt, y, yt = train_test_split(x, y, train_size=self.init, random_state=42, stratify=y)
idx = np.random.choice(x.shape[0], (int)(x.shape[0]*self.percent))
x_train, y_train = x[idx], y[idx]
x_pool = np.delete(x, idx, axis=0)
y_pool = np.delete(y, idx, axis=0)
acc_t, acc_v, s = [], [], []
for k in range(self.n_iter):
print(' start iter -> %3s'%str(k))
t0 = time.time()
for i in range(self.n_learner):
self.learners[i].fit(x_train, y_train)
pt = self.predict_proba(x_pool)
at = accuracy_score(y_pool, np.argmax(pt, axis=1))
acc_t.append(at)
s.append(y_pool.shape[0])
ht = self.metric(pt)
try:
xv.shape
print(' test shape: %s, val shape: %s'%(str(x_pool.shape), str(xv.shape)))
pv = self.predict_proba(xv)
av = accuracy_score(yv, np.argmax(pv, axis=1))
print(' <Acc> test: %s, val: %s'%(at, av))
acc_v.append(av)
hv = self.metric(pv)
print(' <Entropy> test: %s, val: %s'%(np.mean(ht), np.mean(hv)))
except:
pass
idx = np.argsort(ht)[-self.n_increment:]
x_train = np.concatenate((x_train, x_pool[idx]), axis=0)
y_train = np.concatenate((y_train, y_pool[idx]), axis=0)
x_pool = np.delete(x_pool, idx, axis=0)
y_pool = np.delete(y_pool, idx, axis=0)
print(' end iter -> %3s using %10s seconds\n'%(str(k),str(time.time()-t0)))
self.acc_t = acc_t
self.acc_v = acc_v
return s, acc_t, acc_v
def predict_proba(self, x):
assert (self.trained == True), "Must call fit first!"
pred = np.zeros((x.shape[0], self.num_class))
for i in range(self.n_learner):
pred += self.learners[i].predict_proba(x)
return pred / np.sum(pred, axis=1, keepdims=True)
def predict(self, x):
assert (self.trained == True), "Must call fit first!"
pred = self.predict_proba(x)
return np.argmax(pred, axis=1)
def score(self, x, y):
assert (self.trained == True), "Must call fit first!"
pred = self.predict(x)
return accuracy_score(y, pred)
if __name__ == "__main__":
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.model_selection import train_test_split
from mylearner import myLearner
import pickle
import sys
#path = '/mnt/yifan/face/'
path = '../../fea/'
i = (int)(sys.argv[1])
with open(path+str(i)+'_discard.pkl', 'rb') as f:
d = pickle.load(f)
X_train, y_train, X_test, y_test = d['x'],d['y'],d['xt'],d['yt']
clf = QBC(init=0.05, n_increment=200, n_iter=14,
learners=[SVC(gamma='auto', probability=True)])
s, a, b = clf.fit(X_train, y_train, X_test, y_test)
clf = SVC(gamma='auto', probability=True)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
save = {'shape':s, 'train':a, 'test':b}
with open('qbc_0327'+str(i)+'.pkl', 'wb') as f:
pickle.dump(save, f)
|
[
"pickle.dump",
"numpy.sum",
"numpy.argmax",
"scipy.stats.entropy",
"sklearn.metrics.accuracy_score",
"numpy.unique",
"numpy.zeros",
"time.time",
"numpy.argsort",
"pickle.load",
"numpy.mean",
"sklearn.svm.SVC",
"numpy.delete",
"numpy.concatenate"
] |
[((3867, 3902), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""', 'probability': '(True)'}), "(gamma='auto', probability=True)\n", (3870, 3902), False, 'from sklearn.svm import SVC\n'), ((680, 722), 'scipy.stats.entropy', 'entropy', (['prob'], {'base': 'self.num_class', 'axis': '(1)'}), '(prob, base=self.num_class, axis=1)\n', (687, 722), False, 'from scipy.stats import entropy\n'), ((1074, 1099), 'numpy.delete', 'np.delete', (['x', 'idx'], {'axis': '(0)'}), '(x, idx, axis=0)\n', (1083, 1099), True, 'import numpy as np\n'), ((1117, 1142), 'numpy.delete', 'np.delete', (['y', 'idx'], {'axis': '(0)'}), '(y, idx, axis=0)\n', (1126, 1142), True, 'import numpy as np\n'), ((2710, 2748), 'numpy.zeros', 'np.zeros', (['(x.shape[0], self.num_class)'], {}), '((x.shape[0], self.num_class))\n', (2718, 2748), True, 'import numpy as np\n'), ((3046, 3069), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (3055, 3069), True, 'import numpy as np\n'), ((3210, 3233), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'pred'], {}), '(y, pred)\n', (3224, 3233), False, 'from sklearn.metrics import accuracy_score\n'), ((3601, 3615), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3612, 3615), False, 'import pickle\n'), ((4074, 4094), 'pickle.dump', 'pickle.dump', (['save', 'f'], {}), '(save, f)\n', (4085, 4094), False, 'import pickle\n'), ((1287, 1298), 'time.time', 'time.time', ([], {}), '()\n', (1296, 1298), False, 'import time\n'), ((2201, 2247), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_pool[idx])'], {'axis': '(0)'}), '((x_train, x_pool[idx]), axis=0)\n', (2215, 2247), True, 'import numpy as np\n'), ((2270, 2316), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_pool[idx])'], {'axis': '(0)'}), '((y_train, y_pool[idx]), axis=0)\n', (2284, 2316), True, 'import numpy as np\n'), ((2338, 2368), 'numpy.delete', 'np.delete', (['x_pool', 'idx'], {'axis': '(0)'}), '(x_pool, idx, axis=0)\n', (2347, 2368), True, 'import numpy as np\n'), ((2390, 2420), 'numpy.delete', 'np.delete', (['y_pool', 'idx'], {'axis': '(0)'}), '(y_pool, idx, axis=0)\n', (2399, 2420), True, 'import numpy as np\n'), ((2865, 2900), 'numpy.sum', 'np.sum', (['pred'], {'axis': '(1)', 'keepdims': '(True)'}), '(pred, axis=1, keepdims=True)\n', (2871, 2900), True, 'import numpy as np\n'), ((820, 832), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (829, 832), True, 'import numpy as np\n'), ((1482, 1503), 'numpy.argmax', 'np.argmax', (['pt'], {'axis': '(1)'}), '(pt, axis=1)\n', (1491, 1503), True, 'import numpy as np\n'), ((2144, 2158), 'numpy.argsort', 'np.argsort', (['ht'], {}), '(ht)\n', (2154, 2158), True, 'import numpy as np\n'), ((3763, 3798), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""', 'probability': '(True)'}), "(gamma='auto', probability=True)\n", (3766, 3798), False, 'from sklearn.svm import SVC\n'), ((1832, 1853), 'numpy.argmax', 'np.argmax', (['pv'], {'axis': '(1)'}), '(pv, axis=1)\n', (1841, 1853), True, 'import numpy as np\n'), ((2058, 2069), 'numpy.mean', 'np.mean', (['ht'], {}), '(ht)\n', (2065, 2069), True, 'import numpy as np\n'), ((2071, 2082), 'numpy.mean', 'np.mean', (['hv'], {}), '(hv)\n', (2078, 2082), True, 'import numpy as np\n'), ((2497, 2508), 'time.time', 'time.time', ([], {}), '()\n', (2506, 2508), False, 'import time\n')]
|
"""
This module contains relatively simple functions for calculating the mean-squared displacements (MSD) of atoms from a series of time snapshots.
"Simple" means that the functions do not use sophisticated algorithms for recognition of different diffusion modes,
and they can be applied correctly only if the dependence of MSD on modeling time is linear.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import copy
from md_format_converter_mi import structure
# atom data from phonopy (https://github.com/phonopy/phonopy/blob/develop/phonopy/structure/atoms.py)
atom_data = [
[0, "X", "X", None], # 0
[1, "H", "Hydrogen", 1.00794], # 1
[2, "He", "Helium", 4.002602], # 2
[3, "Li", "Lithium", 6.941], # 3
[4, "Be", "Beryllium", 9.012182], # 4
[5, "B", "Boron", 10.811], # 5
[6, "C", "Carbon", 12.0107], # 6
[7, "N", "Nitrogen", 14.0067], # 7
[8, "O", "Oxygen", 15.9994], # 8
[9, "F", "Fluorine", 18.9984032], # 9
[10, "Ne", "Neon", 20.1797], # 10
[11, "Na", "Sodium", 22.98976928], # 11
[12, "Mg", "Magnesium", 24.3050], # 12
[13, "Al", "Aluminium", 26.9815386], # 13
[14, "Si", "Silicon", 28.0855], # 14
[15, "P", "Phosphorus", 30.973762], # 15
[16, "S", "Sulfur", 32.065], # 16
[17, "Cl", "Chlorine", 35.453], # 17
[18, "Ar", "Argon", 39.948], # 18
[19, "K", "Potassium", 39.0983], # 19
[20, "Ca", "Calcium", 40.078], # 20
[21, "Sc", "Scandium", 44.955912], # 21
[22, "Ti", "Titanium", 47.867], # 22
[23, "V", "Vanadium", 50.9415], # 23
[24, "Cr", "Chromium", 51.9961], # 24
[25, "Mn", "Manganese", 54.938045], # 25
[26, "Fe", "Iron", 55.845], # 26
[27, "Co", "Cobalt", 58.933195], # 27
[28, "Ni", "Nickel", 58.6934], # 28
[29, "Cu", "Copper", 63.546], # 29
[30, "Zn", "Zinc", 65.38], # 30
[31, "Ga", "Gallium", 69.723], # 31
[32, "Ge", "Germanium", 72.64], # 32
[33, "As", "Arsenic", 74.92160], # 33
[34, "Se", "Selenium", 78.96], # 34
[35, "Br", "Bromine", 79.904], # 35
[36, "Kr", "Krypton", 83.798], # 36
[37, "Rb", "Rubidium", 85.4678], # 37
[38, "Sr", "Strontium", 87.62], # 38
[39, "Y", "Yttrium", 88.90585], # 39
[40, "Zr", "Zirconium", 91.224], # 40
[41, "Nb", "Niobium", 92.90638], # 41
[42, "Mo", "Molybdenum", 95.96], # 42
[43, "Tc", "Technetium", 98], # 43 (mass is from wikipedia)
[44, "Ru", "Ruthenium", 101.07], # 44
[45, "Rh", "Rhodium", 102.90550], # 45
[46, "Pd", "Palladium", 106.42], # 46
[47, "Ag", "Silver", 107.8682], # 47
[48, "Cd", "Cadmium", 112.411], # 48
[49, "In", "Indium", 114.818], # 49
[50, "Sn", "Tin", 118.710], # 50
[51, "Sb", "Antimony", 121.760], # 51
[52, "Te", "Tellurium", 127.60], # 52
[53, "I", "Iodine", 126.90447], # 53
[54, "Xe", "Xenon", 131.293], # 54
[55, "Cs", "Caesium", 132.9054519], # 55
[56, "Ba", "Barium", 137.327], # 56
[57, "La", "Lanthanum", 138.90547], # 57
[58, "Ce", "Cerium", 140.116], # 58
[59, "Pr", "Praseodymium", 140.90765], # 59
[60, "Nd", "Neodymium", 144.242], # 60
[61, "Pm", "Promethium", 145], # 61 (mass is from wikipedia)
[62, "Sm", "Samarium", 150.36], # 62
[63, "Eu", "Europium", 151.964], # 63
[64, "Gd", "Gadolinium", 157.25], # 64
[65, "Tb", "Terbium", 158.92535], # 65
[66, "Dy", "Dysprosium", 162.500], # 66
[67, "Ho", "Holmium", 164.93032], # 67
[68, "Er", "Erbium", 167.259], # 68
[69, "Tm", "Thulium", 168.93421], # 69
[70, "Yb", "Ytterbium", 173.054], # 70
[71, "Lu", "Lutetium", 174.9668], # 71
[72, "Hf", "Hafnium", 178.49], # 72
[73, "Ta", "Tantalum", 180.94788], # 73
[74, "W", "Tungsten", 183.84], # 74
[75, "Re", "Rhenium", 186.207], # 75
[76, "Os", "Osmium", 190.23], # 76
[77, "Ir", "Iridium", 192.217], # 77
[78, "Pt", "Platinum", 195.084], # 78
[79, "Au", "Gold", 196.966569], # 79
[80, "Hg", "Mercury", 200.59], # 80
[81, "Tl", "Thallium", 204.3833], # 81
[82, "Pb", "Lead", 207.2], # 82
[83, "Bi", "Bismuth", 208.98040], # 83
[84, "Po", "Polonium", None], # 84
[85, "At", "Astatine", None], # 85
[86, "Rn", "Radon", None], # 86
[87, "Fr", "Francium", None], # 87
[88, "Ra", "Radium", None], # 88
[89, "Ac", "Actinium", 227], # 89 (mass is from wikipedia)
[90, "Th", "Thorium", 232.03806], # 90
[91, "Pa", "Protactinium", 231.03588], # 91
[92, "U", "Uranium", 238.02891], # 92
[93, "Np", "Neptunium", 237], # 93 (mass is from wikipedia)
[94, "Pu", "Plutonium", None], # 94
[95, "Am", "Americium", None], # 95
[96, "Cm", "Curium", None], # 96
[97, "Bk", "Berkelium", None], # 97
[98, "Cf", "Californium", None], # 98
[99, "Es", "Einsteinium", None], # 99
[100, "Fm", "Fermium", None], # 100
[101, "Md", "Mendelevium", None], # 101
[102, "No", "Nobelium", None], # 102
[103, "Lr", "Lawrencium", None], # 103
[104, "Rf", "Rutherfordium", None], # 104
[105, "Db", "Dubnium", None], # 105
[106, "Sg", "Seaborgium", None], # 106
[107, "Bh", "Bohrium", None], # 107
[108, "Hs", "Hassium", None], # 108
[109, "Mt", "Meitnerium", None], # 109
[110, "Ds", "Darmstadtium", None], # 110
[111, "Rg", "Roentgenium", None], # 111
[112, "Cn", "Copernicium", None], # 112
[113, "Uut", "Ununtrium", None], # 113
[114, "Uuq", "Ununquadium", None], # 114
[115, "Uup", "Ununpentium", None], # 115
[116, "Uuh", "Ununhexium", None], # 116
[117, "Uus", "Ununseptium", None], # 117
[118, "Uuo", "Ununoctium", None], # 118
]
def convert_structure_to_numpy(st):
"""
This funciton converts structure object with properties described
by standard python types to analogous structure object but with properties
described by numpy arrays.
"""
st_numpy = structure()
st_numpy.n_at = st.n_at
st_numpy.n_mark_at = st.n_mark_at
st_numpy.mark_at = np.array(st.mark_at)
st_numpy.i_at = np.array(st.i_at)
st_numpy.r_at = np.array(st.r_at)
st_numpy.f_at = np.array(st.f_at)
st_numpy.v_at = np.array(st.v_at)
st_numpy.sizex = st.sizex
st_numpy.sizey = st.sizey
st_numpy.sizez = st.sizez
st_numpy.a_lattice3 = np.array(st.a_lattice3)
st_numpy.n_type_at = st.n_type_at
st_numpy.i_type_at = np.array(st.i_type_at)
st_numpy.i_mass_at = np.array(st.i_mass_at)
st_numpy.type_at = np.array(st.type_at)
st_numpy.mass_at = np.array(st.mass_at)
return st_numpy
def get_cm_corrected_structure(st_numpy):
""" Function for correction of atoms coordinates to the center of mass"""
cm_x = (st_numpy.i_mass_at*st_numpy.r_at[:,0]).sum()/st_numpy.i_mass_at.sum()
cm_y = (st_numpy.i_mass_at*st_numpy.r_at[:,1]).sum()/st_numpy.i_mass_at.sum()
cm_z = (st_numpy.i_mass_at*st_numpy.r_at[:,2]).sum()/st_numpy.i_mass_at.sum()
# print(f"cm_x = {cm_x}")
# print(f"cm_y = {cm_y}")
# print(f"cm_z = {cm_z}")
st_numpy_cm = copy.deepcopy(st_numpy)
st_numpy_cm.r_at[:,0] = st_numpy.r_at[:,0] - cm_x
st_numpy_cm.r_at[:,1] = st_numpy.r_at[:,1] - cm_y
st_numpy_cm.r_at[:,2] = st_numpy.r_at[:,2] - cm_z
return st_numpy_cm
def get_unwrapped_structures(sts_numpy):
"""
    Function for unwrapping the atomic coordinates
    for correct calculation of atomic displacements
    get: sts_numpy - list of structure objects with attributes represented as numpy arrays
    return: sts_numpy_unwrapped - list of structure objects with unwrapped coordinates
"""
sts_numpy_unwrapped = copy.deepcopy(sts_numpy) # deep copy of list with structure objects
num_sts = len(sts_numpy) # number of structures
# sts_numpy_new[0] = copy.deepcopy(sts_numpy[0]) # copying the first structure in list
for i in range(1,num_sts):
# st_numpy_i = sts_numpy[i]
# st_numpy_i
shift = np.zeros((sts_numpy[i].n_at,3)) # initializing array of atom shift with zeros
sts_numpy_unwrapped[i] = copy.deepcopy(sts_numpy[i]) # copying the input structure to the output structure
r_at_wrapped = sts_numpy[i].r_at # initial (wrapped within the periodic boundary conditions) coordinates of atoms
r_at_unwrapped = copy.deepcopy(r_at_wrapped)
dx_arr = sts_numpy[i].r_at[:,0] - sts_numpy[i-1].r_at[:,0] # array of diferrences of coordinates in x direction
dy_arr = sts_numpy[i].r_at[:,1] - sts_numpy[i-1].r_at[:,1] # array of diferrences of coordinates in y direction
dz_arr = sts_numpy[i].r_at[:,2] - sts_numpy[i-1].r_at[:,2] # array of diferrences of coordinates in z direction
# for iat, dx, dy, dz in zip(range(sts_numpy[i].n_at), dx_arr, dy_arr, dz_arr):
for iat in range(sts_numpy[i].n_at):
if (dx_arr[iat] > sts_numpy[i-1].sizex/2): shift[iat,0] = shift[iat,0] - 0.5*(sts_numpy[i].sizex + sts_numpy[i-1].sizex)
if (dx_arr[iat] < -sts_numpy[i-1].sizex/2): shift[iat,0] = shift[iat,0] + 0.5*(sts_numpy[i].sizex + sts_numpy[i-1].sizex)
if (dy_arr[iat] > sts_numpy[i-1].sizey/2): shift[iat,1] = shift[iat,1] - 0.5*(sts_numpy[i].sizey + sts_numpy[i-1].sizey)
if (dy_arr[iat] < -sts_numpy[i-1].sizey/2): shift[iat,1] = shift[iat,1] + 0.5*(sts_numpy[i].sizey + sts_numpy[i-1].sizey)
if (dz_arr[iat] > sts_numpy[i-1].sizez/2): shift[iat,2] = shift[iat,2] - 0.5*(sts_numpy[i].sizez + sts_numpy[i-1].sizez)
if (dz_arr[iat] < -sts_numpy[i-1].sizez/2): shift[iat,2] = shift[iat,2] + 0.5*(sts_numpy[i].sizez + sts_numpy[i-1].sizez)
r_at_unwrapped[:,0] = sts_numpy_unwrapped[i-1].r_at[:,0] + (sts_numpy[i].r_at[:,0] + shift[:,0] - sts_numpy[i-1].r_at[:,0])
r_at_unwrapped[:,1] = sts_numpy_unwrapped[i-1].r_at[:,1] + (sts_numpy[i].r_at[:,1] + shift[:,1] - sts_numpy[i-1].r_at[:,1])
r_at_unwrapped[:,2] = sts_numpy_unwrapped[i-1].r_at[:,2] + (sts_numpy[i].r_at[:,2] + shift[:,2] - sts_numpy[i-1].r_at[:,2])
sts_numpy_unwrapped[i].r_at = r_at_unwrapped
return sts_numpy_unwrapped
def calc_non_averaged_msd(sts_numpy, dt=7.5E-11):
"""
This function calculates the mean-squared displacements (msd) of atoms in structures from sts_numpy list
with respect to the first structure in sts_numpy list.
get: sts_numpy, dt - list of structure objects with unwrapped coordinates and time difference in seconds between structures, respectively
    return: pandas dataframe, where the first column is time,
            the second, third and fourth columns are the non-averaged msds of all atoms in the x, y and z directions,
            the fifth column is the total non-averaged msd of all atoms,
            the sixth and further columns are the analogous non-averaged msds for atoms of each element (mass i_mass)
"""
msd = {}
msd['time, s'] = []
msd['msd_x_all, m^2'] = []
msd['msd_y_all, m^2'] = []
msd['msd_z_all, m^2'] = []
msd['msd_all, m^2'] = []
for i_mass in sts_numpy[0].mass_at:
element = mass2element(i_mass)
msd[f'msd_x_{element}, m^2'] = []
msd[f'msd_y_{element}, m^2'] = []
msd[f'msd_z_{element}, m^2'] = []
msd[f'msd_{element}, m^2'] = []
num_sts = len(sts_numpy)
for i in range(num_sts):
msd['time, s'].append(i*dt)
# calculating MSD for all atoms in structure
msd_x_i = ((sts_numpy[i].r_at[:,0] - sts_numpy[0].r_at[:,0])**2).sum()/sts_numpy[i].n_at
msd_y_i = ((sts_numpy[i].r_at[:,1] - sts_numpy[0].r_at[:,1])**2).sum()/sts_numpy[i].n_at
msd_z_i = ((sts_numpy[i].r_at[:,2] - sts_numpy[0].r_at[:,2])**2).sum()/sts_numpy[i].n_at
msd_r_i = msd_x_i + msd_y_i + msd_z_i
msd['msd_x_all, m^2'].append(msd_x_i/10**20)
msd['msd_y_all, m^2'].append(msd_y_i/10**20)
msd['msd_z_all, m^2'].append(msd_z_i/10**20)
msd['msd_all, m^2'].append(msd_r_i/10**20)
# calculating MSD for each type of atom in structure
for i_mass in sts_numpy[0].mass_at:
element = mass2element(i_mass)
mask = sts_numpy[i].i_mass_at == i_mass
msd_x_i = ((sts_numpy[i].r_at[:,0][mask] - sts_numpy[0].r_at[:,0][mask])**2).sum()/mask.sum()
msd_y_i = ((sts_numpy[i].r_at[:,1][mask] - sts_numpy[0].r_at[:,1][mask])**2).sum()/mask.sum()
msd_z_i = ((sts_numpy[i].r_at[:,2][mask] - sts_numpy[0].r_at[:,2][mask])**2).sum()/mask.sum()
msd_r_i = msd_x_i + msd_y_i + msd_z_i
msd[f'msd_x_{element}, m^2'].append(msd_x_i/10**20)
msd[f'msd_y_{element}, m^2'].append(msd_y_i/10**20)
msd[f'msd_z_{element}, m^2'].append(msd_z_i/10**20)
msd[f'msd_{element}, m^2'].append(msd_r_i/10**20)
msd_df = pd.DataFrame(msd)
# msd_df.set_index('time', inplace = True)
msd_df.set_index('time, s')
return msd_df
def mass2element(mass):
"""
This function maps the atomic mass to the element name
"""
for row in atom_data:
if row[3] != None:
if np.round(row[3],2) == np.round(mass,2):
element = row[1]
break
return element
def calc_averaged_msd(sts_numpy, dt=7.5E-11):
"""
    This function calculates the averaged mean-squared displacements (msd) of atoms in structures from the sts_numpy list,
    averaging over all pairs of snapshots separated by the same time difference.
get: sts_numpy, dt - list of structure objects with unwrapped coordinates and time difference in seconds between structure snapshots, respectively
return: pandas dataframe, where the first column is time,
            the second column is the averaged msd of all atoms,
            the third column is the averaged msd of all atoms in x direction,
            the fourth column is the averaged msd of all atoms in y direction,
            the fifth column is the averaged msd of all atoms in z direction,
            the sixth and further columns are the averaged msds for atoms of each element (mass i_mass)
            the MSDs are output in m^2 per atom
"""
msd_av = {}
msd_av['time, s'] = []
msd_av['msd_all, m^2'] = []
msd_av['msd_x_all, m^2'] = []
msd_av['msd_y_all, m^2'] = []
msd_av['msd_z_all, m^2'] = []
for i_mass in sts_numpy[0].mass_at:
element = mass2element(i_mass)
msd_av[f'msd_{element}, m^2'] = []
msd_av[f'msd_x_{element}, m^2'] = []
msd_av[f'msd_y_{element}, m^2'] = []
msd_av[f'msd_z_{element}, m^2'] = []
    n_write = len(sts_numpy)
    n_time_diff = n_write
for i_time_diff in range(n_time_diff):
msd_av['time, s'].append(i_time_diff*dt)
# temporary working variables
msd_x_i_w = 0.0
msd_y_i_w = 0.0
msd_z_i_w = 0.0
msd_r_i_w = 0.0
# dictionaries with temporary working variables
msd_x_i_mass_w = {}
msd_y_i_mass_w = {}
msd_z_i_mass_w = {}
msd_r_i_mass_w = {}
# assigning zero values to the dictionaries with temporary working variables
for i_mass in sts_numpy[0].mass_at:
element = mass2element(i_mass)
msd_x_i_mass_w[f'msd_x_{element}'] = 0.0
msd_y_i_mass_w[f'msd_y_{element}'] = 0.0
msd_z_i_mass_w[f'msd_z_{element}'] = 0.0
msd_r_i_mass_w[f'msd_{element}'] = 0.0
for i_start in range(n_write-i_time_diff):
i_end = i_start + i_time_diff
# gaining the sum of MSD between the same time differences for all atoms
msd_x_i_ww = ((sts_numpy[i_end].r_at[:,0] - sts_numpy[i_start].r_at[:,0])**2).sum()/sts_numpy[i_end].n_at
msd_y_i_ww = ((sts_numpy[i_end].r_at[:,1] - sts_numpy[i_start].r_at[:,1])**2).sum()/sts_numpy[i_end].n_at
msd_z_i_ww = ((sts_numpy[i_end].r_at[:,2] - sts_numpy[i_start].r_at[:,2])**2).sum()/sts_numpy[i_end].n_at
msd_x_i_w += msd_x_i_ww
msd_y_i_w += msd_y_i_ww
msd_z_i_w += msd_z_i_ww
msd_r_i_w += msd_x_i_ww + msd_y_i_ww + msd_z_i_ww
# gaining the sum of MSD between the same time differences for each type of atom
for i_mass in sts_numpy[0].mass_at:
element = mass2element(i_mass)
mask = sts_numpy[i_end].i_mass_at == i_mass
msd_x_i_mass_ww = ((sts_numpy[i_end].r_at[:,0][mask] - sts_numpy[i_start].r_at[:,0][mask])**2).sum()/mask.sum()
msd_y_i_mass_ww = ((sts_numpy[i_end].r_at[:,1][mask] - sts_numpy[i_start].r_at[:,1][mask])**2).sum()/mask.sum()
msd_z_i_mass_ww = ((sts_numpy[i_end].r_at[:,2][mask] - sts_numpy[i_start].r_at[:,2][mask])**2).sum()/mask.sum()
msd_x_i_mass_w[f'msd_x_{element}'] += msd_x_i_mass_ww
msd_y_i_mass_w[f'msd_y_{element}'] += msd_y_i_mass_ww
msd_z_i_mass_w[f'msd_z_{element}'] += msd_z_i_mass_ww
msd_r_i_mass_w[f'msd_{element}'] += msd_x_i_mass_ww + msd_y_i_mass_ww + msd_z_i_mass_ww
# calculating MSD for all atoms in structure averaged over the same time differences
msd_x_i_av = msd_x_i_w/float(n_write-i_time_diff)
msd_y_i_av = msd_y_i_w/float(n_write-i_time_diff)
msd_z_i_av = msd_z_i_w/float(n_write-i_time_diff)
msd_r_i_av = msd_r_i_w/float(n_write-i_time_diff)
msd_av['msd_all, m^2'].append(msd_r_i_av/10**20)
msd_av['msd_x_all, m^2'].append(msd_x_i_av/10**20)
msd_av['msd_y_all, m^2'].append(msd_y_i_av/10**20)
msd_av['msd_z_all, m^2'].append(msd_z_i_av/10**20)
# calculating MSD for each type of atom in structure averaged over the same time differences
for i_mass in sts_numpy[0].mass_at:
element = mass2element(i_mass)
msd_x_i_mass_av = msd_x_i_mass_w[f'msd_x_{element}']/float(n_write-i_time_diff)
msd_y_i_mass_av = msd_y_i_mass_w[f'msd_y_{element}']/float(n_write-i_time_diff)
msd_z_i_mass_av = msd_z_i_mass_w[f'msd_z_{element}']/float(n_write-i_time_diff)
msd_r_i_mass_av = msd_r_i_mass_w[f'msd_{element}']/float(n_write-i_time_diff)
msd_av[f'msd_{element}, m^2'].append(msd_r_i_mass_av/10**20)
msd_av[f'msd_x_{element}, m^2'].append(msd_x_i_mass_av/10**20)
msd_av[f'msd_y_{element}, m^2'].append(msd_y_i_mass_av/10**20)
msd_av[f'msd_z_{element}, m^2'].append(msd_z_i_mass_av/10**20)
msd_av_df = pd.DataFrame(msd_av)
# msd_av_df.set_index('time', inplace = True)
msd_av_df.set_index('time, s')
return msd_av_df
def fit_x_y_linear(x,y):
"""
This function linearly fits the dependence y on x.
get: x, y - numpy arrays with x and y values
return: a, b - coefficients of the expression y = a*x + b
"""
A = np.vstack([x, np.ones(len(x))]).T
a, b = np.linalg.lstsq(A, y, rcond=None)[0]
return a, b
def fit_msd_linear(msd_df):
"""
This function linearly fits the time dependence of MSD of atoms.
get: msd_df - pandas dataframe with MSD
return: coeffs - list of tuples with coeffs a and b of linear fit (msd = a*t + b),
each tuple contain coeffs from fit of msd in each column in msd_df
"""
coeffs = []
t = msd_df['time, s']
for col in msd_df:
if col == 'Unnamed: 0' or col == 'time, s':
pass
else:
msd = msd_df[col]
a, b = fit_x_y_linear(t, msd)
coeffs.append((a,b))
return coeffs
def fit_and_plot_msd_linear(msd_df,prefix=None):
"""
This function fits and plots linear dependence of msd from the modeling time.
get: msd_df - pandas dataframe with msd
"""
coeffs = fit_msd_linear(msd_df)
t = msd_df['time, s']
# print(coeffs)
for coef, col in zip(coeffs,msd_df.drop(columns=['Unnamed: 0','time, s'])):
a = coef[0]
b = coef[1]
figname = col.split(',')[0]+'.png'
plt.title(f'{col}, D = {a/6:.2}')
plt.plot(t,msd_df[col],linestyle='',marker='D',label='data')
plt.plot(t, a*t + b, label=f'fit: msd = {a:.2}*t + {b:.2}')
plt.xlabel('time, $s$')
plt.ylabel('MSD, ${m^2}$')
plt.legend()
plt.savefig(figname, format = 'png')
plt.clf()
plt.cla()
def print_diffusion_coefficients_3d(msd_df, filename='D.txt'):
coeffs = fit_msd_linear(msd_df)
f = open(filename,'w')
f.write('MSD: '+' '.join(msd_df.drop(columns=['Unnamed: 0','time, s'])))
f.write('\n')
f.write('D: '+' '.join([str(c[0]/6) for c in coeffs]))
f.close()
return
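# Typical usage sketch (illustrative only; `sts` is assumed to be a list of
# `structure` objects, one per snapshot, and `dt` the time step between snapshots
# in seconds):
#
#     sts_numpy = [convert_structure_to_numpy(st) for st in sts]
#     sts_numpy = [get_cm_corrected_structure(st) for st in sts_numpy]
#     sts_unwrapped = get_unwrapped_structures(sts_numpy)
#     calc_averaged_msd(sts_unwrapped, dt=dt).to_csv('msd.csv')
#     msd_df = pd.read_csv('msd.csv')   # round-trip adds the 'Unnamed: 0' index column
#     fit_and_plot_msd_linear(msd_df)   # plots msd(t) and the linear fits
#     print_diffusion_coefficients_3d(msd_df, filename='D.txt')   # D = slope/6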
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"copy.deepcopy",
"numpy.linalg.lstsq",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.array",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.round",
"matplotlib.pyplot.savefig",
"md_format_converter_mi.structure"
] |
[((5980, 5991), 'md_format_converter_mi.structure', 'structure', ([], {}), '()\n', (5989, 5991), False, 'from md_format_converter_mi import structure\n'), ((6081, 6101), 'numpy.array', 'np.array', (['st.mark_at'], {}), '(st.mark_at)\n', (6089, 6101), True, 'import numpy as np\n'), ((6122, 6139), 'numpy.array', 'np.array', (['st.i_at'], {}), '(st.i_at)\n', (6130, 6139), True, 'import numpy as np\n'), ((6160, 6177), 'numpy.array', 'np.array', (['st.r_at'], {}), '(st.r_at)\n', (6168, 6177), True, 'import numpy as np\n'), ((6198, 6215), 'numpy.array', 'np.array', (['st.f_at'], {}), '(st.f_at)\n', (6206, 6215), True, 'import numpy as np\n'), ((6236, 6253), 'numpy.array', 'np.array', (['st.v_at'], {}), '(st.v_at)\n', (6244, 6253), True, 'import numpy as np\n'), ((6370, 6393), 'numpy.array', 'np.array', (['st.a_lattice3'], {}), '(st.a_lattice3)\n', (6378, 6393), True, 'import numpy as np\n'), ((6457, 6479), 'numpy.array', 'np.array', (['st.i_type_at'], {}), '(st.i_type_at)\n', (6465, 6479), True, 'import numpy as np\n'), ((6505, 6527), 'numpy.array', 'np.array', (['st.i_mass_at'], {}), '(st.i_mass_at)\n', (6513, 6527), True, 'import numpy as np\n'), ((6551, 6571), 'numpy.array', 'np.array', (['st.type_at'], {}), '(st.type_at)\n', (6559, 6571), True, 'import numpy as np\n'), ((6595, 6615), 'numpy.array', 'np.array', (['st.mass_at'], {}), '(st.mass_at)\n', (6603, 6615), True, 'import numpy as np\n'), ((7114, 7137), 'copy.deepcopy', 'copy.deepcopy', (['st_numpy'], {}), '(st_numpy)\n', (7127, 7137), False, 'import copy\n'), ((7685, 7709), 'copy.deepcopy', 'copy.deepcopy', (['sts_numpy'], {}), '(sts_numpy)\n', (7698, 7709), False, 'import copy\n'), ((13011, 13028), 'pandas.DataFrame', 'pd.DataFrame', (['msd'], {}), '(msd)\n', (13023, 13028), True, 'import pandas as pd\n'), ((18670, 18690), 'pandas.DataFrame', 'pd.DataFrame', (['msd_av'], {}), '(msd_av)\n', (18682, 18690), True, 'import pandas as pd\n'), ((8000, 8032), 'numpy.zeros', 'np.zeros', (['(sts_numpy[i].n_at, 3)'], {}), '((sts_numpy[i].n_at, 3))\n', (8008, 8032), True, 'import numpy as np\n'), ((8112, 8139), 'copy.deepcopy', 'copy.deepcopy', (['sts_numpy[i]'], {}), '(sts_numpy[i])\n', (8125, 8139), False, 'import copy\n'), ((8344, 8371), 'copy.deepcopy', 'copy.deepcopy', (['r_at_wrapped'], {}), '(r_at_wrapped)\n', (8357, 8371), False, 'import copy\n'), ((19061, 19094), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': 'None'}), '(A, y, rcond=None)\n', (19076, 19094), True, 'import numpy as np\n'), ((20172, 20207), 'matplotlib.pyplot.title', 'plt.title', (['f"""{col}, D = {a / 6:.2}"""'], {}), "(f'{col}, D = {a / 6:.2}')\n", (20181, 20207), True, 'import matplotlib.pyplot as plt\n'), ((20214, 20278), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'msd_df[col]'], {'linestyle': '""""""', 'marker': '"""D"""', 'label': '"""data"""'}), "(t, msd_df[col], linestyle='', marker='D', label='data')\n", (20222, 20278), True, 'import matplotlib.pyplot as plt\n'), ((20283, 20344), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(a * t + b)'], {'label': 'f"""fit: msd = {a:.2}*t + {b:.2}"""'}), "(t, a * t + b, label=f'fit: msd = {a:.2}*t + {b:.2}')\n", (20291, 20344), True, 'import matplotlib.pyplot as plt\n'), ((20351, 20374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time, $s$"""'], {}), "('time, $s$')\n", (20361, 20374), True, 'import matplotlib.pyplot as plt\n'), ((20383, 20409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD, ${m^2}$"""'], {}), "('MSD, ${m^2}$')\n", (20393, 20409), True, 'import matplotlib.pyplot as plt\n'), ((20418, 
20430), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20428, 20430), True, 'import matplotlib.pyplot as plt\n'), ((20439, 20473), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'format': '"""png"""'}), "(figname, format='png')\n", (20450, 20473), True, 'import matplotlib.pyplot as plt\n'), ((20484, 20493), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20491, 20493), True, 'import matplotlib.pyplot as plt\n'), ((20502, 20511), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (20509, 20511), True, 'import matplotlib.pyplot as plt\n'), ((13297, 13316), 'numpy.round', 'np.round', (['row[3]', '(2)'], {}), '(row[3], 2)\n', (13305, 13316), True, 'import numpy as np\n'), ((13319, 13336), 'numpy.round', 'np.round', (['mass', '(2)'], {}), '(mass, 2)\n', (13327, 13336), True, 'import numpy as np\n')]
|
import keras
from keras.models import load_model
import sys
import cv2
import numpy as np
x_test = np.zeros((0,4608))
model = load_model(r"D:\Code\Hackathons\BookJudger\NeuralNet\hdmodel.h5")
im = cv2.imread(sys.argv[1])
im = cv2.resize(im, (32, 48))
im = np.divide(im, 255)
im = im.flatten()
x_test = np.concatenate([x_test, im[None,:]])
f = open(r"D:\Code\Hackathons\BookJudger\Cam_to_book\data\hdoutput.txt", "w")
print(float(model.predict(x_test)))
f.write(str(float(model.predict(x_test))))
f.close()
|
[
"keras.models.load_model",
"numpy.divide",
"numpy.concatenate",
"numpy.zeros",
"cv2.imread",
"cv2.resize"
] |
[((100, 119), 'numpy.zeros', 'np.zeros', (['(0, 4608)'], {}), '((0, 4608))\n', (108, 119), True, 'import numpy as np\n'), ((127, 196), 'keras.models.load_model', 'load_model', (['"""D:\\\\Code\\\\Hackathons\\\\BookJudger\\\\NeuralNet\\\\hdmodel.h5"""'], {}), "('D:\\\\Code\\\\Hackathons\\\\BookJudger\\\\NeuralNet\\\\hdmodel.h5')\n", (137, 196), False, 'from keras.models import load_model\n'), ((198, 221), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (208, 221), False, 'import cv2\n'), ((227, 251), 'cv2.resize', 'cv2.resize', (['im', '(32, 48)'], {}), '(im, (32, 48))\n', (237, 251), False, 'import cv2\n'), ((257, 275), 'numpy.divide', 'np.divide', (['im', '(255)'], {}), '(im, 255)\n', (266, 275), True, 'import numpy as np\n'), ((303, 340), 'numpy.concatenate', 'np.concatenate', (['[x_test, im[None, :]]'], {}), '([x_test, im[None, :]])\n', (317, 340), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os
from typing import Union, Iterable, List, Tuple
# from typing import Callable
import numpy as np
import networkx as nx
# from scipy.integrate import odeint
from scipy.integrate import solve_ivp
import dill
import re
import yaml
import itertools
from scipy.special import softmax
from multiprocessing import Process, Manager
from collections import defaultdict
import sys
from matplotlib import pyplot as plt
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
mpl.rcParams.update({'font.size': 14})
class _signal:
def __init__(self, mode: str = 'gaussian_derivative', **kwargs):
self.mode = mode
_c = np.power(10., -12)
self.s = 0.5 * _c
_T = 50 * _c
self.t0 = _T / 3.
self.H0 = 0.25
self.omega0 = 2 * np.pi * 150 * np.power(10, 9)
self.const = 1.
self.__dict__.update(kwargs)
def set(self, **kwargs) -> None:
self.__dict__.update(kwargs)
def __call__(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray, None]:
if self.mode == 'gaussian':
_p = (t - self.t0) / self.s
return self.H0 * np.exp(-_p * _p) * np.sin(self.omega0 * t)
if self.mode == 'gaussian_derivative':
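            # Analytic time derivative of the 'gaussian' pulse above,
            # i.e. d/dt [ H0 * exp(-((t - t0)/s)**2) * sin(omega0 * t) ].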
_p = (t - self.t0) / self.s
_e = np.exp(-_p * _p)
_s = self.s * self.s
return self.H0 * self.omega0 * _e * np.cos(self.omega0 * t) - \
(2 * self.H0 * (t - self.t0) * _e * np.sin(self.omega0 * t)) / _s
if self.mode == 'zero':
return 0 * t
if self.mode == 'const':
return self.const * t
return None
def __str__(self):
return str(self.__dict__)
class _Signal:
def __init__(self, n_nodes: int, *signals):
self.n_nodes = n_nodes
self.signals = []
if signals:
self.set(signals)
def set(self, signals) -> None:
assert self.n_nodes <= len(signals), 'wrong shape'
self.signals = [s for s in signals]
def append(self, signal: _signal) -> None:
# assert self.n_nodes > len(self.signals), 'overflow'
self.signals.append(signal)
def extend(self, signals: Union[List[_signal], Iterable[_signal]]) -> None:
for s in signals:
self.append(s)
def __call__(self, n: int, t: Union[float, np.ndarray]) -> Union[float, np.ndarray, None]:
if n >= self.n_nodes:
raise IndexError('n >= n_nodes')
return self.signals[n].__call__(t)
def __str__(self):
s = ''
for _i, signal in enumerate(self.signals):
s += '{}| {}\n'.format(_i, signal)
return s
@staticmethod
def generate(n_nodes: int,
_input: Union[List[float], Iterable[float]] = None,
_input_mode='gaussian_derivative') -> '_Signal':
if _input is None:
_input = []
assert n_nodes >= len(_input), 'wrong shape'
H = _Signal(n_nodes=n_nodes)
H.extend([_signal(mode=_input_mode, H0=H0, t0=0.) for H0 in _input])
H.extend([_signal(mode='zero') for _ in range(n_nodes - len(_input))])
return H
class AFM:
def __init__(self,
n_nodes: int = 10,
dt: float = 100 * np.power(10., -12) / 1000, T: float = 100 * np.power(10., -12),
adj_mat: Union[np.ndarray, Iterable] = None,
omega_e: float = 2. * np.pi * 1.75 * np.power(10., 9),
omega_ex: float = 2 * np.pi * 27.5 * np.power(10., 12),
alpha: float = 0.01,
sigma: float = 2. * np.pi * 4.32,
j_dc: float = 0.0,
gamma: float = 2. * np.pi * 28 * np.power(10., 9),
H: _Signal = _Signal(10, *[_signal(
'gaussian_derivative',
s=0.5 * np.power(10., -12),
t0=50 * np.power(10., -12) / 3.,
H0=0.25,
omega0=2. * np.pi * 150 * np.power(10., 9)
)] * 10),
phi_ini: Union[np.ndarray, Iterable] = None,
dot_phi_ini: Union[np.ndarray, Iterable] = None):
if n_nodes is None and adj_mat is None:
raise ValueError("n_nodes or adj_mat must be specified")
self.n_nodes = n_nodes
if n_nodes is None:
self.n_nodes = len(adj_mat)
self.dt, self.T = dt, T
self.adj_mat = None
self.set_adj_mat(adj_mat)
self.alpha = alpha
self.omega_e, self.omega_ex = omega_e, omega_ex
self.sigma, self.j_dc = sigma, j_dc
self.gamma = gamma
self.H = H
self.phi = None
self.set_phi(phi_ini)
self.dot_phi = None
self.set_dot_phi(dot_phi_ini)
self.time_elapsed = 0.
def save(self, name: str) -> None:
with open('{}.afm'.format(name), 'wb') as dump:
dill.dump(self, dump, recurse=True)
@staticmethod
def load(name: str) -> 'AFM':
with open('{}.afm'.format(name), 'rb') as dump:
return dill.load(dump)
def save_txt(self, fname: str) -> None:
with open(fname, 'w') as file:
file.write('\n\nn_nodes\n')
file.write('{}'.format(self.n_nodes))
file.write('\n\ndt\n')
file.write('{}'.format(self.dt))
file.write('\n\nT\n')
file.write('{}'.format(self.T))
file.write('\n\nadj_mat\n')
for line in self.adj_mat:
s = ''
for item in line:
s += '{}, '.format(item)
file.write('{}\n'.format(s))
file.write('\n\nomega_e\n')
file.write('{}'.format(self.omega_e))
file.write('\n\nomega_ex\n')
file.write('{}'.format(self.omega_ex))
file.write('\n\nalpha\n')
file.write('{}'.format(self.alpha))
file.write('\n\nsigma\n')
file.write('{}'.format(self.sigma))
file.write('\n\nj_dc\n')
file.write('{}'.format(self.j_dc))
file.write('\n\ngamma\n')
file.write('{}'.format(self.gamma))
file.write('\n\nphi\n')
s = ''
for phi in self.phi:
s += '{}, '.format(phi)
file.write('{}'.format(s))
file.write('\n\ndot_phi\n')
s = ''
for dot_phi in self.dot_phi:
s += '{}, '.format(dot_phi)
file.write('{}'.format(s))
file.write('\n\nSignals\n')
file.write('{}'.format(self.H))
@staticmethod
def load_txt(fname: str) -> 'AFM':
obj = AFM()
with open(fname, 'r') as file:
try:
for line in file:
name = re.sub(r'[^A-Za-z_0-9]', '', re.sub(r'[\r\n\t ]', '', line))
if name in ['n_nodes']:
val = int(re.sub(r'[^0-9.]', '', re.sub(r'[\r\n\t ]', '', file.readline())))
setattr(obj, name, val)
if name in ['omega_e', 'omega_ex', 'alpha', 'sigma', 'j_dc', 'gamma', 'dt', 'T']:
val = float(re.sub(r'[^0-9.e+-]', '', re.sub(r'[\r\n\t ]', '', file.readline())))
setattr(obj, name, val)
if name == 'adj_mat':
adj_mat = []
for _ in range(obj.n_nodes):
row = re.split(',', re.sub(r'[^0-9.,]', '', re.sub(r'[\r\n\t ]', '', file.readline())))
row = [float(item) for item in row if item]
adj_mat.append(row[:obj.n_nodes])
setattr(obj, name, np.array(adj_mat))
if name in ['phi', 'phi_ini', 'phi_init']:
row = re.split(',', re.sub(r'[^0-9.,]', '', re.sub(r'[\r\n\t ]', '', file.readline())))
row = [float(item) for item in row if item]
setattr(obj, 'phi', row[:obj.n_nodes])
if name in ['dot_phi', 'dot_phi_ini', 'dot_phi_init']:
row = re.split(',', re.sub(r'[^0-9.,]', '', re.sub(r'[\r\n\t ]', '', file.readline())))
row = [float(item) for item in row if item]
setattr(obj, 'dot_phi', row[:obj.n_nodes])
if name in ['Signals', 'signals']:
H = _Signal(obj.n_nodes)
for _ in range(obj.n_nodes):
row = re.split(r'\|',
re.sub(r'[^A-Za-z_0-9.,\'\-{}|:]', '',
re.sub(r'[\r\n\t ]', '', file.readline())))
# print(row)
d = yaml.safe_load(row[1])
signal = _signal()
for key in d.keys():
if key not in ['mode']:
d[key] = float(d[key])
setattr(signal, key, d[key])
H.append(signal)
setattr(obj, 'H', H)
except ValueError:
print('bad values')
exit(-1)
except IndexError:
print('bad file')
exit(-1)
return obj
def set_adj_mat(self, adj_mat: Union[np.ndarray, Iterable] = None) -> None:
if adj_mat is None:
self.adj_mat = nx.to_numpy_array(nx.erdos_renyi_graph(n=self.n_nodes, p=1))
else:
assert self.n_nodes == len(adj_mat), 'wrong shape'
# assert np.asarray(adj_mat == adj_mat.T).all(), 'adj_mat must be symmetric'
self.adj_mat = np.asarray(adj_mat)
def set_phi(self, phi_ini: Union[np.ndarray, Iterable] = None) -> None:
if phi_ini is None:
self.init_zero_phi()
else:
assert self.n_nodes == len(phi_ini), 'wrong shape'
self.phi = np.asarray(phi_ini)
def init_random_phi(self) -> None:
self.phi = 2 * np.pi * np.random.random(size=self.n_nodes)
def init_zero_phi(self) -> None:
self.phi = np.array([0 for _ in range(self.n_nodes)])
def set_dot_phi(self, dot_phi_ini: Union[np.ndarray, Iterable] = None) -> None:
if dot_phi_ini is None:
self.init_zero_dot_phi()
else:
assert self.n_nodes == len(dot_phi_ini), 'wrong shape'
self.dot_phi = np.asarray(dot_phi_ini)
def init_random_dot_phi(self) -> None:
self.dot_phi = 2 * np.pi * np.random.random(size=self.n_nodes)
def init_zero_dot_phi(self) -> None:
self.dot_phi = np.array([0 for _ in range(self.n_nodes)])
def init_random(self) -> None:
self.init_random_phi()
self.init_random_dot_phi()
def init_zero(self) -> None:
self.init_zero_phi()
self.init_zero_dot_phi()
@staticmethod
def __check_rearrange_mode(mode: str) -> None:
if mode not in ['pdpd', 'dpdp', 'ppdd', 'ddpp', 'pppp', 'dddd']:
raise ValueError("allowed modes: \'pdpd\', \'dpdp\', \'ppdd\', \'ddpp\', \'pppp\', \'dddd\'")
def get_state(self, mode: str = 'pdpd') -> np.ndarray:
self.__check_rearrange_mode(mode)
p, d = np.reshape(self.phi, (-1, 1)), \
np.reshape(self.dot_phi, (-1, 1))
out = np.concatenate((p, d), axis=-1) # 'pdpd'
if mode == 'pppp':
out = p
if mode == 'dddd':
out = d
if mode == 'ppdd':
out = np.concatenate((p, d), axis=0)
if mode == 'ddpp':
out = np.concatenate((d, p), axis=0)
if mode == 'dpdp':
out = np.concatenate((d, p), axis=-1)
return out.reshape(1, -1)[0]
# def __d(self, y, t) -> list: # for scipy.integrate.odeint
def __d(self, t, y) -> list: # for scipy.integrate.solve_ivp
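        # Right-hand side of the coupled oscillator equations integrated below.
        # With y = (phi_0, theta_0, phi_1, theta_1, ...) and theta_i = dphi_i/dt:
        #   dphi_i/dt   = theta_i
        #   dtheta_i/dt = -alpha*omega_ex*theta_i
        #                 + omega_ex * sum_j adj_mat[i, j] * theta_j
        #                 - 0.5*omega_e*omega_ex*sin(2*phi_i)
        #                 + omega_ex*sigma*j_dc + gamma*H_i(t)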
_phi, _theta = y[::2], y[1::2]
d = []
for _i, values in enumerate(zip(_phi, _theta)):
phi, theta = values
d.extend([
theta,
-self.alpha * self.omega_ex * theta +
self.omega_ex * np.sum(self.adj_mat[_i] * _theta) -
0.5 * self.omega_e * self.omega_ex * np.sin(2 * phi) +
self.omega_ex * self.sigma * self.j_dc +
self.gamma * self.H(_i, t)
])
return d
def integrate(self, _dt: float = None, _t_stop: float = None, return_mode: Union[str, None] = 'ppdd',
change_state: bool = False, method='RK45') -> Union[np.ndarray, None]:
dt, t_stop = self.dt, self.T
if _dt:
dt = _dt
if _t_stop:
t_stop = _t_stop
t = np.linspace(0, t_stop, int(t_stop / dt), endpoint=True) + self.time_elapsed
initial_state = self.get_state('pdpd')
# series = odeint(self.__d, initial_state, t)
sol = solve_ivp(self.__d, y0=initial_state,
t_span=(self.time_elapsed, t_stop + self.time_elapsed),
t_eval=t, method=method)
series = sol.y.T
p, d = series[:, ::2], series[:, 1::2]
if change_state:
self.phi, self.dot_phi = p[-1, :], d[-1, :]
if return_mode:
self.__check_rearrange_mode(return_mode)
if return_mode == 'pppp':
return p
if return_mode == 'dddd':
return d
out = np.zeros_like(series)
if return_mode == 'pdpd':
out[:, :] = series[:, :]
if return_mode == 'dpdp':
out[:, ::2], out[:, 1::2] = d, p
if return_mode == 'ppdd':
out[:, :self.n_nodes], out[:, self.n_nodes:] = p, d
if return_mode == 'ddpp':
out[:, :self.n_nodes], out[:, self.n_nodes:] = d, p
return out
return
def step(self, _dt: float = None, n: int = 2, method='RK45', _return_dot_phi=False) -> Union[None, np.ndarray]:
dt = self.dt
if _dt:
dt = _dt
self.integrate(_dt=dt / n, _t_stop=dt, change_state=True, return_mode=None, method=method)
self.time_elapsed += dt
if _return_dot_phi:
return self.dot_phi
return
def get_phi(self) -> np.ndarray:
return self.phi
def get_dot_phi(self) -> np.ndarray:
return self.dot_phi
def execute(self, v='0.1.1'):
if v == '0.1.1':
ts = self.integrate(return_mode='ppdd', method='RK45')
_, n = ts.shape
fig, ax = plt.subplots(nrows=2, figsize=(12, 10))
y_labels = [r'$\varphi$', r'$\dot{\varphi}$']
l_labels = [r'$\varphi_' + str(j) + '$' for j in range(n // 2)]
l_labels.extend([r'$\dot{\varphi_' + str(j) + '}$' for j in range(n // 2)])
k = 0
for i in range(2):
ax[i].set_ylabel(y_labels[i], rotation=0, fontsize=20, labelpad=20)
ax[i].set_xlabel('time, s')
for j in range(n // 2):
ax[i].plot(np.linspace(0, self.T, len(ts)), ts[:, k], label=l_labels[k])
k += 1
ax[i].legend(loc='best', frameon=False)
plt.savefig('figure.png', dpi=300)
plt.show()
class MLP(AFM):
def __init__(self,
input_layer_size: int, output_layer_size: int,
hidden_layer_sizes=(4,),
oriented: bool = False,
coupling: float = np.power(10., -3),
dt: float = 100 * np.power(10., -12) / 1000,
T: float = 100 * np.power(10., -12) / 1000 * 60.,
omega_e: float = 2. * np.pi * 1.75 * np.power(10., 9),
omega_ex: float = 2 * np.pi * 27.5 * np.power(10., 12),
alpha: float = 0.01,
sigma: float = 2. * np.pi * 4.32,
j_dc: float = 0.0,
gamma: float = 2. * np.pi * 28 * np.power(10., 9)):
if hidden_layer_sizes is None:
hidden_layer_sizes = []
n_nodes = input_layer_size + int(np.sum(hidden_layer_sizes)) + output_layer_size
sizes = [input_layer_size] + [ls for ls in hidden_layer_sizes if ls] + [output_layer_size]
G = MLP.multilayered_graph(oriented, *sizes)
adj_mat = nx.to_numpy_array(G) * coupling
adj_mat *= np.random.random(adj_mat.shape)
H = _Signal.generate(n_nodes) # zero
super(MLP, self).__init__(n_nodes=n_nodes, dt=dt, T=T, adj_mat=adj_mat,
omega_e=omega_e, omega_ex=omega_ex, alpha=alpha, sigma=sigma,
j_dc=j_dc, gamma=gamma, H=H)
self.init_zero()
self.input_layer_size, self.output_layer_size = input_layer_size, output_layer_size
self.hidden_layer_sizes = hidden_layer_sizes
self.genes = list(itertools.product(range(self.n_nodes), range(self.n_nodes)))
self.coupling = coupling
@staticmethod
def multilayered_graph(oriented=False, *subset_sizes) -> nx.Graph:
extents = nx.utils.pairwise(itertools.accumulate((0,) + subset_sizes))
layers = [range(start, end) for start, end in extents]
if oriented:
G = nx.DiGraph()
else:
G = nx.Graph()
for _i, layer in enumerate(layers):
G.add_nodes_from(layer, layer=_i)
for layer1, layer2 in nx.utils.pairwise(layers):
G.add_edges_from(itertools.product(layer1, layer2))
return G
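        # Example with hypothetical sizes: multilayered_graph(False, 4, 3) yields nodes 0..3 in
        # layer 0 and 4..6 in layer 1, with every node of one layer joined to every node of the next.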
def forward(self, x: np.ndarray, _t_stop: float = None,
_normalize=False, _softmax=False, _max_pooling=False) -> np.ndarray:
assert self.input_layer_size == len(x), 'wrong shape (x)'
t_stop = self.T
if _t_stop:
t_stop = _t_stop
self.H = _Signal.generate(self.n_nodes, _input=x, _input_mode='gaussian_derivative')
dot_phi = self.integrate(_t_stop=t_stop, return_mode='dddd', change_state=False)[:, -self.output_layer_size:]
# plt.plot(dot_phi)
peaks = np.max(np.abs(dot_phi), axis=0)
if _normalize or _softmax:
peaks /= np.max(peaks)
if _softmax:
peaks = softmax(peaks)
if _max_pooling:
index = np.argmax(peaks)
peaks = np.zeros_like(peaks)
peaks[index] = 1
return peaks
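        # Readout used above: drive the network with the input signal, integrate, and take each
        # output node's peak |dot_phi| over the window (optionally normalized, softmaxed, or
        # collapsed to one-hot via max pooling).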
def forward_multiple(self, X: np.ndarray, _t_stop: float = None,
_normalize=False, _softmax=False, _max_pooling=False,
verbose=False) -> np.ndarray:
out = []
for k, x in enumerate(X):
out.append(self.forward(x, _t_stop, _normalize, _softmax, _max_pooling))
if verbose:
print('\r{:.2f}% done'.format((k + 1) / len(X) * 100), end='', flush=True)
if verbose:
print('.\n')
return np.asarray(out)
def error_on_batch(self, X: np.ndarray, y: np.ndarray, _t_stop: float = None,
_normalize=False, _softmax=False,
verbose=False) -> float:
rows_x, _ = X.shape
rows_y, columns_y = y.shape
assert rows_x == rows_y, 'X and y must have the same number of samples'
assert self.output_layer_size == columns_y, 'wrong shape (y)'
out = self.forward_multiple(X, _t_stop, _normalize, _softmax, verbose)
return np.linalg.norm(y - out)
def crossover(self, obj: 'MLP', percentage: float = 0.5) -> None:
sh, sw = self.adj_mat.shape
oh, ow = obj.adj_mat.shape
assert sh == sw == oh == ow == self.n_nodes, 'wrong shape'
transfer = [obj.genes[i] for i in
np.random.choice(range(len(obj.genes)), int(len(obj.genes) * percentage))]
for i, j in transfer:
self.adj_mat[i][j] = obj.adj_mat[i][j]
def mutation(self, n: int):
mutable = [self.genes[i] for i in np.random.choice(range(len(self.genes)), n)]
r = np.random.random(n) * 2 - 1
for k, pos in enumerate(mutable):
i, j = pos
a = self.adj_mat[i][j]
val = a + r[k] * (a + self.coupling * 0.01)
if val <= 0.:
val = 0.
self.adj_mat[i][j] = val
if __name__ == '__main__':
# model = AFM(n_nodes=1)
# model.save('test1')
# model.save_txt('test1.txt')
# model = AFM.load('test1')
# model = AFM.load_txt('test1.txt')
# model.save_txt('test2.txt')
# model = AFM(n_nodes=10)
# model.save_txt('test3.txt')
# model = AFM.load_txt('test3.txt')
# Test 1
# model = AFM.load_txt('test1.txt')
# T = 100 * np.power(10., -12)
# ts = model.integrate(dt=T/1000, t_stop=T, return_mode='pdpd')
# _, n = ts.shape
#
# fig, ax = plt.subplots(nrows=n, figsize=(12, 10))
# y_labels = [r'$\varphi$', r'$\dot{\varphi}$']
# line_styles = ['-', '-']
# for i in range(n):
# ax[i].set_ylabel(y_labels[i], rotation=0, fontsize=20, labelpad=20)
# ax[i].set_xlabel('time, ps')
# ax[i].plot(np.linspace(0, 100, len(ts)), ts[:, i],
# label=y_labels[i], color='black', ls=line_styles[i])
# ax[i].legend(loc='best', frameon=False)
# plt.show()
# Test 2
# plt.ion()
# fig, ax = plt.subplots(nrows=2, figsize=(12, 10))
# y_labels = [r'$\varphi$', r'$\dot{\varphi}$']
# for i in range(2):
# ax[i].set_xlim((0, 100))
# ax[i].set_ylabel(y_labels[i], rotation=0, fontsize=20, labelpad=20)
# ax[i].set_xlabel('time, ps')
# ax[0].set_ylim((-0.003, 0.003))
# ax[1].set_ylim((-1 * np.power(10., 10), 0.5 * np.power(10., 10)))
#
# T = 100 * np.power(10., -12)
# N = 1000
# dt = T / N
# tx, phi, dot_phi = [], [], []
# for i in range(N):
# model.step(dt=dt)
#
# tx.append(model.time_elapsed * np.power(10., 12))
#
# phi.append(model.phi[0])
# ax[0].plot(tx, phi, color='black', ls='-')
#
# dot_phi.append(model.dot_phi[0])
# ax[1].plot(tx, dot_phi, color='black', ls='-')
#
# plt.show(block=False)
# # plt.savefig('pic/{}.png'.format(i), dpi=300)
# fig.canvas.flush_events()
# plt.ioff()
# plt.show()
# Test 3
# signal = _Signal(n_nodes=2)
# signal.extend([_signal(H0=0.25), _signal(H0=0.5)])
#
# model = AFM(n_nodes=2,
# adj_mat=np.array([[0., 7. * np.power(10., -4)],
# [2. * np.power(10., -4), 0.]]),
# H=signal)
#
# model.save_txt('model.txt')
# # EXE v0.0.1
# model = AFM.load_txt('model.txt')
#
# # T = 100 * np.power(10., -12)
# # ts = model.integrate(_dt=T / 1000, _t_stop=T, return_mode='ppdd', method='RK45')
#
# ts = model.integrate(return_mode='ppdd', method='RK45')
#
# # N = 1000
# # ts = np.zeros((N, 4))
# # tx = []
# # for i in range(N):
# # model.step(_dt=T / N)
# # tx.append(model.time_elapsed * np.power(10., 12))
# # phi = model.get_phi()
# # dot_phi = model.get_dot_phi()
# # ts[i, 0] = phi[0]
# # ts[i, 1] = phi[1]
# # ts[i, 2] = dot_phi[0]
# # ts[i, 3] = dot_phi[1]
#
# _, n = ts.shape
# # print(n)
#
# fig, ax = plt.subplots(nrows=2, figsize=(12, 10))
# y_labels = [r'$\varphi$', r'$\dot{\varphi}$']
# l_labels = [r'$\varphi_' + str(j) + '$' for j in range(n // 2)]
# l_labels.extend([r'$\dot{\varphi_' + str(j) + '}$' for j in range(n // 2)])
# k = 0
# for i in range(2):
# ax[i].set_ylabel(y_labels[i], rotation=0, fontsize=20, labelpad=20)
# ax[i].set_xlabel('time, ps')
# for j in range(n // 2):
# ax[i].plot(np.linspace(0, 100, len(ts)), ts[:, k], label=l_labels[k])
# k += 1
# ax[i].legend(loc='best', frameon=False)
#
# plt.savefig('figure.png', dpi=300)
# plt.show()
from sklearn.datasets import load_iris
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
X, y = load_iris(return_X_y=True)
le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
y_categorical = np.zeros((len(y), len(le.classes_)))
for i, _y in enumerate(y):
y_categorical[i][_y] = 1
y = y_categorical
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
n_workers = 16
n_individuals = 500
# n_lonely = 5 # TBD
n_best = 5
n_new = 50
n_genes_mutable = 10
n_epochs = sys.maxsize
initial_population = []
for i in range(n_individuals):
initial_population.append(MLP(input_layer_size=4, output_layer_size=3, hidden_layer_sizes=None))
population = initial_population
if not os.path.exists('best'):
os.makedirs('best')
plt.ion()
fig, ax = plt.subplots(nrows=2, figsize=(10, 7))
# ax[0].set_title('loss')
# ax[0].set_xlabel('epoch')
ax[0].set_ylabel('value')
# ax[1].set_title('f1-score')
ax[1].set_xlabel('epoch')
ax[1].set_ylabel(r'$\%$')
losses = defaultdict(list)
f1_train_list, f1_test_list = [], []
colors = ['crimson', 'forestgreen', 'black']
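    # Evolutionary loop, one generation per epoch: score every individual in parallel, keep the
    # n_best networks, replace the last n_new of the remainder with freshly initialized MLPs,
    # cross those with a randomly chosen best parent, mutate n_genes_mutable couplings each,
    # then shuffle everything into the next population.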
for epoch in range(n_epochs):
with Manager() as manager:
scores = manager.list()
processes = []
for i, individual in enumerate(population):
p = Process(target=lambda _scores, _i, _individual:
_scores.append((_i, _individual.error_on_batch(X_train, y_train, _softmax=True))),
args=(scores, i, individual,)
)
processes.append(p)
for i in range(0, len(processes), n_workers):
for j in range(i, i + n_workers):
if j < len(processes):
processes[j].start()
for j in range(i, i + n_workers):
if j < len(processes):
processes[j].join()
scores = list(scores)
errors = sorted(scores, key=lambda item: item[1])
values = [val for _, val in errors]
s = '\r{:.2f}%\t-\tepoch: {}\t-\tloss: {:.2f} (1 best),\t{:.2f} ({} best),\t{:.2f} (total)\t-\t'.format(
(epoch + 1.) / n_epochs * 100,
epoch + 1,
np.mean(values[:1]),
np.mean(values[:n_best]),
n_best,
np.mean(values)
)
losses['best'].append(np.mean(values[:1]))
losses['{} best'.format(n_best)].append(np.mean(values[:n_best]))
losses['total'].append(np.mean(values))
epochs = np.array(list(range(0, epoch + 1))) + 1
for i, key in enumerate(losses.keys()):
ax[0].plot(epochs, losses[key], label='loss: {}'.format(key), color=colors[i])
if not epoch:
ax[0].legend(loc='best', frameon=False)
best, other = [i for i, _ in errors[:n_best]], [i for i, _ in errors[n_best:]]
p = np.asarray(population, dtype=object)
best_individuals, other_individuals = p[best], p[other]
best_individuals[0].save_txt('best/{}.txt'.format(epoch))
best_out_train = best_individuals[0].forward_multiple(X_train, _softmax=True, _max_pooling=True)
best_out_test = best_individuals[0].forward_multiple(X_test, _softmax=True, _max_pooling=True)
f1_train = f1_score(y_train, best_out_train, average='weighted')
f1_test = f1_score(y_test, best_out_test, average='weighted')
s += 'f1: {:.2f} (train),\t{:.2f} (test)'.format(
f1_train, f1_test
)
print(s, end='', flush=True)
f1_train_list.append(f1_train * 100.)
f1_test_list.append(f1_test * 100.)
ax[1].plot(epochs, f1_train_list, label='f1-score: train', color=colors[0])
ax[1].plot(epochs, f1_test_list, label='f1-score: test', color=colors[1])
if not epoch:
ax[1].legend(loc='best', frameon=False)
plt.savefig('evolution.png', dpi=300)
plt.show(block=False)
fig.canvas.flush_events()
np.random.shuffle(best_individuals)
np.random.shuffle(other_individuals)
            # New blood: replace the tail of the population with freshly initialized networks
for i in range(len(other_individuals) - n_new, len(other_individuals)):
other_individuals[i] = MLP(input_layer_size=4, output_layer_size=3, hidden_layer_sizes=None)
            # Crossover
print('{} | crossover'.format(s), end='', flush=True)
for individual in other_individuals[-n_new:]:
best_parent = np.random.choice(best_individuals)
individual.crossover(best_parent, percentage=np.random.rand())
# individual.crossover(best_parent, percentage=0.5)
            # Mutation
print('{} | mutation'.format(s), end='', flush=True)
for individual in other_individuals[-n_new:]:
individual.mutation(n=n_genes_mutable)
print(s, end='', flush=True)
new_population = best_individuals.tolist() + other_individuals.tolist()
np.random.shuffle(new_population)
population = new_population
plt.show()
plt.ioff()
plt.close()
|
[
"sklearn.datasets.load_iris",
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"sklearn.model_selection.train_test_split",
"collections.defaultdict",
"sklearn.metrics.f1_score",
"numpy.mean",
"numpy.linalg.norm",
"numpy.exp",
"numpy.sin",
"yaml.safe_load",
"numpy.zeros_like",
"networkx.erdos_renyi_graph",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"numpy.power",
"scipy.integrate.solve_ivp",
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"dill.load",
"numpy.max",
"numpy.reshape",
"numpy.random.choice",
"itertools.product",
"matplotlib.pyplot.subplots",
"re.sub",
"numpy.random.shuffle",
"matplotlib.pyplot.show",
"numpy.asarray",
"itertools.accumulate",
"matplotlib.pyplot.ion",
"numpy.cos",
"scipy.special.softmax",
"networkx.to_numpy_array",
"networkx.DiGraph",
"dill.dump",
"numpy.concatenate",
"os.makedirs",
"matplotlib.pyplot.ioff",
"multiprocessing.Manager",
"numpy.random.random",
"networkx.Graph",
"numpy.array",
"networkx.utils.pairwise",
"numpy.random.rand",
"matplotlib.pyplot.savefig"
] |
[((504, 542), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (523, 542), True, 'import matplotlib as mpl\n'), ((24404, 24430), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (24413, 24430), False, 'from sklearn.datasets import load_iris\n'), ((24440, 24468), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (24466, 24468), False, 'from sklearn import preprocessing\n'), ((24679, 24733), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(X, y, test_size=0.1, random_state=42)\n', (24695, 24733), False, 'from sklearn.model_selection import train_test_split\n'), ((25161, 25170), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (25168, 25170), True, 'from matplotlib import pyplot as plt\n'), ((25185, 25223), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(10, 7)'}), '(nrows=2, figsize=(10, 7))\n', (25197, 25223), True, 'from matplotlib import pyplot as plt\n'), ((25424, 25441), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (25435, 25441), False, 'from collections import defaultdict\n'), ((29450, 29460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29458, 29460), True, 'from matplotlib import pyplot as plt\n'), ((29465, 29475), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (29473, 29475), True, 'from matplotlib import pyplot as plt\n'), ((29480, 29491), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29489, 29491), True, 'from matplotlib import pyplot as plt\n'), ((668, 687), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (676, 687), True, 'import numpy as np\n'), ((11518, 11549), 'numpy.concatenate', 'np.concatenate', (['(p, d)'], {'axis': '(-1)'}), '((p, d), axis=-1)\n', (11532, 11549), True, 'import numpy as np\n'), ((13092, 13214), 'scipy.integrate.solve_ivp', 'solve_ivp', (['self.__d'], {'y0': 'initial_state', 't_span': '(self.time_elapsed, t_stop + self.time_elapsed)', 't_eval': 't', 'method': 'method'}), '(self.__d, y0=initial_state, t_span=(self.time_elapsed, t_stop +\n self.time_elapsed), t_eval=t, method=method)\n', (13101, 13214), False, 'from scipy.integrate import solve_ivp\n'), ((15711, 15729), 'numpy.power', 'np.power', (['(10.0)', '(-3)'], {}), '(10.0, -3)\n', (15719, 15729), True, 'import numpy as np\n'), ((16584, 16615), 'numpy.random.random', 'np.random.random', (['adj_mat.shape'], {}), '(adj_mat.shape)\n', (16600, 16615), True, 'import numpy as np\n'), ((17638, 17663), 'networkx.utils.pairwise', 'nx.utils.pairwise', (['layers'], {}), '(layers)\n', (17655, 17663), True, 'import networkx as nx\n'), ((19112, 19127), 'numpy.asarray', 'np.asarray', (['out'], {}), '(out)\n', (19122, 19127), True, 'import numpy as np\n'), ((19624, 19647), 'numpy.linalg.norm', 'np.linalg.norm', (['(y - out)'], {}), '(y - out)\n', (19638, 19647), True, 'import numpy as np\n'), ((25104, 25126), 'os.path.exists', 'os.path.exists', (['"""best"""'], {}), "('best')\n", (25118, 25126), False, 'import os\n'), ((25136, 25155), 'os.makedirs', 'os.makedirs', (['"""best"""'], {}), "('best')\n", (25147, 25155), False, 'import os\n'), ((27335, 27371), 'numpy.asarray', 'np.asarray', (['population'], {'dtype': 'object'}), '(population, dtype=object)\n', (27345, 27371), True, 'import numpy as np\n'), ((27730, 27783), 'sklearn.metrics.f1_score', 'f1_score', (['y_train', 
'best_out_train'], {'average': '"""weighted"""'}), "(y_train, best_out_train, average='weighted')\n", (27738, 27783), False, 'from sklearn.metrics import f1_score\n'), ((27802, 27853), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'best_out_test'], {'average': '"""weighted"""'}), "(y_test, best_out_test, average='weighted')\n", (27810, 27853), False, 'from sklearn.metrics import f1_score\n'), ((28329, 28366), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""evolution.png"""'], {'dpi': '(300)'}), "('evolution.png', dpi=300)\n", (28340, 28366), True, 'from matplotlib import pyplot as plt\n'), ((28375, 28396), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (28383, 28396), True, 'from matplotlib import pyplot as plt\n'), ((28440, 28475), 'numpy.random.shuffle', 'np.random.shuffle', (['best_individuals'], {}), '(best_individuals)\n', (28457, 28475), True, 'import numpy as np\n'), ((28484, 28520), 'numpy.random.shuffle', 'np.random.shuffle', (['other_individuals'], {}), '(other_individuals)\n', (28501, 28520), True, 'import numpy as np\n'), ((29375, 29408), 'numpy.random.shuffle', 'np.random.shuffle', (['new_population'], {}), '(new_population)\n', (29392, 29408), True, 'import numpy as np\n'), ((823, 838), 'numpy.power', 'np.power', (['(10)', '(9)'], {}), '(10, 9)\n', (831, 838), True, 'import numpy as np\n'), ((1317, 1333), 'numpy.exp', 'np.exp', (['(-_p * _p)'], {}), '(-_p * _p)\n', (1323, 1333), True, 'import numpy as np\n'), ((3340, 3359), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (3348, 3359), True, 'import numpy as np\n'), ((3476, 3493), 'numpy.power', 'np.power', (['(10.0)', '(9)'], {}), '(10.0, 9)\n', (3484, 3493), True, 'import numpy as np\n'), ((3548, 3566), 'numpy.power', 'np.power', (['(10.0)', '(12)'], {}), '(10.0, 12)\n', (3556, 3566), True, 'import numpy as np\n'), ((3742, 3759), 'numpy.power', 'np.power', (['(10.0)', '(9)'], {}), '(10.0, 9)\n', (3750, 3759), True, 'import numpy as np\n'), ((4960, 4995), 'dill.dump', 'dill.dump', (['self', 'dump'], {'recurse': '(True)'}), '(self, dump, recurse=True)\n', (4969, 4995), False, 'import dill\n'), ((5124, 5139), 'dill.load', 'dill.load', (['dump'], {}), '(dump)\n', (5133, 5139), False, 'import dill\n'), ((9864, 9883), 'numpy.asarray', 'np.asarray', (['adj_mat'], {}), '(adj_mat)\n', (9874, 9883), True, 'import numpy as np\n'), ((10122, 10141), 'numpy.asarray', 'np.asarray', (['phi_ini'], {}), '(phi_ini)\n', (10132, 10141), True, 'import numpy as np\n'), ((10213, 10248), 'numpy.random.random', 'np.random.random', ([], {'size': 'self.n_nodes'}), '(size=self.n_nodes)\n', (10229, 10248), True, 'import numpy as np\n'), ((10611, 10634), 'numpy.asarray', 'np.asarray', (['dot_phi_ini'], {}), '(dot_phi_ini)\n', (10621, 10634), True, 'import numpy as np\n'), ((10714, 10749), 'numpy.random.random', 'np.random.random', ([], {'size': 'self.n_nodes'}), '(size=self.n_nodes)\n', (10730, 10749), True, 'import numpy as np\n'), ((11422, 11451), 'numpy.reshape', 'np.reshape', (['self.phi', '(-1, 1)'], {}), '(self.phi, (-1, 1))\n', (11432, 11451), True, 'import numpy as np\n'), ((11470, 11503), 'numpy.reshape', 'np.reshape', (['self.dot_phi', '(-1, 1)'], {}), '(self.dot_phi, (-1, 1))\n', (11480, 11503), True, 'import numpy as np\n'), ((11699, 11729), 'numpy.concatenate', 'np.concatenate', (['(p, d)'], {'axis': '(0)'}), '((p, d), axis=0)\n', (11713, 11729), True, 'import numpy as np\n'), ((11775, 11805), 'numpy.concatenate', 'np.concatenate', (['(d, p)'], {'axis': '(0)'}), '((d, p), 
axis=0)\n', (11789, 11805), True, 'import numpy as np\n'), ((11851, 11882), 'numpy.concatenate', 'np.concatenate', (['(d, p)'], {'axis': '(-1)'}), '((d, p), axis=-1)\n', (11865, 11882), True, 'import numpy as np\n'), ((13634, 13655), 'numpy.zeros_like', 'np.zeros_like', (['series'], {}), '(series)\n', (13647, 13655), True, 'import numpy as np\n'), ((14763, 14802), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(12, 10)'}), '(nrows=2, figsize=(12, 10))\n', (14775, 14802), True, 'from matplotlib import pyplot as plt\n'), ((15430, 15464), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure.png"""'], {'dpi': '(300)'}), "('figure.png', dpi=300)\n", (15441, 15464), True, 'from matplotlib import pyplot as plt\n'), ((15477, 15487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15485, 15487), True, 'from matplotlib import pyplot as plt\n'), ((15913, 15930), 'numpy.power', 'np.power', (['(10.0)', '(9)'], {}), '(10.0, 9)\n', (15921, 15930), True, 'import numpy as np\n'), ((15985, 16003), 'numpy.power', 'np.power', (['(10.0)', '(12)'], {}), '(10.0, 12)\n', (15993, 16003), True, 'import numpy as np\n'), ((16179, 16196), 'numpy.power', 'np.power', (['(10.0)', '(9)'], {}), '(10.0, 9)\n', (16187, 16196), True, 'import numpy as np\n'), ((16533, 16553), 'networkx.to_numpy_array', 'nx.to_numpy_array', (['G'], {}), '(G)\n', (16550, 16553), True, 'import networkx as nx\n'), ((17321, 17362), 'itertools.accumulate', 'itertools.accumulate', (['((0,) + subset_sizes)'], {}), '((0,) + subset_sizes)\n', (17341, 17362), False, 'import itertools\n'), ((17464, 17476), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (17474, 17476), True, 'import networkx as nx\n'), ((17507, 17517), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (17515, 17517), True, 'import networkx as nx\n'), ((18293, 18308), 'numpy.abs', 'np.abs', (['dot_phi'], {}), '(dot_phi)\n', (18299, 18308), True, 'import numpy as np\n'), ((18374, 18387), 'numpy.max', 'np.max', (['peaks'], {}), '(peaks)\n', (18380, 18387), True, 'import numpy as np\n'), ((18429, 18443), 'scipy.special.softmax', 'softmax', (['peaks'], {}), '(peaks)\n', (18436, 18443), False, 'from scipy.special import softmax\n'), ((18489, 18505), 'numpy.argmax', 'np.argmax', (['peaks'], {}), '(peaks)\n', (18498, 18505), True, 'import numpy as np\n'), ((18526, 18546), 'numpy.zeros_like', 'np.zeros_like', (['peaks'], {}), '(peaks)\n', (18539, 18546), True, 'import numpy as np\n'), ((25581, 25590), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (25588, 25590), False, 'from multiprocessing import Process, Manager\n'), ((26675, 26694), 'numpy.mean', 'np.mean', (['values[:1]'], {}), '(values[:1])\n', (26682, 26694), True, 'import numpy as np\n'), ((26708, 26732), 'numpy.mean', 'np.mean', (['values[:n_best]'], {}), '(values[:n_best])\n', (26715, 26732), True, 'import numpy as np\n'), ((26766, 26781), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (26773, 26781), True, 'import numpy as np\n'), ((26822, 26841), 'numpy.mean', 'np.mean', (['values[:1]'], {}), '(values[:1])\n', (26829, 26841), True, 'import numpy as np\n'), ((26891, 26915), 'numpy.mean', 'np.mean', (['values[:n_best]'], {}), '(values[:n_best])\n', (26898, 26915), True, 'import numpy as np\n'), ((26948, 26963), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (26955, 26963), True, 'import numpy as np\n'), ((28892, 28926), 'numpy.random.choice', 'np.random.choice', (['best_individuals'], {}), '(best_individuals)\n', (28908, 28926), True, 'import numpy as np\n'), 
((1189, 1212), 'numpy.sin', 'np.sin', (['(self.omega0 * t)'], {}), '(self.omega0 * t)\n', (1195, 1212), True, 'import numpy as np\n'), ((3296, 3315), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (3304, 3315), True, 'import numpy as np\n'), ((9628, 9669), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', ([], {'n': 'self.n_nodes', 'p': '(1)'}), '(n=self.n_nodes, p=1)\n', (9648, 9669), True, 'import networkx as nx\n'), ((15765, 15784), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (15773, 15784), True, 'import numpy as np\n'), ((17694, 17727), 'itertools.product', 'itertools.product', (['layer1', 'layer2'], {}), '(layer1, layer2)\n', (17711, 17727), False, 'import itertools\n'), ((20207, 20226), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (20223, 20226), True, 'import numpy as np\n'), ((1170, 1186), 'numpy.exp', 'np.exp', (['(-_p * _p)'], {}), '(-_p * _p)\n', (1176, 1186), True, 'import numpy as np\n'), ((1415, 1438), 'numpy.cos', 'np.cos', (['(self.omega0 * t)'], {}), '(self.omega0 * t)\n', (1421, 1438), True, 'import numpy as np\n'), ((15826, 15845), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (15834, 15845), True, 'import numpy as np\n'), ((16315, 16341), 'numpy.sum', 'np.sum', (['hidden_layer_sizes'], {}), '(hidden_layer_sizes)\n', (16321, 16341), True, 'import numpy as np\n'), ((28984, 29000), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28998, 29000), True, 'import numpy as np\n'), ((1498, 1521), 'numpy.sin', 'np.sin', (['(self.omega0 * t)'], {}), '(self.omega0 * t)\n', (1504, 1521), True, 'import numpy as np\n'), ((6879, 6911), 're.sub', 're.sub', (['"""[\\\\r\\\\n\\\\t ]"""', '""""""', 'line'], {}), "('[\\\\r\\\\n\\\\t ]', '', line)\n", (6885, 6911), False, 'import re\n'), ((7785, 7802), 'numpy.array', 'np.array', (['adj_mat'], {}), '(adj_mat)\n', (7793, 7802), True, 'import numpy as np\n'), ((8888, 8910), 'yaml.safe_load', 'yaml.safe_load', (['row[1]'], {}), '(row[1])\n', (8902, 8910), False, 'import yaml\n'), ((3886, 3905), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (3894, 3905), True, 'import numpy as np\n'), ((4037, 4054), 'numpy.power', 'np.power', (['(10.0)', '(9)'], {}), '(10.0, 9)\n', (4045, 4054), True, 'import numpy as np\n'), ((12419, 12434), 'numpy.sin', 'np.sin', (['(2 * phi)'], {}), '(2 * phi)\n', (12425, 12434), True, 'import numpy as np\n'), ((3935, 3954), 'numpy.power', 'np.power', (['(10.0)', '(-12)'], {}), '(10.0, -12)\n', (3943, 3954), True, 'import numpy as np\n'), ((12330, 12363), 'numpy.sum', 'np.sum', (['(self.adj_mat[_i] * _theta)'], {}), '(self.adj_mat[_i] * _theta)\n', (12336, 12363), True, 'import numpy as np\n')]
|
import numpy as np
from lib.deriv.adtools import cstest
def squeeze(A,axis=None):
A = np.squeeze(A,axis=axis)
return A.item() if A.ndim==0 else A
def logsumexp(X, axis=0, keepdims = False, deriv=False):
"""
This is a complex-step friendly version of logsumexp.
"""
maxX = np.real(X).max(axis=axis,keepdims=True)
Y = np.log(np.exp(X - maxX).sum(axis=axis,keepdims=True))
Y += maxX
Yshape = Y.shape
if not deriv:
return Y if keepdims else squeeze(Y,axis=axis)
S = np.exp(X - Y) # softmax
def back(dY=1):
        if np.isscalar(dY):
if dY == 1: return S
return dY*S
return dY.reshape(Yshape) * S
return Y if keepdims else squeeze(Y,axis=axis), back
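# Why "complex-step friendly": the shift maxX is taken from np.real(X), so a tiny imaginary
# perturbation survives exp/log intact and callers can estimate derivatives with the
# complex-step trick, e.g. a hypothetical check (h assumed tiny, E a one-hot direction):
#   h = 1e-20
#   d_approx = np.imag(logsumexp(X + 1j * h * E, axis=0, keepdims=True)) / h
# which is presumably what lib.deriv.adtools.cstest compares against the analytic back().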
if __name__ == "__main__":
print("Running test script for module adfunctions\n")
from numpy.random import randn
print("Testing logsumexp")
X = randn(2,3)
delta = cstest(logsumexp,X,keepdims=False,axis=0)
print(delta)
delta = cstest(logsumexp,X,keepdims=False,axis=1)
print(delta)
delta = cstest(logsumexp,X,keepdims=True,axis=0)
print(delta)
delta = cstest(logsumexp,X,keepdims=True,axis=1)
print(delta)
X = randn(3)
delta = cstest(logsumexp,X,keepdims=False)
print(delta)
delta = cstest(logsumexp,X,keepdims=True)
print(delta)
|
[
"lib.deriv.adtools.cstest",
"numpy.random.randn",
"numpy.isscalar",
"numpy.exp",
"numpy.real",
"numpy.squeeze"
] |
[((91, 115), 'numpy.squeeze', 'np.squeeze', (['A'], {'axis': 'axis'}), '(A, axis=axis)\n', (101, 115), True, 'import numpy as np\n'), ((519, 532), 'numpy.exp', 'np.exp', (['(X - Y)'], {}), '(X - Y)\n', (525, 532), True, 'import numpy as np\n'), ((950, 961), 'numpy.random.randn', 'randn', (['(2)', '(3)'], {}), '(2, 3)\n', (955, 961), False, 'from numpy.random import randn\n'), ((973, 1017), 'lib.deriv.adtools.cstest', 'cstest', (['logsumexp', 'X'], {'keepdims': '(False)', 'axis': '(0)'}), '(logsumexp, X, keepdims=False, axis=0)\n', (979, 1017), False, 'from lib.deriv.adtools import cstest\n'), ((1045, 1089), 'lib.deriv.adtools.cstest', 'cstest', (['logsumexp', 'X'], {'keepdims': '(False)', 'axis': '(1)'}), '(logsumexp, X, keepdims=False, axis=1)\n', (1051, 1089), False, 'from lib.deriv.adtools import cstest\n'), ((1117, 1160), 'lib.deriv.adtools.cstest', 'cstest', (['logsumexp', 'X'], {'keepdims': '(True)', 'axis': '(0)'}), '(logsumexp, X, keepdims=True, axis=0)\n', (1123, 1160), False, 'from lib.deriv.adtools import cstest\n'), ((1189, 1232), 'lib.deriv.adtools.cstest', 'cstest', (['logsumexp', 'X'], {'keepdims': '(True)', 'axis': '(1)'}), '(logsumexp, X, keepdims=True, axis=1)\n', (1195, 1232), False, 'from lib.deriv.adtools import cstest\n'), ((1260, 1268), 'numpy.random.randn', 'randn', (['(3)'], {}), '(3)\n', (1265, 1268), False, 'from numpy.random import randn\n'), ((1281, 1317), 'lib.deriv.adtools.cstest', 'cstest', (['logsumexp', 'X'], {'keepdims': '(False)'}), '(logsumexp, X, keepdims=False)\n', (1287, 1317), False, 'from lib.deriv.adtools import cstest\n'), ((1350, 1385), 'lib.deriv.adtools.cstest', 'cstest', (['logsumexp', 'X'], {'keepdims': '(True)'}), '(logsumexp, X, keepdims=True)\n', (1356, 1385), False, 'from lib.deriv.adtools import cstest\n'), ((575, 590), 'numpy.isscalar', 'np.isscalar', (['dY'], {}), '(dY)\n', (586, 590), True, 'import numpy as np\n'), ((299, 309), 'numpy.real', 'np.real', (['X'], {}), '(X)\n', (306, 309), True, 'import numpy as np\n'), ((354, 370), 'numpy.exp', 'np.exp', (['(X - maxX)'], {}), '(X - maxX)\n', (360, 370), True, 'import numpy as np\n')]
|
import logging
import re
import gensim
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from gensim.models import doc2vec
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
LabeledSentence = gensim.models.doc2vec.LabeledSentence
def read_dataset(path):
dataset = pd.read_csv(path, header=0, delimiter="\t")
x_train, x_test, y_train, y_test = train_test_split(dataset.review, dataset.sentiment, random_state=0, test_size=0.10)
data = x_train.tolist() + x_test.tolist()
x_train = label_sentences(x_train, 'Train')
x_test = label_sentences(x_test, 'Test')
    all_data = label_sentences(data, 'All')
    return x_train, x_test, y_train, y_test, all_data
def clean_text(text):
# Remove HTML
review_text = BeautifulSoup(text).get_text()
# Remove non-letters
review_text = re.sub("[^a-zA-Z]", " ", review_text)
# Convert words to lower case and split them
words = review_text.lower().split()
# Remove stopwords
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
return words
def label_sentences(corpus, label_type):
"""
Gensim's Doc2Vec implementation requires each document/paragraph to have a label associated with it.
    We do this by using the LabeledSentence method. The label will be "Train_i", "Test_i" or "All_i",
    where "i" is a dummy index of the review.
"""
labeled = []
for i, v in enumerate(corpus):
label = label_type + '_' + str(i)
labeled.append(LabeledSentence([v], [label]))
return labeled
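# Example with a hypothetical input: label_sentences(['a great movie'], 'Train')[0]
# -> a LabeledSentence with words=['a great movie'] and tags=['Train_0']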
def get_vectors(doc2vec_model, corpus_size, vectors_size, vectors_type):
"""
Get vectors from trained doc2vec model
:param doc2vec_model: Trained Doc2Vec model
:param corpus_size: Size of the data
:param vectors_size: Size of the embedding vectors
:param vectors_type: Training or Testing vectors
:return: list of vectors
"""
vectors = np.zeros((corpus_size, vectors_size))
for i in range(0, corpus_size):
index = i
if vectors_type == 'Test':
index = index + len(x_train)
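            # relies on the module-level x_train built in __main__: test vectors were appended
            # after the training ones when the combined 'All_' corpus was labeled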
prefix = 'All_' + str(index)
vectors[i] = doc2vec_model.docvecs[prefix]
return vectors
def train_doc2vec(corpus):
logging.info("Building Doc2Vec model")
d2v = doc2vec.Doc2Vec(min_count=1, window=3, vector_size=100, sample=1e-3, seed=1, workers=5)
d2v.build_vocab(corpus)
return d2v
def train_classifier(d2v, training_vectors, training_labels):
logging.info("Train Doc2Vec on training set")
d2v.train(training_vectors, total_examples=len(training_vectors), epochs=d2v.iter)
train_vectors = get_vectors(d2v, len(training_vectors), 100, 'Train')
model = RandomForestClassifier(n_estimators=100)
model.fit(train_vectors, np.array(training_labels))
training_predictions = model.predict(train_vectors)
logging.info('Training predicted classes: {}'.format(np.unique(training_predictions)))
logging.info('Training accuracy: {}'.format(accuracy_score(training_labels, training_predictions)))
logging.info('Training F1 score: {}'.format(f1_score(training_labels, training_predictions, average='weighted')))
return model
def test_classifier(d2v, classifier, testing_vectors, testing_labels):
logging.info("Train Doc2Vec on testing set")
d2v.train(testing_vectors, total_examples=len(testing_vectors), epochs=d2v.iter)
test_vectors = get_vectors(d2v, len(testing_vectors), 100, 'Test')
testing_predictions = classifier.predict(test_vectors)
logging.info('Testing predicted classes: {}'.format(np.unique(testing_predictions)))
logging.info('Testing accuracy: {}'.format(accuracy_score(testing_labels, testing_predictions)))
logging.info('Testing F1 score: {}'.format(f1_score(testing_labels, testing_predictions, average='weighted')))
if __name__ == "__main__":
    x_train, x_test, y_train, y_test, all_data = read_dataset('dataset.csv')
    d2v_model = train_doc2vec(all_data)
classifier = train_classifier(d2v_model, x_train, y_train)
test_classifier(d2v_model, classifier, x_test, y_test)
|
[
"sklearn.ensemble.RandomForestClassifier",
"logging.basicConfig",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"numpy.zeros",
"logging.info",
"sklearn.metrics.f1_score",
"gensim.models.doc2vec.Doc2Vec",
"numpy.array",
"nltk.corpus.stopwords.words",
"bs4.BeautifulSoup",
"re.sub",
"numpy.unique"
] |
[((337, 432), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (356, 432), False, 'import logging\n'), ((525, 568), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': '(0)', 'delimiter': '"""\t"""'}), "(path, header=0, delimiter='\\t')\n", (536, 568), True, 'import pandas as pd\n'), ((608, 694), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset.review', 'dataset.sentiment'], {'random_state': '(0)', 'test_size': '(0.1)'}), '(dataset.review, dataset.sentiment, random_state=0,\n test_size=0.1)\n', (624, 694), False, 'from sklearn.model_selection import train_test_split\n'), ((1053, 1090), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'review_text'], {}), "('[^a-zA-Z]', ' ', review_text)\n", (1059, 1090), False, 'import re\n'), ((2157, 2194), 'numpy.zeros', 'np.zeros', (['(corpus_size, vectors_size)'], {}), '((corpus_size, vectors_size))\n', (2165, 2194), True, 'import numpy as np\n'), ((2465, 2503), 'logging.info', 'logging.info', (['"""Building Doc2Vec model"""'], {}), "('Building Doc2Vec model')\n", (2477, 2503), False, 'import logging\n'), ((2514, 2607), 'gensim.models.doc2vec.Doc2Vec', 'doc2vec.Doc2Vec', ([], {'min_count': '(1)', 'window': '(3)', 'vector_size': '(100)', 'sample': '(0.001)', 'seed': '(1)', 'workers': '(5)'}), '(min_count=1, window=3, vector_size=100, sample=0.001, seed=\n 1, workers=5)\n', (2529, 2607), False, 'from gensim.models import doc2vec\n'), ((2713, 2758), 'logging.info', 'logging.info', (['"""Train Doc2Vec on training set"""'], {}), "('Train Doc2Vec on training set')\n", (2725, 2758), False, 'import logging\n'), ((2932, 2972), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (2954, 2972), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3492, 3536), 'logging.info', 'logging.info', (['"""Train Doc2Vec on testing set"""'], {}), "('Train Doc2Vec on testing set')\n", (3504, 3536), False, 'import logging\n'), ((1219, 1245), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1234, 1245), False, 'from nltk.corpus import stopwords\n'), ((3002, 3027), 'numpy.array', 'np.array', (['training_labels'], {}), '(training_labels)\n', (3010, 3027), True, 'import numpy as np\n'), ((979, 998), 'bs4.BeautifulSoup', 'BeautifulSoup', (['text'], {}), '(text)\n', (992, 998), False, 'from bs4 import BeautifulSoup\n'), ((3142, 3173), 'numpy.unique', 'np.unique', (['training_predictions'], {}), '(training_predictions)\n', (3151, 3173), True, 'import numpy as np\n'), ((3224, 3277), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['training_labels', 'training_predictions'], {}), '(training_labels, training_predictions)\n', (3238, 3277), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3328, 3395), 'sklearn.metrics.f1_score', 'f1_score', (['training_labels', 'training_predictions'], {'average': '"""weighted"""'}), "(training_labels, training_predictions, average='weighted')\n", (3336, 3395), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3808, 3838), 'numpy.unique', 'np.unique', (['testing_predictions'], {}), '(testing_predictions)\n', (3817, 3838), True, 'import numpy as np\n'), ((3888, 3939), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testing_labels', 'testing_predictions'], {}), '(testing_labels, 
testing_predictions)\n', (3902, 3939), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3989, 4054), 'sklearn.metrics.f1_score', 'f1_score', (['testing_labels', 'testing_predictions'], {'average': '"""weighted"""'}), "(testing_labels, testing_predictions, average='weighted')\n", (3997, 4054), False, 'from sklearn.metrics import accuracy_score, f1_score\n')]
|
import networkx as nx
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
### requesting data via network
protein_list = ['TPH1','COMT','SLC18A2','HTR1B','HTR2C','HTR2A','MAOA',
'TPH2','HTR1A','HTR7','SLC6A4','GABBR2','POMC','GNAI3',
'NPY','ADCY1','PDYN','GRM2','GRM3','GABBR1']
proteins = '%0d'.join(protein_list)
url = 'https://string-db.org/api/tsv/network?identifiers=' + proteins + '&species=9606'
r = requests.get(url)
### parsing data
lines = r.text.split('\n') # pull the text from the response object and split based on new lines
data = [l.split('\t') for l in lines] # split each line into its components based on tabs
# convert to dataframe using the first row as the column names; drop empty, final row
df = pd.DataFrame(data[1:-1], columns = data[0])
# dataframe with the preferred names of the two proteins and the score of the interaction
interactions = df[['preferredName_A', 'preferredName_B', 'score']]
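# each row holds one scored interaction; a hypothetical row: preferredName_A='TPH1',
# preferredName_B='MAOA', score='0.962' (the values are still strings at this point)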
# printing out dataframe
# print(interactions)
# print(interactions[interactions['preferredName_A'] == 'TPH1'])
# print(interactions.size)
G=nx.Graph(name='Protein Interaction Graph')
interactions = np.array(interactions)
for i in range(len(interactions)):
interaction = interactions[i]
a = interaction[0] # protein a node
b = interaction[1] # protein b node
    w = float(interaction[2]) # combined interaction score used directly as the edge weight
G.add_weighted_edges_from([(a,b,w)]) # add weighted edge to graph
pos = nx.spring_layout(G) # position the nodes using the spring layout
plt.figure(figsize=(11,11),facecolor=[0.7,0.7,0.7,0.4])
nx.draw_networkx(G, pos=pos)
plt.axis('off')
plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"networkx.draw_networkx",
"matplotlib.pyplot.figure",
"networkx.spring_layout",
"networkx.Graph",
"numpy.array",
"requests.get"
] |
[((494, 511), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (506, 511), False, 'import requests\n'), ((808, 849), 'pandas.DataFrame', 'pd.DataFrame', (['data[1:-1]'], {'columns': 'data[0]'}), '(data[1:-1], columns=data[0])\n', (820, 849), True, 'import pandas as pd\n'), ((1156, 1198), 'networkx.Graph', 'nx.Graph', ([], {'name': '"""Protein Interaction Graph"""'}), "(name='Protein Interaction Graph')\n", (1164, 1198), True, 'import networkx as nx\n'), ((1214, 1236), 'numpy.array', 'np.array', (['interactions'], {}), '(interactions)\n', (1222, 1236), True, 'import numpy as np\n'), ((1555, 1574), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (1571, 1574), True, 'import networkx as nx\n'), ((1620, 1680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 11)', 'facecolor': '[0.7, 0.7, 0.7, 0.4]'}), '(figsize=(11, 11), facecolor=[0.7, 0.7, 0.7, 0.4])\n', (1630, 1680), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1695), 'networkx.draw_networkx', 'nx.draw_networkx', (['G'], {}), '(G)\n', (1692, 1695), True, 'import networkx as nx\n'), ((1696, 1711), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1704, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1722), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1720, 1722), True, 'import matplotlib.pyplot as plt\n')]
|
# coding=utf-8
"""Text Classifier based on bert."""
from __future__ import absolute_import, division, print_function
import collections
import csv
import gc
import io
import logging
import math
import multiprocessing
import os
import pickle
import time
from multiprocessing import Pool, cpu_count
from multiprocessing.dummy import Pool as ThreadPool
from urllib.parse import urlparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
import seaborn as sns
import sklearn
import tensorflow as tf
from sklearn.metrics import (
classification_report, confusion_matrix, f1_score, precision_score,
recall_score, roc_auc_score)
from tqdm import tqdm
import modeling
import optimization
import tokenization
from saver import ModelSaver
from tokenization import _is_punctuation
n_jobs = cpu_count()
class BertClassifier:
def __init__(self, data_processor, num_labels, bert_config_file, max_seq_length, vocab_file, logdir, init_checkpoint, keep_checkpoint_max, use_GPU=False, label_smoothing=0.0, cycle=1):
config = tf.ConfigProto(allow_soft_placement=True)
# config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.output_dropout_keep_prob = np.array([0.9])
self.hidden_dropout_prob = np.array([0.1])
self.attention_probs_dropout_prob = np.array([0.1])
self.init_checkpoint = init_checkpoint
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
# self.train_op, self.loss, self.logits, self.probabilities, self.feed_dict, self.attention_probs = create_model(
self.train_op, self.loss, self.logits, self.probabilities, self.feed_dict = create_model(
bert_config, num_labels, max_seq_length, self.sess, init_checkpoint=self.init_checkpoint, use_GPU=use_GPU, label_smoothing=label_smoothing, cycle=cycle)
self.max_seq_length = max_seq_length
self.tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=True)
if not os.path.exists(logdir):
os.makedirs(logdir)
self.summary_writer = tf.summary.FileWriter(logdir, self.sess.graph)
self.prob_hist = None
self.logits_hist = None
self.eval_iterator = None
self.num_eval_steps = None
self.num_labels = num_labels
self.model_saver = ModelSaver(keep_checkpoint_max=keep_checkpoint_max)
self.data_processor = data_processor
def train(self, data_dir, epochs, train_batch_size, eval_batch_size, learning_rate, warmup_proportion, save_checkpoints_steps, save_checkpoints_dir):
'''Model train and eval.'''
next_batch, num_train_steps, num_warmup_steps = create_data_iterator(self.data_processor,
"train", data_dir, self.tokenizer, train_batch_size, self.max_seq_length, epochs, warmup_proportion)
summary_ops = tf.summary.merge_all()
        # Resume training from the latest checkpoint if one exists
ckpt = tf.train.get_checkpoint_state(save_checkpoints_dir)
if ckpt and ckpt.model_checkpoint_path:
            tf.logging.info("Reload training state: " +
                            ckpt.model_checkpoint_path)
self.model_saver.saver.restore(
self.sess, ckpt.model_checkpoint_path)
for step in range(num_train_steps):
try:
start = time.time()
data = self.sess.run(next_batch)
_, loss, global_step, merged_summary = self.sess.run((self.train_op, self.loss, tf.train.get_global_step(), summary_ops), feed_dict={self.feed_dict['input_ids']: data['input_ids'],
self.feed_dict['input_mask']: data['input_mask'],
self.feed_dict['segment_ids']: data['segment_ids'],
self.feed_dict['label_ids']: data['label_ids'],
self.feed_dict['sample_weight']: data['sample_weight'],
self.feed_dict['output_dropout_keep_prob']: self.output_dropout_keep_prob,
self.feed_dict['hidden_dropout_prob']: self.hidden_dropout_prob,
self.feed_dict['attention_probs_dropout_prob']: self.attention_probs_dropout_prob,
self.feed_dict['learning_rate']: learning_rate,
self.feed_dict['num_train_steps']: num_train_steps,
self.feed_dict['num_warmup_steps']: num_warmup_steps,
self.feed_dict['batch_size']: train_batch_size})
summary = tf.Summary(value=[tf.Summary.Value(
tag="Loss/Train", simple_value=loss)])
self.summary_writer.add_summary(
summary, global_step=global_step)
self.summary_writer.add_summary(
merged_summary, global_step=global_step)
end = time.time()
tf.logging.info("[%.2f%%] step: %d\tloss: %f\tcost time: %.3f" % (
(global_step/num_train_steps)*100, global_step, loss, (end-start)))
if global_step % save_checkpoints_steps == 0 and global_step != 0:
fscore, auc, precision, recall = self.eval(
data_dir, eval_batch_size, is_training=True, global_step=global_step)
                    # Priority queue: keep the 10 best checkpoints, whose parameters can later be averaged (fused)
self.model_saver.check_and_save_model(
save_checkpoints_dir, fscore, self.sess)
if global_step > num_train_steps:
break
except tf.errors.OutOfRangeError:
break
tf.logging.info("Train Finished.")
# self.summary_writer.close()
def eval(self, data_dir, eval_batch_size, is_training=False, global_step=None):
if self.prob_hist == None:
self.prob_hist = tf.summary.histogram(
'prob_hist', self.probabilities)
if self.logits_hist == None:
self.logits_hist = tf.summary.histogram('logits_hist', self.logits)
if (not is_training) or self.eval_iterator == None:
self.eval_iterator, self.num_eval_steps, self.label_name = create_data_iterator(self.data_processor,
"eval", data_dir, self.tokenizer, eval_batch_size, self.max_seq_length)
self.sess.run(self.eval_iterator.initializer)
loss_acc = []
label_acc = None
prob_acc = None
start = time.time()
for _ in tqdm(range(self.num_eval_steps), desc="Evaluation:"):
try:
data = self.sess.run(self.eval_iterator.get_next())
loss, prob, prob_hist, logits_hist = self.sess.run((self.loss, self.probabilities, self.prob_hist, self.logits_hist), feed_dict={self.feed_dict['input_ids']: data['input_ids'],
self.feed_dict['input_mask']: data['input_mask'],
self.feed_dict['segment_ids']: data['segment_ids'],
self.feed_dict['label_ids']: data['label_ids'],
self.feed_dict['sample_weight']: data['sample_weight'],
self.feed_dict['output_dropout_keep_prob']: np.array([1.0]),
self.feed_dict['hidden_dropout_prob']: np.array([0.0]),
self.feed_dict['attention_probs_dropout_prob']: np.array([0.0]),
self.feed_dict['batch_size']: eval_batch_size})
if isinstance(label_acc, type(None)):
assert loss_acc == [] and prob_acc == None
loss_acc.append(loss)
label_acc = data['label_ids']
prob_acc = prob
else:
loss_acc.append(loss)
label_acc = np.concatenate(
(label_acc, data['label_ids']), axis=0)
prob_acc = np.concatenate((prob_acc, prob), axis=0)
except tf.errors.OutOfRangeError:
break
assert len(prob_acc) == len(label_acc)
# Classification report
report = classification_report(label_acc, np.argmax(prob_acc, axis=-1), labels=[
i for i in range(len(self.label_name))], target_names=self.label_name)
tf.logging.info("***** Classification Report *****")
tf.logging.info(report)
# f1 score
fscore = f1_score(label_acc, np.argmax(
prob_acc, axis=-1), average='macro')
# precision
precision = precision_score(label_acc, np.argmax(
prob_acc, axis=-1), average='macro')
# recall
recall = recall_score(label_acc, np.argmax(
prob_acc, axis=-1), average='macro')
# AUC
auc = roc_auc_score(np.eye(self.num_labels)[
label_acc], prob_acc, average='macro')
roc_curve, confusion_matrix = draw_image(prob_acc, np.eye(
self.num_labels)[label_acc], label_acc, np.argmax(prob_acc, axis=-1), self.label_name)
if is_training:
summary = tf.Summary(value=[tf.Summary.Value(tag="Loss/Eval", simple_value=np.mean(loss_acc)),
tf.Summary.Value(
tag="Eval/auc", simple_value=auc),
tf.Summary.Value(
tag="Eval/f1_score", simple_value=fscore),
tf.Summary.Value(
tag="Eval/precision", simple_value=precision),
tf.Summary.Value(
tag="Eval/recall", simple_value=recall),
tf.Summary.Value(tag='Eval_ROC', image=tf.Summary.Image(
encoded_image_string=roc_curve)),
tf.Summary.Value(tag='Eval_Confusion_Matrix', image=tf.Summary.Image(encoded_image_string=confusion_matrix))])
self.summary_writer.add_summary(
prob_hist, global_step=global_step)
self.summary_writer.add_summary(
logits_hist, global_step=global_step)
self.summary_writer.add_summary(
summary, global_step=global_step)
end = time.time()
tf.logging.info("Evaluation Finished.\tcost time: %.3f\tF1 Score:%.3f\tAuc:%.3f\tprecision:%.3f\trecall:%.3f\t" % (
(end-start), fscore, auc, precision, recall))
return fscore, auc, precision, recall
def predict(self, predict_batch_size=1, output_dir='./predict', file_path=None, input_example=None):
# print((file_path != None or input_example != None))
assert (file_path != None or input_example !=
None), "file_path and input_example must have one not None."
if file_path != None:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
out_file = open(os.path.join(output_dir, "out.txt"),
'w', encoding="utf-8")
next_batch, num_predict_steps, label_name = create_data_iterator(self.data_processor,
"predict", file_path, self.tokenizer, predict_batch_size, self.max_seq_length)
out_file.write("prob,label\n")
for _ in tqdm(range(num_predict_steps), desc="Predicting:"):
try:
data = self.sess.run(next_batch)
prob = self.sess.run((self.probabilities), feed_dict={self.feed_dict['input_ids']: data['input_ids'],
self.feed_dict['input_mask']: data['input_mask'],
self.feed_dict['segment_ids']: data['segment_ids'],
self.feed_dict['output_dropout_keep_prob']: np.array([1.0]),
self.feed_dict['hidden_dropout_prob']: np.array([0.0]),
self.feed_dict['attention_probs_dropout_prob']: np.array([0.0]),
self.feed_dict['batch_size']: predict_batch_size})
for p in prob:
out_file.write("%s,%s\n" %
(p, label_name[np.argmax(p)]))
except tf.errors.OutOfRangeError:
break
out_file.close()
else:
s = time.time()
input_feature = convert_single_example(
0, input_example, None, self.max_seq_length, self.tokenizer, is_predict=True)
e = time.time()
print("process:", e-s)
s = time.time()
prob = self.sess.run((self.probabilities), feed_dict={self.feed_dict['input_ids']: np.expand_dims(np.array(input_feature.input_ids), axis=0),
self.feed_dict['input_mask']: np.expand_dims(np.array(input_feature.input_mask), axis=0),
self.feed_dict['segment_ids']: np.expand_dims(np.array(input_feature.segment_ids), axis=0),
self.feed_dict['output_dropout_keep_prob']: np.array([1.0]),
self.feed_dict['hidden_dropout_prob']: np.array([0.0]),
self.feed_dict['attention_probs_dropout_prob']: np.array([0.0]),
self.feed_dict['batch_size']: predict_batch_size})
e = time.time()
print("inference:", e-s)
return prob
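# Hypothetical single-example inference, with file names assumed purely for illustration:
#   clf = BertClassifier(data_processor, num_labels=2, bert_config_file='bert_config.json',
#                        max_seq_length=192, vocab_file='vocab.txt', logdir='./log',
#                        init_checkpoint='bert_model.ckpt', keep_checkpoint_max=10)
#   prob = clf.predict(input_example=InputExample(guid='0', text_a='some review text'))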
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, text_c=None, label=None, weight=1):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
text_c: (Optional) string. The untokenized text of the third sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
weight: (Optional) float. The weight of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.text_c = text_c
self.label = label
self.weight = weight
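# Example with hypothetical values: a labelled training example using the default weight
#   InputExample(guid='train-42', text_a='battery life is excellent', label='1')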
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
weight):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.sample_weight = weight
def create_data_iterator(processor, mode, data_dir, tokenizer, batch_size, max_seq_length=192, epochs=1, warmup_proportion=None):
label_list = processor.get_labels()
if mode == "train":
        # If the TFRecord dataset already exists, skip the data conversion and train directly
if os.path.exists(os.path.join(data_dir, "train.tf_record_0")):
train_examples_cnt = 0
with open(os.path.join(data_dir, "train.tsv"), 'r', encoding='utf-8') as f:
for _ in f:
train_examples_cnt += 1
num_train_steps = math.ceil(
train_examples_cnt / batch_size * epochs)
num_warmup_steps = int(num_train_steps * warmup_proportion)
# print log
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", train_examples_cnt)
tf.logging.info(" Batch size = %d", batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
else:
train_examples = processor.get_train_examples(data_dir)
num_train_steps = math.ceil(
len(train_examples) / batch_size * epochs)
num_warmup_steps = int(num_train_steps * warmup_proportion)
# print log
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
# convert examples to features and write to tf-record files.
file_based_convert_examples_to_features(
train_examples, label_list, max_seq_length, tokenizer, os.path.join(data_dir, "train.tf_record"))
# Load multiple tf-record dateset files.
filenames = [os.path.join(data_dir, "train.tf_record_%d" % i)
for i in range(n_jobs)]
data_set = tf.data.TFRecordDataset(filenames)
# Create a description of the features.
feature_description = {
'input_ids': tf.io.FixedLenFeature([max_seq_length], tf.int64, default_value=[0 for i in range(max_seq_length)]),
'input_mask': tf.io.FixedLenFeature([max_seq_length], tf.int64, default_value=[0 for i in range(max_seq_length)]),
'segment_ids': tf.io.FixedLenFeature([max_seq_length], tf.int64, default_value=[0 for i in range(max_seq_length)]),
'label_ids': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'sample_weight': tf.io.FixedLenFeature([], tf.float32, default_value=0.0),
}
def _parse_function(example_proto):
# Parse the input `tf.Example` proto using the dictionary above.
return tf.io.parse_single_example(example_proto, feature_description)
data_set = data_set.map(_parse_function)
data_set = data_set.repeat(int(epochs))
data_set = data_set.shuffle(buffer_size=100)
data_set = data_set.batch(batch_size=batch_size)
iterator = data_set.make_one_shot_iterator()
next_batch = iterator.get_next()
return next_batch, num_train_steps, num_warmup_steps
elif mode == "eval":
eval_examples = processor.get_eval_examples(data_dir)
num_eval_steps = math.ceil(len(eval_examples) / batch_size)
# print log
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d ", len(eval_examples))
tf.logging.info(" Batch size = %d", batch_size)
tf.logging.info(" Num steps = %d", num_eval_steps)
features = convert_examples_to_features(
eval_examples, label_list, max_seq_length, tokenizer)
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
all_sample_weight = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
all_sample_weight.append(feature.sample_weight)
num_examples = len(features)
data_set = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, max_seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, max_seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, max_seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[
num_examples], dtype=tf.int32),
"sample_weight":
tf.constant(all_sample_weight, shape=[
num_examples], dtype=tf.float32),
})
data_set = data_set.batch(batch_size=batch_size)
iterator = data_set.make_initializable_iterator()
# next_batch = iterator.get_next()
return iterator, num_eval_steps, label_list
elif mode == "predict":
predict_examples = processor.get_test_examples(data_dir)
num_predict_steps = math.ceil(len(predict_examples) / batch_size)
# print log
tf.logging.info("***** Running predict *****")
tf.logging.info(" Num examples = %d ", len(predict_examples))
tf.logging.info(" Batch size = %d", batch_size)
tf.logging.info(" Num steps = %d", num_predict_steps)
features = convert_examples_to_features(
predict_examples, label_list, max_seq_length, tokenizer)
all_input_ids = []
all_input_mask = []
all_segment_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
num_examples = len(features)
data_set = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, max_seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, max_seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, max_seq_length],
dtype=tf.int32),
})
data_set = data_set.batch(batch_size=batch_size)
iterator = data_set.make_one_shot_iterator()
next_batch = iterator.get_next()
return next_batch, num_predict_steps, label_list
else:
raise ValueError("Mode must be 'train', 'eval' or 'predict'.")
def plot_roc_curve(prob, label_onehot):
fpr, tpr, thresholds = sklearn.metrics.roc_curve(
label_onehot.ravel(), prob.ravel())
auc = sklearn.metrics.auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, c='r', lw=2, alpha=0.7,
label=u'AUC=%.3f' % auc)
plt.plot((0, 1), (0, 1), c='#808080', lw=1, ls='--', alpha=0.7)
plt.xlim((-0.01, 1.02))
plt.ylim((-0.01, 1.02))
plt.xticks(np.arange(0, 1.1, 0.1))
plt.yticks(np.arange(0, 1.1, 0.1))
plt.xlabel('False Positive Rate', fontsize=13)
plt.ylabel('True Positive Rate', fontsize=13)
plt.grid(b=True, ls=':')
plt.legend(loc='lower right', fancybox=True,
framealpha=0.8, fontsize=12)
plt.title(u'Eval ROC And AUC', fontsize=17)
buffer_ = io.BytesIO()
plt.savefig(buffer_, format='png')
buffer_.seek(0)
png_string = buffer_.getvalue()
buffer_.close()
plt.close()
return png_string
def plot_confusion_matrix(label_id, predicted_classes, label_names):
# Compute confusion matrix
cm = sklearn.metrics.confusion_matrix(
label_id, predicted_classes)
plt.figure()
sns.heatmap(cm, annot=True, yticklabels=label_names,
xticklabels=label_names, linewidths=.5)
plt.yticks(rotation=360)
plt.title(u'Confusion Matrix Heat Map', fontsize=17)
plt.tight_layout()
buffer_ = io.BytesIO()
plt.savefig(buffer_, format='png')
buffer_.seek(0)
png_string = buffer_.getvalue()
buffer_.close()
plt.close()
return png_string
def draw_image(prob, label_onehot, label_id, predicted_classes, label_names):
roc_curve = plot_roc_curve(prob, label_onehot)
confusion_matrix = plot_confusion_matrix(
label_id, predicted_classes, label_names)
return roc_curve, confusion_matrix
def process_url(url):
"""Converts a string url to a list of token string."""
# only get url path, remove host,params.
url = urlparse(url).path
# url = list(url)
# for i in range(len(url)):
# if _is_punctuation(url[i]):
# url[i] = " "
# url = ''.join(url)
# url = ' '.join(url.split())
return url
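# Worked example (illustrative): urlparse().path keeps only the path component, so
# the scheme, host, query string and fragment are all dropped:
#   process_url("https://example.com/news/story.html?id=7")  ->  "/news/story.html"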
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer, is_predict=False):
"""Converts a single `InputExample` into a single `InputFeatures`."""
# title
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
tokens_c = None
if example.text_b:
# URL pre-process
url = process_url(example.text_b)
tokens_b = tokenizer.tokenize(url)
if example.text_c:
# body
tokens_c = tokenizer.tokenize(example.text_c)
if tokens_b and tokens_c:
# Modifies `tokens_a`, `tokens_b` and `tokens_c` in place so that the total
# length is less than the specified length.
        # Account for [CLS], [SEP], [SEP], [SEP] with "- 4"
_truncate_seq_pair_3(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
elif tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
if tokens_c:
for token in tokens_c:
tokens.append(token)
segment_ids.append(2)
tokens.append("[SEP]")
segment_ids.append(2)
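    # Note (added): using segment id 2 for the third sequence assumes the BERT
    # config was built with type_vocab_size >= 3; stock BERT checkpoints only
    # provide token-type embeddings for ids 0 and 1.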
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if is_predict:
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=None,
weight=None)
return feature
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
label_id = label_map[example.label]
# if ex_index < 5:
# print("*** Example ***")
# print("guid: %s" % (example.guid))
# print("tokens: %s" % " ".join(
# [tokenization.printable_text(x) for x in tokens]))
# print("input_ids: %s" %
# " ".join([str(x) for x in input_ids]))
# print("input_mask: %s" %
# " ".join([str(x) for x in input_mask]))
# print("segment_ids: %s" %
# " ".join([str(x) for x in segment_ids]))
# print("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
weight=example.weight)
return feature
def file_based_convert(task_id, output_file, examples, label_list, max_seq_length, tokenizer):
    writer = tf.python_io.TFRecordWriter(output_file)
    try:
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
print("Task %d\t:Writing example %d of %d" %
(task_id, ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features = collections.OrderedDict()
features["input_ids"] = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(feature.input_ids)))
features["input_mask"] = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(feature.input_mask)))
features["segment_ids"] = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(feature.segment_ids)))
features["label_ids"] = tf.train.Feature(
int64_list=tf.train.Int64List(value=list([feature.label_id])))
features["sample_weight"] = tf.train.Feature(
float_list=tf.train.FloatList(value=list([feature.sample_weight])))
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
    except Exception as e:
        print(e)
    finally:
        writer.close()
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
if os.path.exists(output_file+"_0"):
return
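    # n_jobs is assumed to be a module-level constant defined earlier in this file
    # (e.g. derived from multiprocessing.cpu_count()); each worker process writes
    # its own shard named "<output_file>_<i>".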
p = Pool(n_jobs)
chunk_size = int(len(examples) / n_jobs)
for i in range(n_jobs):
if i < n_jobs - 1:
p.apply_async(file_based_convert, args=(
i, "%s_%d" % (output_file, i), examples[i*chunk_size: (i+1) * chunk_size], label_list, max_seq_length, tokenizer,))
else:
p.apply_async(file_based_convert, args=(
i, "%s_%d" % (output_file, i), examples[i*chunk_size: len(examples)], label_list, max_seq_length, tokenizer,))
p.close()
p.join()
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" %
(ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def _truncate_seq_pair_3(tokens_a, tokens_b, tokens_c, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)
if total_length <= max_length:
break
if len(tokens_b) > len(tokens_c):
tokens_b.pop()
else:
tokens_c.pop()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
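# Worked example (illustrative): with max_length=5 the longer of the two lists is
# trimmed one token at a time until the combined length fits.
def _example_truncate_seq_pair():
    tokens_a = ["a", "b", "c", "d"]
    tokens_b = ["x", "y"]
    _truncate_seq_pair(tokens_a, tokens_b, 5)
    # tokens_a is now ["a", "b", "c"]; tokens_b is unchanged.
    return tokens_a, tokens_b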
def get_available_gpus():
"""
code from http://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow
"""
from tensorflow.python.client import device_lib as _device_lib
local_device_protos = _device_lib.list_local_devices()
return [x.name.split(":")[-1] for x in local_device_protos if x.device_type == 'GPU']
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expend_g = tf.expand_dims(g, 0)
grads.append(expend_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
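# Sketch of the expected input (inferred from the call sites below): tower_grads is
# a list with one entry per GPU, and each entry is the (gradient, variable) list
# returned by optimizer.compute_gradients(); the variables are shared across towers,
# so only the gradient tensors need to be averaged.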
def assign_to_device(device, ps_device='/cpu:0'):
def _assign(op):
node_def = op if isinstance(op, tf.NodeDef) else op.node_def
if node_def.op in ['Variable', 'VariableV2', 'AutoReloadVariable']:
return "/" + ps_device
else:
return device
return _assign
def create_model(bert_config, num_labels, max_seq_length, sess, init_checkpoint=None, use_GPU=False, label_smoothing=0.0, cycle=1):
"""Creates a classification model."""
GPUs = get_available_gpus()
defalut_device = '/cpu:0'
if use_GPU and len(GPUs) != 0:
defalut_device = '/gpu:{}'.format(GPUs[0])
# Place all ops on CPU by default
with tf.device(defalut_device):
tower_grads = []
loss_list = []
logits_list = []
probabilities_list = []
train_op = None
loss = None
logits = None
probabilities = None
global_step = tf.train.get_or_create_global_step()
# input placeholder
_input_ids = tf.placeholder(tf.int64, shape=(None, max_seq_length))
_input_mask = tf.placeholder(tf.int64, shape=(None, max_seq_length))
_segment_ids = tf.placeholder(tf.int64, shape=(None, max_seq_length))
_label_ids = tf.placeholder(tf.int64, shape=None)
_sample_weight = tf.placeholder(tf.float32, shape=None)
_output_dropout_keep_prob = tf.placeholder(tf.float32, shape=None)
_hidden_dropout_prob = tf.placeholder(tf.float32, shape=None)
_attention_probs_dropout_prob = tf.placeholder(tf.float32, shape=None)
# optimizer placeholder
_learning_rate = tf.placeholder(tf.float32, shape=None)
_num_train_steps = tf.placeholder(tf.int32, shape=None)
_num_warmup_steps = tf.placeholder(tf.int32, shape=None)
_batch_size = tf.placeholder(tf.int32, shape=None)
# feed dict
feed_dict = {'input_ids': _input_ids,
'input_mask': _input_mask,
'segment_ids': _segment_ids,
'label_ids': _label_ids,
'sample_weight': _sample_weight,
'output_dropout_keep_prob': _output_dropout_keep_prob,
'hidden_dropout_prob': _hidden_dropout_prob,
'attention_probs_dropout_prob': _attention_probs_dropout_prob,
'learning_rate': _learning_rate,
'num_train_steps': _num_train_steps,
'num_warmup_steps': _num_warmup_steps,
'batch_size': _batch_size}
optimizer = optimization.create_optimizer(
_learning_rate, tf.cast((_num_train_steps / cycle), tf.int32), _num_warmup_steps)
if use_GPU:
batch_size = tf.to_int32(_batch_size / len(GPUs))
for i in range(len(GPUs)):
# with tf.device(assign_to_device('/gpu:{}'.format(GPUs[i]), ps_device='/gpu:0')):
with tf.device('/gpu:{}'.format(GPUs[i])):
# split input data for every gpu device.
with tf.name_scope("input_slice"):
input_ids = _input_ids[i *
batch_size:(i + 1) * batch_size]
input_mask = _input_mask[i *
batch_size:(i + 1) * batch_size]
segment_ids = _segment_ids[i *
batch_size:(i + 1) * batch_size]
label_ids = _label_ids[i *
batch_size:(i + 1) * batch_size]
sample_weight = _sample_weight[i *
batch_size:(i + 1) * batch_size]
# build model
model = modeling.BertModel(
config=bert_config,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
hidden_dropout_prob=_hidden_dropout_prob,
attention_probs_dropout_prob=_attention_probs_dropout_prob,
scope="bert")
# If you want to use the token-level output, use model.get_sequence_output() instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
with tf.variable_scope("output", reuse=tf.AUTO_REUSE):
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(
output_layer, keep_prob=_output_dropout_keep_prob)
logits_ = tf.matmul(
output_layer, output_weights, transpose_b=True)
logits_ = tf.nn.bias_add(logits_, output_bias)
probabilities_ = tf.nn.softmax(logits_, axis=-1)
one_hot_labels = tf.one_hot(
label_ids, depth=num_labels, dtype=tf.float32)
loss_ = tf.losses.softmax_cross_entropy(
one_hot_labels,
logits_,
weights=sample_weight,
label_smoothing=label_smoothing
)
grads_ = optimizer.compute_gradients(loss_)
tower_grads.append(grads_)
loss_list.append(loss_)
logits_list.append(logits_)
probabilities_list.append(probabilities_)
loss = tf.reduce_mean(loss_list)
if len(GPUs) == 1:
logits = tf.squeeze(logits_list, [0])
probabilities = tf.squeeze(probabilities_list, [0])
else:
logits = tf.keras.layers.concatenate(logits_list, axis=0)
probabilities = tf.keras.layers.concatenate(
probabilities_list, axis=0)
# Merge grads
with tf.name_scope("merge_grads"):
grads = average_gradients(tower_grads)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var)
for grad, var in grads]
train_op = optimizer.apply_gradients(
capped_gvs, global_step=global_step)
else:
# build model
model = modeling.BertModel(
config=bert_config,
input_ids=_input_ids,
input_mask=_input_mask,
token_type_ids=_segment_ids,
hidden_dropout_prob=_hidden_dropout_prob,
attention_probs_dropout_prob=_attention_probs_dropout_prob,
scope="bert")
# If you want to use the token-level output, use model.get_sequence_output() instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
with tf.variable_scope("output", reuse=tf.AUTO_REUSE):
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(
output_layer, keep_prob=_output_dropout_keep_prob)
logits = tf.matmul(
output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
_label_ids, depth=num_labels, dtype=tf.float32)
loss = tf.losses.softmax_cross_entropy(
one_hot_labels,
logits,
weights=_sample_weight,
label_smoothing=label_smoothing
)
with tf.name_scope("merge_grads"):
grads = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var)
for grad, var in grads]
train_op = optimizer.apply_gradients(
capped_gvs, global_step=global_step)
    # Initialize the model's variables.
tf.logging.info("Load model checkpoint : %s" % init_checkpoint)
tvars = tf.trainable_variables()
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# # print variables
# tf.logging.info("**** Trainable Variables ****")
# for var in tvars:
# init_string = ""
# if var.name in initialized_variable_names:
# init_string = ", *INIT_FROM_CKPT*"
# tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
# init_string)
# attention_probs = model.get_all_layer_attention_probs()
# return (train_op, loss, logits, probabilities, feed_dict, attention_probs)
return (train_op, loss, logits, probabilities, feed_dict)
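# Usage sketch (assumed, not part of the original file): building the graph once and
# running a single training step; all hyper-parameter values below are placeholders.
def _example_run_one_step(sess, bert_config, batch):
    train_op, loss, logits, probabilities, feed = create_model(
        bert_config, num_labels=2, max_seq_length=192, sess=sess,
        init_checkpoint=None, use_GPU=False)
    _, step_loss = sess.run(
        [train_op, loss],
        feed_dict={feed['input_ids']: batch['input_ids'],
                   feed['input_mask']: batch['input_mask'],
                   feed['segment_ids']: batch['segment_ids'],
                   feed['label_ids']: batch['label_ids'],
                   feed['sample_weight']: batch['sample_weight'],
                   feed['output_dropout_keep_prob']: 0.9,
                   feed['hidden_dropout_prob']: 0.1,
                   feed['attention_probs_dropout_prob']: 0.1,
                   feed['learning_rate']: 2e-5,
                   feed['num_train_steps']: 1000,
                   feed['num_warmup_steps']: 100,
                   feed['batch_size']: 32})
    return step_loss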
|
[
"tensorflow.clip_by_value",
"modeling.BertModel",
"matplotlib.pyplot.figure",
"numpy.arange",
"tokenization.FullTokenizer",
"tensorflow.train.get_or_create_global_step",
"tensorflow.squeeze",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.grid",
"tensorflow.expand_dims",
"tensorflow.python_io.TFRecordWriter",
"os.makedirs",
"tensorflow.device",
"numpy.array",
"collections.OrderedDict",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.layers.concatenate",
"modeling.BertConfig.from_json_file",
"matplotlib.pyplot.tight_layout",
"urllib.parse.urlparse",
"multiprocessing.cpu_count",
"matplotlib.pyplot.close",
"tensorflow.summary.FileWriter",
"tensorflow.name_scope",
"tensorflow.truncated_normal_initializer",
"tensorflow.nn.bias_add",
"io.BytesIO",
"math.ceil",
"saver.ModelSaver",
"tensorflow.Summary.Image",
"matplotlib.pyplot.plot",
"tensorflow.data.TFRecordDataset",
"numpy.eye",
"tensorflow.losses.softmax_cross_entropy",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"tensorflow.logging.info",
"tensorflow.trainable_variables",
"numpy.argmax",
"tensorflow.ConfigProto",
"numpy.mean",
"os.path.join",
"tensorflow.one_hot",
"os.path.exists",
"tensorflow.concat",
"tensorflow.train.get_global_step",
"matplotlib.pyplot.legend",
"tensorflow.train.Features",
"tensorflow.reduce_mean",
"tensorflow.zeros_initializer",
"time.time",
"tensorflow.train.init_from_checkpoint",
"tensorflow.Summary.Value",
"tensorflow.nn.dropout",
"matplotlib.pyplot.savefig",
"seaborn.heatmap",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"matplotlib.pyplot.yticks",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.summary.histogram",
"tensorflow.summary.merge_all",
"tensorflow.train.get_checkpoint_state",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.constant",
"multiprocessing.Pool",
"matplotlib.pyplot.ylabel",
"modeling.get_assignment_map_from_checkpoint",
"numpy.concatenate",
"matplotlib.pyplot.xlim",
"tensorflow.io.parse_single_example",
"sklearn.metrics.auc",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.io.FixedLenFeature"
] |
[((823, 834), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (832, 834), False, 'from multiprocessing import Pool, cpu_count\n'), ((25365, 25394), 'sklearn.metrics.auc', 'sklearn.metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (25384, 25394), False, 'import sklearn\n'), ((25399, 25411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25409, 25411), True, 'import matplotlib.pyplot as plt\n'), ((25416, 25483), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'c': '"""r"""', 'lw': '(2)', 'alpha': '(0.7)', 'label': "(u'AUC=%.3f' % auc)"}), "(fpr, tpr, c='r', lw=2, alpha=0.7, label=u'AUC=%.3f' % auc)\n", (25424, 25483), True, 'import matplotlib.pyplot as plt\n'), ((25501, 25564), 'matplotlib.pyplot.plot', 'plt.plot', (['(0, 1)', '(0, 1)'], {'c': '"""#808080"""', 'lw': '(1)', 'ls': '"""--"""', 'alpha': '(0.7)'}), "((0, 1), (0, 1), c='#808080', lw=1, ls='--', alpha=0.7)\n", (25509, 25564), True, 'import matplotlib.pyplot as plt\n'), ((25569, 25592), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01, 1.02)'], {}), '((-0.01, 1.02))\n', (25577, 25592), True, 'import matplotlib.pyplot as plt\n'), ((25597, 25620), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.01, 1.02)'], {}), '((-0.01, 1.02))\n', (25605, 25620), True, 'import matplotlib.pyplot as plt\n'), ((25703, 25749), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(13)'}), "('False Positive Rate', fontsize=13)\n", (25713, 25749), True, 'import matplotlib.pyplot as plt\n'), ((25754, 25799), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(13)'}), "('True Positive Rate', fontsize=13)\n", (25764, 25799), True, 'import matplotlib.pyplot as plt\n'), ((25804, 25828), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'ls': '""":"""'}), "(b=True, ls=':')\n", (25812, 25828), True, 'import matplotlib.pyplot as plt\n'), ((25833, 25906), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'fancybox': '(True)', 'framealpha': '(0.8)', 'fontsize': '(12)'}), "(loc='lower right', fancybox=True, framealpha=0.8, fontsize=12)\n", (25843, 25906), True, 'import matplotlib.pyplot as plt\n'), ((25926, 25969), 'matplotlib.pyplot.title', 'plt.title', (['u"""Eval ROC And AUC"""'], {'fontsize': '(17)'}), "(u'Eval ROC And AUC', fontsize=17)\n", (25935, 25969), True, 'import matplotlib.pyplot as plt\n'), ((25984, 25996), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (25994, 25996), False, 'import io\n'), ((26001, 26035), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer_'], {'format': '"""png"""'}), "(buffer_, format='png')\n", (26012, 26035), True, 'import matplotlib.pyplot as plt\n'), ((26116, 26127), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26125, 26127), True, 'import matplotlib.pyplot as plt\n'), ((26261, 26322), 'sklearn.metrics.confusion_matrix', 'sklearn.metrics.confusion_matrix', (['label_id', 'predicted_classes'], {}), '(label_id, predicted_classes)\n', (26293, 26322), False, 'import sklearn\n'), ((26336, 26348), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26346, 26348), True, 'import matplotlib.pyplot as plt\n'), ((26353, 26451), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'yticklabels': 'label_names', 'xticklabels': 'label_names', 'linewidths': '(0.5)'}), '(cm, annot=True, yticklabels=label_names, xticklabels=\n label_names, linewidths=0.5)\n', (26364, 26451), True, 'import seaborn as sns\n'), ((26466, 26490), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], 
{'rotation': '(360)'}), '(rotation=360)\n', (26476, 26490), True, 'import matplotlib.pyplot as plt\n'), ((26495, 26547), 'matplotlib.pyplot.title', 'plt.title', (['u"""Confusion Matrix Heat Map"""'], {'fontsize': '(17)'}), "(u'Confusion Matrix Heat Map', fontsize=17)\n", (26504, 26547), True, 'import matplotlib.pyplot as plt\n'), ((26552, 26570), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26568, 26570), True, 'import matplotlib.pyplot as plt\n'), ((26585, 26597), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (26595, 26597), False, 'import io\n'), ((26602, 26636), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer_'], {'format': '"""png"""'}), "(buffer_, format='png')\n", (26613, 26636), True, 'import matplotlib.pyplot as plt\n'), ((26717, 26728), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26726, 26728), True, 'import matplotlib.pyplot as plt\n'), ((33535, 33569), 'os.path.exists', 'os.path.exists', (["(output_file + '_0')"], {}), "(output_file + '_0')\n", (33549, 33569), False, 'import os\n'), ((33593, 33605), 'multiprocessing.Pool', 'Pool', (['n_jobs'], {}), '(n_jobs)\n', (33597, 33605), False, 'from multiprocessing import Pool, cpu_count\n'), ((36356, 36388), 'tensorflow.python.client.device_lib.list_local_devices', '_device_lib.list_local_devices', ([], {}), '()\n', (36386, 36388), True, 'from tensorflow.python.client import device_lib as _device_lib\n'), ((1065, 1106), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1079, 1106), True, 'import tensorflow as tf\n'), ((1176, 1201), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1186, 1201), True, 'import tensorflow as tf\n'), ((1242, 1257), 'numpy.array', 'np.array', (['[0.9]'], {}), '([0.9])\n', (1250, 1257), True, 'import numpy as np\n'), ((1293, 1308), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (1301, 1308), True, 'import numpy as np\n'), ((1353, 1368), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (1361, 1368), True, 'import numpy as np\n'), ((1438, 1490), 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['bert_config_file'], {}), '(bert_config_file)\n', (1472, 1490), False, 'import modeling\n'), ((1946, 2015), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab_file', 'do_lower_case': '(True)'}), '(vocab_file=vocab_file, do_lower_case=True)\n', (1972, 2015), False, 'import tokenization\n'), ((2130, 2176), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logdir', 'self.sess.graph'], {}), '(logdir, self.sess.graph)\n', (2151, 2176), True, 'import tensorflow as tf\n'), ((2372, 2423), 'saver.ModelSaver', 'ModelSaver', ([], {'keep_checkpoint_max': 'keep_checkpoint_max'}), '(keep_checkpoint_max=keep_checkpoint_max)\n', (2382, 2423), False, 'from saver import ModelSaver\n'), ((2959, 2981), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2979, 2981), True, 'import tensorflow as tf\n'), ((3012, 3063), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['save_checkpoints_dir'], {}), '(save_checkpoints_dir)\n', (3041, 3063), True, 'import tensorflow as tf\n'), ((7054, 7088), 'tensorflow.logging.info', 'tf.logging.info', (['"""Train Finished."""'], {}), "('Train Finished.')\n", (7069, 7088), True, 'import tensorflow as tf\n'), ((7943, 7954), 'time.time', 'time.time', ([], {}), '()\n', (7952, 7954), False, 'import time\n'), ((10750, 10802), 
'tensorflow.logging.info', 'tf.logging.info', (['"""***** Classification Report *****"""'], {}), "('***** Classification Report *****')\n", (10765, 10802), True, 'import tensorflow as tf\n'), ((10811, 10834), 'tensorflow.logging.info', 'tf.logging.info', (['report'], {}), '(report)\n', (10826, 10834), True, 'import tensorflow as tf\n'), ((12833, 12844), 'time.time', 'time.time', ([], {}), '()\n', (12842, 12844), False, 'import time\n'), ((12853, 13023), 'tensorflow.logging.info', 'tf.logging.info', (["('Evaluation Finished.\\tcost time: %.3f\\tF1 Score:%.3f\\tAuc:%.3f\\tprecision:%.3f\\trecall:%.3f\\t'\n % (end - start, fscore, auc, precision, recall))"], {}), "(\n 'Evaluation Finished.\\tcost time: %.3f\\tF1 Score:%.3f\\tAuc:%.3f\\tprecision:%.3f\\trecall:%.3f\\t'\n % (end - start, fscore, auc, precision, recall))\n", (12868, 13023), True, 'import tensorflow as tf\n'), ((20079, 20113), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['filenames'], {}), '(filenames)\n', (20102, 20113), True, 'import tensorflow as tf\n'), ((25636, 25658), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (25645, 25658), True, 'import numpy as np\n'), ((25675, 25697), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (25684, 25697), True, 'import numpy as np\n'), ((27157, 27170), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (27165, 27170), False, 'from urllib.parse import urlparse\n'), ((31994, 32034), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), '(output_file)\n', (32021, 32034), True, 'import tensorflow as tf\n'), ((36732, 36751), 'tensorflow.concat', 'tf.concat', (['grads', '(0)'], {}), '(grads, 0)\n', (36741, 36751), True, 'import tensorflow as tf\n'), ((36767, 36790), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (36781, 36790), True, 'import tensorflow as tf\n'), ((37608, 37633), 'tensorflow.device', 'tf.device', (['defalut_device'], {}), '(defalut_device)\n', (37617, 37633), True, 'import tensorflow as tf\n'), ((37857, 37893), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (37891, 37893), True, 'import tensorflow as tf\n'), ((37943, 37997), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '(None, max_seq_length)'}), '(tf.int64, shape=(None, max_seq_length))\n', (37957, 37997), True, 'import tensorflow as tf\n'), ((38020, 38074), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '(None, max_seq_length)'}), '(tf.int64, shape=(None, max_seq_length))\n', (38034, 38074), True, 'import tensorflow as tf\n'), ((38098, 38152), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '(None, max_seq_length)'}), '(tf.int64, shape=(None, max_seq_length))\n', (38112, 38152), True, 'import tensorflow as tf\n'), ((38174, 38210), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': 'None'}), '(tf.int64, shape=None)\n', (38188, 38210), True, 'import tensorflow as tf\n'), ((38236, 38274), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (38250, 38274), True, 'import tensorflow as tf\n'), ((38311, 38349), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (38325, 38349), True, 'import tensorflow as tf\n'), ((38381, 38419), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), 
'(tf.float32, shape=None)\n', (38395, 38419), True, 'import tensorflow as tf\n'), ((38460, 38498), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (38474, 38498), True, 'import tensorflow as tf\n'), ((38556, 38594), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (38570, 38594), True, 'import tensorflow as tf\n'), ((38622, 38658), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None'}), '(tf.int32, shape=None)\n', (38636, 38658), True, 'import tensorflow as tf\n'), ((38687, 38723), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None'}), '(tf.int32, shape=None)\n', (38701, 38723), True, 'import tensorflow as tf\n'), ((38746, 38782), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None'}), '(tf.int32, shape=None)\n', (38760, 38782), True, 'import tensorflow as tf\n'), ((46099, 46162), 'tensorflow.logging.info', 'tf.logging.info', (["('Load model checkpoint : %s' % init_checkpoint)"], {}), "('Load model checkpoint : %s' % init_checkpoint)\n", (46114, 46162), True, 'import tensorflow as tf\n'), ((46179, 46203), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (46201, 46203), True, 'import tensorflow as tf\n'), ((46468, 46501), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (46499, 46501), True, 'import tensorflow as tf\n'), ((2044, 2066), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (2058, 2066), False, 'import os\n'), ((2080, 2099), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (2091, 2099), False, 'import os\n'), ((3124, 3193), 'tensorflow.logging.info', 'tf.logging.info', (["('Reload training state' + ckpt.model_checkpoint_path)"], {}), "('Reload training state' + ckpt.model_checkpoint_path)\n", (3139, 3193), True, 'import tensorflow as tf\n'), ((7276, 7329), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""prob_hist"""', 'self.probabilities'], {}), "('prob_hist', self.probabilities)\n", (7296, 7329), True, 'import tensorflow as tf\n'), ((7415, 7463), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""logits_hist"""', 'self.logits'], {}), "('logits_hist', self.logits)\n", (7435, 7463), True, 'import tensorflow as tf\n'), ((10593, 10621), 'numpy.argmax', 'np.argmax', (['prob_acc'], {'axis': '(-1)'}), '(prob_acc, axis=-1)\n', (10602, 10621), True, 'import numpy as np\n'), ((10891, 10919), 'numpy.argmax', 'np.argmax', (['prob_acc'], {'axis': '(-1)'}), '(prob_acc, axis=-1)\n', (10900, 10919), True, 'import numpy as np\n'), ((11018, 11046), 'numpy.argmax', 'np.argmax', (['prob_acc'], {'axis': '(-1)'}), '(prob_acc, axis=-1)\n', (11027, 11046), True, 'import numpy as np\n'), ((11136, 11164), 'numpy.argmax', 'np.argmax', (['prob_acc'], {'axis': '(-1)'}), '(prob_acc, axis=-1)\n', (11145, 11164), True, 'import numpy as np\n'), ((11450, 11478), 'numpy.argmax', 'np.argmax', (['prob_acc'], {'axis': '(-1)'}), '(prob_acc, axis=-1)\n', (11459, 11478), True, 'import numpy as np\n'), ((15242, 15253), 'time.time', 'time.time', ([], {}), '()\n', (15251, 15253), False, 'import time\n'), ((15416, 15427), 'time.time', 'time.time', ([], {}), '()\n', (15425, 15427), False, 'import time\n'), ((15479, 15490), 'time.time', 'time.time', ([], {}), '()\n', (15488, 15490), False, 'import time\n'), ((16472, 16483), 'time.time', 'time.time', ([], {}), '()\n', (16481, 16483), False, 'import 
time\n'), ((18426, 18469), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tf_record_0"""'], {}), "(data_dir, 'train.tf_record_0')\n", (18438, 18469), False, 'import os\n'), ((18697, 18748), 'math.ceil', 'math.ceil', (['(train_examples_cnt / batch_size * epochs)'], {}), '(train_examples_cnt / batch_size * epochs)\n', (18706, 18748), False, 'import math\n'), ((18874, 18921), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), "('***** Running training *****')\n", (18889, 18921), True, 'import tensorflow as tf\n'), ((18934, 18992), 'tensorflow.logging.info', 'tf.logging.info', (['""" Num examples = %d"""', 'train_examples_cnt'], {}), "(' Num examples = %d', train_examples_cnt)\n", (18949, 18992), True, 'import tensorflow as tf\n'), ((19005, 19053), 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'batch_size'], {}), "(' Batch size = %d', batch_size)\n", (19020, 19053), True, 'import tensorflow as tf\n'), ((19066, 19118), 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), "(' Num steps = %d', num_train_steps)\n", (19081, 19118), True, 'import tensorflow as tf\n'), ((19409, 19456), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), "('***** Running training *****')\n", (19424, 19456), True, 'import tensorflow as tf\n'), ((19541, 19589), 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'batch_size'], {}), "(' Batch size = %d', batch_size)\n", (19556, 19589), True, 'import tensorflow as tf\n'), ((19602, 19654), 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), "(' Num steps = %d', num_train_steps)\n", (19617, 19654), True, 'import tensorflow as tf\n'), ((19966, 20014), 'os.path.join', 'os.path.join', (['data_dir', "('train.tf_record_%d' % i)"], {}), "(data_dir, 'train.tf_record_%d' % i)\n", (19978, 20014), False, 'import os\n'), ((20600, 20652), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {'default_value': '(0)'}), '([], tf.int64, default_value=0)\n', (20621, 20652), True, 'import tensorflow as tf\n'), ((20683, 20739), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.float32'], {'default_value': '(0.0)'}), '([], tf.float32, default_value=0.0)\n', (20704, 20739), True, 'import tensorflow as tf\n'), ((20892, 20954), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (20918, 20954), True, 'import tensorflow as tf\n'), ((21505, 21554), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), "('***** Running evaluation *****')\n", (21520, 21554), True, 'import tensorflow as tf\n'), ((21631, 21679), 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'batch_size'], {}), "(' Batch size = %d', batch_size)\n", (21646, 21679), True, 'import tensorflow as tf\n'), ((21688, 21739), 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_eval_steps'], {}), "(' Num steps = %d', num_eval_steps)\n", (21703, 21739), True, 'import tensorflow as tf\n'), ((32419, 32444), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (32442, 32444), False, 'import collections\n'), ((36661, 36681), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (36675, 36681), True, 'import tensorflow as tf\n'), ((39573, 
39616), 'tensorflow.cast', 'tf.cast', (['(_num_train_steps / cycle)', 'tf.int32'], {}), '(_num_train_steps / cycle, tf.int32)\n', (39580, 39616), True, 'import tensorflow as tf\n'), ((43146, 43171), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_list'], {}), '(loss_list)\n', (43160, 43171), True, 'import tensorflow as tf\n'), ((43940, 44184), 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'input_ids': '_input_ids', 'input_mask': '_input_mask', 'token_type_ids': '_segment_ids', 'hidden_dropout_prob': '_hidden_dropout_prob', 'attention_probs_dropout_prob': '_attention_probs_dropout_prob', 'scope': '"""bert"""'}), "(config=bert_config, input_ids=_input_ids, input_mask=\n _input_mask, token_type_ids=_segment_ids, hidden_dropout_prob=\n _hidden_dropout_prob, attention_probs_dropout_prob=\n _attention_probs_dropout_prob, scope='bert')\n", (43958, 44184), False, 'import modeling\n'), ((46305, 46372), 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (46348, 46372), False, 'import modeling\n'), ((46386, 46448), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (46415, 46448), True, 'import tensorflow as tf\n'), ((3409, 3420), 'time.time', 'time.time', ([], {}), '()\n', (3418, 3420), False, 'import time\n'), ((6294, 6305), 'time.time', 'time.time', ([], {}), '()\n', (6303, 6305), False, 'import time\n'), ((6322, 6462), 'tensorflow.logging.info', 'tf.logging.info', (["('[%.2f%%] step: %d\\tloss: %f\\tcost time: %.3f' % (global_step /\n num_train_steps * 100, global_step, loss, end - start))"], {}), "('[%.2f%%] step: %d\\tloss: %f\\tcost time: %.3f' % (\n global_step / num_train_steps * 100, global_step, loss, end - start))\n", (6337, 6462), True, 'import tensorflow as tf\n'), ((11238, 11261), 'numpy.eye', 'np.eye', (['self.num_labels'], {}), '(self.num_labels)\n', (11244, 11261), True, 'import numpy as np\n'), ((11390, 11413), 'numpy.eye', 'np.eye', (['self.num_labels'], {}), '(self.num_labels)\n', (11396, 11413), True, 'import numpy as np\n'), ((13422, 13448), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (13436, 13448), False, 'import os\n'), ((13466, 13489), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (13477, 13489), False, 'import os\n'), ((13518, 13553), 'os.path.join', 'os.path.join', (['output_dir', '"""out.txt"""'], {}), "(output_dir, 'out.txt')\n", (13530, 13553), False, 'import os\n'), ((19852, 19893), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tf_record"""'], {}), "(data_dir, 'train.tf_record')\n", (19864, 19893), False, 'import os\n'), ((23641, 23687), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running predict *****"""'], {}), "('***** Running predict *****')\n", (23656, 23687), True, 'import tensorflow as tf\n'), ((23767, 23815), 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'batch_size'], {}), "(' Batch size = %d', batch_size)\n", (23782, 23815), True, 'import tensorflow as tf\n'), ((23824, 23878), 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_predict_steps'], {}), "(' Num steps = %d', num_predict_steps)\n", (23839, 23878), True, 'import tensorflow as tf\n'), ((43228, 43256), 'tensorflow.squeeze', 'tf.squeeze', (['logits_list', '[0]'], {}), '(logits_list, [0])\n', (43238, 43256), True, 'import 
tensorflow as tf\n'), ((43289, 43324), 'tensorflow.squeeze', 'tf.squeeze', (['probabilities_list', '[0]'], {}), '(probabilities_list, [0])\n', (43299, 43324), True, 'import tensorflow as tf\n'), ((43368, 43416), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['logits_list'], {'axis': '(0)'}), '(logits_list, axis=0)\n', (43395, 43416), True, 'import tensorflow as tf\n'), ((43449, 43504), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['probabilities_list'], {'axis': '(0)'}), '(probabilities_list, axis=0)\n', (43476, 43504), True, 'import tensorflow as tf\n'), ((43569, 43597), 'tensorflow.name_scope', 'tf.name_scope', (['"""merge_grads"""'], {}), "('merge_grads')\n", (43582, 43597), True, 'import tensorflow as tf\n'), ((44506, 44554), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {'reuse': 'tf.AUTO_REUSE'}), "('output', reuse=tf.AUTO_REUSE)\n", (44523, 44554), True, 'import tensorflow as tf\n'), ((45739, 45767), 'tensorflow.name_scope', 'tf.name_scope', (['"""merge_grads"""'], {}), "('merge_grads')\n", (45752, 45767), True, 'import tensorflow as tf\n'), ((10242, 10296), 'numpy.concatenate', 'np.concatenate', (["(label_acc, data['label_ids'])"], {'axis': '(0)'}), "((label_acc, data['label_ids']), axis=0)\n", (10256, 10296), True, 'import numpy as np\n'), ((10353, 10393), 'numpy.concatenate', 'np.concatenate', (['(prob_acc, prob)'], {'axis': '(0)'}), '((prob_acc, prob), axis=0)\n', (10367, 10393), True, 'import numpy as np\n'), ((18529, 18564), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (18541, 18564), False, 'import os\n'), ((22442, 22527), 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, max_seq_length]', 'dtype': 'tf.int32'}), '(all_input_ids, shape=[num_examples, max_seq_length], dtype=tf.int32\n )\n', (22453, 22527), True, 'import tensorflow as tf\n'), ((22607, 22693), 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, max_seq_length]', 'dtype': 'tf.int32'}), '(all_input_mask, shape=[num_examples, max_seq_length], dtype=tf.\n int32)\n', (22618, 22693), True, 'import tensorflow as tf\n'), ((22794, 22881), 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, max_seq_length]', 'dtype': 'tf.int32'}), '(all_segment_ids, shape=[num_examples, max_seq_length], dtype=tf\n .int32)\n', (22805, 22881), True, 'import tensorflow as tf\n'), ((22980, 23044), 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), '(all_label_ids, shape=[num_examples], dtype=tf.int32)\n', (22991, 23044), True, 'import tensorflow as tf\n'), ((23120, 23190), 'tensorflow.constant', 'tf.constant', (['all_sample_weight'], {'shape': '[num_examples]', 'dtype': 'tf.float32'}), '(all_sample_weight, shape=[num_examples], dtype=tf.float32)\n', (23131, 23190), True, 'import tensorflow as tf\n'), ((33190, 33225), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (33207, 33225), True, 'import tensorflow as tf\n'), ((40780, 41021), 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'hidden_dropout_prob': '_hidden_dropout_prob', 'attention_probs_dropout_prob': '_attention_probs_dropout_prob', 'scope': '"""bert"""'}), "(config=bert_config, input_ids=input_ids, input_mask=\n input_mask, 
token_type_ids=segment_ids, hidden_dropout_prob=\n _hidden_dropout_prob, attention_probs_dropout_prob=\n _attention_probs_dropout_prob, scope='bert')\n", (40798, 41021), False, 'import modeling\n'), ((44904, 44929), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (44921, 44929), True, 'import tensorflow as tf\n'), ((45006, 45070), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '_output_dropout_keep_prob'}), '(output_layer, keep_prob=_output_dropout_keep_prob)\n', (45019, 45070), True, 'import tensorflow as tf\n'), ((45126, 45183), 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), '(output_layer, output_weights, transpose_b=True)\n', (45135, 45183), True, 'import tensorflow as tf\n'), ((45238, 45273), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), '(logits, output_bias)\n', (45252, 45273), True, 'import tensorflow as tf\n'), ((45310, 45340), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (45323, 45340), True, 'import tensorflow as tf\n'), ((45379, 45437), 'tensorflow.one_hot', 'tf.one_hot', (['_label_ids'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), '(_label_ids, depth=num_labels, dtype=tf.float32)\n', (45389, 45437), True, 'import tensorflow as tf\n'), ((45491, 45608), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['one_hot_labels', 'logits'], {'weights': '_sample_weight', 'label_smoothing': 'label_smoothing'}), '(one_hot_labels, logits, weights=\n _sample_weight, label_smoothing=label_smoothing)\n', (45522, 45608), True, 'import tensorflow as tf\n'), ((3566, 3592), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (3590, 3592), True, 'import tensorflow as tf\n'), ((11668, 11718), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Eval/auc"""', 'simple_value': 'auc'}), "(tag='Eval/auc', simple_value=auc)\n", (11684, 11718), True, 'import tensorflow as tf\n'), ((11805, 11863), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Eval/f1_score"""', 'simple_value': 'fscore'}), "(tag='Eval/f1_score', simple_value=fscore)\n", (11821, 11863), True, 'import tensorflow as tf\n'), ((11950, 12012), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Eval/precision"""', 'simple_value': 'precision'}), "(tag='Eval/precision', simple_value=precision)\n", (11966, 12012), True, 'import tensorflow as tf\n'), ((12099, 12155), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Eval/recall"""', 'simple_value': 'recall'}), "(tag='Eval/recall', simple_value=recall)\n", (12115, 12155), True, 'import tensorflow as tf\n'), ((16069, 16084), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (16077, 16084), True, 'import numpy as np\n'), ((16191, 16206), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (16199, 16206), True, 'import numpy as np\n'), ((16322, 16337), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (16330, 16337), True, 'import numpy as np\n'), ((24415, 24500), 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, max_seq_length]', 'dtype': 'tf.int32'}), '(all_input_ids, shape=[num_examples, max_seq_length], dtype=tf.int32\n )\n', (24426, 24500), True, 'import tensorflow as tf\n'), ((24580, 24666), 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, max_seq_length]', 'dtype': 'tf.int32'}), 
'(all_input_mask, shape=[num_examples, max_seq_length], dtype=tf.\n int32)\n', (24591, 24666), True, 'import tensorflow as tf\n'), ((24767, 24854), 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, max_seq_length]', 'dtype': 'tf.int32'}), '(all_segment_ids, shape=[num_examples, max_seq_length], dtype=tf\n .int32)\n', (24778, 24854), True, 'import tensorflow as tf\n'), ((40004, 40032), 'tensorflow.name_scope', 'tf.name_scope', (['"""input_slice"""'], {}), "('input_slice')\n", (40017, 40032), True, 'import tensorflow as tf\n'), ((41431, 41479), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {'reuse': 'tf.AUTO_REUSE'}), "('output', reuse=tf.AUTO_REUSE)\n", (41448, 41479), True, 'import tensorflow as tf\n'), ((43685, 43718), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (43701, 43718), True, 'import tensorflow as tf\n'), ((44703, 44747), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (44734, 44747), True, 'import tensorflow as tf\n'), ((44858, 44880), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (44878, 44880), True, 'import tensorflow as tf\n'), ((45858, 45891), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (45874, 45891), True, 'import tensorflow as tf\n'), ((5982, 6035), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Loss/Train"""', 'simple_value': 'loss'}), "(tag='Loss/Train', simple_value=loss)\n", (5998, 6035), True, 'import tensorflow as tf\n'), ((9279, 9294), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9287, 9294), True, 'import numpy as np\n'), ((9480, 9495), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (9488, 9495), True, 'import numpy as np\n'), ((9690, 9705), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (9698, 9705), True, 'import numpy as np\n'), ((15601, 15634), 'numpy.array', 'np.array', (['input_feature.input_ids'], {}), '(input_feature.input_ids)\n', (15609, 15634), True, 'import numpy as np\n'), ((15756, 15790), 'numpy.array', 'np.array', (['input_feature.input_mask'], {}), '(input_feature.input_mask)\n', (15764, 15790), True, 'import numpy as np\n'), ((15913, 15948), 'numpy.array', 'np.array', (['input_feature.segment_ids'], {}), '(input_feature.segment_ids)\n', (15921, 15948), True, 'import numpy as np\n'), ((41877, 41902), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (41894, 41902), True, 'import tensorflow as tf\n'), ((41995, 42059), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '_output_dropout_keep_prob'}), '(output_layer, keep_prob=_output_dropout_keep_prob)\n', (42008, 42059), True, 'import tensorflow as tf\n'), ((42132, 42189), 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), '(output_layer, output_weights, transpose_b=True)\n', (42141, 42189), True, 'import tensorflow as tf\n'), ((42261, 42297), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits_', 'output_bias'], {}), '(logits_, output_bias)\n', (42275, 42297), True, 'import tensorflow as tf\n'), ((42343, 42374), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits_'], {'axis': '(-1)'}), '(logits_, axis=-1)\n', (42356, 42374), True, 'import tensorflow as tf\n'), ((42421, 42478), 'tensorflow.one_hot', 'tf.one_hot', (['label_ids'], {'depth': 'num_labels', 
'dtype': 'tf.float32'}), '(label_ids, depth=num_labels, dtype=tf.float32)\n', (42431, 42478), True, 'import tensorflow as tf\n'), ((42549, 42666), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['one_hot_labels', 'logits_'], {'weights': 'sample_weight', 'label_smoothing': 'label_smoothing'}), '(one_hot_labels, logits_, weights=\n sample_weight, label_smoothing=label_smoothing)\n', (42580, 42666), True, 'import tensorflow as tf\n'), ((11608, 11625), 'numpy.mean', 'np.mean', (['loss_acc'], {}), '(loss_acc)\n', (11615, 11625), True, 'import numpy as np\n'), ((12281, 12329), 'tensorflow.Summary.Image', 'tf.Summary.Image', ([], {'encoded_image_string': 'roc_curve'}), '(encoded_image_string=roc_curve)\n', (12297, 12329), True, 'import tensorflow as tf\n'), ((12469, 12524), 'tensorflow.Summary.Image', 'tf.Summary.Image', ([], {'encoded_image_string': 'confusion_matrix'}), '(encoded_image_string=confusion_matrix)\n', (12485, 12524), True, 'import tensorflow as tf\n'), ((14540, 14555), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (14548, 14555), True, 'import numpy as np\n'), ((14670, 14685), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (14678, 14685), True, 'import numpy as np\n'), ((14809, 14824), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (14817, 14824), True, 'import numpy as np\n'), ((41652, 41696), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (41683, 41696), True, 'import tensorflow as tf\n'), ((41823, 41845), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (41843, 41845), True, 'import tensorflow as tf\n'), ((15091, 15103), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (15100, 15103), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def plotLossGraph(loss_history, iter_count):
plt.figure()
plt.plot(np.arange(iter_count), loss_history)
plt.xlabel('Number of Iterations')
plt.ylabel('Loss')
plt.show()
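# Usage sketch (illustrative): plotLossGraph([2.31, 1.84, 1.20], 3) plots the three
# loss values against iteration indices 0, 1 and 2.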
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((102, 114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (112, 114), True, 'import matplotlib.pyplot as plt\n'), ((169, 203), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Iterations"""'], {}), "('Number of Iterations')\n", (179, 203), True, 'import matplotlib.pyplot as plt\n'), ((208, 226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (218, 226), True, 'import matplotlib.pyplot as plt\n'), ((231, 241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (239, 241), True, 'import matplotlib.pyplot as plt\n'), ((128, 149), 'numpy.arange', 'np.arange', (['iter_count'], {}), '(iter_count)\n', (137, 149), True, 'import numpy as np\n')]
|
"""
@version:
author:yunnaidan
@time: 2018/10/26
@file: PIdatabase.py
@function:
"""
import os
import time
import obspy
import numpy as np
from scipy.integrate import simps
from obspy.core.utcdatetime import UTCDateTime
import multiprocessing
from dyntripy.utils import psd, load_gf, gf
def psd_integral(pxx_all, f, f_win, gf_parameters):
f_min = f_win[0]
f_max = f_win[1]
f_index = np.where((f >= f_min) & (f <= f_max))
f_tar = f[f_index]
pxx_tar = pxx_all[f_index]
sensitivity, normalizing, zeros, poles = gf_parameters
gf_list = [gf(sensitivity, normalizing, zeros, poles, f) for f in f_tar]
counts = len(gf_list)
pxx_tar_remove_response = [(pxx_tar[i] / (gf_list[i]**2)) * (10**18)
for i in range(counts)] # m/s to nm/s
pi = simps(pxx_tar_remove_response, f_tar)
return pi
def abs_time(day_date, time_segment, i):
year = int(str(day_date)[:4])
month = int(str(day_date)[4:6])
day = int(str(day_date)[6:8])
hour = int((i * time_segment) / 3600)
minute = int(((i * time_segment) % 3600) / 60)
second = int(((i * time_segment) % 3600) % 60)
abs_time_value = UTCDateTime(year, month, day, hour, minute, second)
return abs_time_value
def pi41day(
sac_file,
gf_info_file,
year,
day,
sta,
data_path,
time_segment,
f_win_list,
out_file):
try:
_, sensitivity, normalizing, zeros, poles = load_gf(sac_file, gf_info_file)
gf_parameters = [sensitivity, normalizing, zeros, poles]
with open(out_file, 'a') as f:
f.write('time')
for f_win in f_win_list:
f.write(',' + str(int(f_win[0])) + '-' + str(int(f_win[1])))
f.write('\n')
st = obspy.read(os.path.join(data_path, str(year),
day, '.'.join([day, sta])))
tr = st[0]
tr.detrend('linear')
tr.detrend('constant')
fs = tr.stats.sampling_rate
start_time = tr.stats.starttime
segment_count = int(86400 / time_segment)
for i in range(segment_count):
abs_time_value = abs_time(day, time_segment, i)
start_point_index = max(0, round((abs_time_value - start_time) * fs))
end_point_index = max(
0, round(
(abs_time_value + time_segment - start_time) * fs))
            # If the index exceeds the length of the data, no error is thrown;
            # an empty array is returned instead.
data = tr.data[start_point_index:end_point_index]
if len(data) != 0:
pxx_all, f = psd(data, fs)
pi_list = []
for f_win in f_win_list:
pi = psd_integral(pxx_all, f, f_win, gf_parameters)
pi_list.append(pi)
else:
pi_list = [0.0 for _ in f_win_list]
with open(out_file, 'a') as f:
f.write(str(abs_time_value)[:-4] + 'Z')
for pi in pi_list:
f.write(',' + str(pi))
f.write('\n')
except Exception as err_msg:
print(err_msg)
def run_pi_parallel(
sta,
target_dates,
data_path,
time_segment,
f_win_list,
gf_info_file,
out_folder,
cores):
pool = multiprocessing.Pool(processes=cores)
tasks = []
for target_date in sorted(target_dates):
print('Prepare database task ' + str(target_date), end='\r')
year = target_date.year
day = ''.join([str(target_date.year).zfill(4),
str(target_date.month).zfill(2),
str(target_date.day).zfill(2)])
sac_file = '.'.join([day, sta])
if not os.path.exists(
os.path.join(
data_path,
str(year),
day,
sac_file)):
continue
out_file = os.path.join(out_folder, 'PI_' + str(sta) + '_' + str(day) + '.csv')
if os.path.exists(out_file):
continue
# pi41day(year, day, sta, data_path, time_segment, f_win_list, gf_parameters, out_file)
tasks.append(
(sac_file,
gf_info_file,
year,
day,
sta,
data_path,
time_segment,
f_win_list,
out_file))
print('\n')
# chunksize is how many tasks will be processed by one processor
rs = pool.starmap_async(pi41day, tasks, chunksize=1)
# close() & join() is necessary
pool.close()
# simple progress bar
while (True):
remaining = rs._number_left
print("finished:{0}/{1}".format(len(tasks) - remaining, len(tasks)),
              end='\r')  # '\r' returns to the start of the line so the next update overwrites it
if (rs.ready()):
break
time.sleep(0.5)
pool.join()
print('\n')
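# A small usage sketch, assuming the module's dependencies (obspy, scipy, dyntripy)
# are installed; it only exercises abs_time(), which is pure computation, and shows
# the day-string and frequency-window formats inferred from the code above.
if __name__ == '__main__':
    # The 6th segment (i=5) of 2018-01-01 with 3600-second segments starts at 05:00:00.
    print(abs_time('20180101', 3600, 5))      # 2018-01-01T05:00:00.000000Z
    # Frequency windows are passed as (f_min, f_max) pairs in Hz, e.g.:
    example_f_win_list = [(1, 5), (5, 10), (10, 20)]
    print(example_f_win_list)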
|
[
"dyntripy.utils.gf",
"os.path.exists",
"obspy.core.utcdatetime.UTCDateTime",
"time.sleep",
"numpy.where",
"dyntripy.utils.load_gf",
"dyntripy.utils.psd",
"multiprocessing.Pool",
"scipy.integrate.simps"
] |
[((398, 435), 'numpy.where', 'np.where', (['((f >= f_min) & (f <= f_max))'], {}), '((f >= f_min) & (f <= f_max))\n', (406, 435), True, 'import numpy as np\n'), ((805, 842), 'scipy.integrate.simps', 'simps', (['pxx_tar_remove_response', 'f_tar'], {}), '(pxx_tar_remove_response, f_tar)\n', (810, 842), False, 'from scipy.integrate import simps\n'), ((1169, 1220), 'obspy.core.utcdatetime.UTCDateTime', 'UTCDateTime', (['year', 'month', 'day', 'hour', 'minute', 'second'], {}), '(year, month, day, hour, minute, second)\n', (1180, 1220), False, 'from obspy.core.utcdatetime import UTCDateTime\n'), ((3388, 3425), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cores'}), '(processes=cores)\n', (3408, 3425), False, 'import multiprocessing\n'), ((565, 610), 'dyntripy.utils.gf', 'gf', (['sensitivity', 'normalizing', 'zeros', 'poles', 'f'], {}), '(sensitivity, normalizing, zeros, poles, f)\n', (567, 610), False, 'from dyntripy.utils import psd, load_gf, gf\n'), ((1483, 1514), 'dyntripy.utils.load_gf', 'load_gf', (['sac_file', 'gf_info_file'], {}), '(sac_file, gf_info_file)\n', (1490, 1514), False, 'from dyntripy.utils import psd, load_gf, gf\n'), ((4074, 4098), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (4088, 4098), False, 'import os\n'), ((4909, 4924), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4919, 4924), False, 'import time\n'), ((2670, 2683), 'dyntripy.utils.psd', 'psd', (['data', 'fs'], {}), '(data, fs)\n', (2673, 2683), False, 'from dyntripy.utils import psd, load_gf, gf\n')]
|
from .series import Series
from collections.abc import Iterable
import numpy as np
import pandas as pd
import random
class DataFrame:
def __init__(self, values, index=None, columns=None, copy=True):
self.dict = {}
l = 0
try:
if columns is not None:
for key in columns:
try:
value = values[key]
except:
continue
if l == 0:
l = len(value)
else:
assert l == len(value), "the length of each column must match!"
self.dict[key] = np.array(value, copy=copy)
for key in columns:
if key not in self.dict:
self.dict[key] = np.empty(l)
self.dict[key][:] = np.nan
else:
for key, value in values.items():
if l == 0:
l = len(value)
else:
assert l == len(value), "the length of each column must match!"
self.dict[key] = np.array(value, copy=copy)
except:
for key, value in values.items():
self.dict[key] = np.array([value], copy=copy)
if index is not None:
assert l == len(index), "the length of each column must match!"
self.index = np.array(index, copy=copy)
else:
self.index = np.array(range(l), copy=copy)
def __str__(self):
return str(pd.DataFrame(self.dict, self.index, copy=False))
def head(self, l = 5):
return pd.DataFrame(self.dict, self.index, copy=False).head(l)
def __len__(self):
return len(self.index)
def __getitem__(self, key):
# index with column name
if isinstance(key, list):
if len(key) == 1:
return Series(self.dict[key[0]], self.index, copy=False)
else:
return DataFrame({k: self.dict[k] for k in key}, self.index, copy=False)
# index with boolean Series
elif isinstance(key, Series):
assert len(key) == len(self)
return DataFrame({k: v[key.values] for k, v in self.dict.items()}, self.index[key.values], copy=False)
else:
if not isinstance(key, Iterable) and not isinstance(key, slice):
key = [key]
return DataFrame({k: v[key] for k, v in self.dict.items()}, self.index[key], copy=False)
def get_by_index(self, key):
if not isinstance(key, list):
key = [key]
row2index = dict(zip(range(len(self)), self.index))
key_row = [r for r, i in row2index.items() if i in key]
return self[np.array(key_row, copy=False)]
    # When assigning with both a row and a column, always index the row first
    # and the column second, because numpy advanced indexing returns a copy
    # instead of a view.
def __setitem__(self, key, value):
# index with column name
if isinstance(key, list):
if isinstance(value, Series):
value = value.values
for k in key:
if k in self.dict:
self.dict[k][:] = value
else: # add new column
self.dict[k] = np.empty(len(self))
self.dict[k][:] = value
# index with boolean Series
elif isinstance(key, Series):
for k in self:
self.dict[k][key.values] = value
else:
if not isinstance(key, Iterable) and not isinstance(key, slice):
key = np.array([key])
for k in self:
self.dict[k][key] = value
def set_by_index(self, key, value):
try:
k2v = dict(zip(key, value))
except:
k2v = {key: value}
row2index = dict(zip(range(len(self)), self.index))
key_row = [r for r, i in row2index.items() if i in k2v]
value_row = [k2v[row2index[r]] for r in key_row]
assert len(key_row) == len(value_row)
self[np.array(key_row, copy=False)] = value_row
def __delitem__(self, key):
try:
assert not isinstance(key, str)
for k in key:
try:
del self.dict[k]
except:
pass
except:
del self.dict[key]
def __iter__(self):
for k in self.dict:
yield k
def items(self):
for k, v in self.dict.items():
yield k, v
def iterrows(self):
for i in range(len(self.index)):
yield self.index[i], {k: v[i] for k, v in self.dict.items()}
def append(self, s):
        # currently only supports appending a list of DataFrames, a single DataFrame, or a dict
if isinstance(s, list):
append_dict = {k: [] for k in self.keys()}
append_index = []
for d in s:
assert d.keys() == self.keys(), "keys not conform"
if not isinstance(d, DataFrame):
d = DataFrame(d)
for k in append_dict.keys():
append_dict[k].append(d.dict[k])
append_index.append(d.index)
res_dict = {k: np.append(self.dict[k], append_dict[k]) for k in self.keys()}
res_index = np.append(self.index, append_index)
return DataFrame(res_dict, res_index)
else:
if not isinstance(s, DataFrame):
s = DataFrame(s)
res_dict = {k: np.append(self.dict[k], s.dict[k]) for k in self.keys()}
res_index = np.append(self.index, s.index)
return DataFrame(res_dict, res_index)
def keys(self):
return list(self.dict.keys())
def copy(self):
return DataFrame(self.dict, self.index)
@property
def columns(self):
return list(self.dict.keys())
@property
def T(self):
res_index = self.columns
res_columns = self.index
res_values = np.array([self.dict[k] for k in res_index], copy=False).T
res_dict = dict(zip(res_columns, res_values))
return DataFrame(res_dict, index=res_index, columns=res_columns)
def apply(self, func, type='element'):
if type == 'element':
f = np.vectorize(func)
res_dict = {k: f(v) for k, v in self.dict.items()}
return DataFrame(res_dict, self.index)
elif type == 'row':
res_values = []
for _, v in self.iterrows():
res_values.append(func(v))
return Series(res_values, self.index)
elif type == 'column':
return Series([func(v) for _, v in self.items()], self.keys())
def sample(self, n):
perm = np.array(random.sample(range(len(self)), n), copy=False)
return DataFrame({k: v[perm] for k, v in self.items()}, self.index[perm])
def to_pd(self, copy=True):
return pd.DataFrame(self.dict, self.index, copy=copy)
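# A short usage sketch; because Series is imported relatively above, it only runs
# when the module is executed inside its package (e.g. python -m <package>.<module>).
if __name__ == "__main__":
    df = DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
    print(len(df), df.columns)                 # 3 ['a', 'b']
    print(df[["a", "b"]])                      # selecting several columns returns a DataFrame
    print(df.apply(lambda x: x * 2))           # element-wise apply
    print(df.append({"a": [4], "b": [7.0]}))   # appending a dict adds an extra row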
|
[
"pandas.DataFrame",
"numpy.vectorize",
"numpy.empty",
"numpy.append",
"numpy.array"
] |
[((7065, 7111), 'pandas.DataFrame', 'pd.DataFrame', (['self.dict', 'self.index'], {'copy': 'copy'}), '(self.dict, self.index, copy=copy)\n', (7077, 7111), True, 'import pandas as pd\n'), ((1460, 1486), 'numpy.array', 'np.array', (['index'], {'copy': 'copy'}), '(index, copy=copy)\n', (1468, 1486), True, 'import numpy as np\n'), ((1599, 1646), 'pandas.DataFrame', 'pd.DataFrame', (['self.dict', 'self.index'], {'copy': '(False)'}), '(self.dict, self.index, copy=False)\n', (1611, 1646), True, 'import pandas as pd\n'), ((2806, 2835), 'numpy.array', 'np.array', (['key_row'], {'copy': '(False)'}), '(key_row, copy=False)\n', (2814, 2835), True, 'import numpy as np\n'), ((4178, 4207), 'numpy.array', 'np.array', (['key_row'], {'copy': '(False)'}), '(key_row, copy=False)\n', (4186, 4207), True, 'import numpy as np\n'), ((5423, 5458), 'numpy.append', 'np.append', (['self.index', 'append_index'], {}), '(self.index, append_index)\n', (5432, 5458), True, 'import numpy as np\n'), ((5709, 5739), 'numpy.append', 'np.append', (['self.index', 's.index'], {}), '(self.index, s.index)\n', (5718, 5739), True, 'import numpy as np\n'), ((6125, 6180), 'numpy.array', 'np.array', (['[self.dict[k] for k in res_index]'], {'copy': '(False)'}), '([self.dict[k] for k in res_index], copy=False)\n', (6133, 6180), True, 'import numpy as np\n'), ((6400, 6418), 'numpy.vectorize', 'np.vectorize', (['func'], {}), '(func)\n', (6412, 6418), True, 'import numpy as np\n'), ((1691, 1738), 'pandas.DataFrame', 'pd.DataFrame', (['self.dict', 'self.index'], {'copy': '(False)'}), '(self.dict, self.index, copy=False)\n', (1703, 1738), True, 'import pandas as pd\n'), ((5337, 5376), 'numpy.append', 'np.append', (['self.dict[k]', 'append_dict[k]'], {}), '(self.dict[k], append_dict[k])\n', (5346, 5376), True, 'import numpy as np\n'), ((5628, 5662), 'numpy.append', 'np.append', (['self.dict[k]', 's.dict[k]'], {}), '(self.dict[k], s.dict[k])\n', (5637, 5662), True, 'import numpy as np\n'), ((677, 703), 'numpy.array', 'np.array', (['value'], {'copy': 'copy'}), '(value, copy=copy)\n', (685, 703), True, 'import numpy as np\n'), ((1178, 1204), 'numpy.array', 'np.array', (['value'], {'copy': 'copy'}), '(value, copy=copy)\n', (1186, 1204), True, 'import numpy as np\n'), ((1300, 1328), 'numpy.array', 'np.array', (['[value]'], {'copy': 'copy'}), '([value], copy=copy)\n', (1308, 1328), True, 'import numpy as np\n'), ((3708, 3723), 'numpy.array', 'np.array', (['[key]'], {}), '([key])\n', (3716, 3723), True, 'import numpy as np\n'), ((826, 837), 'numpy.empty', 'np.empty', (['l'], {}), '(l)\n', (834, 837), True, 'import numpy as np\n')]
|
import inspect
import tubular.testing.helpers as h
import tubular
import pandas as pd
import numpy as np
from unittest import mock
from _pytest.mark.structures import ParameterSet
def test_arguments():
"""Test arguments for arguments of tubular.testing.helpers.index_preserved_params."""
expected_arguments = ["df_1", "df_2", "seed"]
arg_spec = inspect.getfullargspec(h.index_preserved_params)
arguments = arg_spec.args
assert len(expected_arguments) == len(
arguments
), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(arguments)}"
for i, (e, a) in enumerate(zip(expected_arguments, arguments)):
assert e == a, f"Incorrect arg at index {i} -\n Expected: {e}\n Actual: {a}"
default_values = arg_spec.defaults
assert default_values == (
0,
), f"Unexpected default values -\n Expected: {(0, )}\n Actual: {default_values}"
def test__check_dfs_passed_call():
"""Test the call to _check_dfs_passed."""
df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[7, 8, 9])
df2 = pd.DataFrame({"a": [2, 3, 4], "b": [5, 6, 7]}, index=[7, 8, 9])
with mock.patch.object(tubular.testing.helpers, "_check_dfs_passed") as mocked:
h.index_preserved_params(df1, df2, seed=1)
assert mocked.call_count == 1, "unexpected number of calls to _check_dfs_passed"
call_args = mocked.call_args_list[0]
assert call_args[1] == {}, "unexpected kwargs in _check_dfs_passed call"
assert call_args[0] == (
df1,
df2,
), "unexpected positional args in _check_dfs_passed call"
def test_returned_object():
"""Test the function returns the expected output."""
df1_1 = pd.DataFrame({"a": [1], "b": [4]}, index=[7])
df1_2 = pd.DataFrame({"a": [2], "b": [5]}, index=[8])
df1_3 = pd.DataFrame({"a": [3], "b": [6]}, index=[9])
df2_1 = pd.DataFrame({"c": [10], "d": [13]}, index=[7])
df2_2 = pd.DataFrame({"c": [11], "d": [14]}, index=[8])
df2_3 = pd.DataFrame({"c": [12], "d": [15]}, index=[9])
df1 = pd.concat([df1_1, df1_2, df1_3], axis=0)
df2 = pd.concat([df2_1, df2_2, df2_3], axis=0)
seed_value = 111
np.random.seed(seed_value)
random_index = np.random.randint(low=-99999999, high=100000000, size=df1.shape[0])
start_decreasing_index = np.random.randint(low=-99999999, high=100000000, size=1)[0]
decreasing_index = range(
start_decreasing_index, start_decreasing_index - df1.shape[0], -1
)
start_increasing_index = np.random.randint(low=-99999999, high=100000000, size=1)[0]
increasing_index = range(
start_increasing_index, start_increasing_index + df1.shape[0], 1
)
df1_copy = df1.copy()
df2_copy = df2.copy()
df1_copy.index = random_index
df2_copy.index = random_index
expected_df_pairs = [(df1_copy, df2_copy)]
df1_copy = df1.copy()
df2_copy = df2.copy()
df1_copy.index = decreasing_index
df2_copy.index = decreasing_index
expected_df_pairs.append((df1_copy, df2_copy))
df1_copy = df1.copy()
df2_copy = df2.copy()
df1_copy.index = increasing_index
df2_copy.index = increasing_index
expected_df_pairs.append((df1_copy, df2_copy))
expected_df_pairs.append((df1, df2))
expected_ids = [
"random index",
"decreasing index",
"increasing index",
"original index",
]
results = h.index_preserved_params(df1, df2, seed=seed_value)
assert (
type(results) is list
), "unexpected type for object returned from index_preserved_params"
assert len(results) == len(
expected_df_pairs
), "unexpected len of object returned from index_preserved_params"
for i in range(len(expected_df_pairs)):
assert (
type(results[i]) is ParameterSet
), f"unexpected type for {i}th item in returned list"
h.assert_equal_dispatch(
expected_df_pairs[i],
results[i].values,
f"unexpected values for {i}th item in returned list",
)
assert (
results[i].marks == ()
), f"unexpected marks for {i}th item in returned list"
assert (
results[i].id == expected_ids[i]
), f"unexpected id for {i}th item in returned list"
|
[
"pandas.DataFrame",
"unittest.mock.patch.object",
"numpy.random.seed",
"inspect.getfullargspec",
"tubular.testing.helpers.assert_equal_dispatch",
"tubular.testing.helpers.index_preserved_params",
"numpy.random.randint",
"pandas.concat"
] |
[((361, 409), 'inspect.getfullargspec', 'inspect.getfullargspec', (['h.index_preserved_params'], {}), '(h.index_preserved_params)\n', (383, 409), False, 'import inspect\n'), ((1033, 1096), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3], 'b': [4, 5, 6]}"], {'index': '[7, 8, 9]'}), "({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=[7, 8, 9])\n", (1045, 1096), True, 'import pandas as pd\n'), ((1107, 1170), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [2, 3, 4], 'b': [5, 6, 7]}"], {'index': '[7, 8, 9]'}), "({'a': [2, 3, 4], 'b': [5, 6, 7]}, index=[7, 8, 9])\n", (1119, 1170), True, 'import pandas as pd\n'), ((1732, 1777), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1], 'b': [4]}"], {'index': '[7]'}), "({'a': [1], 'b': [4]}, index=[7])\n", (1744, 1777), True, 'import pandas as pd\n'), ((1790, 1835), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [2], 'b': [5]}"], {'index': '[8]'}), "({'a': [2], 'b': [5]}, index=[8])\n", (1802, 1835), True, 'import pandas as pd\n'), ((1848, 1893), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [3], 'b': [6]}"], {'index': '[9]'}), "({'a': [3], 'b': [6]}, index=[9])\n", (1860, 1893), True, 'import pandas as pd\n'), ((1907, 1954), 'pandas.DataFrame', 'pd.DataFrame', (["{'c': [10], 'd': [13]}"], {'index': '[7]'}), "({'c': [10], 'd': [13]}, index=[7])\n", (1919, 1954), True, 'import pandas as pd\n'), ((1967, 2014), 'pandas.DataFrame', 'pd.DataFrame', (["{'c': [11], 'd': [14]}"], {'index': '[8]'}), "({'c': [11], 'd': [14]}, index=[8])\n", (1979, 2014), True, 'import pandas as pd\n'), ((2027, 2074), 'pandas.DataFrame', 'pd.DataFrame', (["{'c': [12], 'd': [15]}"], {'index': '[9]'}), "({'c': [12], 'd': [15]}, index=[9])\n", (2039, 2074), True, 'import pandas as pd\n'), ((2086, 2126), 'pandas.concat', 'pd.concat', (['[df1_1, df1_2, df1_3]'], {'axis': '(0)'}), '([df1_1, df1_2, df1_3], axis=0)\n', (2095, 2126), True, 'import pandas as pd\n'), ((2137, 2177), 'pandas.concat', 'pd.concat', (['[df2_1, df2_2, df2_3]'], {'axis': '(0)'}), '([df2_1, df2_2, df2_3], axis=0)\n', (2146, 2177), True, 'import pandas as pd\n'), ((2205, 2231), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (2219, 2231), True, 'import numpy as np\n'), ((2251, 2318), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-99999999)', 'high': '(100000000)', 'size': 'df1.shape[0]'}), '(low=-99999999, high=100000000, size=df1.shape[0])\n', (2268, 2318), True, 'import numpy as np\n'), ((3441, 3492), 'tubular.testing.helpers.index_preserved_params', 'h.index_preserved_params', (['df1', 'df2'], {'seed': 'seed_value'}), '(df1, df2, seed=seed_value)\n', (3465, 3492), True, 'import tubular.testing.helpers as h\n'), ((1181, 1244), 'unittest.mock.patch.object', 'mock.patch.object', (['tubular.testing.helpers', '"""_check_dfs_passed"""'], {}), "(tubular.testing.helpers, '_check_dfs_passed')\n", (1198, 1244), False, 'from unittest import mock\n'), ((1265, 1307), 'tubular.testing.helpers.index_preserved_params', 'h.index_preserved_params', (['df1', 'df2'], {'seed': '(1)'}), '(df1, df2, seed=1)\n', (1289, 1307), True, 'import tubular.testing.helpers as h\n'), ((2348, 2404), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-99999999)', 'high': '(100000000)', 'size': '(1)'}), '(low=-99999999, high=100000000, size=1)\n', (2365, 2404), True, 'import numpy as np\n'), ((2547, 2603), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-99999999)', 'high': '(100000000)', 'size': '(1)'}), '(low=-99999999, high=100000000, size=1)\n', (2564, 2603), True, 'import numpy as np\n'), 
((3918, 4040), 'tubular.testing.helpers.assert_equal_dispatch', 'h.assert_equal_dispatch', (['expected_df_pairs[i]', 'results[i].values', 'f"""unexpected values for {i}th item in returned list"""'], {}), "(expected_df_pairs[i], results[i].values,\n f'unexpected values for {i}th item in returned list')\n", (3941, 4040), True, 'import tubular.testing.helpers as h\n')]
|
import cv2 as cv
import numpy as np
from skimage.color import label2rgb
from skimage.morphology import diamond
from matching_pcb.image import to_rgb
def _color(mask, color):
return np.stack([mask * c for c in color], axis=2)
def draw_bbox(image, bbox, color=(255, 0, 0)):
x1, y1, w, h = list(map(int, bbox))
pt1, pt2 = (x1, y1), (x1 + w, y1 + h)
return cv.rectangle(image, pt1, pt2, color)
def show_image(image, color="RGB"):
from IPython.display import display
from PIL import Image as Image2
rgb = to_rgb(image, color=color)
display(Image2.fromarray(rgb, "RGB"))
def show_images(images, cols=2, color="RGB"):
from IPython.display import display
from PIL import Image as Image2
images = [to_rgb(img, color=color) for img in images]
h_ = min([img.shape[0] for img in images])
w_ = min([img.shape[1] for img in images])
images = [img[:h_, :w_] for img in images]
for i in range(0, len(images), cols):
batch_rgb = np.concatenate(images[i:i + cols], axis=1)
display(Image2.fromarray(batch_rgb, "RGB"))
def label_regions(image, masks, colors=(255, 0, 0), only_edge=True):
if not isinstance(masks, list):
masks = [masks]
if not isinstance(colors, list):
colors = [colors]
masks = [(m > 0).astype(np.uint8) for m in masks]
if only_edge:
kernel = diamond(2)
# kernel = np.ones((3, 3), np.uint8)
masks = [cv.dilate(m, kernel) - m for m in masks]
_mask = np.stack(masks, axis=2).sum(axis=2)
_mask = (_mask > 0).astype(np.uint8)
area = np.zeros_like(image, dtype=np.int32)
for m, c in zip(masks, colors):
area += _color(m, c)
area = np.clip(area, 0, 255).astype(np.uint8)
_mask = np.stack((_mask, _mask, _mask), axis=2)
img1 = image * (1 - _mask)
img2 = area * _mask
return img1 + img2
def label_to_rgb(label, image, color="red", alpha=0.3):
return label2rgb(label, image, colors=[color], alpha=alpha,
bg_label=0, bg_color=None)
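# A minimal usage sketch, assuming the module's imports (cv2, skimage, matching_pcb)
# resolve; it uses a synthetic image so nothing is read from disk.
if __name__ == "__main__":
    canvas = np.zeros((64, 64, 3), dtype=np.uint8)              # black RGB canvas
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[16:48, 16:48] = 1                                      # square region to outline
    outlined = label_regions(canvas, mask, colors=(0, 255, 0))  # draw a green edge around the mask
    boxed = draw_bbox(outlined.copy(), (8, 8, 20, 20), color=(255, 0, 0))
    print(outlined.shape, boxed.shape)                          # (64, 64, 3) (64, 64, 3)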
|
[
"numpy.stack",
"numpy.zeros_like",
"skimage.color.label2rgb",
"cv2.dilate",
"skimage.morphology.diamond",
"matching_pcb.image.to_rgb",
"numpy.clip",
"cv2.rectangle",
"PIL.Image.fromarray",
"numpy.concatenate"
] |
[((188, 233), 'numpy.stack', 'np.stack', (['[(mask * c) for c in color]'], {'axis': '(2)'}), '([(mask * c) for c in color], axis=2)\n', (196, 233), True, 'import numpy as np\n'), ((374, 410), 'cv2.rectangle', 'cv.rectangle', (['image', 'pt1', 'pt2', 'color'], {}), '(image, pt1, pt2, color)\n', (386, 410), True, 'import cv2 as cv\n'), ((536, 562), 'matching_pcb.image.to_rgb', 'to_rgb', (['image'], {'color': 'color'}), '(image, color=color)\n', (542, 562), False, 'from matching_pcb.image import to_rgb\n'), ((1590, 1626), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'np.int32'}), '(image, dtype=np.int32)\n', (1603, 1626), True, 'import numpy as np\n'), ((1755, 1794), 'numpy.stack', 'np.stack', (['(_mask, _mask, _mask)'], {'axis': '(2)'}), '((_mask, _mask, _mask), axis=2)\n', (1763, 1794), True, 'import numpy as np\n'), ((1943, 2022), 'skimage.color.label2rgb', 'label2rgb', (['label', 'image'], {'colors': '[color]', 'alpha': 'alpha', 'bg_label': '(0)', 'bg_color': 'None'}), '(label, image, colors=[color], alpha=alpha, bg_label=0, bg_color=None)\n', (1952, 2022), False, 'from skimage.color import label2rgb\n'), ((575, 603), 'PIL.Image.fromarray', 'Image2.fromarray', (['rgb', '"""RGB"""'], {}), "(rgb, 'RGB')\n", (591, 603), True, 'from PIL import Image as Image2\n'), ((744, 768), 'matching_pcb.image.to_rgb', 'to_rgb', (['img'], {'color': 'color'}), '(img, color=color)\n', (750, 768), False, 'from matching_pcb.image import to_rgb\n'), ((993, 1035), 'numpy.concatenate', 'np.concatenate', (['images[i:i + cols]'], {'axis': '(1)'}), '(images[i:i + cols], axis=1)\n', (1007, 1035), True, 'import numpy as np\n'), ((1374, 1384), 'skimage.morphology.diamond', 'diamond', (['(2)'], {}), '(2)\n', (1381, 1384), False, 'from skimage.morphology import diamond\n'), ((1052, 1086), 'PIL.Image.fromarray', 'Image2.fromarray', (['batch_rgb', '"""RGB"""'], {}), "(batch_rgb, 'RGB')\n", (1068, 1086), True, 'from PIL import Image as Image2\n'), ((1501, 1524), 'numpy.stack', 'np.stack', (['masks'], {'axis': '(2)'}), '(masks, axis=2)\n', (1509, 1524), True, 'import numpy as np\n'), ((1703, 1724), 'numpy.clip', 'np.clip', (['area', '(0)', '(255)'], {}), '(area, 0, 255)\n', (1710, 1724), True, 'import numpy as np\n'), ((1447, 1467), 'cv2.dilate', 'cv.dilate', (['m', 'kernel'], {}), '(m, kernel)\n', (1456, 1467), True, 'import cv2 as cv\n')]
|
#!/usr/local/bin/python
import argparse
import base64
import collections
import filecmp
import json
import logging
import ntpath
import numpy
import os
import re
import shutil
from io import BytesIO
from PIL import Image
from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils
from _gltf2usd.gltf2loader import GLTF2Loader, PrimitiveMode, TextureWrap, MinFilter, MagFilter
from _gltf2usd.gltf2usdUtils import GLTF2USDUtils
from _gltf2usd.usd_material import USDMaterial
from _gltf2usd import version
__version__ = version.Version.get_version_name()
class GLTF2USD(object):
"""
    Class for converting glTF 2.0 models to Pixar's USD format. Currently only supports .gltf files
    with non-embedded data and exports to .usda.
"""
TEXTURE_SAMPLER_WRAP = {
TextureWrap.CLAMP_TO_EDGE : 'clamp',
TextureWrap.MIRRORED_REPEAT : 'mirror',
TextureWrap.REPEAT: 'repeat',
}
def __init__(self, gltf_file, usd_file, fps, scale, verbose=False, use_euler_rotation=False, optimize_textures=False, generate_texture_transform_texture=True):
"""Initializes the glTF to USD converter
Arguments:
gltf_file {str} -- path to the glTF file
usd_file {str} -- path to store the generated usda file
verbose {boolean} -- specifies if the output should be verbose from this tool
"""
self.logger = logging.getLogger('gltf2usd')
self.logger.setLevel(logging.DEBUG)
self.fps = fps
self.gltf_loader = GLTF2Loader(gltf_file, optimize_textures, generate_texture_transform_texture)
self.verbose = verbose
self.scale = scale
self.use_euler_rotation = use_euler_rotation
self.output_dir = os.path.dirname(os.path.abspath(usd_file))
if self.verbose:
self.logger.info("Converting {0} to {1}".format(gltf_file, usd_file))
#if usdz file is desired, change to usdc file
if usd_file.endswith('usdz'):
usd_file = usd_file[:-1] + 'c'
self.logger.info("converted usd file extension from .usdz to .usdc: {}".format(usd_file))
self.stage = Usd.Stage.CreateNew(usd_file)
self.gltf_usd_nodemap = {}
self.gltf_usdskel_nodemap = {}
self._usd_mesh_skin_map = {}
self._joint_hierarchy_name_map = {}
self.convert()
def convert_nodes_to_xform(self):
"""
Converts the glTF nodes to USD Xforms. The models get a parent Xform that scales the geometry by 100
to convert from meters (glTF) to centimeters (USD).
"""
parent_transform = UsdGeom.Xform.Define(self.stage, '/root')
parent_transform.AddScaleOp().Set((self.scale, self.scale, self.scale))
main_scene = self.gltf_loader.get_main_scene()
nodes = main_scene.get_nodes()
root_nodes = [node for node in nodes if node.parent == None]
for node in root_nodes:
self._convert_node_to_xform(node, parent_transform)
def _convert_node_to_xform(self, node, usd_xform):
"""Converts a glTF node to a USD transform node.
Arguments:
            node {Node} -- glTF node to convert
            usd_xform {UsdGeom.Xform} -- parent USD xform under which the node is created
"""
xformPrim = UsdGeom.Xform.Define(self.stage, '{0}/{1}'.format(usd_xform.GetPath(), GLTF2USDUtils.convert_to_usd_friendly_node_name(node.name)))
if self._node_has_animations(node):
self._convert_animation_to_usd(node, xformPrim)
else:
xformPrim.AddTransformOp().Set(self._compute_rest_matrix(node))
mesh = node.get_mesh()
if mesh != None:
usd_mesh = self._convert_mesh_to_xform(mesh, xformPrim, node)
children = node.get_children()
for child in children:
self._convert_node_to_xform(child, xformPrim)
def _create_usd_skeleton(self, gltf_skin, usd_xform, usd_joint_names):
"""Creates a USD skeleton from a glTF skin
Arguments:
gltf_skin {Skin} -- gltf skin
usd_xform {Xform} -- USD Xform
Returns:
Skeleton -- USD skeleton
"""
# create skeleton
root_joints = gltf_skin.root_joints
root_joint_names = [GLTF2USDUtils.convert_to_usd_friendly_node_name(root_joint.name) for root_joint in root_joints]
skeleton = None
if len(root_joints) == 1:
skeleton = UsdSkel.Skeleton.Define(self.stage, '{0}/{1}'.format(usd_xform.GetPath(), root_joint_names[0]))
else:
skeleton = UsdSkel.Skeleton.Define(self.stage, '{0}/{1}'.format(usd_xform.GetPath(), '__root__'))
gltf_bind_transforms = [Gf.Matrix4d(*xform).GetInverse() for xform in gltf_skin.get_inverse_bind_matrices()]
gltf_rest_transforms = [GLTF2USDUtils.compute_usd_transform_matrix_from_gltf_node(joint) for joint in gltf_skin.get_joints()]
if len(root_joints) > 1:
matrix = Gf.Matrix4d()
matrix.SetIdentity()
skeleton.CreateJointsAttr().Set(usd_joint_names)
skeleton.CreateBindTransformsAttr(gltf_bind_transforms)
skeleton.CreateRestTransformsAttr(gltf_rest_transforms)
return skeleton
def _create_usd_skeleton_animation(self, gltf_skin, usd_skeleton, joint_names):
#get the animation data per joint
skelAnim = None
gltf_animations = self.gltf_loader.get_animations()
if len(gltf_animations):
skelAnim = UsdSkel.Animation.Define(self.stage, '{0}/{1}'.format(usd_skeleton.GetPath(), 'anim'))
usd_skel_root_path = usd_skeleton.GetPath().GetParentPath()
usd_skel_root = self.stage.GetPrimAtPath(usd_skel_root_path)
skelAnim.CreateJointsAttr().Set(joint_names)
gltf_animation = self.gltf_loader.get_animations()[0]
min_sample = 999
max_sample = -999
for sampler in gltf_animation.get_samplers():
input_data = sampler.get_input_data()
min_sample = min(min_sample, input_data[0])
max_sample = max(max_sample, input_data[-1])
rotate_attr = skelAnim.CreateRotationsAttr()
for input_key in numpy.arange(min_sample, max_sample, 1./self.fps):
entries = []
for joint in gltf_skin.get_joints():
anim = self._get_anim_data_for_joint_and_path(gltf_animation, joint, 'rotation', input_key)
entries.append(anim)
if len(gltf_skin.get_joints()) != len(entries):
                    raise Exception('joint count does not match rotation entry count')
rotate_attr.Set(Vt.QuatfArray(entries), Usd.TimeCode(input_key * self.fps))
translate_attr = skelAnim.CreateTranslationsAttr()
for input_key in numpy.arange(min_sample, max_sample, 1./self.fps):
entries = []
for joint in gltf_skin.get_joints():
anim = self._get_anim_data_for_joint_and_path(gltf_animation, joint, 'translation', input_key)
entries.append(anim)
if len(gltf_skin.get_joints()) != len(entries):
                    raise Exception('joint count does not match translation entry count')
translate_attr.Set(entries, Usd.TimeCode(input_key * self.fps))
scale_attr = skelAnim.CreateScalesAttr()
for input_key in numpy.arange(min_sample, max_sample, 1./self.fps):
entries = []
for joint in gltf_skin.get_joints():
anim = self._get_anim_data_for_joint_and_path(gltf_animation, joint, 'scale', input_key)
entries.append(anim)
if len(gltf_skin.get_joints()) != len(entries):
                    raise Exception('joint count does not match scale entry count')
scale_attr.Set(entries, Usd.TimeCode(input_key * self.fps))
return skelAnim
def _get_anim_data_for_joint_and_path(self, gltf_animation, gltf_joint, path, time_sample):
anim_channel = gltf_animation.get_animation_channel_for_node_and_path(gltf_joint, path)
if not anim_channel:
if path == 'translation':
return gltf_joint.translation
elif path == 'rotation':
gltf_rotation = gltf_joint.rotation
usd_rotation = Gf.Quatf(gltf_rotation[3], gltf_rotation[0], gltf_rotation[1], gltf_rotation[2])
return usd_rotation
elif path == 'scale':
return gltf_joint.scale
else:
if path == 'rotation':
rotation = anim_channel.sampler.get_interpolated_output_data(time_sample)
return rotation
elif path == 'scale' or path =='translation':
return anim_channel.sampler.get_interpolated_output_data(time_sample)
else:
raise Exception('unsupported animation type: {}'.format(path))
def _get_usd_joint_hierarchy_name(self, gltf_joint, root_joints):
if gltf_joint in self._joint_hierarchy_name_map:
return GLTF2USDUtils.convert_to_usd_friendly_node_name(self._joint_hierarchy_name_map[gltf_joint])
joint = gltf_joint
joint_name_stack = [GLTF2USDUtils.convert_to_usd_friendly_node_name(joint.name)]
while joint.parent != None and joint not in root_joints:
joint = joint.parent
joint_name_stack.append(GLTF2USDUtils.convert_to_usd_friendly_node_name(joint.name))
joint_name = ''
while len(joint_name_stack) > 0:
if joint_name:
joint_name = '{0}/{1}'.format(joint_name, joint_name_stack.pop())
else:
joint_name = joint_name_stack.pop()
self._joint_hierarchy_name_map[gltf_joint] = joint_name
return GLTF2USDUtils.convert_to_usd_friendly_node_name(joint_name)
def _convert_mesh_to_xform(self, gltf_mesh, usd_node, gltf_node):
"""
Converts a glTF mesh to a USD Xform.
Each primitive becomes a submesh of the Xform.
Arguments:
            gltf_mesh {Mesh} -- glTF mesh
            usd_node {UsdGeom.Xform} -- parent USD xform
            gltf_node {Node} -- glTF node that references the mesh
"""
#for each mesh primitive, create a USD mesh
for primitive in gltf_mesh.get_primitives():
self._convert_primitive_to_mesh(primitive, usd_node, gltf_node, gltf_mesh)
def _convert_primitive_to_mesh(self, gltf_primitive, usd_node, gltf_node, gltf_mesh):
"""
Converts a glTF mesh primitive to a USD mesh
Arguments:
            gltf_primitive {Primitive} -- glTF mesh primitive
            usd_node {UsdGeom.Xform} -- parent USD node
            gltf_node {Node} -- glTF node that references the mesh
            gltf_mesh {Mesh} -- glTF mesh that owns the primitive
"""
parent_node = usd_node
parent_path = parent_node.GetPath()
attributes = gltf_primitive.get_attributes()
skel_root = None
targets = gltf_primitive.get_morph_targets()
if 'JOINTS_0' in attributes or len(targets) > 0:
skeleton_path = '{0}/{1}'.format(usd_node.GetPath(), 'skeleton_root')
skel_root = UsdSkel.Root.Define(self.stage, skeleton_path)
parent_node = skel_root
parent_path = parent_node.GetPath()
mesh = UsdGeom.Mesh.Define(self.stage, '{0}/{1}'.format(parent_node.GetPath(), GLTF2USDUtils.convert_to_usd_friendly_node_name(gltf_primitive.get_name())))
mesh.CreateSubdivisionSchemeAttr().Set('none')
material = gltf_primitive.get_material()
if material != None:
if material.is_double_sided():
mesh.CreateDoubleSidedAttr().Set(True)
usd_material = self.usd_materials[material.get_index()]
UsdShade.MaterialBindingAPI(mesh).Bind(usd_material.get_usd_material())
for attribute_name in attributes:
attribute = attributes[attribute_name]
if attribute_name == 'POSITION':
override_prim = self.stage.OverridePrim(mesh.GetPath())
override_prim.CreateAttribute('extent', Sdf.ValueTypeNames.Float3Array).Set([attribute.get_min_value(), attribute.get_max_value()])
mesh.CreatePointsAttr(attribute.get_data())
if attribute_name == 'NORMAL':
mesh.CreateNormalsAttr(attribute.get_data())
if attribute_name == 'COLOR_0':
prim_var = UsdGeom.PrimvarsAPI(mesh)
data = attribute.get_data()
if attribute.accessor_type == 'VEC4':
print('Vertex color alpha currently not supported. Defaulting to vertex color without alpha.')
data = [Gf.Vec3f(entry[0:3]) for entry in attribute.get_data()]
colors = prim_var.CreatePrimvar('displayColor', Sdf.ValueTypeNames.Color3f, 'vertex').Set(data)
if attribute_name == 'TEXCOORD_0':
data = attribute.get_data()
invert_uvs = []
for uv in data:
new_uv = (uv[0], 1 - uv[1])
invert_uvs.append(new_uv)
prim_var = UsdGeom.PrimvarsAPI(mesh)
uv = prim_var.CreatePrimvar('primvars:st0', Sdf.ValueTypeNames.TexCoord2fArray, 'vertex')
uv.Set(invert_uvs)
if attribute_name == 'JOINTS_0':
self._convert_skin_to_usd(gltf_node, gltf_primitive, parent_node, mesh)
weights = gltf_mesh.get_weights()
if targets:
skinBinding = UsdSkel.BindingAPI.Apply(mesh.GetPrim())
skeleton = UsdSkel.Skeleton.Define(self.stage, '{0}/skel'.format(parent_path))
# Create an animation for this mesh to hold the blendshapes
skelAnim = UsdSkel.Animation.Define(self.stage, '{0}/skel/anim'.format(parent_path))
# link the skeleton animation to skelAnim
skinBinding.CreateAnimationSourceRel().AddTarget(skelAnim.GetPath())
# link the skeleton to skeleton
skinBinding.CreateSkeletonRel().AddTarget(skeleton.GetPath())
# Set blendshape names on the animation
names = []
for i, _ in enumerate(gltf_mesh.get_weights()):
targets[i].get_name()
blend_shape_name = GLTF2USDUtils.convert_to_usd_friendly_node_name(targets[i].get_name())
names.append(blend_shape_name)
skelAnim.CreateBlendShapesAttr().Set(names)
skinBinding.CreateBlendShapesAttr(names)
# Set the starting weights of each blendshape to the weights defined in the glTF primitive
skelAnim.CreateBlendShapeWeightsAttr().Set(weights)
blend_shape_targets = skinBinding.CreateBlendShapeTargetsRel()
# Define offsets for each blendshape, and add them as skel:blendShapes and skel:blendShapeTargets
for i, name in enumerate(names):
offsets = targets[i].get_attributes()['POSITION']
blend_shape_name = '{0}/{1}'.format(mesh.GetPath(), name)
# define blendshapes in the mesh
blend_shape = UsdSkel.BlendShape.Define(self.stage, blend_shape_name)
blend_shape.CreateOffsetsAttr(offsets)
blend_shape_targets.AddTarget(name)
indices = gltf_primitive.get_indices()
        num_faces = len(indices) // 3  # integer division so the face-count list works under Python 3
face_count = [3] * num_faces
mesh.CreateFaceVertexCountsAttr(face_count)
mesh.CreateFaceVertexIndicesAttr(indices)
def _get_texture__wrap_modes(self, texture):
"""Get the USD texture wrap modes from a glTF texture
Arguments:
texture {dict} -- glTF texture
Returns:
dict -- dictionary mapping wrapS and wrapT to
a USD texture sampler mode
"""
texture_data = {'wrapS': 'repeat', 'wrapT': 'repeat'}
if 'sampler' in texture:
sampler = self.gltf_loader.json_data['samplers'][texture['sampler']]
if 'wrapS' in sampler:
texture_data['wrapS'] = GLTF2USD.TEXTURE_SAMPLER_WRAP[TextureWrap(sampler['wrapS'])]
if 'wrapT' in sampler:
texture_data['wrapT'] = GLTF2USD.TEXTURE_SAMPLER_WRAP[TextureWrap(sampler['wrapT'])]
return texture_data
def _convert_images_to_usd(self):
"""
Converts the glTF images to USD images
"""
if 'images' in self.gltf_loader.json_data:
self.images = []
for i, image in enumerate(self.gltf_loader.json_data['images']):
image_name = ''
# save data-uri textures
if 'bufferView' in image or image['uri'].startswith('data:image'):
img = None
if 'bufferView' in image:
buffer_view = self.gltf_loader.json_data['bufferViews'][image['bufferView']]
buffer = self.gltf_loader.json_data['buffers'][buffer_view['buffer']]
img_base64 = buffer['uri'].split(',')[1]
buff = BytesIO()
buff.write(base64.b64decode(img_base64))
buff.seek(buffer_view['byteOffset'])
img = Image.open(BytesIO(buff.read(buffer_view['byteLength'])))
elif image['uri'].startswith('data:image'):
uri_data = image['uri'].split(',')[1]
img = Image.open(BytesIO(base64.b64decode(uri_data)))
# NOTE: image might not have a name
image_name = image['name'] if 'name' in image else 'image{}.{}'.format(i, img.format.lower())
image_path = os.path.join(self.gltf_loader.root_dir, image_name)
img.save(image_path)
# otherwise just copy the texture over
else:
image_path = os.path.join(self.gltf_loader.root_dir, image['uri'])
image_name = os.path.join(self.output_dir, ntpath.basename(image_path))
if (self.gltf_loader.root_dir is not self.output_dir) and (image_path is not image_name):
if not (os.path.isfile(image_name) and filecmp.cmp(image_path, image_name)):
shutil.copyfile(image_path, image_name)
self.images.append(ntpath.basename(image_name))
def _convert_materials_to_preview_surface_new(self):
"""
Converts the glTF materials to preview surfaces
"""
self.usd_materials = []
material_path_root = '/Materials'
scope = UsdGeom.Scope.Define(self.stage, material_path_root)
for i, material in enumerate(self.gltf_loader.get_materials()):
usd_material = USDMaterial(self.stage, scope, i, self.gltf_loader)
usd_material.convert_material_to_usd_preview_surface(material, self.output_dir)
self.usd_materials.append(usd_material)
def _node_has_animations(self, gltf_node):
animations = self.gltf_loader.get_animations()
for animation in animations:
animation_channels = animation.get_animation_channels_for_node(gltf_node)
if len(animation_channels) > 0:
return True
return False
def _convert_animation_to_usd(self, gltf_node, usd_node):
animations = self.gltf_loader.get_animations()
if (len(animations) > 0): # only support first animation group
animation = animations[0]
animation_channels = animation.get_animation_channels_for_node(gltf_node)
if len(animation_channels) > 0:
total_max_time = -999
total_min_time = 999
min_max_time = self._create_usd_animation2(usd_node, gltf_node, animation_channels)
total_max_time = max(total_max_time, min_max_time.max)
total_min_time = min(total_min_time, min_max_time.min)
self.stage.SetStartTimeCode(total_min_time * self.fps)
self.stage.SetEndTimeCode(total_max_time * self.fps)
self.stage.SetTimeCodesPerSecond(self.fps)
def _create_keyframe_transform_node(self, gltf_node, animation_channels, input_sample):
matrix = gltf_node.matrix
if matrix:
translation = Gf.Vec3f()
rotation = Gf.Quatf()
scale = Gf.Vec3h()
usd_matrix = self._convert_to_usd_matrix(matrix)
UsdSkel.DecomposeTransform(usd_matrix, translation, rotation, scale)
else:
translation = Gf.Vec3f(gltf_node.translation)
rotation = Gf.Quatf(gltf_node.rotation[3], gltf_node.rotation[0], gltf_node.rotation[1], gltf_node.rotation[2])
scale = Gf.Vec3h(gltf_node.scale)
for animation_channel in animation_channels:
if animation_channel.target.path == 'translation':
translation = animation_channel.sampler.get_interpolated_output_data(input_sample)
elif animation_channel.target.path == 'rotation':
rotation = animation_channel.sampler.get_interpolated_output_data(input_sample)
elif animation_channel.target.path == 'scale':
scale = animation_channel.sampler.get_interpolated_output_data(input_sample)
return UsdSkel.MakeTransform(translation, rotation, scale)
def _convert_skin_to_usd(self, gltf_node, gltf_primitive, usd_node, usd_mesh):
"""Converts a glTF skin to a UsdSkel
Arguments:
            gltf_node {Node} -- glTF node that owns the skin
            gltf_primitive {Primitive} -- glTF mesh primitive being skinned
            usd_node {UsdPrim} -- parent USD node
            usd_mesh {UsdGeom.Mesh} -- USD mesh to bind the skeleton to
"""
skel_binding_api = UsdSkel.BindingAPI(usd_mesh)
gltf_skin = gltf_node.get_skin()
gltf_joint_names = [GLTF2USDUtils.convert_to_usd_friendly_node_name(joint.name) for joint in gltf_skin.get_joints()]
usd_joint_names = [Sdf.Path(self._get_usd_joint_hierarchy_name(joint, gltf_skin.root_joints)) for joint in gltf_skin.get_joints()]
skeleton = self._create_usd_skeleton(gltf_skin, usd_node, usd_joint_names)
skeleton_animation = self._create_usd_skeleton_animation(gltf_skin, skeleton, usd_joint_names)
parent_path = usd_node.GetPath()
bind_matrices = []
rest_matrices = []
skeleton_root = self.stage.GetPrimAtPath(parent_path)
skel_binding_api = UsdSkel.BindingAPI(usd_mesh)
skel_binding_api.CreateGeomBindTransformAttr(Gf.Matrix4d(((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1))))
skel_binding_api.CreateSkeletonRel().AddTarget(skeleton.GetPath())
if skeleton_animation:
skel_binding_api.CreateAnimationSourceRel().AddTarget(skeleton_animation.GetPath())
bind_matrices = self._compute_bind_transforms(gltf_skin)
primitive_attributes = gltf_primitive.get_attributes()
if 'WEIGHTS_0' in primitive_attributes and 'JOINTS_0' in primitive_attributes:
total_vertex_weights = primitive_attributes['WEIGHTS_0'].get_data()
total_vertex_joints = primitive_attributes['JOINTS_0'].get_data()
total_joint_indices = []
total_joint_weights = []
for joint_indices, weights in zip(total_vertex_joints, total_vertex_weights):
for joint_index, weight in zip(joint_indices, weights):
total_joint_indices.append(joint_index)
total_joint_weights.append(weight)
joint_indices_attr = skel_binding_api.CreateJointIndicesPrimvar(False, 4).Set(total_joint_indices)
total_joint_weights = Vt.FloatArray(total_joint_weights)
UsdSkel.NormalizeWeights(total_joint_weights, 4)
joint_weights_attr = skel_binding_api.CreateJointWeightsPrimvar(False, 4).Set(total_joint_weights)
def _compute_bind_transforms(self, gltf_skin):
"""Compute the bind matrices from the skin
Arguments:
gltf_skin {Skin} -- glTF skin
Returns:
[list] -- List of bind matrices
"""
bind_matrices = []
inverse_bind_matrices = gltf_skin.get_inverse_bind_matrices()
for matrix in inverse_bind_matrices:
bind_matrix = self._convert_to_usd_matrix(matrix).GetInverse()
bind_matrices.append(bind_matrix)
return bind_matrices
def _convert_to_usd_matrix(self, matrix):
"""Converts a glTF matrix to a Usd Matrix
Arguments:
            matrix {list} -- glTF 4x4 transform as a flat, column-major list of 16 floats
        Returns:
            Gf.Matrix4d -- equivalent USD matrix
"""
return Gf.Matrix4d(
matrix[0], matrix[1], matrix[2], matrix[3],
matrix[4], matrix[5], matrix[6], matrix[7],
matrix[8], matrix[9], matrix[10], matrix[11],
matrix[12], matrix[13], matrix[14], matrix[15]
)
def _compute_rest_matrix(self, gltf_node):
"""
Compute the rest matrix from a glTF node.
The translation, rotation and scale are combined into a transformation matrix
Returns:
Matrix4d -- USD matrix
"""
xform_matrix = None
matrix = gltf_node.matrix
if matrix != None:
xform_matrix = self._convert_to_usd_matrix(matrix)
return xform_matrix
else:
usd_scale = Gf.Vec3h(1,1,1)
usd_rotation = Gf.Quatf().GetIdentity()
usd_translation = Gf.Vec3f(0,0,0)
scale = gltf_node.scale
usd_scale = Gf.Vec3h(scale[0], scale[1], scale[2])
rotation = gltf_node.rotation
usd_rotation = Gf.Quatf(rotation[3], rotation[0], rotation[1], rotation[2])
translation = gltf_node.translation
usd_translation = Gf.Vec3f(translation[0], translation[1], translation[2])
return UsdSkel.MakeTransform(usd_translation, usd_rotation, usd_scale)
def _create_usd_animation(self, usd_node, animation_channel):
"""Converts a glTF animation to a USD animation
Arguments:
            usd_node {UsdGeom.Xform} -- USD node to animate
animation_channel {AnimationMap} -- map of animation target path and animation sampler indices
Returns:
            MinMaxTime -- named tuple holding the animation's max and min key times
"""
sampler = animation_channel.sampler
max_time = int(round(sampler.get_input_max()[0] ))
min_time = int(round(sampler.get_input_min()[0] ))
input_keyframes = sampler.get_input_data()
output_keyframes = sampler.get_output_data()
num_values = sampler.get_output_count() / sampler.get_input_count()
(transform, convert_func) = self._get_keyframe_conversion_func(usd_node, animation_channel)
for i, keyframe in enumerate(numpy.arange(min_time, max_time, 1./self.fps)):
convert_func(transform, keyframe, output_keyframes, i, num_values)
MinMaxTime = collections.namedtuple('MinMaxTime', ('max', 'min'))
return MinMaxTime(max=max_time, min=min_time)
def _create_usd_animation2(self, usd_node, gltf_node, animation_channels):
"""Converts a glTF animation to a USD animation
Arguments:
            usd_node {UsdGeom.Xform} -- USD node to animate
            gltf_node {Node} -- glTF node providing the rest transform
animation_channel {AnimationMap} -- map of animation target path and animation sampler indices
Returns:
            MinMaxTime -- named tuple holding the animation's max and min key times
"""
max_time = -999
min_time = 999
for channel in animation_channels:
max_time = max(max_time, channel.sampler.get_input_max()[0])
min_time = min(min_time, channel.sampler.get_input_min()[0])
transform = usd_node.AddTransformOp(opSuffix='transform')
for i, keyframe in enumerate(numpy.arange(min_time, max_time, 1./self.fps)):
transform_node = self._create_keyframe_transform_node(gltf_node, animation_channels, keyframe)
transform.Set(transform_node, Usd.TimeCode(i))
MinMaxTime = collections.namedtuple('MinMaxTime', ('max', 'min'))
return MinMaxTime(max=max_time, min=min_time)
def _get_keyframe_conversion_func(self, usd_node, animation_channel):
"""Convert glTF key frames to USD animation key frames
Arguments:
usd_node {UsdPrim} -- USD node to apply animations to
animation_channel {obj} -- glTF animation
Raises:
Exception -- [description]
Returns:
[func] -- USD animation conversion function
"""
path = animation_channel.target.path
animation_sampler = animation_channel.sampler
def convert_translation(transform, time, output, i, _):
value = animation_sampler.get_interpolated_output_data(time)
transform.Set(time=time * self.fps, value=(value[0], value[1], value[2]))
def convert_scale(transform, time, output, i, _):
value = animation_sampler.get_interpolated_output_data(time)
transform.Set(time=time * self.fps, value=(value[0], value[1], value[2]))
def convert_rotation(transform, time, output, i, _):
value = animation_sampler.get_interpolated_output_data(time)
if self.use_euler_rotation:
value = Gf.Rotation(value).Decompose((1,0,0), (0,1,0), (0,0,1))
transform.Set(time=time * self.fps, value=value)
def convert_weights(transform, time, output, i, values_per_step):
start = i * values_per_step
end = start + values_per_step
values = output[start:end]
value = list(map(lambda x: round(x, 5) + 0, values))
transform.Set(time=time * self.fps, value=value)
if path == 'translation':
return (usd_node.AddTranslateOp(opSuffix='translate'), convert_translation)
elif path == 'rotation':
if self.use_euler_rotation:
return (usd_node.AddRotateXYZOp(opSuffix='rotate'), convert_rotation)
else:
return (usd_node.AddOrientOp(opSuffix='rotate'), convert_rotation)
elif path == 'scale':
return (usd_node.AddScaleOp(opSuffix='scale'), convert_scale)
elif path == 'weights':
prim = usd_node.GetPrim().GetChild("skeleton_root").GetChild("skel").GetChild("anim")
anim_attr = prim.GetAttribute('blendShapeWeights')
return (anim_attr, convert_weights)
else:
raise Exception('Unsupported animation target path! {}'.format(path))
def convert(self):
if hasattr(self, 'gltf_loader'):
self._convert_images_to_usd()
self._convert_materials_to_preview_surface_new()
self.convert_nodes_to_xform()
def check_usd_compliance(rootLayer, arkit=False):
    # An API change in v18.11 changed the syntax for UsdUtils.ComplianceChecker...
if Usd.GetMinorVersion() > 18 or (Usd.GetMinorVersion() == 18 and Usd.GetPatchVersion() >= 11):
checker = UsdUtils.ComplianceChecker(arkit=arkit, skipARKitRootLayerCheck=False)
checker.CheckCompliance(rootLayer)
else:
#Behavior in v18.09
checker = UsdUtils.ComplianceChecker(rootLayer, arkit=arkit, skipARKitRootLayerCheck=False)
errors = checker.GetErrors()
failedChecks = checker.GetFailedChecks()
for msg in errors + failedChecks:
print(msg)
return len(errors) == 0 and len(failedChecks) == 0
def convert_to_usd(gltf_file, usd_file, fps, scale, arkit=False, verbose=False, use_euler_rotation=False, optimize_textures=False, generate_texture_transform_texture=True):
"""Converts a glTF file to USD
Arguments:
gltf_file {str} -- path to glTF file
usd_file {str} -- path to write USD file
Keyword Arguments:
verbose {bool} -- [description] (default: {False})
"""
usd = GLTF2USD(gltf_file=gltf_file, usd_file=usd_file, fps=fps, scale=scale, verbose=verbose, use_euler_rotation=use_euler_rotation, optimize_textures=optimize_textures, generate_texture_transform_texture=generate_texture_transform_texture)
if usd.stage:
asset = usd.stage.GetRootLayer()
usd.logger.info('Conversion complete!')
asset.Save()
usd.logger.info('created {}'.format(asset.realPath))
if usd_file.endswith('.usdz') or usd_file.endswith('.usdc'):
usdc_file = '%s.%s' % (os.path.splitext(usd_file)[0], 'usdc')
asset.Export(usdc_file, args=dict(format='usdc'))
usd.logger.info('created {}'.format(usdc_file))
if usd_file.endswith('.usdz'):
#change to directory of the generated usd files to avoid issues with
# relative paths with CreateNewUsdzPackage
os.chdir(os.path.dirname(usdc_file))
usd_file = ntpath.basename(usd_file)
r = Ar.GetResolver()
resolved_asset = r.Resolve(ntpath.basename(usdc_file))
context = r.CreateDefaultContextForAsset(resolved_asset)
success = check_usd_compliance(resolved_asset, arkit=arkit)
with Ar.ResolverContextBinder(context):
if arkit and not success:
usd.logger.warning('USD is not ARKit compliant')
return
success = UsdUtils.CreateNewUsdzPackage(resolved_asset, usd_file) and success
if success:
usd.logger.info('created package {} with contents:'.format(usd_file))
zip_file = Usd.ZipFile.Open(usd_file)
file_names = zip_file.GetFileNames()
for file_name in file_names:
usd.logger.info('\t{}'.format(file_name))
else:
usd.logger.error('could not create {}'.format(usd_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert glTF to USD: v{}'.format(__version__))
parser.add_argument('--gltf', '-g', action='store', dest='gltf_file', help='glTF file (in .gltf format)', required=True)
parser.add_argument('--fps', action='store', dest='fps', help='The frames per second for the animations', type=float, default=24.0)
parser.add_argument('--output', '-o', action='store', dest='usd_file', help='destination to store generated .usda file', required=True)
parser.add_argument('--verbose', '-v', action='store_true', dest='verbose', help='Enable verbose mode')
parser.add_argument('--scale', '-s', action='store', dest='scale', help='Scale the resulting USDA', type=float, default=100)
parser.add_argument('--arkit', action='store_true', dest='arkit', help='Check USD with ARKit compatibility before making USDZ file')
parser.add_argument('--use-euler-rotation', action='store_true', dest='use_euler_rotation', help='sets euler rotations for node animations instead of quaternion rotations')
parser.add_argument('--optimize-textures', action='store_true', dest='optimize_textures', default=False, help='Specifies if image file size should be optimized and reduced at the expense of longer export time')
parser.add_argument('--generate_texture_transform_texture', dest='generate_texture_transform_texture', action='store_true', help='Enables texture transform texture generation')
parser.add_argument('--no-generate_texture_transform_texture', dest='generate_texture_transform_texture', action='store_false', help='Disables texture transform texture generation')
parser.set_defaults(generate_texture_transform_texture=True)
args = parser.parse_args()
if args.gltf_file:
convert_to_usd(os.path.expanduser(args.gltf_file), os.path.abspath(os.path.expanduser(args.usd_file)), args.fps, args.scale, args.arkit, args.verbose, args.use_euler_rotation, args.optimize_textures, args.generate_texture_transform_texture)
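# Hedged example invocations; the file names below are placeholders, and the script
# name is whatever this file is saved as.
#
#   python gltf2usd.py --gltf model.gltf --output model.usdz --fps 24 --scale 100
#
# or, from Python, assuming the glTF file and its textures exist on disk:
#
#   convert_to_usd('model.gltf', os.path.abspath('model.usdz'), fps=24.0, scale=100)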
|
[
"pxr.UsdSkel.DecomposeTransform",
"pxr.Vt.FloatArray",
"pxr.Ar.GetResolver",
"base64.b64decode",
"pxr.Vt.QuatfArray",
"os.path.isfile",
"numpy.arange",
"filecmp.cmp",
"os.path.join",
"pxr.UsdGeom.Xform.Define",
"pxr.Usd.ZipFile.Open",
"os.path.abspath",
"_gltf2usd.version.Version.get_version_name",
"os.path.dirname",
"pxr.Usd.TimeCode",
"pxr.UsdSkel.BlendShape.Define",
"pxr.UsdSkel.BindingAPI",
"_gltf2usd.usd_material.USDMaterial",
"pxr.Usd.Stage.CreateNew",
"pxr.UsdSkel.MakeTransform",
"pxr.UsdUtils.ComplianceChecker",
"shutil.copyfile",
"pxr.UsdUtils.CreateNewUsdzPackage",
"pxr.Gf.Vec3f",
"pxr.UsdGeom.PrimvarsAPI",
"_gltf2usd.gltf2usdUtils.GLTF2USDUtils.compute_usd_transform_matrix_from_gltf_node",
"io.BytesIO",
"pxr.Usd.GetPatchVersion",
"pxr.UsdSkel.NormalizeWeights",
"_gltf2usd.gltf2loader.TextureWrap",
"pxr.UsdGeom.Scope.Define",
"pxr.Gf.Vec3h",
"pxr.Usd.GetMinorVersion",
"pxr.Gf.Rotation",
"pxr.Gf.Matrix4d",
"ntpath.basename",
"pxr.Ar.ResolverContextBinder",
"_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name",
"pxr.UsdSkel.Root.Define",
"pxr.UsdShade.MaterialBindingAPI",
"_gltf2usd.gltf2loader.GLTF2Loader",
"collections.namedtuple",
"os.path.splitext",
"pxr.Gf.Quatf",
"os.path.expanduser",
"logging.getLogger"
] |
[((539, 573), '_gltf2usd.version.Version.get_version_name', 'version.Version.get_version_name', ([], {}), '()\n', (571, 573), False, 'from _gltf2usd import version\n'), ((1414, 1443), 'logging.getLogger', 'logging.getLogger', (['"""gltf2usd"""'], {}), "('gltf2usd')\n", (1431, 1443), False, 'import logging\n'), ((1539, 1616), '_gltf2usd.gltf2loader.GLTF2Loader', 'GLTF2Loader', (['gltf_file', 'optimize_textures', 'generate_texture_transform_texture'], {}), '(gltf_file, optimize_textures, generate_texture_transform_texture)\n', (1550, 1616), False, 'from _gltf2usd.gltf2loader import GLTF2Loader, PrimitiveMode, TextureWrap, MinFilter, MagFilter\n'), ((2164, 2193), 'pxr.Usd.Stage.CreateNew', 'Usd.Stage.CreateNew', (['usd_file'], {}), '(usd_file)\n', (2183, 2193), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((2634, 2675), 'pxr.UsdGeom.Xform.Define', 'UsdGeom.Xform.Define', (['self.stage', '"""/root"""'], {}), "(self.stage, '/root')\n", (2654, 2675), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((9922, 9981), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['joint_name'], {}), '(joint_name)\n', (9969, 9981), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((18870, 18922), 'pxr.UsdGeom.Scope.Define', 'UsdGeom.Scope.Define', (['self.stage', 'material_path_root'], {}), '(self.stage, material_path_root)\n', (18890, 18922), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((21597, 21648), 'pxr.UsdSkel.MakeTransform', 'UsdSkel.MakeTransform', (['translation', 'rotation', 'scale'], {}), '(translation, rotation, scale)\n', (21618, 21648), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((22050, 22078), 'pxr.UsdSkel.BindingAPI', 'UsdSkel.BindingAPI', (['usd_mesh'], {}), '(usd_mesh)\n', (22068, 22078), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((22757, 22785), 'pxr.UsdSkel.BindingAPI', 'UsdSkel.BindingAPI', (['usd_mesh'], {}), '(usd_mesh)\n', (22775, 22785), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((24969, 25171), 'pxr.Gf.Matrix4d', 'Gf.Matrix4d', (['matrix[0]', 'matrix[1]', 'matrix[2]', 'matrix[3]', 'matrix[4]', 'matrix[5]', 'matrix[6]', 'matrix[7]', 'matrix[8]', 'matrix[9]', 'matrix[10]', 'matrix[11]', 'matrix[12]', 'matrix[13]', 'matrix[14]', 'matrix[15]'], {}), '(matrix[0], matrix[1], matrix[2], matrix[3], matrix[4], matrix[5\n ], matrix[6], matrix[7], matrix[8], matrix[9], matrix[10], matrix[11],\n matrix[12], matrix[13], matrix[14], matrix[15])\n', (24980, 25171), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((26202, 26265), 'pxr.UsdSkel.MakeTransform', 'UsdSkel.MakeTransform', (['usd_translation', 'usd_rotation', 'usd_scale'], {}), '(usd_translation, usd_rotation, usd_scale)\n', (26223, 26265), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((27270, 27322), 'collections.namedtuple', 'collections.namedtuple', (['"""MinMaxTime"""', "('max', 'min')"], {}), "('MinMaxTime', ('max', 'min'))\n", (27292, 27322), False, 'import collections\n'), ((28371, 28423), 'collections.namedtuple', 'collections.namedtuple', (['"""MinMaxTime"""', "('max', 'min')"], {}), "('MinMaxTime', ('max', 'min'))\n", (28393, 28423), False, 'import collections\n'), ((31363, 31433), 
'pxr.UsdUtils.ComplianceChecker', 'UsdUtils.ComplianceChecker', ([], {'arkit': 'arkit', 'skipARKitRootLayerCheck': '(False)'}), '(arkit=arkit, skipARKitRootLayerCheck=False)\n', (31389, 31433), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((31533, 31619), 'pxr.UsdUtils.ComplianceChecker', 'UsdUtils.ComplianceChecker', (['rootLayer'], {'arkit': 'arkit', 'skipARKitRootLayerCheck': '(False)'}), '(rootLayer, arkit=arkit, skipARKitRootLayerCheck=\n False)\n', (31559, 31619), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((1771, 1796), 'os.path.abspath', 'os.path.abspath', (['usd_file'], {}), '(usd_file)\n', (1786, 1796), False, 'import os\n'), ((4365, 4429), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['root_joint.name'], {}), '(root_joint.name)\n', (4412, 4429), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((4915, 4979), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.compute_usd_transform_matrix_from_gltf_node', 'GLTF2USDUtils.compute_usd_transform_matrix_from_gltf_node', (['joint'], {}), '(joint)\n', (4972, 4979), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((5071, 5084), 'pxr.Gf.Matrix4d', 'Gf.Matrix4d', ([], {}), '()\n', (5082, 5084), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((6335, 6387), 'numpy.arange', 'numpy.arange', (['min_sample', 'max_sample', '(1.0 / self.fps)'], {}), '(min_sample, max_sample, 1.0 / self.fps)\n', (6347, 6387), False, 'import numpy\n'), ((6918, 6970), 'numpy.arange', 'numpy.arange', (['min_sample', 'max_sample', '(1.0 / self.fps)'], {}), '(min_sample, max_sample, 1.0 / self.fps)\n', (6930, 6970), False, 'import numpy\n'), ((7482, 7534), 'numpy.arange', 'numpy.arange', (['min_sample', 'max_sample', '(1.0 / self.fps)'], {}), '(min_sample, max_sample, 1.0 / self.fps)\n', (7494, 7534), False, 'import numpy\n'), ((9184, 9280), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['self._joint_hierarchy_name_map[gltf_joint]'], {}), '(self.\n _joint_hierarchy_name_map[gltf_joint])\n', (9231, 9280), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((9340, 9399), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['joint.name'], {}), '(joint.name)\n', (9387, 9399), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((11352, 11398), 'pxr.UsdSkel.Root.Define', 'UsdSkel.Root.Define', (['self.stage', 'skeleton_path'], {}), '(self.stage, skeleton_path)\n', (11371, 11398), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((19023, 19074), '_gltf2usd.usd_material.USDMaterial', 'USDMaterial', (['self.stage', 'scope', 'i', 'self.gltf_loader'], {}), '(self.stage, scope, i, self.gltf_loader)\n', (19034, 19074), False, 'from _gltf2usd.usd_material import USDMaterial\n'), ((20591, 20601), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ([], {}), '()\n', (20599, 20601), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((20625, 20635), 'pxr.Gf.Quatf', 'Gf.Quatf', ([], {}), '()\n', (20633, 20635), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((20656, 20666), 'pxr.Gf.Vec3h', 'Gf.Vec3h', ([], {}), '()\n', (20664, 20666), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, 
UsdSkel, Vt, Ar, UsdUtils\n'), ((20740, 20808), 'pxr.UsdSkel.DecomposeTransform', 'UsdSkel.DecomposeTransform', (['usd_matrix', 'translation', 'rotation', 'scale'], {}), '(usd_matrix, translation, rotation, scale)\n', (20766, 20808), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((20849, 20880), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['gltf_node.translation'], {}), '(gltf_node.translation)\n', (20857, 20880), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((20904, 21009), 'pxr.Gf.Quatf', 'Gf.Quatf', (['gltf_node.rotation[3]', 'gltf_node.rotation[0]', 'gltf_node.rotation[1]', 'gltf_node.rotation[2]'], {}), '(gltf_node.rotation[3], gltf_node.rotation[0], gltf_node.rotation[1\n ], gltf_node.rotation[2])\n', (20912, 21009), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((21025, 21050), 'pxr.Gf.Vec3h', 'Gf.Vec3h', (['gltf_node.scale'], {}), '(gltf_node.scale)\n', (21033, 21050), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((22148, 22207), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['joint.name'], {}), '(joint.name)\n', (22195, 22207), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((22839, 22908), 'pxr.Gf.Matrix4d', 'Gf.Matrix4d', (['((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1))'], {}), '(((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)))\n', (22850, 22908), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((23979, 24013), 'pxr.Vt.FloatArray', 'Vt.FloatArray', (['total_joint_weights'], {}), '(total_joint_weights)\n', (23992, 24013), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((24026, 24074), 'pxr.UsdSkel.NormalizeWeights', 'UsdSkel.NormalizeWeights', (['total_joint_weights', '(4)'], {}), '(total_joint_weights, 4)\n', (24050, 24074), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((25705, 25722), 'pxr.Gf.Vec3h', 'Gf.Vec3h', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (25713, 25722), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((25803, 25820), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (25811, 25820), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((25880, 25918), 'pxr.Gf.Vec3h', 'Gf.Vec3h', (['scale[0]', 'scale[1]', 'scale[2]'], {}), '(scale[0], scale[1], scale[2])\n', (25888, 25918), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((25989, 26049), 'pxr.Gf.Quatf', 'Gf.Quatf', (['rotation[3]', 'rotation[0]', 'rotation[1]', 'rotation[2]'], {}), '(rotation[3], rotation[0], rotation[1], rotation[2])\n', (25997, 26049), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((26129, 26185), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['translation[0]', 'translation[1]', 'translation[2]'], {}), '(translation[0], translation[1], translation[2])\n', (26137, 26185), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((27121, 27169), 'numpy.arange', 'numpy.arange', (['min_time', 'max_time', '(1.0 / self.fps)'], {}), '(min_time, max_time, 1.0 / self.fps)\n', (27133, 27169), False, 'import numpy\n'), ((28135, 28183), 'numpy.arange', 'numpy.arange', (['min_time', 'max_time', '(1.0 / self.fps)'], {}), 
'(min_time, max_time, 1.0 / self.fps)\n', (28147, 28183), False, 'import numpy\n'), ((31252, 31273), 'pxr.Usd.GetMinorVersion', 'Usd.GetMinorVersion', ([], {}), '()\n', (31271, 31273), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((33168, 33193), 'ntpath.basename', 'ntpath.basename', (['usd_file'], {}), '(usd_file)\n', (33183, 33193), False, 'import ntpath\n'), ((33210, 33226), 'pxr.Ar.GetResolver', 'Ar.GetResolver', ([], {}), '()\n', (33224, 33226), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((35972, 36006), 'os.path.expanduser', 'os.path.expanduser', (['args.gltf_file'], {}), '(args.gltf_file)\n', (35990, 36006), False, 'import os\n'), ((3394, 3452), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['node.name'], {}), '(node.name)\n', (3441, 3452), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((9536, 9595), '_gltf2usd.gltf2usdUtils.GLTF2USDUtils.convert_to_usd_friendly_node_name', 'GLTF2USDUtils.convert_to_usd_friendly_node_name', (['joint.name'], {}), '(joint.name)\n', (9583, 9595), False, 'from _gltf2usd.gltf2usdUtils import GLTF2USDUtils\n'), ((12628, 12653), 'pxr.UsdGeom.PrimvarsAPI', 'UsdGeom.PrimvarsAPI', (['mesh'], {}), '(mesh)\n', (12647, 12653), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((13342, 13367), 'pxr.UsdGeom.PrimvarsAPI', 'UsdGeom.PrimvarsAPI', (['mesh'], {}), '(mesh)\n', (13361, 13367), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((15353, 15408), 'pxr.UsdSkel.BlendShape.Define', 'UsdSkel.BlendShape.Define', (['self.stage', 'blend_shape_name'], {}), '(self.stage, blend_shape_name)\n', (15378, 15408), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((28332, 28347), 'pxr.Usd.TimeCode', 'Usd.TimeCode', (['i'], {}), '(i)\n', (28344, 28347), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((31283, 31304), 'pxr.Usd.GetMinorVersion', 'Usd.GetMinorVersion', ([], {}), '()\n', (31302, 31304), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((31315, 31336), 'pxr.Usd.GetPatchVersion', 'Usd.GetPatchVersion', ([], {}), '()\n', (31334, 31336), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((33117, 33143), 'os.path.dirname', 'os.path.dirname', (['usdc_file'], {}), '(usdc_file)\n', (33132, 33143), False, 'import os\n'), ((33266, 33292), 'ntpath.basename', 'ntpath.basename', (['usdc_file'], {}), '(usdc_file)\n', (33281, 33292), False, 'import ntpath\n'), ((33453, 33486), 'pxr.Ar.ResolverContextBinder', 'Ar.ResolverContextBinder', (['context'], {}), '(context)\n', (33477, 33486), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((36024, 36057), 'os.path.expanduser', 'os.path.expanduser', (['args.usd_file'], {}), '(args.usd_file)\n', (36042, 36057), False, 'import os\n'), ((4798, 4817), 'pxr.Gf.Matrix4d', 'Gf.Matrix4d', (['*xform'], {}), '(*xform)\n', (4809, 4817), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((6765, 6787), 'pxr.Vt.QuatfArray', 'Vt.QuatfArray', (['entries'], {}), '(entries)\n', (6778, 6787), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((6789, 6823), 'pxr.Usd.TimeCode', 'Usd.TimeCode', (['(input_key * self.fps)'], {}), 
'(input_key * self.fps)\n', (6801, 6823), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((7363, 7397), 'pxr.Usd.TimeCode', 'Usd.TimeCode', (['(input_key * self.fps)'], {}), '(input_key * self.fps)\n', (7375, 7397), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((7917, 7951), 'pxr.Usd.TimeCode', 'Usd.TimeCode', (['(input_key * self.fps)'], {}), '(input_key * self.fps)\n', (7929, 7951), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((8404, 8489), 'pxr.Gf.Quatf', 'Gf.Quatf', (['gltf_rotation[3]', 'gltf_rotation[0]', 'gltf_rotation[1]', 'gltf_rotation[2]'], {}), '(gltf_rotation[3], gltf_rotation[0], gltf_rotation[1], gltf_rotation[2]\n )\n', (8412, 8489), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((11960, 11993), 'pxr.UsdShade.MaterialBindingAPI', 'UsdShade.MaterialBindingAPI', (['mesh'], {}), '(mesh)\n', (11987, 11993), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((16323, 16352), '_gltf2usd.gltf2loader.TextureWrap', 'TextureWrap', (["sampler['wrapS']"], {}), "(sampler['wrapS'])\n", (16334, 16352), False, 'from _gltf2usd.gltf2loader import GLTF2Loader, PrimitiveMode, TextureWrap, MinFilter, MagFilter\n'), ((16460, 16489), '_gltf2usd.gltf2loader.TextureWrap', 'TextureWrap', (["sampler['wrapT']"], {}), "(sampler['wrapT'])\n", (16471, 16489), False, 'from _gltf2usd.gltf2loader import GLTF2Loader, PrimitiveMode, TextureWrap, MinFilter, MagFilter\n'), ((17946, 17997), 'os.path.join', 'os.path.join', (['self.gltf_loader.root_dir', 'image_name'], {}), '(self.gltf_loader.root_dir, image_name)\n', (17958, 17997), False, 'import os\n'), ((18150, 18203), 'os.path.join', 'os.path.join', (['self.gltf_loader.root_dir', "image['uri']"], {}), "(self.gltf_loader.root_dir, image['uri'])\n", (18162, 18203), False, 'import os\n'), ((18612, 18639), 'ntpath.basename', 'ntpath.basename', (['image_name'], {}), '(image_name)\n', (18627, 18639), False, 'import ntpath\n'), ((25748, 25758), 'pxr.Gf.Quatf', 'Gf.Quatf', ([], {}), '()\n', (25756, 25758), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((33653, 33708), 'pxr.UsdUtils.CreateNewUsdzPackage', 'UsdUtils.CreateNewUsdzPackage', (['resolved_asset', 'usd_file'], {}), '(resolved_asset, usd_file)\n', (33682, 33708), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((33870, 33896), 'pxr.Usd.ZipFile.Open', 'Usd.ZipFile.Open', (['usd_file'], {}), '(usd_file)\n', (33886, 33896), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((12896, 12916), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['entry[0:3]'], {}), '(entry[0:3])\n', (12904, 12916), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((17313, 17322), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (17320, 17322), False, 'from io import BytesIO\n'), ((18267, 18294), 'ntpath.basename', 'ntpath.basename', (['image_path'], {}), '(image_path)\n', (18282, 18294), False, 'import ntpath\n'), ((29639, 29657), 'pxr.Gf.Rotation', 'Gf.Rotation', (['value'], {}), '(value)\n', (29650, 29657), False, 'from pxr import Usd, UsdGeom, Sdf, UsdShade, Gf, UsdSkel, Vt, Ar, UsdUtils\n'), ((32758, 32784), 'os.path.splitext', 'os.path.splitext', (['usd_file'], {}), '(usd_file)\n', (32774, 32784), False, 'import os\n'), ((17358, 17386), 'base64.b64decode', 'base64.b64decode', (['img_base64'], {}), 
'(img_base64)\n', (17374, 17386), False, 'import base64\n'), ((18536, 18575), 'shutil.copyfile', 'shutil.copyfile', (['image_path', 'image_name'], {}), '(image_path, image_name)\n', (18551, 18575), False, 'import shutil\n'), ((18439, 18465), 'os.path.isfile', 'os.path.isfile', (['image_name'], {}), '(image_name)\n', (18453, 18465), False, 'import os\n'), ((18470, 18505), 'filecmp.cmp', 'filecmp.cmp', (['image_path', 'image_name'], {}), '(image_path, image_name)\n', (18481, 18505), False, 'import filecmp\n'), ((17713, 17739), 'base64.b64decode', 'base64.b64decode', (['uri_data'], {}), '(uri_data)\n', (17729, 17739), False, 'import base64\n')]
|
from unittest.mock import patch
import numpy as np
import pytest
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QListWidget
from nexus_constructor.field_attrs import FieldAttrFrame, FieldAttrsDialog
from nexus_constructor.model.module import Dataset
from nexus_constructor.model.value_type import ValueTypes
from tests.ui_tests.ui_test_utils import show_and_close_window
def get_attribute_widget(index: int, list_widget: QListWidget) -> FieldAttrFrame:
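    """Return the FieldAttrFrame widget stored in the given row of the list widget."""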
item = list_widget.item(index)
return list_widget.itemWidget(item)
def add_attribute(field_attributes_dialog, qtbot):
qtbot.mouseClick(field_attributes_dialog.add_button, Qt.LeftButton)
def add_array_attribute(field_attributes_dialog, qtbot):
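    """Add a new attribute row, switch it to 'Array' mode and return its widget."""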
add_attribute(field_attributes_dialog, qtbot)
widget = get_attribute_widget(0, field_attributes_dialog.list_widget)
widget.array_or_scalar_combo.setCurrentText("Array")
return widget
@pytest.fixture(scope="function")
def field_attributes_dialog(qtbot, template):
field_attributes_dialog = FieldAttrsDialog(template)
qtbot.addWidget(field_attributes_dialog)
return field_attributes_dialog
@pytest.mark.parametrize("attr_val", ["test", 123, 1.1, np.ushort(12)])
def test_GIVEN_existing_field_with_attr_WHEN_editing_component_THEN_both_field_and_attrs_are_filled_in_correctly(
qtbot, attr_val, field_attributes_dialog
):
attr_key = "testattr"
ds = Dataset(parent_node=None, name="test", type=ValueTypes.STRING, values="")
ds.attributes.set_attribute_value(attr_key, attr_val)
field_attributes_dialog.fill_existing_attrs(ds)
assert len(field_attributes_dialog.get_attrs()) == 1
assert field_attributes_dialog.get_attrs()[0][1] == str(attr_val)
def test_GIVEN_existing_field_with_attr_which_is_in_excludelist_WHEN_editing_component_THEN_attr_is_not_filled_in(
qtbot, field_attributes_dialog
):
attr_key = "units"
attr_val = "m"
ds = Dataset(parent_node=None, name="test", type=ValueTypes.STRING, values="")
ds.attributes.set_attribute_value(attr_key, attr_val)
field_attributes_dialog.fill_existing_attrs(ds)
assert len(field_attributes_dialog.get_attrs()) == 0
def test_GIVEN_add_attribute_button_pressed_WHEN_changing_attributes_THEN_new_attribute_is_created(
qtbot, field_attributes_dialog
):
add_attribute(field_attributes_dialog, qtbot)
assert field_attributes_dialog.list_widget.count() == 1
def test_GIVEN_remove_attribute_button_pressed_WHEN_changing_attributes_THEN_selected_attribute_is_removed(
qtbot, field_attributes_dialog
):
add_attribute(field_attributes_dialog, qtbot)
qtbot.mouseClick(
get_attribute_widget(0, field_attributes_dialog.list_widget), Qt.LeftButton
)
qtbot.mouseClick(field_attributes_dialog.remove_button, Qt.LeftButton)
assert field_attributes_dialog.list_widget.count() == 0
def test_GIVEN_data_type_changes_WHEN_editing_component_THEN_validate_method_is_called(
qtbot, field_attributes_dialog
):
add_attribute(field_attributes_dialog, qtbot)
widget = get_attribute_widget(0, field_attributes_dialog.list_widget)
with patch(
"nexus_constructor.field_attrs.FieldValueValidator.validate"
) as mock_validate:
widget.attr_dtype_combo.setCurrentIndex(2)
mock_validate.assert_called_once()
def test_GIVEN_edit_array_button_pressed_WHEN_attribute_is_an_array_THEN_array_widget_opens(
qtbot, field_attributes_dialog
):
widget = add_array_attribute(field_attributes_dialog, qtbot)
qtbot.mouseClick(widget.array_edit_button, Qt.LeftButton)
assert widget.dialog.isVisible()
def test_GIVEN_attribute_is_an_array_WHEN_getting_data_THEN_array_is_returned(
qtbot, field_attributes_dialog
):
widget = add_array_attribute(field_attributes_dialog, qtbot)
data = np.arange(9).reshape((3, 3))
qtbot.mouseClick(widget.array_edit_button, Qt.LeftButton)
widget.dialog.model.array = data
attribute_name = "AttributeName"
qtbot.keyClicks(widget.attr_name_lineedit, attribute_name)
assert widget.name == attribute_name
assert np.array_equal(widget.value, data)
def test_GIVEN_array_and_attribute_name_set_WHEN_changing_attribute_THEN_array_attribute_set(
qtbot, field_attributes_dialog
):
widget = add_array_attribute(field_attributes_dialog, qtbot)
data = np.arange(9).reshape((3, 3))
widget.name = "AttributeName"
widget.value = data
assert np.array_equal(widget.dialog.model.array, data)
def test_GIVEN_type_changed_to_array_WHEN_changing_attribute_THEN_edit_array_button_is_visible(
qtbot, field_attributes_dialog
):
widget = add_array_attribute(field_attributes_dialog, qtbot)
widget.type_changed("Array")
show_and_close_window(qtbot, field_attributes_dialog)
assert widget.array_edit_button.isVisible()
assert not widget.attr_value_lineedit.isVisible()
def test_GIVEN_type_changed_to_scalar_WHEN_changing_attribute_THEN_value_line_edit_is_visible(
qtbot, field_attributes_dialog
):
widget = add_array_attribute(field_attributes_dialog, qtbot)
widget.type_changed("Scalar")
show_and_close_window(qtbot, field_attributes_dialog)
assert not widget.array_edit_button.isVisible()
assert widget.attr_value_lineedit.isVisible()
|
[
"nexus_constructor.field_attrs.FieldAttrsDialog",
"pytest.fixture",
"unittest.mock.patch",
"tests.ui_tests.ui_test_utils.show_and_close_window",
"numpy.ushort",
"numpy.arange",
"nexus_constructor.model.module.Dataset",
"numpy.array_equal"
] |
[((931, 963), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (945, 963), False, 'import pytest\n'), ((1040, 1066), 'nexus_constructor.field_attrs.FieldAttrsDialog', 'FieldAttrsDialog', (['template'], {}), '(template)\n', (1056, 1066), False, 'from nexus_constructor.field_attrs import FieldAttrFrame, FieldAttrsDialog\n'), ((1418, 1491), 'nexus_constructor.model.module.Dataset', 'Dataset', ([], {'parent_node': 'None', 'name': '"""test"""', 'type': 'ValueTypes.STRING', 'values': '""""""'}), "(parent_node=None, name='test', type=ValueTypes.STRING, values='')\n", (1425, 1491), False, 'from nexus_constructor.model.module import Dataset\n'), ((1938, 2011), 'nexus_constructor.model.module.Dataset', 'Dataset', ([], {'parent_node': 'None', 'name': '"""test"""', 'type': 'ValueTypes.STRING', 'values': '""""""'}), "(parent_node=None, name='test', type=ValueTypes.STRING, values='')\n", (1945, 2011), False, 'from nexus_constructor.model.module import Dataset\n'), ((4108, 4142), 'numpy.array_equal', 'np.array_equal', (['widget.value', 'data'], {}), '(widget.value, data)\n', (4122, 4142), True, 'import numpy as np\n'), ((4452, 4499), 'numpy.array_equal', 'np.array_equal', (['widget.dialog.model.array', 'data'], {}), '(widget.dialog.model.array, data)\n', (4466, 4499), True, 'import numpy as np\n'), ((4738, 4791), 'tests.ui_tests.ui_test_utils.show_and_close_window', 'show_and_close_window', (['qtbot', 'field_attributes_dialog'], {}), '(qtbot, field_attributes_dialog)\n', (4759, 4791), False, 'from tests.ui_tests.ui_test_utils import show_and_close_window\n'), ((5132, 5185), 'tests.ui_tests.ui_test_utils.show_and_close_window', 'show_and_close_window', (['qtbot', 'field_attributes_dialog'], {}), '(qtbot, field_attributes_dialog)\n', (5153, 5185), False, 'from tests.ui_tests.ui_test_utils import show_and_close_window\n'), ((1205, 1218), 'numpy.ushort', 'np.ushort', (['(12)'], {}), '(12)\n', (1214, 1218), True, 'import numpy as np\n'), ((3138, 3205), 'unittest.mock.patch', 'patch', (['"""nexus_constructor.field_attrs.FieldValueValidator.validate"""'], {}), "('nexus_constructor.field_attrs.FieldValueValidator.validate')\n", (3143, 3205), False, 'from unittest.mock import patch\n'), ((3826, 3838), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (3835, 3838), True, 'import numpy as np\n'), ((4353, 4365), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (4362, 4365), True, 'import numpy as np\n')]
|
import numpy as np
from samplers import *
from utils import *
from joblib import Parallel, delayed
'''
Attacks for the ARA approach to Adversarial Classification
'''
def compute_probability(x, params):
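    """Monte Carlo estimate of the probability the classifier assigns to each
    'good' class c = 0, ..., l-1, averaged over originals sampled from p^*(x|x')."""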
l = params["l"]
prb = np.zeros(l)
for c in range(l):
# Sample from p^*(x|x')
sample = params["sampler_star"](x)
# Compute p(y|x) for each x in sample
probs = sample_label(sample, params["clf"],
mode='evaluate', n_samples=0)[:,c]
# Approximate with MC the value of the mean
prb[c] = np.mean(probs, axis = 0)
return prb
def attack_ARA(x, y, params):
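    """ARA attack: 'bad' instances (y >= l) are replaced by the perturbation of the
    informative features S, within the allowed distance, that maximizes the
    attacker's expected utility; 'good' instances are returned unchanged."""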
l = params["l"]
if y < l:
return x
else:
S = params["S"]
ut_mat = params["ut_mat"]
uts = np.expand_dims(ut_mat[:params["l"],1], axis=1)
perturbations = original_instances_given_dist(x[S],
n=params["distance_to_original"])
attacks = np.ones([perturbations.shape[0], x.shape[0]], dtype=int)*x
attacks[:,S] = perturbations
prob_matrix = np.zeros([perturbations.shape[0], l])
##
        for i in range(perturbations.shape[0]): ## This is a mess
prob_matrix[i] = compute_probability(attacks[i], params)
##
expected_ut = np.dot(prob_matrix, uts)
idx = np.argmax(expected_ut)
return attacks[idx]
def attack_par(i, X, y, params):
return attack_ARA(X[i], y[i], params)
def attack_set(X, y, params):
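    """Attack every instance of X in parallel with attack_ARA."""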
    # num_cores=4 # it depends on the processor
atts = Parallel(n_jobs=-1)(delayed(attack_par)(i, X, y, params) for i in range(X.shape[0]))
return np.array(atts)
def attack_noCK(x, y, params):
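    """Like attack_ARA, but the attacker only knows the defender's class
    probabilities up to a relative uniform error of +/- params['dev']."""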
l = params["l"]
if y < l:
return x
else:
S = params["S"]
ut_mat = params["ut_mat"]
uts = np.expand_dims(ut_mat[:params["l"],1], axis=1)
perturbations = original_instances_given_dist(x[S],
n=params["distance_to_original"])
attacks = np.ones([perturbations.shape[0], x.shape[0]], dtype=int)*x
attacks[:,S] = perturbations
prob_matrix = np.zeros([perturbations.shape[0], l])
##
        for i in range(perturbations.shape[0]): ## This is a mess
pr = compute_probability(attacks[i], params)
prob_matrix[i] = np.random.uniform(pr - params["dev"]*pr,
pr + params["dev"]*pr)
##
expected_ut = np.dot(prob_matrix, uts)
idx = np.argmax(expected_ut)
return attacks[idx]
def attack_par_noCK(i, X, y, params):
return attack_noCK(X[i], y[i], params)
def attack_set_noCK(X, y, params):
    # num_cores=4 # it depends on the processor
atts = Parallel(n_jobs=-1)(delayed(attack_par_noCK)(i, X, y, params) for i in range(X.shape[0]))
return np.array(atts)
def attack_UP(x, y, params):
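    """Like attack_ARA, but the defender's class probabilities are perturbed
    upwards by a relative uniform error of at most params['dev']."""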
l = params["l"]
if y < l:
return x
else:
S = params["S"]
ut_mat = params["ut_mat"]
uts = np.expand_dims(ut_mat[:params["l"],1], axis=1)
perturbations = original_instances_given_dist(x[S],
n=params["distance_to_original"])
attacks = np.ones([perturbations.shape[0], x.shape[0]], dtype=int)*x
attacks[:,S] = perturbations
prob_matrix = np.zeros([perturbations.shape[0], l])
##
        for i in range(perturbations.shape[0]): ## This is a mess
pr = compute_probability(attacks[i], params)
prob_matrix[i] = np.random.uniform(pr, pr + params["dev"]*pr )
##
expected_ut = np.dot(prob_matrix, uts)
idx = np.argmax(expected_ut)
return attacks[idx]
def attack_par_UP(i, X, y, params):
return attack_UP(X[i], y[i], params)
def attack_set_UP(X, y, params):
# num_cores=4 # it depends on the processor
atts = Parallel(n_jobs=-1)(delayed(attack_par_UP)(i, X, y, params) for i in range(X.shape[0]))
return np.array(atts)
if __name__ == '__main__':
X, y = get_spam_data("data/uciData.csv")
X_train, X_test, y_train, y_test = generate_train_test(X, y, q=0.3)
clf = LogisticRegression(penalty='l1', C=0.01)
clf.fit(X,y)
## Get "n" more important covariates
n=5
weights = np.abs(clf.coef_)
S = (-weights).argsort()[0,:n]
params = {
"l" : 1, # Good instances are y=0,1,...,l-1. Rest are bad
"k" : 2, # Number of classes
"var" : 0.1, # Proportion of max variance of betas
"ut" : np.array([[1.0, 0.0],[0.0, 1.0]]), # Ut matrix for defender
"ut_mat" : np.array([[0.0, 0.7],[0.0, 0.0]]), # Ut matrix for attacker rows is
# what the classifier says, columns
# real label!!!
"sampler_star" : lambda x: sample_original_instance_star(x,
n_samples=15, rho=2, x=None, mode='sample', heuristic='uniform'),
##
"clf" : clf,
"tolerance" : 3, # For ABC
"classes" : np.array([0,1]),
"S" : S, # Set of index representing covariates with
# "sufficient" information
"X_train" : X_train,
"distance_to_original" : 2 # Numbers of changes allowed to adversary
}
# attack = lambda x, y: attack_ARA(x, y, params)
print(accuracy_score(y_test, clf.predict(X_test)))
## Attack test set
X_att = attack_set(X_test, y_test, params)
print(accuracy_score(y_test, clf.predict(X_att)))
|
[
"numpy.random.uniform",
"numpy.abs",
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"numpy.ones",
"numpy.mean",
"numpy.array",
"joblib.Parallel",
"numpy.dot",
"joblib.delayed"
] |
[((234, 245), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (242, 245), True, 'import numpy as np\n'), ((1625, 1639), 'numpy.array', 'np.array', (['atts'], {}), '(atts)\n', (1633, 1639), True, 'import numpy as np\n'), ((2817, 2831), 'numpy.array', 'np.array', (['atts'], {}), '(atts)\n', (2825, 2831), True, 'import numpy as np\n'), ((3930, 3944), 'numpy.array', 'np.array', (['atts'], {}), '(atts)\n', (3938, 3944), True, 'import numpy as np\n'), ((4225, 4242), 'numpy.abs', 'np.abs', (['clf.coef_'], {}), '(clf.coef_)\n', (4231, 4242), True, 'import numpy as np\n'), ((558, 580), 'numpy.mean', 'np.mean', (['probs'], {'axis': '(0)'}), '(probs, axis=0)\n', (565, 580), True, 'import numpy as np\n'), ((762, 809), 'numpy.expand_dims', 'np.expand_dims', (["ut_mat[:params['l'], 1]"], {'axis': '(1)'}), "(ut_mat[:params['l'], 1], axis=1)\n", (776, 809), True, 'import numpy as np\n'), ((1052, 1089), 'numpy.zeros', 'np.zeros', (['[perturbations.shape[0], l]'], {}), '([perturbations.shape[0], l])\n', (1060, 1089), True, 'import numpy as np\n'), ((1272, 1296), 'numpy.dot', 'np.dot', (['prob_matrix', 'uts'], {}), '(prob_matrix, uts)\n', (1278, 1296), True, 'import numpy as np\n'), ((1311, 1333), 'numpy.argmax', 'np.argmax', (['expected_ut'], {}), '(expected_ut)\n', (1320, 1333), True, 'import numpy as np\n'), ((1529, 1548), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1537, 1548), False, 'from joblib import Parallel, delayed\n'), ((1806, 1853), 'numpy.expand_dims', 'np.expand_dims', (["ut_mat[:params['l'], 1]"], {'axis': '(1)'}), "(ut_mat[:params['l'], 1], axis=1)\n", (1820, 1853), True, 'import numpy as np\n'), ((2096, 2133), 'numpy.zeros', 'np.zeros', (['[perturbations.shape[0], l]'], {}), '([perturbations.shape[0], l])\n', (2104, 2133), True, 'import numpy as np\n'), ((2449, 2473), 'numpy.dot', 'np.dot', (['prob_matrix', 'uts'], {}), '(prob_matrix, uts)\n', (2455, 2473), True, 'import numpy as np\n'), ((2488, 2510), 'numpy.argmax', 'np.argmax', (['expected_ut'], {}), '(expected_ut)\n', (2497, 2510), True, 'import numpy as np\n'), ((2716, 2735), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (2724, 2735), False, 'from joblib import Parallel, delayed\n'), ((2997, 3044), 'numpy.expand_dims', 'np.expand_dims', (["ut_mat[:params['l'], 1]"], {'axis': '(1)'}), "(ut_mat[:params['l'], 1], axis=1)\n", (3011, 3044), True, 'import numpy as np\n'), ((3287, 3324), 'numpy.zeros', 'np.zeros', (['[perturbations.shape[0], l]'], {}), '([perturbations.shape[0], l])\n', (3295, 3324), True, 'import numpy as np\n'), ((3570, 3594), 'numpy.dot', 'np.dot', (['prob_matrix', 'uts'], {}), '(prob_matrix, uts)\n', (3576, 3594), True, 'import numpy as np\n'), ((3609, 3631), 'numpy.argmax', 'np.argmax', (['expected_ut'], {}), '(expected_ut)\n', (3618, 3631), True, 'import numpy as np\n'), ((3831, 3850), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (3839, 3850), False, 'from joblib import Parallel, delayed\n'), ((4530, 4564), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (4538, 4564), True, 'import numpy as np\n'), ((4618, 4652), 'numpy.array', 'np.array', (['[[0.0, 0.7], [0.0, 0.0]]'], {}), '([[0.0, 0.7], [0.0, 0.0]])\n', (4626, 4652), True, 'import numpy as np\n'), ((5108, 5124), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (5116, 5124), True, 'import numpy as np\n'), ((934, 990), 'numpy.ones', 'np.ones', (['[perturbations.shape[0], x.shape[0]]'], {'dtype': 'int'}), 
'([perturbations.shape[0], x.shape[0]], dtype=int)\n', (941, 990), True, 'import numpy as np\n'), ((1978, 2034), 'numpy.ones', 'np.ones', (['[perturbations.shape[0], x.shape[0]]'], {'dtype': 'int'}), '([perturbations.shape[0], x.shape[0]], dtype=int)\n', (1985, 2034), True, 'import numpy as np\n'), ((2300, 2367), 'numpy.random.uniform', 'np.random.uniform', (["(pr - params['dev'] * pr)", "(pr + params['dev'] * pr)"], {}), "(pr - params['dev'] * pr, pr + params['dev'] * pr)\n", (2317, 2367), True, 'import numpy as np\n'), ((3169, 3225), 'numpy.ones', 'np.ones', (['[perturbations.shape[0], x.shape[0]]'], {'dtype': 'int'}), '([perturbations.shape[0], x.shape[0]], dtype=int)\n', (3176, 3225), True, 'import numpy as np\n'), ((3491, 3537), 'numpy.random.uniform', 'np.random.uniform', (['pr', "(pr + params['dev'] * pr)"], {}), "(pr, pr + params['dev'] * pr)\n", (3508, 3537), True, 'import numpy as np\n'), ((1549, 1568), 'joblib.delayed', 'delayed', (['attack_par'], {}), '(attack_par)\n', (1556, 1568), False, 'from joblib import Parallel, delayed\n'), ((2736, 2760), 'joblib.delayed', 'delayed', (['attack_par_noCK'], {}), '(attack_par_noCK)\n', (2743, 2760), False, 'from joblib import Parallel, delayed\n'), ((3851, 3873), 'joblib.delayed', 'delayed', (['attack_par_UP'], {}), '(attack_par_UP)\n', (3858, 3873), False, 'from joblib import Parallel, delayed\n')]
|