Dataset schema (one row per Python source sample):
- code: string, 31 to 1.05M characters — the raw Python source of the sample
- apis: list — fully qualified names of the APIs called in the sample
- extract_api: string, 97 to 1.91M characters — serialized per-call records (character span, API name, arguments, import statement) for every API use in the sample
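The three columns line up row by row: each code cell is a complete Python source file, apis lists the fully qualified callables it uses, and extract_api serializes one record per call site. A minimal sketch of iterating rows of this shape, assuming they are already loaded into a list of dicts named rows (the loading step itself is not part of this dump):

import ast
from collections import Counter

# Hypothetical container: one dict per dataset row holding the three columns above.
rows = []  # filled by whatever loader produced this dump

api_counts = Counter()
for row in rows:
    api_counts.update(row["apis"])                      # `apis` is a plain list of qualified names
    call_sites = ast.literal_eval(row["extract_api"])   # `extract_api` is the repr of a list of tuples
    # each tuple describes one call site in row["code"]; see the parsing sketch after the first row below

print(api_counts.most_common(10))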
import argparse

from PIL import Image
import numpy as np
import onnxruntime as rt

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="StyleTransferONNX")
    parser.add_argument('--model', type=str, default=' ', help='ONNX model file', required=True)
    parser.add_argument('--input', type=str, default=' ', help='Input image', required=True)
    parser.add_argument('--output', type=str, default=' ', help='Output image', required=True)
    args = parser.parse_args()

    session = rt.InferenceSession(args.model)
    inputH = session.get_inputs()
    outputH = session.get_outputs()

    img = Image.open(args.input)
    print('img dim: ', img.width, ' ', img.height)

    inputArray = np.asarray(img)
    inputArray = inputArray.astype(np.float32)
    inputArray = inputArray.transpose([2, 0, 1])
    np.clip(inputArray, 0, 255, out=inputArray)
    inputArray = inputArray.reshape((1, 3, img.height, img.width))

    output_res = session.run(None, {inputH[0].name: inputArray})
    output_img = output_res[0].reshape(3, output_res[0].shape[2], output_res[0].shape[3])
    output_img = output_img.transpose([1, 2, 0])
    output_img = output_img.astype(np.uint8)

    output = Image.fromarray(output_img)
    output.save(args.output)
[ "numpy.clip", "PIL.Image.fromarray", "PIL.Image.open", "argparse.ArgumentParser", "numpy.asarray", "onnxruntime.InferenceSession" ]
[((122, 178), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""StyleTransferONNX"""'}), "(description='StyleTransferONNX')\n", (145, 178), False, 'import argparse\n'), ((504, 535), 'onnxruntime.InferenceSession', 'rt.InferenceSession', (['args.model'], {}), '(args.model)\n', (523, 535), True, 'import onnxruntime as rt\n'), ((613, 635), 'PIL.Image.open', 'Image.open', (['args.input'], {}), '(args.input)\n', (623, 635), False, 'from PIL import Image\n'), ((699, 714), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (709, 714), True, 'import numpy as np\n'), ((811, 854), 'numpy.clip', 'np.clip', (['inputArray', '(0)', '(255)'], {'out': 'inputArray'}), '(inputArray, 0, 255, out=inputArray)\n', (818, 854), True, 'import numpy as np\n'), ((1168, 1195), 'PIL.Image.fromarray', 'Image.fromarray', (['output_img'], {}), '(output_img)\n', (1183, 1195), False, 'from PIL import Image\n')]
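Judging from rows like the one above, each tuple in extract_api packs, in order: the character span of the call inside the code string, the fully qualified API name, the name as written in the source, the positional and keyword arguments, the call text, a second narrower span (apparently the argument list), a flag that looks like "imported under an alias", and the originating import statement. Every element is a literal, so ast.literal_eval can decode it; the field layout below is inferred from this dump, not from any official schema:

import ast

def parse_extract_api(raw: str):
    """Decode one extract_api cell into dicts (field order inferred from the rows in this dump)."""
    records = []
    for entry in ast.literal_eval(raw):
        span, api, written, args, call_text, arg_span, aliased, import_stmt = entry
        records.append({
            "span": span,                    # (start, end) offsets into the code string
            "api": api,                      # e.g. 'numpy.clip'
            "written_as": written,           # e.g. 'np.clip' when the module was imported as an alias
            "args": args,                    # ([positional reprs], {keyword: value repr})
            "call_text": call_text,          # the argument text as it appears in the source
            "aliased": aliased,              # True/False, apparently alias vs. direct import
            "import": import_stmt.strip(),   # e.g. 'import numpy as np'
        })
    return records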
import os
import datetime
import gym
import numpy as np
import matplotlib.pyplot as plt
from es import CMAES
import pandas as pd
import string


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


class Agent:
    def __init__(self, x, y, layer1_nodes, layer2_nodes):
        self.input = np.zeros(x, dtype=np.float128)
        self.weights1 = np.zeros((x, layer1_nodes), dtype=np.float128)
        self.weights2 = np.zeros((layer1_nodes, layer2_nodes), dtype=np.float128)
        self.weights3 = np.zeros((layer2_nodes, y), dtype=np.float128)
        self.output = np.zeros(y, dtype=np.float128)

    def feedforward(self, x):
        self.input = x
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.layer2 = sigmoid(np.dot(self.layer1, self.weights2))
        self.output = sigmoid(np.dot(self.layer2, self.weights3))

    def assignWeights(self, s):
        self.weights1 = s[0]
        self.weights2 = s[1]
        self.weights3 = s[2]


class RL:
    # initializes the CMA and RL algo
    __slots__ = ['HL1', "HL2", "NPOP", "MAX_ITER", "STEPS", "dir", "STATE_SIZE", "ACTION_SIZE", "env", "FINAL"]

    def __init__(self, D="DefaultDir", H1=64, H2=64, P=100, G=5000, S=50000, E="TimePilot-ram-v0",
                 wd=0.01,  # weight decay initialized to 0.01
                 si=0.5):
        # HYPERPARAMETERS
        self.HL1 = H1
        self.HL2 = H2
        self.NPOP = P
        self.MAX_ITER = G
        self.STEPS = S
        self.dir = D
        # CONSTANTS
        self.STATE_SIZE = 128
        self.ACTION_SIZE = self.decisions_env(E)
        self.env = gym.make(E)
        self.env.reset()
        # CMA
        NPARAMS = (self.STATE_SIZE * self.HL1) + (self.HL1 * self.HL2) + (self.HL2 * self.ACTION_SIZE)
        cma = CMAES(NPARAMS, popsize=self.NPOP, weight_decay=wd, sigma_init=si)
        self.FINAL = self.Engine(cma)

    # Function to initialize
    def decisions_env(self, name):
        if name == "TimePilot-ram-v0":
            return 10
        elif name == "Breakout-ram-v0":
            return 4
        return 10  # just return TimePilot command as default

    def findHighest(self, NN_Output):
        NN_Temp = NN_Output
        NN_I = []
        xF = 0
        index = 0
        foundI = 0
        for xl in range(self.ACTION_SIZE + 1):
            for NN_O in NN_Temp:
                if xF < NN_O:
                    xF = NN_O
                    foundI = index
                index = index + 1
            NN_Temp[foundI] = -1
            NN_I.append(foundI)
            index = 0
            xF = 0
        return NN_I[0]

    def weightsCalc(self, s):
        x1 = np.asarray(s[:self.STATE_SIZE * self.HL1], dtype=np.float128)
        x2 = np.asarray(s[self.STATE_SIZE * self.HL1:self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2], dtype=np.float128)
        x3 = np.asarray(s[self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2:], dtype=np.float128)
        x1 = np.reshape(x1, (self.STATE_SIZE, self.HL1))
        x2 = np.reshape(x2, (self.HL1, self.HL2))
        x3 = np.reshape(x3, (self.HL2, self.ACTION_SIZE))
        return (x1, x2, x3)

    # runs the sim and tallies reward
    def Fitness(self, solution):
        a = Agent(self.STATE_SIZE, self.ACTION_SIZE, self.HL1, self.HL2)
        a.assignWeights(self.weightsCalc(solution))
        fitness = 0
        self.env.reset()
        first = True
        for i in range(self.STEPS):
            if first:
                obs = a.input
                first = False
            a.feedforward(obs)
            choice = list(a.output)
            action = self.findHighest(choice)
            obs, reward, done, info = self.env.step(action)
            # self.env.render()
            fitness = fitness + reward
            if done:
                self.env.close()
                break
        self.env.close()
        return fitness

    # This function communicates with the es-tools CMA object "solver"
    def Engine(self, solver):
        history = []
        dLogs = []
        word = None
        Word = "Start: {0}\n".format(str(self.MAX_ITER))
        print(Word)
        dLogs.append(Word)
        for j in range(self.MAX_ITER):
            solutions = solver.ask()  # Generates parameters based on distribution mean and covariance matrix
            fitness_list = np.zeros(solver.popsize)
            for k in range(solver.popsize):
                fitness_list[k] = self.Fitness(solutions[k])
            solver.tell(fitness_list)  # update distribution mean and covariance matrix
            result = solver.result()
            history.append(result[1])
            if (j + 1) % 100 == 0:
                Word = "Iteration {0}___{1}".format((j + 1), result[1])
                print(Word)
                dLogs.append(Word)
            print("{0} {1}".format(str(j), self.dir), flush=True)
        print("local optimum discovered: {0}\n".format(result[0]))
        Word = "fitness score: {0}".format(result[1])
        print(Word)
        dLogs.append(Word)
        self.env.close()
        self.GRAPHDB(history, dLogs)
        return result[1]

    # Graphs, makes database, and saves to directory
    def GRAPHDB(self, history, dLogs):
        if not (os.path.exists(self.dir)):
            os.mkdir(self.dir)
        else:
            print("{0} already exists as a directory".format(self.dir))
        fig = plt.figure()
        ax = plt.subplot()
        plt.plot(np.arange(len(history)), history, 'r', label="Fitness")
        ax.set_ylabel('Score')
        ax.set_xlabel('Generation')
        ax.set_title('Fitness')
        plt.legend()
        plt.show()
        mainN = "_HL1_{0}_HL2_{1}_P_{2}_F_{3}".format(str(self.HL1), str(self.HL2), str(self.NPOP), str(self.MAX_ITER))
        fig.savefig("{0}/Grid_Plot_{1}.png".format(self.dir, mainN))
        DataR = pd.DataFrame()
        DataR['BestV'] = history
        print(DataR)
        DataR.to_csv("{0}/R_data_{1}.csv".format(self.dir, mainN), index=None, header=True)
        f = open("{0}/Logs_data_{1}.csv".format(self.dir, mainN), "w")
        f.writelines(str(dLogs))
        f.close()


def autoLabel(rects, I, ax):
    cI = 0
    for rect in rects:
        height = rect.get_height()
        information = "L1: {0} L2: {1}\nP: {2}\nG: {3}\nS: {4}".format(str(I[cI][1]), str(I[cI][2]), str(I[cI][3]), str(I[cI][4]), str(I[cI][5]))
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),
                    textcoords="offset points",
                    ha='center', va="bottom")
        ax.annotate('{}'.format(information),
                    xy=(rect.get_x() + rect.get_width() / 2, height / 2),
                    xytext=(0, 3),
                    textcoords="offset points",
                    ha='center', va="bottom")
        cI += 1


def createGraph(testCaseResults, t):
    fig, ax = plt.subplots()
    tests = []
    results = []
    for i in range(len(testCaseResults)):
        tests.append("{0}_{1}".format(currentLabelE, str(i)))
        results.append(testCaseResults[i][0])
    r = ax.bar(np.arange(len(testCaseResults)), results)
    ax.set_ylabel('Highest Score')
    ax.set_title('Overall Performance test case set {0}_{1}'.format(currentLabelE, currentLabelTC))
    ax.set_xticks(np.arange(len(testCaseResults)))
    ax.set_xticklabels(tests)
    plt.xticks(np.arange(int(len(testCaseResults))))
    autoLabel(r, testCaseResults, ax)
    fig.tight_layout()
    plt.show()
    if not (os.path.exists("TestOverall")):
        os.mkdir("TestOverall")
    else:
        print("TestOverall already exists as a directory")
    fig.savefig("TestOverall/NN_Plot_{0}_{1}_Overall.png".format(str(t), str(datetime.datetime.today())))


print("Main Started...")
# calculates n amount of test cases each with m amount of cases
# returns a bar graph of each test cases test in one graph, labeled
#################
# testCases represents n sets of tests cases that you are going to test out
#   TEST CASE   #
# each testCase set increases the Generation count linearly. This way we can compare the same pop
#################
# size with different generation counts
TestRLA = []  # Each test case set houses a m amount of Cases
startP = 50   # Each iteration is set to linearly increase the population size
startG = 500
envAtari = ["TimePilot-ram-v0", "Breakout-ram-v0"]
environments = len(envAtari)
testCases = 1
Cases = 1
currentLabelE = None
currentLabelTC = None
# number of environments we are testing on
for currentE in range(environments):
    currentLabelE = envAtari[currentE].split("-")[0]
    if not (os.path.exists(currentLabelE)):
        os.mkdir(currentLabelE)
    else:
        print("{0} already exists".format(currentLabelE))
    print("TEST SET on {0} environment".format(envAtari[currentE]))
    # (start, end). index at start = 1, index at the end = end - 1
    for tC in range(1, (testCases + 1)):
        # tC is used as the generation max multiplier per {test case set}
        currentLabelTC = string.ascii_uppercase[tC - 1]
        print("CASE {0} Set Time G: {1}\n".format(currentLabelTC, str(startG)))
        for i in range(1, (Cases + 1)):
            # i is used as the population multiplier per {test case}
            caseRL = RL(D="{0}/_RL_{1}{2}".format(currentLabelE, currentLabelTC, str(i - 1)),
                        P=(startP * i),
                        G=(startG * tC),
                        E=envAtari[currentE])
            TestRLA.append((int(caseRL.FINAL), caseRL.HL1, caseRL.HL2, caseRL.NPOP, caseRL.MAX_ITER, caseRL.STEPS))
        createGraph(TestRLA, tC)
        TestRLA = []
[ "os.path.exists", "numpy.reshape", "es.CMAES", "numpy.asarray", "numpy.exp", "datetime.datetime.today", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.dot", "matplotlib.pyplot.subplots", "os.mkdir", "pandas.DataFrame", "matplotlib.pyplot.subplot", "gym.make", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((6770, 6784), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6782, 6784), True, 'import matplotlib.pyplot as plt\n'), ((7360, 7370), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7368, 7370), True, 'import matplotlib.pyplot as plt\n'), ((286, 316), 'numpy.zeros', 'np.zeros', (['x'], {'dtype': 'np.float128'}), '(x, dtype=np.float128)\n', (294, 316), True, 'import numpy as np\n'), ((341, 387), 'numpy.zeros', 'np.zeros', (['(x, layer1_nodes)'], {'dtype': 'np.float128'}), '((x, layer1_nodes), dtype=np.float128)\n', (349, 387), True, 'import numpy as np\n'), ((412, 469), 'numpy.zeros', 'np.zeros', (['(layer1_nodes, layer2_nodes)'], {'dtype': 'np.float128'}), '((layer1_nodes, layer2_nodes), dtype=np.float128)\n', (420, 469), True, 'import numpy as np\n'), ((494, 540), 'numpy.zeros', 'np.zeros', (['(layer2_nodes, y)'], {'dtype': 'np.float128'}), '((layer2_nodes, y), dtype=np.float128)\n', (502, 540), True, 'import numpy as np\n'), ((563, 593), 'numpy.zeros', 'np.zeros', (['y'], {'dtype': 'np.float128'}), '(y, dtype=np.float128)\n', (571, 593), True, 'import numpy as np\n'), ((1577, 1588), 'gym.make', 'gym.make', (['E'], {}), '(E)\n', (1585, 1588), False, 'import gym\n'), ((1746, 1811), 'es.CMAES', 'CMAES', (['NPARAMS'], {'popsize': 'self.NPOP', 'weight_decay': 'wd', 'sigma_init': 'si'}), '(NPARAMS, popsize=self.NPOP, weight_decay=wd, sigma_init=si)\n', (1751, 1811), False, 'from es import CMAES\n'), ((2623, 2684), 'numpy.asarray', 'np.asarray', (['s[:self.STATE_SIZE * self.HL1]'], {'dtype': 'np.float128'}), '(s[:self.STATE_SIZE * self.HL1], dtype=np.float128)\n', (2633, 2684), True, 'import numpy as np\n'), ((2698, 2812), 'numpy.asarray', 'np.asarray', (['s[self.STATE_SIZE * self.HL1:self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2]'], {'dtype': 'np.float128'}), '(s[self.STATE_SIZE * self.HL1:self.STATE_SIZE * self.HL1 + self.\n HL1 * self.HL2], dtype=np.float128)\n', (2708, 2812), True, 'import numpy as np\n'), ((2821, 2909), 'numpy.asarray', 'np.asarray', (['s[self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2:]'], {'dtype': 'np.float128'}), '(s[self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2:], dtype=np.\n float128)\n', (2831, 2909), True, 'import numpy as np\n'), ((2918, 2961), 'numpy.reshape', 'np.reshape', (['x1', '(self.STATE_SIZE, self.HL1)'], {}), '(x1, (self.STATE_SIZE, self.HL1))\n', (2928, 2961), True, 'import numpy as np\n'), ((2975, 3011), 'numpy.reshape', 'np.reshape', (['x2', '(self.HL1, self.HL2)'], {}), '(x2, (self.HL1, self.HL2))\n', (2985, 3011), True, 'import numpy as np\n'), ((3025, 3069), 'numpy.reshape', 'np.reshape', (['x3', '(self.HL2, self.ACTION_SIZE)'], {}), '(x3, (self.HL2, self.ACTION_SIZE))\n', (3035, 3069), True, 'import numpy as np\n'), ((5342, 5354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5352, 5354), True, 'import matplotlib.pyplot as plt\n'), ((5368, 5381), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (5379, 5381), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5574), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5572, 5574), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5593), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5591, 5593), True, 'import matplotlib.pyplot as plt\n'), ((5801, 5815), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5813, 5815), True, 'import pandas as pd\n'), ((7383, 7412), 'os.path.exists', 'os.path.exists', (['"""TestOverall"""'], {}), "('TestOverall')\n", (7397, 7412), False, 'import os\n'), ((7423, 7446), 'os.mkdir', 
'os.mkdir', (['"""TestOverall"""'], {}), "('TestOverall')\n", (7431, 7446), False, 'import os\n'), ((8501, 8530), 'os.path.exists', 'os.path.exists', (['currentLabelE'], {}), '(currentLabelE)\n', (8515, 8530), False, 'import os\n'), ((8541, 8564), 'os.mkdir', 'os.mkdir', (['currentLabelE'], {}), '(currentLabelE)\n', (8549, 8564), False, 'import os\n'), ((180, 190), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (186, 190), True, 'import numpy as np\n'), ((678, 711), 'numpy.dot', 'np.dot', (['self.input', 'self.weights1'], {}), '(self.input, self.weights1)\n', (684, 711), True, 'import numpy as np\n'), ((743, 777), 'numpy.dot', 'np.dot', (['self.layer1', 'self.weights2'], {}), '(self.layer1, self.weights2)\n', (749, 777), True, 'import numpy as np\n'), ((809, 843), 'numpy.dot', 'np.dot', (['self.layer2', 'self.weights3'], {}), '(self.layer2, self.weights3)\n', (815, 843), True, 'import numpy as np\n'), ((4289, 4313), 'numpy.zeros', 'np.zeros', (['solver.popsize'], {}), '(solver.popsize)\n', (4297, 4313), True, 'import numpy as np\n'), ((5183, 5207), 'os.path.exists', 'os.path.exists', (['self.dir'], {}), '(self.dir)\n', (5197, 5207), False, 'import os\n'), ((5222, 5240), 'os.mkdir', 'os.mkdir', (['self.dir'], {}), '(self.dir)\n', (5230, 5240), False, 'import os\n'), ((7593, 7618), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7616, 7618), False, 'import datetime\n')]
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from ..losses import build_loss


class ConvBNAct(nn.Sequential):
    def __init__(self, in_channels: int, out_channels: int):
        super().__init__(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )


class Stage(nn.Sequential):
    def __init__(self, in_channels: int, out_channels: int, n_layers: int = 2):
        super().__init__()
        self.add_module('0', ConvBNAct(in_channels, out_channels))
        for i in range(1, n_layers):
            self.add_module(str(i), ConvBNAct(out_channels, out_channels))


class UNet(nn.Module):
    def __init__(self, stage_channels: list, n_classes: int, criterion: dict):
        super().__init__()
        # layers
        assert len(stage_channels) == 5
        c1, c2, c3, c4, c5 = stage_channels
        self.stage1_down = Stage(3, c1)
        self.stage2_down = Stage(c1, c2)
        self.stage3_down = Stage(c2, c3)
        self.stage4_down = Stage(c3, c4)
        self.stage5 = Stage(c4, c5)
        self.stage4_up = Stage(c5, c4)
        self.stage3_up = Stage(c4, c3)
        self.stage2_up = Stage(c3, c2)
        self.stage1_up = Stage(c2, c1)
        self.pool = nn.MaxPool2d(2, 2)
        self.upconv_5to4 = nn.ConvTranspose2d(c5, c4, 2, 2)
        self.upconv_4to3 = nn.ConvTranspose2d(c4, c3, 2, 2)
        self.upconv_3to2 = nn.ConvTranspose2d(c3, c2, 2, 2)
        self.upconv_2to1 = nn.ConvTranspose2d(c2, c1, 2, 2)
        self.cls_top = nn.Conv2d(c1, n_classes, kernel_size=1)
        # loss
        self.cls_loss = build_loss(criterion['cls_loss'])
        self._init_weights()

    def forward(self, x):
        x = x1 = self.stage1_down(x)
        x = self.pool(x)
        x = x2 = self.stage2_down(x)
        x = self.pool(x)
        x = x3 = self.stage3_down(x)
        x = self.pool(x)
        x = x4 = self.stage4_down(x)
        x = self.pool(x)
        x = self.upconv_5to4(self.stage5(x))
        x = self.upconv_4to3(self.stage4_up(torch.cat([x4, x], dim=1)))
        x = self.upconv_3to2(self.stage3_up(torch.cat([x3, x], dim=1)))
        x = self.upconv_2to1(self.stage2_up(torch.cat([x2, x], dim=1)))
        out = self.cls_top(self.stage1_up(torch.cat([x1, x], dim=1)))
        return out

    def _init_weights(self):
        for name, m in self.named_modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    if 'cls_top' in name:
                        nn.init.constant_(m.bias, np.log((1 - 0.01) / 0.01))
                    else:
                        nn.init.constant_(m.bias, 0.0)

    def loss(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        loss = self.cls_loss(outputs, targets)
        return loss

    def predict(self, outputs: torch.Tensor) -> torch.Tensor:
        preds = F.softmax(outputs, dim=1)
        return preds
[ "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.constant_", "numpy.log", "torch.nn.init.kaiming_normal_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.ConvTranspose2d", "torch.nn.functional.softmax", "torch.cat" ]
[((1339, 1357), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1351, 1357), True, 'import torch.nn as nn\n'), ((1385, 1417), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c5', 'c4', '(2)', '(2)'], {}), '(c5, c4, 2, 2)\n', (1403, 1417), True, 'import torch.nn as nn\n'), ((1445, 1477), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c4', 'c3', '(2)', '(2)'], {}), '(c4, c3, 2, 2)\n', (1463, 1477), True, 'import torch.nn as nn\n'), ((1505, 1537), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c3', 'c2', '(2)', '(2)'], {}), '(c3, c2, 2, 2)\n', (1523, 1537), True, 'import torch.nn as nn\n'), ((1565, 1597), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c2', 'c1', '(2)', '(2)'], {}), '(c2, c1, 2, 2)\n', (1583, 1597), True, 'import torch.nn as nn\n'), ((1622, 1661), 'torch.nn.Conv2d', 'nn.Conv2d', (['c1', 'n_classes'], {'kernel_size': '(1)'}), '(c1, n_classes, kernel_size=1)\n', (1631, 1661), True, 'import torch.nn as nn\n'), ((3083, 3108), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (3092, 3108), True, 'import torch.nn.functional as F\n'), ((252, 326), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=3, padding=1, bias=False)\n', (261, 326), True, 'import torch.nn as nn\n'), ((340, 368), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (354, 368), True, 'import torch.nn as nn\n'), ((382, 403), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (389, 403), True, 'import torch.nn as nn\n'), ((2130, 2155), 'torch.cat', 'torch.cat', (['[x4, x]'], {'dim': '(1)'}), '([x4, x], dim=1)\n', (2139, 2155), False, 'import torch\n'), ((2202, 2227), 'torch.cat', 'torch.cat', (['[x3, x]'], {'dim': '(1)'}), '([x3, x], dim=1)\n', (2211, 2227), False, 'import torch\n'), ((2274, 2299), 'torch.cat', 'torch.cat', (['[x2, x]'], {'dim': '(1)'}), '([x2, x], dim=1)\n', (2283, 2299), False, 'import torch\n'), ((2344, 2369), 'torch.cat', 'torch.cat', (['[x1, x]'], {'dim': '(1)'}), '([x1, x], dim=1)\n', (2353, 2369), False, 'import torch\n'), ((2545, 2614), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (2568, 2614), True, 'import torch.nn as nn\n'), ((2823, 2853), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.0)'], {}), '(m.bias, 0.0)\n', (2840, 2853), True, 'import torch.nn as nn\n'), ((2746, 2771), 'numpy.log', 'np.log', (['((1 - 0.01) / 0.01)'], {}), '((1 - 0.01) / 0.01)\n', (2752, 2771), True, 'import numpy as np\n')]
import numpy as np
import pandas as pd
import unittest

from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow


def create_time_series_data_set(start_date=pd.datetime(year=2000, month=1, day=1), n_rows=100):
    end_date = start_date + pd.Timedelta(days=n_rows - 1)
    ds = np.random.rand(n_rows)
    X = pd.DataFrame(ds, columns=['variable'], index=pd.date_range(start_date, end_date))
    y = np.random.randint(2, size=(n_rows,))
    return X, y


class TestGrowingWindow(unittest.TestCase):

    def test_n_splits(self):
        assert GrowingWindow(4).get_n_splits(np.arange(15).reshape(3, 5)) == 4

    def test_n_splits_returned(self):
        assert len(list(GrowingWindow(4).split(np.arange(15).reshape(3, 5), np.arange(3)))) == 4

    def test_n_splits_testsize(self):
        for train, test in GrowingWindow(4).split(np.arange(15).reshape(5, 3), np.arange(5)):
            assert len(test) == 1

    def test_n_splits_testsize2(self):
        for i, (train, test) in zip(range(4), GrowingWindow(4).split(np.arange(15).reshape(5, 3), np.arange(5))):
            assert len(train) == i + 1


class TestIntervalGrowingWindow(unittest.TestCase):

    def test_split_on_index(self):
        X, y = create_time_series_data_set()
        cv = IntervalGrowingWindow(
            test_start_date=pd.datetime(year=2000, month=2, day=1),
            test_end_date=pd.datetime(year=2000, month=3, day=1),
            test_size='7D')
        self.assertTrue(len(list(cv.split(X, y))) == 4)

    def test_split_on_array(self):
        X, y = create_time_series_data_set()
        test_size_in_days = 7
        cv = IntervalGrowingWindow(
            timestamps=X.index.values,
            test_start_date=pd.datetime(year=2000, month=2, day=1),
            test_end_date=pd.datetime(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=test_size_in_days))
        self.assertTrue(len(list(cv.split(X, y))) == 4)

    def test_split_test_size(self):
        X, y = create_time_series_data_set()
        test_size_in_days = 7
        cv = IntervalGrowingWindow(
            test_start_date=pd.datetime(year=2000, month=2, day=1),
            test_end_date=pd.datetime(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=test_size_in_days))
        for _, test in cv.split(X, y):
            self.assertTrue(len(test) == test_size_in_days)

    def test_split_with_train_size(self):
        X, y = create_time_series_data_set()
        train_size_in_days = 14
        cv = IntervalGrowingWindow(
            test_start_date=pd.datetime(year=2000, month=2, day=1),
            test_end_date=pd.datetime(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=7),
            train_size=pd.Timedelta(days=train_size_in_days))
        for train, _ in cv.split(X, y):
            self.assertTrue(len(train) == train_size_in_days)

    def test_n_splits(self):
        X, y = create_time_series_data_set()
        cv = IntervalGrowingWindow(
            test_start_date=pd.datetime(year=2000, month=2, day=1),
            test_end_date=pd.datetime(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=7))
        self.assertTrue(cv.get_n_splits(X) == 4)
[ "numpy.random.rand", "numpy.arange", "pandas.Timedelta", "bdranalytics.sklearn.model_selection.GrowingWindow", "numpy.random.randint", "pandas.datetime", "pandas.date_range" ]
[((187, 225), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(1)', 'day': '(1)'}), '(year=2000, month=1, day=1)\n', (198, 225), True, 'import pandas as pd\n'), ((307, 329), 'numpy.random.rand', 'np.random.rand', (['n_rows'], {}), '(n_rows)\n', (321, 329), True, 'import numpy as np\n'), ((472, 508), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(n_rows,)'}), '(2, size=(n_rows,))\n', (489, 508), True, 'import numpy as np\n'), ((269, 298), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(n_rows - 1)'}), '(days=n_rows - 1)\n', (281, 298), True, 'import pandas as pd\n'), ((426, 461), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {}), '(start_date, end_date)\n', (439, 461), True, 'import pandas as pd\n'), ((948, 960), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (957, 960), True, 'import numpy as np\n'), ((896, 912), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (909, 912), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((1135, 1147), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1144, 1147), True, 'import numpy as np\n'), ((1389, 1427), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (1400, 1427), True, 'import pandas as pd\n'), ((1455, 1493), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (1466, 1493), True, 'import pandas as pd\n'), ((1797, 1835), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (1808, 1835), True, 'import pandas as pd\n'), ((1863, 1901), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (1874, 1901), True, 'import pandas as pd\n'), ((1925, 1961), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'test_size_in_days'}), '(days=test_size_in_days)\n', (1937, 1961), True, 'import pandas as pd\n'), ((2199, 2237), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (2210, 2237), True, 'import pandas as pd\n'), ((2265, 2303), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (2276, 2303), True, 'import pandas as pd\n'), ((2327, 2363), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'test_size_in_days'}), '(days=test_size_in_days)\n', (2339, 2363), True, 'import pandas as pd\n'), ((2652, 2690), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (2663, 2690), True, 'import pandas as pd\n'), ((2718, 2756), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (2729, 2756), True, 'import pandas as pd\n'), ((2780, 2800), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2792, 2800), True, 'import pandas as pd\n'), ((2825, 2862), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'train_size_in_days'}), '(days=train_size_in_days)\n', (2837, 2862), True, 'import pandas as pd\n'), ((3108, 3146), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (3119, 3146), True, 'import pandas as pd\n'), ((3174, 3212), 'pandas.datetime', 'pd.datetime', ([], 
{'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (3185, 3212), True, 'import pandas as pd\n'), ((3236, 3256), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(7)'}), '(days=7)\n', (3248, 3256), True, 'import pandas as pd\n'), ((617, 633), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (630, 633), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((919, 932), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (928, 932), True, 'import numpy as np\n'), ((1083, 1099), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (1096, 1099), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((647, 660), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (656, 660), True, 'import numpy as np\n'), ((809, 821), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (818, 821), True, 'import numpy as np\n'), ((1106, 1119), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (1115, 1119), True, 'import numpy as np\n'), ((744, 760), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (757, 760), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((780, 793), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (789, 793), True, 'import numpy as np\n')]
"""Implement 3D image thresholding.""" from typing import List, Optional import numpy.typing as npt import numpy as np from ..image_utils import get_xy_block_coords, get_xy_block from ..gpu import get_image_method def get_threshold_otsu(image: npt.ArrayLike, blur_sigma=5): """Perform Otsu's thresholding with Gaussian blur.""" skimage_gaussian = get_image_method(image, "skimage.filters.gaussian") skimage_otsu = get_image_method(image, "skimage.filters.threshold_otsu") image = skimage_gaussian(image, sigma=blur_sigma) return skimage_otsu(image) def select_nonempty_patches( image: npt.ArrayLike, patch_size: int = 512, min_nonzeros: float = 0.02, threshold: Optional[float] = None, verbose: bool = False, ) -> List[List[int]]: """Select XY patches from 3D image by percent of nonzero voxels.""" verboseprint = print if verbose else lambda *a, **k: None selected_patches = [] if threshold is None: threshold = get_threshold_otsu(image) img_as_float = get_image_method(image, "skimage.img_as_float") binary_image = (img_as_float(image) > threshold).astype(np.uint8) patch_coordinates = get_xy_block_coords(image.shape, patch_size) verboseprint(f"Nonzero pixels in the image: {np.count_nonzero(binary_image) / binary_image.size}") # type: ignore[operator] for single_patch_coords in patch_coordinates: binary_tile = get_xy_block(binary_image, single_patch_coords) patch_nonzero = np.count_nonzero(binary_tile) / binary_tile.size if patch_nonzero >= min_nonzeros: selected_patches.append(single_patch_coords) return selected_patches
[ "numpy.count_nonzero" ]
[((1495, 1524), 'numpy.count_nonzero', 'np.count_nonzero', (['binary_tile'], {}), '(binary_tile)\n', (1511, 1524), True, 'import numpy as np\n'), ((1270, 1300), 'numpy.count_nonzero', 'np.count_nonzero', (['binary_image'], {}), '(binary_image)\n', (1286, 1300), True, 'import numpy as np\n')]
import cv2
import logging
import numpy as np
import nibabel as nib

from skimage.measure import label
from skimage.morphology import binary_closing, cube

from fetal_brain_mask.model import Unet

logger = logging.getLogger(__name__)


class MaskingTool:
    def __init__(self):
        self.model = Unet()

    def mask_tensor(self, data, smoothen=True):
        # axes have to be switched from (256,256,x) to (x,256,256)
        data = np.moveaxis(data, -1, 0)
        # normalize each image slice
        data = np.array([self.normalize_uint8(islice) for islice in data], dtype=np.uint16)
        data = data[..., np.newaxis]

        resize_needed = False
        original_shape = (data.shape[2], data.shape[1])
        if data.shape[1] != 256 or data.shape[2] != 256:
            data = self.resize_data(data)
            resize_needed = True

        # do prediction
        data = self.model.predict_mask(data)

        if smoothen:
            # it would be better for this to be put in its own plugin
            data = binary_closing(np.squeeze(data), cube(2))
            try:
                labels = label(data)
                data = (labels == np.argmax(np.bincount(labels.flat)[1:]) + 1).astype(np.uint16)
            except Exception as e:
                logger.error(e)
                logger.error('Failed to apply smoothing')

        if resize_needed:
            data = self.resize_data(data.astype(np.uint16), target=original_shape)

        # remove extra dimension
        data = np.squeeze(data)

        # return result into shape (256,256, X)
        data = np.moveaxis(data, 0, -1)
        return data

    @staticmethod
    def normalize_uint8(img_slice):
        """
        Normalizes the image to be in the range of 0-255
        it round up negative values to 0 and caps the top values at the 97% value
        as to avoid outliers
        """
        img_slice[img_slice < 0] = 0
        flat_sorted = np.sort(img_slice.flatten())

        # dont consider values greater than 97% of the values
        # maybe we should use a statistical method here instead?
        top_3_limit = int(len(flat_sorted) * 0.97)
        limit = flat_sorted[top_3_limit]

        img_slice[img_slice > limit] = limit

        rows, cols = img_slice.shape
        # create new empty image
        new_img = np.zeros((rows, cols))
        max_val = np.max(img_slice)
        if max_val == 0:
            return new_img

        # normalize all values
        for i in range(rows):
            for j in range(cols):
                new_img[i, j] = int((float(img_slice[i, j]) / float(max_val)) * 255)

        return new_img

    @staticmethod
    def resize_data(image, target=(256, 256)):
        # maybe use a library for this?
        image = np.squeeze(image)
        resized_img = []
        for i in range(image.shape[0]):
            img_slice = cv2.resize(image[i, :, :], target)
            resized_img.append(img_slice)

        image = np.array(resized_img, dtype=np.uint16)
        return image[..., np.newaxis]
[ "logging.getLogger", "skimage.morphology.cube", "numpy.squeeze", "numpy.max", "numpy.array", "numpy.zeros", "fetal_brain_mask.model.Unet", "numpy.moveaxis", "cv2.resize", "numpy.bincount", "skimage.measure.label" ]
[((204, 231), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (221, 231), False, 'import logging\n'), ((298, 304), 'fetal_brain_mask.model.Unet', 'Unet', ([], {}), '()\n', (302, 304), False, 'from fetal_brain_mask.model import Unet\n'), ((436, 460), 'numpy.moveaxis', 'np.moveaxis', (['data', '(-1)', '(0)'], {}), '(data, -1, 0)\n', (447, 460), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (1536, 1542), True, 'import numpy as np\n'), ((1606, 1630), 'numpy.moveaxis', 'np.moveaxis', (['data', '(0)', '(-1)'], {}), '(data, 0, -1)\n', (1617, 1630), True, 'import numpy as np\n'), ((2341, 2363), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (2349, 2363), True, 'import numpy as np\n'), ((2382, 2399), 'numpy.max', 'np.max', (['img_slice'], {}), '(img_slice)\n', (2388, 2399), True, 'import numpy as np\n'), ((2779, 2796), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (2789, 2796), True, 'import numpy as np\n'), ((2980, 3018), 'numpy.array', 'np.array', (['resized_img'], {'dtype': 'np.uint16'}), '(resized_img, dtype=np.uint16)\n', (2988, 3018), True, 'import numpy as np\n'), ((2886, 2920), 'cv2.resize', 'cv2.resize', (['image[i, :, :]', 'target'], {}), '(image[i, :, :], target)\n', (2896, 2920), False, 'import cv2\n'), ((1042, 1058), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (1052, 1058), True, 'import numpy as np\n'), ((1060, 1067), 'skimage.morphology.cube', 'cube', (['(2)'], {}), '(2)\n', (1064, 1067), False, 'from skimage.morphology import binary_closing, cube\n'), ((1111, 1122), 'skimage.measure.label', 'label', (['data'], {}), '(data)\n', (1116, 1122), False, 'from skimage.measure import label\n'), ((1167, 1191), 'numpy.bincount', 'np.bincount', (['labels.flat'], {}), '(labels.flat)\n', (1178, 1191), True, 'import numpy as np\n')]
# ##### BEGIN GPL LICENSE BLOCK #####
# KeenTools for blender is a blender addon for using KeenTools in Blender.
# Copyright (C) 2019 KeenTools
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ##### END GPL LICENSE BLOCK #####

import logging

import numpy as np
import bpy


def find_bpy_image_by_name(image_name):
    image_num = bpy.data.images.find(image_name)
    if image_num >= 0:
        return bpy.data.images[image_num]
    return None


def remove_bpy_image(image):
    if image and image in bpy.data.images:
        bpy.data.images.remove(image)


def remove_bpy_image_by_name(image_name):
    image = find_bpy_image_by_name(image_name)
    if image is not None:
        bpy.data.images.remove(image)


def store_bpy_image_in_scene(image):
    image.pack()
    image.use_fake_user = True


def add_alpha_channel(np_image_array):
    return np.dstack((np_image_array, np.ones(np_image_array.shape[:2])))


def check_bpy_image_size(image):
    if not image or not image.size:
        return False
    w, h = image.size[:2]
    return w > 0 and h > 0


def check_bpy_image_has_same_size(image, size):
    if not image or not image.size:
        return False
    w, h = image.size[:2]
    return w == size[0] and h == size[1]


def safe_bpy_image_loading(blender_name, path):
    tex = find_bpy_image_by_name(blender_name)
    if tex is not None:
        if check_bpy_image_size(tex):
            return tex
        else:
            remove_bpy_image_by_name(blender_name)
    try:
        image = bpy.data.images.load(path)
        image.name = blender_name
    except Exception:
        logger = logging.getLogger(__name__)
        logger.error('Source texture for "{}" '
                     'is not found on path: {}'.format(blender_name, path))
        return None
    if not check_bpy_image_size(image):
        return None
    return image


def safe_bpy_image_in_scene_loading(blender_name, path):
    logger = logging.getLogger(__name__)
    tex = find_bpy_image_by_name(blender_name)
    if tex is not None:
        if check_bpy_image_size(tex):
            return tex
        else:
            remove_bpy_image_by_name(blender_name)
    try:
        image = bpy.data.images.load(path)
    except Exception:
        logger.error('Source texture for "{}" '
                     'is not found on path: {}'.format(blender_name, path))
        return None
    if not check_bpy_image_size(image):
        bpy.data.images.remove(image)
        logger.error('Source texture "{}" '
                     'has wrong format on path: {}'.format(blender_name, path))
        return None

    tex = bpy.data.images.new(blender_name,
                              width=image.size[0], height=image.size[1],
                              alpha=True, float_buffer=False)
    tex.pixels[:] = image.pixels[:]
    store_bpy_image_in_scene(tex)
    bpy.data.images.remove(image)
    return tex
[ "logging.getLogger", "numpy.ones", "bpy.data.images.new", "bpy.data.images.find", "bpy.data.images.load", "bpy.data.images.remove" ]
[((918, 950), 'bpy.data.images.find', 'bpy.data.images.find', (['image_name'], {}), '(image_name)\n', (938, 950), False, 'import bpy\n'), ((2513, 2540), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2530, 2540), False, 'import logging\n'), ((3189, 3301), 'bpy.data.images.new', 'bpy.data.images.new', (['blender_name'], {'width': 'image.size[0]', 'height': 'image.size[1]', 'alpha': '(True)', 'float_buffer': '(False)'}), '(blender_name, width=image.size[0], height=image.size[1],\n alpha=True, float_buffer=False)\n', (3208, 3301), False, 'import bpy\n'), ((3432, 3461), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (3454, 3461), False, 'import bpy\n'), ((1114, 1143), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (1136, 1143), False, 'import bpy\n'), ((1269, 1298), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (1291, 1298), False, 'import bpy\n'), ((2092, 2118), 'bpy.data.images.load', 'bpy.data.images.load', (['path'], {}), '(path)\n', (2112, 2118), False, 'import bpy\n'), ((2763, 2789), 'bpy.data.images.load', 'bpy.data.images.load', (['path'], {}), '(path)\n', (2783, 2789), False, 'import bpy\n'), ((3004, 3033), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (3026, 3033), False, 'import bpy\n'), ((1465, 1498), 'numpy.ones', 'np.ones', (['np_image_array.shape[:2]'], {}), '(np_image_array.shape[:2])\n', (1472, 1498), True, 'import numpy as np\n'), ((2192, 2219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2209, 2219), False, 'import logging\n')]
""" This file contains quantum code in support of Shor's Algorithm """ """ Imports from qiskit""" from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister import sys import math import numpy as np """ ********* QFT Functions *** """ """ Function to create QFT """ def create_QFT(circuit,up_reg,n,with_swaps): i=n-1 """ Apply the H gates and Cphases""" """ The Cphases with |angle| < threshold are not created because they do nothing. The threshold is put as being 0 so all CPhases are created, but the clause is there so if wanted just need to change the 0 of the if-clause to the desired value """ while i>=0: circuit.h(up_reg[i]) j=i-1 while j>=0: if (np.pi)/(pow(2,(i-j))) > 0: circuit.cu1( (np.pi)/(pow(2,(i-j))) , up_reg[i] , up_reg[j] ) j=j-1 i=i-1 """ If specified, apply the Swaps at the end """ if with_swaps==1: i=0 while i < ((n-1)/2): circuit.swap(up_reg[i], up_reg[n-1-i]) i=i+1 """ Function to create inverse QFT """ def create_inverse_QFT(circuit,up_reg,n,with_swaps): """ If specified, apply the Swaps at the beginning""" if with_swaps==1: i=0 while i < ((n-1)/2): circuit.swap(up_reg[i], up_reg[n-1-i]) i=i+1 """ Apply the H gates and Cphases""" """ The Cphases with |angle| < threshold are not created because they do nothing. The threshold is put as being 0 so all CPhases are created, but the clause is there so if wanted just need to change the 0 of the if-clause to the desired value """ i=0 while i<n: circuit.h(up_reg[i]) if i != n-1: j=i+1 y=i while y>=0: if (np.pi)/(pow(2,(j-y))) > 0: circuit.cu1( - (np.pi)/(pow(2,(j-y))) , up_reg[j] , up_reg[y] ) y=y-1 i=i+1 """ ********* Arithmetic Functions *** """ """ Helper Functions """ def egcd(a, b): if a == 0: return (b, 0, 1) else: g, y, x = egcd(b % a, a) return (g, x - (b // a) * y, y) def modinv(a, m): g, x, y = egcd(a, m) if g != 1: raise Exception('modular inverse does not exist') else: return x % m """Function that calculates the angle of a phase shift in the sequential QFT based on the binary digits of a.""" """a represents a possible value of the classical register""" def getAngle(a, N): """convert the number a to a binary string with length N""" s=bin(int(a))[2:].zfill(N) angle = 0 for i in range(0, N): """if the digit is 1, add the corresponding value to the angle""" if s[N-1-i] == '1': angle += math.pow(2, -(N-i)) angle *= np.pi return angle """Function that calculates the array of angles to be used in the addition in Fourier Space""" def getAngles(a,N): s=bin(int(a))[2:].zfill(N) angles=np.zeros([N]) for i in range(0, N): for j in range(i,N): if s[j]=='1': angles[N-i-1]+=math.pow(2, -(j-i)) angles[N-i-1]*=np.pi return angles """Creation of a doubly controlled phase gate""" def ccphase(circuit, angle, ctl1, ctl2, tgt): circuit.cu1(angle/2,ctl1,tgt) circuit.cx(ctl2,ctl1) circuit.cu1(-angle/2,ctl1,tgt) circuit.cx(ctl2,ctl1) circuit.cu1(angle/2,ctl2,tgt) """Creation of the circuit that performs addition by a in Fourier Space""" """Can also be used for subtraction by setting the parameter inv to a value different from 0""" def phiADD(circuit, q, a, N, inv): angle=getAngles(a,N) for i in range(0,N): if inv==0: circuit.u1(angle[i],q[i]) """addition""" else: circuit.u1(-angle[i],q[i]) """subtraction""" """Single controlled version of the phiADD circuit""" def cphiADD(circuit, q, ctl, a, n, inv): angle=getAngles(a,n) for i in range(0,n): if inv==0: circuit.cu1(angle[i],ctl,q[i]) else: circuit.cu1(-angle[i],ctl,q[i]) """Doubly controlled version of the phiADD circuit""" def ccphiADD(circuit,q,ctl1,ctl2,a,n,inv): 
angle=getAngles(a,n) for i in range(0,n): if inv==0: ccphase(circuit,angle[i],ctl1,ctl2,q[i]) else: ccphase(circuit,-angle[i],ctl1,ctl2,q[i]) """Circuit that implements doubly controlled modular addition by a""" def ccphiADDmodN(circuit, q, ctl1, ctl2, aux, a, N, n): ccphiADD(circuit, q, ctl1, ctl2, a, n, 0) phiADD(circuit, q, N, n, 1) create_inverse_QFT(circuit, q, n, 0) circuit.cx(q[n-1],aux) create_QFT(circuit,q,n,0) cphiADD(circuit, q, aux, N, n, 0) ccphiADD(circuit, q, ctl1, ctl2, a, n, 1) create_inverse_QFT(circuit, q, n, 0) circuit.x(q[n-1]) circuit.cx(q[n-1], aux) circuit.x(q[n-1]) create_QFT(circuit,q,n,0) ccphiADD(circuit, q, ctl1, ctl2, a, n, 0) """Circuit that implements the inverse of doubly controlled modular addition by a""" def ccphiADDmodN_inv(circuit, q, ctl1, ctl2, aux, a, N, n): ccphiADD(circuit, q, ctl1, ctl2, a, n, 1) create_inverse_QFT(circuit, q, n, 0) circuit.x(q[n-1]) circuit.cx(q[n-1],aux) circuit.x(q[n-1]) create_QFT(circuit, q, n, 0) ccphiADD(circuit, q, ctl1, ctl2, a, n, 0) cphiADD(circuit, q, aux, N, n, 1) create_inverse_QFT(circuit, q, n, 0) circuit.cx(q[n-1], aux) create_QFT(circuit, q, n, 0) phiADD(circuit, q, N, n, 0) ccphiADD(circuit, q, ctl1, ctl2, a, n, 1) """Circuit that implements single controlled modular multiplication by a""" def cMULTmodN(circuit, ctl, q, aux, a, N, n): create_QFT(circuit,aux,n+1,0) for i in range(0, n): ccphiADDmodN(circuit, aux, q[i], ctl, aux[n+1], (2**i)*a % N, N, n+1) create_inverse_QFT(circuit, aux, n+1, 0) for i in range(0, n): circuit.cswap(ctl,q[i],aux[i]) a_inv = modinv(a, N) create_QFT(circuit, aux, n+1, 0) i = n-1 while i >= 0: ccphiADDmodN_inv(circuit, aux, q[i], ctl, aux[n+1], math.pow(2,i)*a_inv % N, N, n+1) i -= 1 create_inverse_QFT(circuit, aux, n+1, 0)
[ "math.pow", "numpy.zeros" ]
[((3006, 3019), 'numpy.zeros', 'np.zeros', (['[N]'], {}), '([N])\n', (3014, 3019), True, 'import numpy as np\n'), ((2791, 2812), 'math.pow', 'math.pow', (['(2)', '(-(N - i))'], {}), '(2, -(N - i))\n', (2799, 2812), False, 'import math\n'), ((3132, 3153), 'math.pow', 'math.pow', (['(2)', '(-(j - i))'], {}), '(2, -(j - i))\n', (3140, 3153), False, 'import math\n'), ((6151, 6165), 'math.pow', 'math.pow', (['(2)', 'i'], {}), '(2, i)\n', (6159, 6165), False, 'import math\n')]
import sys
sys.path.append('.')

from sslplay.data.digits import DataDigits

import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np

obj_data = DataDigits()
obj_data.load()
X = obj_data.X
y = obj_data.y

target_names = np.array(["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])
colors = np.array(['black', 'lime', 'darkorange', 'darkred', 'chocolate',
                   'yellow', 'olive', 'cyan', 'darkgrey', 'darkgreen'])

scaler = StandardScaler()
pca = PCA(n_components=6)
np.random.seed(1102)
X_r = pca.fit(scaler.fit_transform(X)).transform(scaler.fit_transform(X))

array_subset = np.random.choice(range(X_r.shape[0]), size=500, replace=False)
X_r = X_r[array_subset, :]
y_r = y[array_subset]

array_classes_show = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
plt.figure()
for color, i, target_name in zip(colors[array_classes_show], \
        array_classes_show, target_names[array_classes_show]):
    plt.scatter(
        X_r[y_r == i, 0], X_r[y_r == i, 1], color=color, alpha=.8, lw=2, label=target_name
    )
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.suptitle('PCA - DIGITS')
plt.title("Explained variance ratio: %s" % str(pca.explained_variance_ratio_[[0, 1]]))
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.legend(fancybox=True, framealpha=1)
plt.show()

"""
array_classes_show = [1,2,3,6,8,9]
plt.figure()
for color, i, target_name in zip(colors[array_classes_show], \
        array_classes_show, target_names[array_classes_show]):
    plt.scatter(
        X_r[y_r == i, 2], X_r[y_r == i, 3], color=color, alpha=.8, lw=2, label=target_name
    )
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.suptitle('PCA - DIGITS')
plt.title("Explained variance ratio: %s" % str(pca.explained_variance_ratio_[[2, 3]]))
plt.xlabel("Dimension 3")
plt.ylabel("Dimension 4")
plt.legend(fancybox=True, framealpha=1)
plt.show()

array_classes_show = [2,6,8,9]
plt.figure()
for color, i, target_name in zip(colors[array_classes_show], \
        array_classes_show, target_names[array_classes_show]):
    plt.scatter(
        X_r[y_r == i, 4], X_r[y_r == i, 5], color=color, alpha=.8, lw=2, label=target_name
    )
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.suptitle('PCA - DIGITS')
plt.title("Explained variance ratio: %s" % str(pca.explained_variance_ratio_[[4, 5]]))
plt.xlabel("Dimension 5")
plt.ylabel("Dimension 6")
plt.legend(fancybox=True, framealpha=1)
plt.show()
"""
[ "matplotlib.pyplot.ylabel", "sklearn.decomposition.PCA", "matplotlib.pyplot.xlabel", "sslplay.data.digits.DataDigits", "numpy.array", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.figure", "numpy.random.seed", "matplotlib.pyplot.scatter", "sys.path.append", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((11, 31), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (26, 31), False, 'import sys\n'), ((227, 239), 'sslplay.data.digits.DataDigits', 'DataDigits', ([], {}), '()\n', (237, 239), False, 'from sslplay.data.digits import DataDigits\n'), ((302, 362), 'numpy.array', 'np.array', (["['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']"], {}), "(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", (310, 362), True, 'import numpy as np\n'), ((372, 493), 'numpy.array', 'np.array', (["['black', 'lime', 'darkorange', 'darkred', 'chocolate', 'yellow', 'olive',\n 'cyan', 'darkgrey', 'darkgreen']"], {}), "(['black', 'lime', 'darkorange', 'darkred', 'chocolate', 'yellow',\n 'olive', 'cyan', 'darkgrey', 'darkgreen'])\n", (380, 493), True, 'import numpy as np\n'), ((502, 518), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (516, 518), False, 'from sklearn.preprocessing import StandardScaler\n'), ((525, 544), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(6)'}), '(n_components=6)\n', (528, 544), False, 'from sklearn.decomposition import PCA\n'), ((545, 565), 'numpy.random.seed', 'np.random.seed', (['(1102)'], {}), '(1102)\n', (559, 565), True, 'import numpy as np\n'), ((822, 834), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1140), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'shadow': '(False)', 'scatterpoints': '(1)'}), "(loc='best', shadow=False, scatterpoints=1)\n", (1097, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1169), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""PCA - DIGITS"""'], {}), "('PCA - DIGITS')\n", (1153, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1263, 1288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dimension 1"""'], {}), "('Dimension 1')\n", (1273, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1314), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dimension 2"""'], {}), "('Dimension 2')\n", (1299, 1314), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1354), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fancybox': '(True)', 'framealpha': '(1)'}), '(fancybox=True, framealpha=1)\n', (1325, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1355, 1365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1363, 1365), True, 'import matplotlib.pyplot as plt\n'), ((957, 1058), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_r[y_r == i, 0]', 'X_r[y_r == i, 1]'], {'color': 'color', 'alpha': '(0.8)', 'lw': '(2)', 'label': 'target_name'}), '(X_r[y_r == i, 0], X_r[y_r == i, 1], color=color, alpha=0.8, lw=\n 2, label=target_name)\n', (968, 1058), True, 'import matplotlib.pyplot as plt\n')]
from collider.data.sensor import Sensor
from collider.data.message_package import MessagePackage
from scipy.stats import spearmanr
import numpy as np


class FakeForwardReturn(Sensor):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.lastvalue = None

    @property
    def output_variables(self):
        return ["exposure", "factorName"]

    def do(self, date, mp: MessagePackage, **kwargs):
        scaler = kwargs.get("scaler", 0.5)
        sigma = kwargs.get("sigma", 0.1)
        shrinkage = kwargs.get("shrinkage", 0.2)

        trueForwardReturn = mp.exposure
        fakeForwardReturn = trueForwardReturn * scaler + np.random.normal(scale=sigma, size=4000)

        if self.lastvalue is None:
            thisvalue = fakeForwardReturn
        else:
            thisvalue = self.lastvalue * (1 - shrinkage) + fakeForwardReturn * shrinkage
        self.lastvalue = thisvalue

        self.logger.debug(spearmanr(trueForwardReturn, thisvalue, nan_policy="omit")[0])
        return thisvalue, np.array(["fakeForwardReturn"])
[ "numpy.random.normal", "numpy.array", "scipy.stats.spearmanr" ]
[((660, 700), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': '(4000)'}), '(scale=sigma, size=4000)\n', (676, 700), True, 'import numpy as np\n'), ((1036, 1067), 'numpy.array', 'np.array', (["['fakeForwardReturn']"], {}), "(['fakeForwardReturn'])\n", (1044, 1067), True, 'import numpy as np\n'), ((946, 1004), 'scipy.stats.spearmanr', 'spearmanr', (['trueForwardReturn', 'thisvalue'], {'nan_policy': '"""omit"""'}), "(trueForwardReturn, thisvalue, nan_policy='omit')\n", (955, 1004), False, 'from scipy.stats import spearmanr\n')]
import random
import gym
import sys
import numpy as np
from collections import deque, namedtuple
import os
import time
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
from torch.optim import Adam

plt.style.use('seaborn')


class DQN(nn.Module):
    def __init__(self, hidden_sz, state_sz, action_sz):
        super().__init__()
        self.hidden_sz = hidden_sz
        self.fc1 = nn.Linear(state_sz, self.hidden_sz)
        self.fc2 = nn.Linear(self.hidden_sz, self.hidden_sz)
        self.fc3 = nn.Linear(self.hidden_sz, action_sz)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


class Agent():
    def __init__(self, env, target_update_frequency=100, eps=1):
        self.env = env
        self.action_sz = self.env.action_space.n
        self.state_sz = self.env.observation_space.shape[0]
        self.eps = eps
        self.target_update_frequency = target_update_frequency
        self.target_update_counter = 0
        self.rewards = []
        self.train_time = None
        self.n_episodes = None
        self.batch_size = None
        self.gamma = None
        self.lr = None
        self.decay = None
        self.replay_buffer = deque(maxlen=10000)
        self.transition = namedtuple('transition', ['s_prime', 'reward', 's', 'action', 'done'])
        self.network = DQN(256, self.state_sz, self.action_sz)
        self.target_network = DQN(256, self.state_sz, self.action_sz)
        self.loss_fn = nn.MSELoss()
        self.optimizer = None

    def print_env_settings(self):
        print('State space: ', self.state_sz)
        print('Action space: ', self.action_sz)

    def init_hyperparameters(self, n_episodes, batch_size, gamma, lr, decay):
        self.n_episodes = n_episodes
        self.batch_size = batch_size
        self.gamma = gamma
        self.lr = lr
        self.decay = decay
        self.optimizer = Adam(self.network.parameters(), lr=self.lr)

    def select_action(self, state, eps):
        t = np.random.random()
        if t < eps:
            a = np.random.choice(range(self.action_sz))
        else:
            q = self.network(torch.FloatTensor(state))
            a = q.argmax().item()
        return a

    def store(self, transition):
        self.replay_buffer.append(transition)

    def update(self):
        if len(self.replay_buffer) < self.batch_size:
            return
        batch = random.sample(self.replay_buffer, self.batch_size)
        s = torch.FloatTensor([t.s for t in batch])
        r = torch.FloatTensor([t.reward for t in batch])
        s_prime = torch.FloatTensor([t.s_prime for t in batch])
        a = torch.LongTensor([t.action for t in batch]).unsqueeze(1)
        done = torch.FloatTensor([t.done for t in batch])
        target = (r + self.gamma * self.target_network(s_prime).max(dim=1)[0] * (1 - done))
        prediction = self.network(s).gather(1, a)
        self.optimizer.zero_grad()
        loss = self.loss_fn(target.unsqueeze(1), prediction)
        loss.backward()
        self.optimizer.step()

    def get_train_time(self):
        return self.train_time

    def run_episode(self, render, k):
        s = self.env.reset()
        done = False
        total_reward = 0.0
        self.eps = self.eps * self.decay
        transition_count = 0
        while not done:
            if render:
                self.env.render()
            self.target_update_counter += 1
            if self.eps > 0.01:
                eps = self.eps
            else:
                eps = 0.01
            action = self.select_action(s, eps)
            s_prime, reward, done, _ = self.env.step(action)
            self.store((self.transition(s_prime, reward, s, action, done)))
            total_reward += reward
            s = s_prime
            done = done
            self.update()
            transition_count += 1
        if k % 100 == 0 and k > 1:
            print('Transition Count: ', transition_count)
            print('Episode Reward: ', total_reward)
        self.rewards.append(total_reward)

    def run_episode2(self, render, k):
        s = self.env.reset()
        done = False
        total_reward = 0.0
        self.eps = self.eps * self.decay
        transition_count = 0
        while not done:
            if render:
                self.env.render()
                # eps = 0.0
            transition_count += 1
            self.target_update_counter += 1
            if self.eps > 0.01:
                eps = self.eps
            else:
                eps = 0.01
            action = self.select_action(s, eps)
            s_prime, reward, done, _ = self.env.step(action)
            next_state = np.reshape(s_prime, [1, self.state_sz])
            s_ = np.reshape(s, [1, self.state_sz])
            # We want to encourage swing moves
            if next_state[0][0] > s_[0][0] and next_state[0][0] > -0.4 and s_[0][0] > -0.4:
                reward += 20
            elif next_state[0][0] < s_[0][0] and next_state[0][0] <= -0.6 and s_[0][0] <= -0.6:
                reward += 20
            # Massive reward to reach flag
            if done and transition_count != 200:
                reward = reward + 10000
            else:
                # put extra penalty if not done
                reward = reward - 10
            self.store(self.transition(s_prime, reward, s, action, done))
            total_reward += reward
            s = s_prime
            done = done
            self.update()
        if k % 100 == 0 and k > 1:
            print('Transition Count: ', transition_count)
            print('Episode Reward: ', total_reward)
        self.rewards.append(total_reward)

    def train(self):
        t1 = time.time()
        for k in range(self.n_episodes):
            if k == self.n_episodes - 1:
                self.train_time = time.time() - t1
            render = False
            # if k % 100 <= 10:
            #     render = True
            if k % 100 == 0 and k > 1:
                print('Episode: ', k)
            self.run_episode(render, k)
            if self.target_update_counter >= self.target_update_frequency:
                self.target_update_counter = 0
                self.target_network.load_state_dict(self.network.state_dict())

    def train2(self):
        t1 = time.time()
        for k in range(self.n_episodes):
            if k == self.n_episodes - 1:
                self.train_time = time.time() - t1
            render = False
            # if k % 100 <= 10:
            #     render = True
            if k % 100 == 0 and k > 1:
                print('Episode: ', k)
            self.run_episode2(render, k)
            if self.target_update_counter >= self.target_update_frequency:
                self.target_update_counter = 0
                self.target_network.load_state_dict(self.network.state_dict())
[ "torch.nn.ReLU", "collections.namedtuple", "collections.deque", "random.sample", "numpy.reshape", "numpy.random.random", "torch.LongTensor", "matplotlib.pyplot.style.use", "torch.nn.MSELoss", "torch.nn.Linear", "time.time", "torch.FloatTensor" ]
[((214, 238), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (227, 238), True, 'import matplotlib.pyplot as plt\n'), ((398, 433), 'torch.nn.Linear', 'nn.Linear', (['state_sz', 'self.hidden_sz'], {}), '(state_sz, self.hidden_sz)\n', (407, 433), True, 'import torch.nn as nn\n'), ((452, 493), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_sz', 'self.hidden_sz'], {}), '(self.hidden_sz, self.hidden_sz)\n', (461, 493), True, 'import torch.nn as nn\n'), ((512, 548), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_sz', 'action_sz'], {}), '(self.hidden_sz, action_sz)\n', (521, 548), True, 'import torch.nn as nn\n'), ((568, 577), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (575, 577), True, 'import torch.nn as nn\n'), ((1303, 1322), 'collections.deque', 'deque', ([], {'maxlen': '(10000)'}), '(maxlen=10000)\n', (1308, 1322), False, 'from collections import deque, namedtuple\n'), ((1349, 1419), 'collections.namedtuple', 'namedtuple', (['"""transition"""', "['s_prime', 'reward', 's', 'action', 'done']"], {}), "('transition', ['s_prime', 'reward', 's', 'action', 'done'])\n", (1359, 1419), False, 'from collections import deque, namedtuple\n'), ((1569, 1581), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1579, 1581), True, 'import torch.nn as nn\n'), ((2085, 2103), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2101, 2103), True, 'import numpy as np\n'), ((2492, 2542), 'random.sample', 'random.sample', (['self.replay_buffer', 'self.batch_size'], {}), '(self.replay_buffer, self.batch_size)\n', (2505, 2542), False, 'import random\n'), ((2555, 2594), 'torch.FloatTensor', 'torch.FloatTensor', (['[t.s for t in batch]'], {}), '([t.s for t in batch])\n', (2572, 2594), False, 'import torch\n'), ((2607, 2651), 'torch.FloatTensor', 'torch.FloatTensor', (['[t.reward for t in batch]'], {}), '([t.reward for t in batch])\n', (2624, 2651), False, 'import torch\n'), ((2670, 2715), 'torch.FloatTensor', 'torch.FloatTensor', (['[t.s_prime for t in batch]'], {}), '([t.s_prime for t in batch])\n', (2687, 2715), False, 'import torch\n'), ((2800, 2842), 'torch.FloatTensor', 'torch.FloatTensor', (['[t.done for t in batch]'], {}), '([t.done for t in batch])\n', (2817, 2842), False, 'import torch\n'), ((5752, 5763), 'time.time', 'time.time', ([], {}), '()\n', (5761, 5763), False, 'import time\n'), ((6344, 6355), 'time.time', 'time.time', ([], {}), '()\n', (6353, 6355), False, 'import time\n'), ((4737, 4776), 'numpy.reshape', 'np.reshape', (['s_prime', '[1, self.state_sz]'], {}), '(s_prime, [1, self.state_sz])\n', (4747, 4776), True, 'import numpy as np\n'), ((4794, 4827), 'numpy.reshape', 'np.reshape', (['s', '[1, self.state_sz]'], {}), '(s, [1, self.state_sz])\n', (4804, 4827), True, 'import numpy as np\n'), ((2223, 2247), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (2240, 2247), False, 'import torch\n'), ((2728, 2771), 'torch.LongTensor', 'torch.LongTensor', (['[t.action for t in batch]'], {}), '([t.action for t in batch])\n', (2744, 2771), False, 'import torch\n'), ((5880, 5891), 'time.time', 'time.time', ([], {}), '()\n', (5889, 5891), False, 'import time\n'), ((6472, 6483), 'time.time', 'time.time', ([], {}), '()\n', (6481, 6483), False, 'import time\n')]
""" Some notes: HDI: Highest Density Interval. ROPE: Region of Practical Equivalence. """ import numpy as np from matplotlib import pyplot as plt def ch01_01(): """ """ thetas = np.linspace(0, 1, 1001) print(thetas) likelihood = lambda r: thetas if r else (1 - thetas) def posterior(r, prior): lp = likelihood(r) * prior return lp / lp.sum() p = np.array([1 / len(thetas) for _ in thetas]) print(p) # Bayesian update by click-event(r=1) p = posterior(1, p) print(p) plt.plot(thetas, p) plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.show() # clicks = 2 noclicks = 38 p = np.array([1 / len(thetas) for theta in thetas]) for _ in range(clicks): p = posterior(1, p) for _ in range(noclicks): p = posterior(0, p) print(p) plt.plot(thetas, p) plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.show() def ch01_02(): """ Binomial distribution """ thetas = np.linspace(0, 1, 1001) print(thetas) likelihood = lambda a, N: thetas ** a * (1 - thetas) ** (N - a) def posterior(a, N, prior): lp = likelihood(a, N) * prior return lp / lp.sum() prior = 1 / len(thetas) plt.subplot(2, 1, 1) plt.plot(thetas, posterior(2, 40, prior), label='Alice - A') plt.plot(thetas, posterior(4, 50, prior), label='Alice - B') plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.xlim(0, 0.2) plt.legend() plt.subplot(2, 1, 2) plt.plot(thetas, posterior(64, 1280, prior), label='Bob - A') plt.plot(thetas, posterior(128, 1600, prior), label='Bob - B') plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.xlim(0, 0.2) plt.legend() plt.tight_layout() plt.show() def ch01_03(): """ theta ~ Beta. alpha ~ Binomial. """ thetas = np.linspace(0, 1, 1001) print(thetas) def betaf(alpha, beta): numerator = thetas ** (alpha - 1) * (1 - thetas) ** (beta - 1) return numerator / numerator.sum() def posterior(a, N): return betaf(a + 1, N - a + 1) plt.subplot(2, 1, 1) plt.plot(thetas, posterior(2, 40), label='Alice - A') plt.plot(thetas, posterior(4, 50), label='Alice - B') plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.xlim(0, 0.2) plt.legend() plt.subplot(2, 1, 2) plt.plot(thetas, posterior(64, 1280), label='Bob - A') plt.plot(thetas, posterior(128, 1600), label='Bob - B') plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.xlim(0, 0.2) plt.legend() plt.tight_layout() plt.show() def ch01_04(): """ """ def hmv(xs, ps, alpha=0.95): """ Highest Mass Value function. Parameters: xs : Probability variables. ps : Probability Mass. alpha : threshold. 
Return: """ xps = sorted(zip(xs, ps), key=lambda xp: xp[1], reverse=True) xps = np.array(xps) xs = xps[:, 0] ps = xps[:, 1] return np.sort(xs[np.cumsum(ps) <= alpha]) thetas = np.linspace(0, 1, 1001) def posterior(a, N): alpha = a + 1 beta = N - a + 1 numerator = thetas ** (alpha - 1) * (1 - thetas) ** (beta - 1) return numerator / numerator.sum() ps = posterior(2, 40) hm_thetas = hmv(thetas, ps, alpha=0.95) plt.plot(thetas, ps) plt.annotate('', xy=(hm_thetas.min(), 0), xytext=(hm_thetas.max(), 0), arrowprops=dict(color='black', shrinkA=0, shrinkB=0, arrowstyle='<->', linewidth=2)) plt.annotate('%.3f' % hm_thetas.min(), xy=(hm_thetas.min(), 0), ha='right', va='bottom') plt.annotate('%.3f' % hm_thetas.max(), xy=(hm_thetas.max(), 0), ha='left', va='bottom') plt.annotate('95% HDI', xy=(hm_thetas.mean(), 0), ha='center', va='bottom') hm_region = (hm_thetas.min() < thetas) & (thetas < hm_thetas.max()) plt.fill_between(thetas[hm_region], ps[hm_region], 0, alpha=0.3) plt.xlabel(r'$\theta$') plt.ylabel(r'$p(\theta)$') plt.xlim(0, 0.3) plt.tight_layout() plt.show() def plot_hdi(ps, label): """ """ hm_thetas = hmv(thetas, ps, 0.95) plt.plot(thetas, ps) plt.annotate('', xy=(hm_thetas.min(), 0), xytext=(hm_thetas.max(), 0), arrowprops=dict(color='black', shrinkA=0, shrinkB=0, arrowstyle='<->', linewidth=2)) plt.annotate('%.3f' % hm_thetas.min(), xy=(hm_thetas.min(), 0), ha='right', va='bottom') plt.annotate('%.3f' % hm_thetas.max(), xy=(hm_thetas.max(), 0), ha='left', va='bottom') plt.annotate('95% HDI', xy=(hm_thetas.mean(), 0), ha='center', va='bottom') hm_region = (hm_thetas.min() < thetas) & (thetas < hm_thetas.max()) plt.fill_between(thetas[hm_region], ps[hm_region], 0, alpha=0.3) plt.xlim(0, 0.3) plt.ylabel(label) plt.yticks([]) plt.subplot(4, 1, 1) alice_a = posterior(2, 40) plot_hdi(alice_a, '<NAME>') plt.subplot(4, 1, 2) alice_b = posterior(4, 50) plot_hdi(alice_b, '<NAME>') plt.subplot(4, 1, 3) bob_a = posterior(64, 1280) plot_hdi(bob_a, '<NAME>') plt.subplot(4, 1, 4) bob_b = posterior(128, 1600) plot_hdi(bob_b, '<NAME>') plt.xlabel(r'$\theta$') plt.tight_layout() plt.show def ch01_05(): """ """ theta_a = np.random.beta(3, 39, size=100000) theta_b = np.random.beta(5, 47, size=100000) delta = theta_b - theta_a plt.hist(delta, range=(-0.3, 0.3), bins=60) plt.xlabel(r'$\delta$') plt.ylabel(r'Frequency') plt.show() print((delta > 0).mean())
[ "numpy.random.beta", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.fill_between", "numpy.array", "numpy.linspace", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "numpy.cumsum", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((203, 226), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1001)'], {}), '(0, 1, 1001)\n', (214, 226), True, 'import numpy as np\n'), ((548, 567), 'matplotlib.pyplot.plot', 'plt.plot', (['thetas', 'p'], {}), '(thetas, p)\n', (556, 567), True, 'from matplotlib import pyplot as plt\n'), ((572, 595), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (582, 595), True, 'from matplotlib import pyplot as plt\n'), ((600, 626), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (610, 626), True, 'from matplotlib import pyplot as plt\n'), ((631, 641), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (639, 641), True, 'from matplotlib import pyplot as plt\n'), ((869, 888), 'matplotlib.pyplot.plot', 'plt.plot', (['thetas', 'p'], {}), '(thetas, p)\n', (877, 888), True, 'from matplotlib import pyplot as plt\n'), ((893, 916), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (903, 916), True, 'from matplotlib import pyplot as plt\n'), ((921, 947), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (931, 947), True, 'from matplotlib import pyplot as plt\n'), ((952, 962), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (960, 962), True, 'from matplotlib import pyplot as plt\n'), ((1031, 1054), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1001)'], {}), '(0, 1, 1001)\n', (1042, 1054), True, 'import numpy as np\n'), ((1275, 1295), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1286, 1295), True, 'from matplotlib import pyplot as plt\n'), ((1430, 1453), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (1440, 1453), True, 'from matplotlib import pyplot as plt\n'), ((1458, 1484), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (1468, 1484), True, 'from matplotlib import pyplot as plt\n'), ((1489, 1505), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.2)'], {}), '(0, 0.2)\n', (1497, 1505), True, 'from matplotlib import pyplot as plt\n'), ((1510, 1522), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1520, 1522), True, 'from matplotlib import pyplot as plt\n'), ((1527, 1547), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1538, 1547), True, 'from matplotlib import pyplot as plt\n'), ((1685, 1708), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (1695, 1708), True, 'from matplotlib import pyplot as plt\n'), ((1713, 1739), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (1723, 1739), True, 'from matplotlib import pyplot as plt\n'), ((1744, 1760), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.2)'], {}), '(0, 0.2)\n', (1752, 1760), True, 'from matplotlib import pyplot as plt\n'), ((1765, 1777), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1775, 1777), True, 'from matplotlib import pyplot as plt\n'), ((1782, 1800), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1798, 1800), True, 'from matplotlib import pyplot as plt\n'), ((1805, 1815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1813, 1815), True, 'from matplotlib import pyplot as plt\n'), ((1910, 1933), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1001)'], {}), '(0, 1, 1001)\n', (1921, 1933), True, 'import numpy as 
np\n'), ((2165, 2185), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2176, 2185), True, 'from matplotlib import pyplot as plt\n'), ((2306, 2329), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (2316, 2329), True, 'from matplotlib import pyplot as plt\n'), ((2334, 2360), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (2344, 2360), True, 'from matplotlib import pyplot as plt\n'), ((2365, 2381), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.2)'], {}), '(0, 0.2)\n', (2373, 2381), True, 'from matplotlib import pyplot as plt\n'), ((2386, 2398), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2396, 2398), True, 'from matplotlib import pyplot as plt\n'), ((2403, 2423), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2414, 2423), True, 'from matplotlib import pyplot as plt\n'), ((2547, 2570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (2557, 2570), True, 'from matplotlib import pyplot as plt\n'), ((2575, 2601), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (2585, 2601), True, 'from matplotlib import pyplot as plt\n'), ((2606, 2622), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.2)'], {}), '(0, 0.2)\n', (2614, 2622), True, 'from matplotlib import pyplot as plt\n'), ((2627, 2639), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2637, 2639), True, 'from matplotlib import pyplot as plt\n'), ((2644, 2662), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2660, 2662), True, 'from matplotlib import pyplot as plt\n'), ((2667, 2677), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2675, 2677), True, 'from matplotlib import pyplot as plt\n'), ((3150, 3173), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1001)'], {}), '(0, 1, 1001)\n', (3161, 3173), True, 'import numpy as np\n'), ((3436, 3456), 'matplotlib.pyplot.plot', 'plt.plot', (['thetas', 'ps'], {}), '(thetas, ps)\n', (3444, 3456), True, 'from matplotlib import pyplot as plt\n'), ((4076, 4140), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['thetas[hm_region]', 'ps[hm_region]', '(0)'], {'alpha': '(0.3)'}), '(thetas[hm_region], ps[hm_region], 0, alpha=0.3)\n', (4092, 4140), True, 'from matplotlib import pyplot as plt\n'), ((4145, 4168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (4155, 4168), True, 'from matplotlib import pyplot as plt\n'), ((4173, 4199), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p(\\\\theta)$"""'], {}), "('$p(\\\\theta)$')\n", (4183, 4199), True, 'from matplotlib import pyplot as plt\n'), ((4204, 4220), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.3)'], {}), '(0, 0.3)\n', (4212, 4220), True, 'from matplotlib import pyplot as plt\n'), ((4225, 4243), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4241, 4243), True, 'from matplotlib import pyplot as plt\n'), ((4248, 4258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4256, 4258), True, 'from matplotlib import pyplot as plt\n'), ((5181, 5201), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (5192, 5201), True, 'from matplotlib import pyplot as plt\n'), ((5269, 5289), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (5280, 5289), True, 'from matplotlib import 
pyplot as plt\n'), ((5357, 5377), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (5368, 5377), True, 'from matplotlib import pyplot as plt\n'), ((5444, 5464), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (5455, 5464), True, 'from matplotlib import pyplot as plt\n'), ((5532, 5555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (5542, 5555), True, 'from matplotlib import pyplot as plt\n'), ((5560, 5578), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5576, 5578), True, 'from matplotlib import pyplot as plt\n'), ((5639, 5673), 'numpy.random.beta', 'np.random.beta', (['(3)', '(39)'], {'size': '(100000)'}), '(3, 39, size=100000)\n', (5653, 5673), True, 'import numpy as np\n'), ((5688, 5722), 'numpy.random.beta', 'np.random.beta', (['(5)', '(47)'], {'size': '(100000)'}), '(5, 47, size=100000)\n', (5702, 5722), True, 'import numpy as np\n'), ((5757, 5800), 'matplotlib.pyplot.hist', 'plt.hist', (['delta'], {'range': '(-0.3, 0.3)', 'bins': '(60)'}), '(delta, range=(-0.3, 0.3), bins=60)\n', (5765, 5800), True, 'from matplotlib import pyplot as plt\n'), ((5805, 5828), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta$"""'], {}), "('$\\\\delta$')\n", (5815, 5828), True, 'from matplotlib import pyplot as plt\n'), ((5833, 5856), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (5843, 5856), True, 'from matplotlib import pyplot as plt\n'), ((5862, 5872), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5870, 5872), True, 'from matplotlib import pyplot as plt\n'), ((3025, 3038), 'numpy.array', 'np.array', (['xps'], {}), '(xps)\n', (3033, 3038), True, 'import numpy as np\n'), ((4355, 4375), 'matplotlib.pyplot.plot', 'plt.plot', (['thetas', 'ps'], {}), '(thetas, ps)\n', (4363, 4375), True, 'from matplotlib import pyplot as plt\n'), ((5037, 5101), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['thetas[hm_region]', 'ps[hm_region]', '(0)'], {'alpha': '(0.3)'}), '(thetas[hm_region], ps[hm_region], 0, alpha=0.3)\n', (5053, 5101), True, 'from matplotlib import pyplot as plt\n'), ((5110, 5126), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.3)'], {}), '(0, 0.3)\n', (5118, 5126), True, 'from matplotlib import pyplot as plt\n'), ((5135, 5152), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['label'], {}), '(label)\n', (5145, 5152), True, 'from matplotlib import pyplot as plt\n'), ((5161, 5175), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5171, 5175), True, 'from matplotlib import pyplot as plt\n'), ((3111, 3124), 'numpy.cumsum', 'np.cumsum', (['ps'], {}), '(ps)\n', (3120, 3124), True, 'import numpy as np\n')]
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def uniform_dist(a, b, size):
    '''
    Sample from a uniform distribution Unif(a,b)
    '''
    std_unif = torch.rand(size)
    return std_unif*(b-a)+a


def safe_log(tens, epsilon:float=1e-5):
    '''
    Safe log to prevent infinities
    '''
    return torch.log(tens+epsilon)


def sample_dist(probs):
    '''
    Sample from a given probability distribution

    Parameters
    ----------
    probs: numpy.float array, shape = (num_samples, num_values)
           Note: the sum of each row must be = 1
    '''
    num_values = probs.shape[1]
    generated = []
    for prob in probs:
        generated.append(np.random.choice(np.arange(num_values), p=prob))
    return np.array(generated)
[ "numpy.array", "torch.log", "torch.rand", "numpy.arange" ]
[((198, 214), 'torch.rand', 'torch.rand', (['size'], {}), '(size)\n', (208, 214), False, 'import torch\n'), ((346, 371), 'torch.log', 'torch.log', (['(tens + epsilon)'], {}), '(tens + epsilon)\n', (355, 371), False, 'import torch\n'), ((760, 779), 'numpy.array', 'np.array', (['generated'], {}), '(generated)\n', (768, 779), True, 'import numpy as np\n'), ((717, 738), 'numpy.arange', 'np.arange', (['num_values'], {}), '(num_values)\n', (726, 738), True, 'import numpy as np\n')]
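A minimal usage sketch for the sampling helpers above — illustrative only; the probability rows are invented values, and it assumes sample_dist from the snippet is in scope.

import numpy as np

# Two made-up rows, each a distribution over 3 values (each row sums to 1).
probs = np.array([[0.2, 0.5, 0.3],
                  [0.7, 0.1, 0.2]])
draws = sample_dist(probs)  # one sampled index per row, e.g. array([1, 0])
print(draws.shape)          # (2,)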
import numpy as np
from typing import List, Dict, Tuple


def get_metrics(
    y_pred=None,
    y_true=None,
    metrics: List[str] = ["Accuracy"],
    classes: List[str] = ["Ham", "Spam"]
) -> Dict:
    if not isinstance(y_pred, np.ndarray):
        y_pred = y_pred.to_numpy()
    if not isinstance(y_true, np.ndarray):
        y_true = y_true.to_numpy()

    results = {}
    for metric in metrics:
        if metric == "Accuracy":
            results[metric] = accuracy(y_pred, y_true)
        elif metric == "Precision":
            results[metric] = precision(y_pred, y_true)
        elif metric == "Recall":
            results[metric] = recall(y_pred, y_true)
        elif metric == "F1":
            results[metric] = f1_score(y_pred, y_true)
        elif metric == "Confusion Matrix":
            results[metric] = confusion_matrix(y_pred, y_true, classes)
        else:
            raise ValueError("Invalid Metric")
    return results


def accuracy(y_pred: np.ndarray, y_true: np.ndarray) -> float:
    return np.mean(y_pred == y_true)


def precision(y_pred: np.ndarray, y_true: np.ndarray) -> float:
    """
    Computes the precision score

    Precision score = true_pos / (true_pos + false_pos)

    Args:
        y_true: The true labels.
        y_pred: The predicted labels.
    Returns:
        Precision Score.
    """
    true_pos = np.sum(y_pred * y_true)
    return true_pos / (np.sum(y_pred) + 1e-8)


def recall(y_pred: np.ndarray, y_true: np.ndarray) -> float:
    """
    Computes the recall score

    Recall score = true_pos / (true_pos + false_neg)

    Args:
        y_true: The true labels.
        y_pred: The predicted labels.
    Returns:
        Recall Score.
    """
    true_pos = np.sum(y_pred * y_true)
    return true_pos / (np.sum(y_true))


def f1_score(y_pred: np.ndarray, y_true: np.ndarray) -> float:
    """
    Computes the F1 score, the harmonic mean of precision and recall.

    F1 score = 2 * precision * recall / (precision + recall)

    Args:
        y_true: The true labels.
        y_pred: The predicted labels.
    Returns:
        F1 Score.
    """
    p = precision(y_pred, y_true)
    r = recall(y_pred, y_true)
    return 2 * (p * r) / (p + r)


def confusion_matrix(y_pred: np.ndarray, y_true: np.ndarray, classes: List[str]) -> np.ndarray:
    confusion_matrix = np.zeros((len(classes), len(classes)))
    for i in range(len(y_pred)):
        confusion_matrix[y_true[i]][y_pred[i]] += 1
    return confusion_matrix
[ "numpy.mean", "numpy.sum" ]
[((1035, 1060), 'numpy.mean', 'np.mean', (['(y_pred == y_true)'], {}), '(y_pred == y_true)\n', (1042, 1060), True, 'import numpy as np\n'), ((1366, 1389), 'numpy.sum', 'np.sum', (['(y_pred * y_true)'], {}), '(y_pred * y_true)\n', (1372, 1389), True, 'import numpy as np\n'), ((1729, 1752), 'numpy.sum', 'np.sum', (['(y_pred * y_true)'], {}), '(y_pred * y_true)\n', (1735, 1752), True, 'import numpy as np\n'), ((1776, 1790), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (1782, 1790), True, 'import numpy as np\n'), ((1413, 1427), 'numpy.sum', 'np.sum', (['y_pred'], {}), '(y_pred)\n', (1419, 1427), True, 'import numpy as np\n')]
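For reference, a small hedged example of calling get_metrics above; the label vectors are made up and simply assume 1 = Spam, 0 = Ham.

import numpy as np

y_true = np.array([1, 0, 1, 1, 0, 0])   # invented ground-truth labels
y_pred = np.array([1, 0, 0, 1, 0, 1])   # invented predictions
scores = get_metrics(y_pred=y_pred, y_true=y_true,
                     metrics=["Accuracy", "Precision", "Recall", "F1"])
print(scores)  # roughly {'Accuracy': 0.67, 'Precision': 0.67, 'Recall': 0.67, 'F1': 0.67}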
import tkinter as tk import psycopg2 import pickle import time, calendar, requests, datetime try: conn = psycopg2.connect(database="postgres", user="postgres", password="<PASSWORD>", host="10.10.100.120") print("connected") except: print ("I am unable to connect to the database") motions = [] stationMotions = {} lastMotion = {} import cv2 import threading import schedule print(cv2.__version__) def maintenance(): print("waiting...") while True: time.sleep(119) cur =conn.cursor() autoid = str("Select value from autoid WHERE id = 1 limit 1") autoids = cur.execute(autoid) autoids = cur.fetchall() cur.close() auditval = '' for autoid in autoids: last_Date_from = autoid[0].strip().split(' ') auditval = autoid[0] last_time = (int(datetime.datetime.strptime(auditval.strip()+',000', "%Y-%m-%d %H:%M:%S,%f").timestamp())) last_time = last_time + 120 if int(time.time()) > int(last_time): last_Date_to = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(last_time))) last_Date_to = last_Date_to.split(' ') if last_Date_from[0] != last_Date_to[0]: date_send = last_Date_to[0] date_from_time = "00:00:00" date_to_time = last_Date_to[1] else: date_send = last_Date_from[0] date_from_time = last_Date_from[1] date_to_time = last_Date_to[1] try: response = requests.get('https://deepbluapi.gocontec.com/autoreceive/direct-shipments?_format=json&date='+date_send+'&s_time='+date_from_time+'&e_time='+date_to_time+'', headers={'Content-Type': 'application/json', 'Authorization': 'Basic QVVUT1JFQ0VJVkU6YXV0b0AxMjM='} ) if response.status_code == 200: data_time = [] if (response.content.decode("utf-8") != ""): result = response.json() s = 0 for value in result: s = 1 data_time = value["Scan Timestamp"] # cur =conn.cursor() # cur.execute("INSERT INTO directshipping (scantime, station, operator, product, eventtype, shipid, errorcode, errormessage, siteid) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",(value["Scan Timestamp"], value["Work Station ID"], value["Operator ID"], value["Product ID"], value["Event Type"], value["Shipment ID"], value["Error Code"], value["Error Message"], value["Site ID"])) # conn.commit() # cur.close() upautoidlastValue = date_send +' '+date_to_time print(upautoidlastValue) # cur =conn.cursor() # qry = str("Update autoid SET value = '"+upautoidlastValue+"' WHERE id = 1") # cur.execute(qry) # conn.commit() # cur.close() except: print("Unable to connect deepblu") def job(): print("I'm working...") # cur =conn.cursor() # autoid = str("select * from test_loop(1)") # autoids = cur.execute(autoid) # conn.commit() # cur.close() schedule.every().day.at("00:05").do(job) def pendingrun(): while True: schedule.run_pending() time.sleep(1) threading.Thread(target=maintenance, daemon=True).start() threading.Thread(target=pendingrun, daemon=True).start() def capture_motion(motion): ts = int(time.time()) if len(motions) > 0: if motion not in stationMotions: stationMotions[motion] = 0 if motion not in lastMotion: lastMotion[motion] = 0 if stationMotions[motion] < (ts-5): # cur =conn.cursor() # #print("INSERT INTO motions (area, timeadded, warehouse, station_type) VALUES (%s, %s, %s, %s)",(str(motion), ts, 1, 1 )) # cur.execute("INSERT INTO motions (area, timeadded, warehouse, station_type) VALUES (%s, %s, %s, %s)",(str(motion), ts, 1, 1 )) # conn.commit() # cur.close() #print() stationMotions[motion] = ts def get_correct_path(relative_path): p = os.path.abspath(".").replace('/dist', "") return os.path.join(p, relative_path) # define a video capture object from vidgear.gears import WriteGear cap = 
cv2.VideoCapture("rtsp://admin:3J7Bm!j@@10.10.153.21:8221/Streaming/Channels/102/picture?subtype=1") import cv2 import numpy as np import os import time import random from os.path import isfile, join img_array = [] fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output222.avi', fourcc, 30.0, (800,480)) pathIn= get_correct_path('static/') pathOut = get_correct_path('video.mp4') fps = 25.0 # def convert_frames_to_video(pathIn,pathOut,fps): # frame_array = [] # files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))] # #for sorting the file names properly # #files.sort(key = lambda x: int(x[5:-4])) # for i in range(len(files)): # filename=pathIn + files[i] # #reading each files # img = cv2.imread(filename) # print(filename) # height, width, layers = img.shape # size = (width,height) # print(size) # #inserting the frames into an image array # frame_array.append(img) # out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'mp4v'), fps, size) # for i in range(len(frame_array)): # # writing to a image array # out.write(frame_array[i]) # out.release() # convert_frames_to_video(pathIn, pathOut, fps) out = cv2.VideoWriter() out.open('output.mp4',fourcc,fps,(720,720),True) while cap.isOpened(): ret,image = cap.read() if image is None: break height, width = image.shape[:2] mask = np.zeros((height, width), dtype=np.uint8) points = np.array([[[305,80],[-100,493],[1123,513],[897,80],[700,80],[613,80]]]) cv2.fillPoly(mask, points, (255)) res = cv2.bitwise_and(image,image,mask = mask) rect = cv2.boundingRect(points) # returns (x,y,w,h) of the rect cropped = res[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]] height2, width2 = res.shape[:2] img_array.append(res) for i in range(len(img_array)): if img_array[i] is None: break out.write(img_array[i]) gmt = time.gmtime() ts = calendar.timegm(gmt) fillenameImage = str(str(ts)+'-'+str(random.randint(100000,999999))) cv2.imwrite(get_correct_path("static/%s.png") % fillenameImage, image) img = cv2.imread(get_correct_path("static/%s.png") % fillenameImage) height, width, layers = (720,720,0) size = (width,height) out.write(img) img_array = [] print('try') cap.release() cv2.destroyAllWindows() #out = cv2.VideoWriter('hwyeni.mp4',cv2.VideoWriter_fourcc(-), 24, size) fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output222.avi', fourcc, 30.0, (800,480)) for i in range(len(img_array)): if img_array[i] is None: break out.write(img_array[i]) out.release()
[ "psycopg2.connect", "time.sleep", "calendar.timegm", "numpy.array", "cv2.destroyAllWindows", "cv2.VideoWriter", "schedule.every", "cv2.VideoWriter_fourcc", "random.randint", "cv2.fillPoly", "requests.get", "time.gmtime", "time.time", "schedule.run_pending", "os.path.join", "cv2.bitwise_and", "numpy.zeros", "cv2.VideoCapture", "os.path.abspath", "threading.Thread", "cv2.boundingRect" ]
[((4831, 4941), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""rtsp://admin:3J7Bm!j@@10.10.153.21:8221/Streaming/Channels/102/picture?subtype=1"""'], {}), "(\n 'rtsp://admin:3J7Bm!j@@10.10.153.21:8221/Streaming/Channels/102/picture?subtype=1'\n )\n", (4847, 4941), False, 'import cv2\n'), ((5055, 5086), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (5077, 5086), False, 'import cv2\n'), ((5093, 5151), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output222.avi"""', 'fourcc', '(30.0)', '(800, 480)'], {}), "('output222.avi', fourcc, 30.0, (800, 480))\n", (5108, 5151), False, 'import cv2\n'), ((6097, 6114), 'cv2.VideoWriter', 'cv2.VideoWriter', ([], {}), '()\n', (6112, 6114), False, 'import cv2\n'), ((7324, 7347), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7345, 7347), False, 'import cv2\n'), ((7431, 7462), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (7453, 7462), False, 'import cv2\n'), ((7469, 7527), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output222.avi"""', 'fourcc', '(30.0)', '(800, 480)'], {}), "('output222.avi', fourcc, 30.0, (800, 480))\n", (7484, 7527), False, 'import cv2\n'), ((107, 211), 'psycopg2.connect', 'psycopg2.connect', ([], {'database': '"""postgres"""', 'user': '"""postgres"""', 'password': '"""<PASSWORD>"""', 'host': '"""10.10.100.120"""'}), "(database='postgres', user='postgres', password=\n '<PASSWORD>', host='10.10.100.120')\n", (123, 211), False, 'import psycopg2\n'), ((4726, 4756), 'os.path.join', 'os.path.join', (['p', 'relative_path'], {}), '(p, relative_path)\n', (4738, 4756), False, 'import os\n'), ((6304, 6345), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (6312, 6345), True, 'import numpy as np\n'), ((6359, 6445), 'numpy.array', 'np.array', (['[[[305, 80], [-100, 493], [1123, 513], [897, 80], [700, 80], [613, 80]]]'], {}), '([[[305, 80], [-100, 493], [1123, 513], [897, 80], [700, 80], [613,\n 80]]])\n', (6367, 6445), True, 'import numpy as np\n'), ((6435, 6466), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'points', '(255)'], {}), '(mask, points, 255)\n', (6447, 6466), False, 'import cv2\n'), ((6479, 6519), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (6494, 6519), False, 'import cv2\n'), ((6532, 6556), 'cv2.boundingRect', 'cv2.boundingRect', (['points'], {}), '(points)\n', (6548, 6556), False, 'import cv2\n'), ((478, 493), 'time.sleep', 'time.sleep', (['(119)'], {}), '(119)\n', (488, 493), False, 'import time\n'), ((3689, 3711), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (3709, 3711), False, 'import schedule\n'), ((3720, 3733), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3730, 3733), False, 'import time\n'), ((3736, 3785), 'threading.Thread', 'threading.Thread', ([], {'target': 'maintenance', 'daemon': '(True)'}), '(target=maintenance, daemon=True)\n', (3752, 3785), False, 'import threading\n'), ((3794, 3842), 'threading.Thread', 'threading.Thread', ([], {'target': 'pendingrun', 'daemon': '(True)'}), '(target=pendingrun, daemon=True)\n', (3810, 3842), False, 'import threading\n'), ((3896, 3907), 'time.time', 'time.time', ([], {}), '()\n', (3905, 3907), False, 'import time\n'), ((6860, 6873), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (6871, 6873), False, 'import time\n'), ((6887, 6907), 'calendar.timegm', 'calendar.timegm', (['gmt'], {}), '(gmt)\n', (6902, 6907), False, 'import time, calendar, 
requests, datetime\n'), ((4673, 4693), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (4688, 4693), False, 'import os\n'), ((1005, 1016), 'time.time', 'time.time', ([], {}), '()\n', (1014, 1016), False, 'import time\n'), ((1578, 1867), 'requests.get', 'requests.get', (["(\n 'https://deepbluapi.gocontec.com/autoreceive/direct-shipments?_format=json&date='\n + date_send + '&s_time=' + date_from_time + '&e_time=' + date_to_time + ''\n )"], {'headers': "{'Content-Type': 'application/json', 'Authorization':\n 'Basic QVVUT1JFQ0VJVkU6YXV0b0AxMjM='}"}), "(\n 'https://deepbluapi.gocontec.com/autoreceive/direct-shipments?_format=json&date='\n + date_send + '&s_time=' + date_from_time + '&e_time=' + date_to_time +\n '', headers={'Content-Type': 'application/json', 'Authorization':\n 'Basic QVVUT1JFQ0VJVkU6YXV0b0AxMjM='})\n", (1590, 1867), False, 'import time, calendar, requests, datetime\n'), ((3605, 3621), 'schedule.every', 'schedule.every', ([], {}), '()\n', (3619, 3621), False, 'import schedule\n'), ((6953, 6983), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (6967, 6983), False, 'import random\n')]
# Importing all required libraries for the code to function import tkinter from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg) from matplotlib import pyplot as plt, animation from mpl_toolkits import mplot3d from stl import mesh import numpy as np import serial from serial.tools import list_ports import time import csv import matplotlib import seaborn as sns matplotlib.use("TkAgg") # Setting the matplotlib style with the seaborn module sns.set_style("whitegrid") with sns.axes_style("whitegrid"): fig = plt.subplots() for param in ['figure.facecolor', 'axes.facecolor', 'savefig.facecolor']: plt.rcParams[param] = '141417' # bluish dark grey for param in ['text.color', 'axes.labelcolor', 'xtick.color', 'ytick.color']: plt.rcParams[param] = '0.9' # very light grey # Creating the matplotlib figure with subplots and axes fig = plt.figure() fig.set_tight_layout(True) ax1 = fig.add_subplot(1,2,2,projection="3d") ax2 = fig.add_subplot(3,2,1) ax3 = fig.add_subplot(3,2,3) ax4 = fig.add_subplot(3,2,5) # Defining the USB serial port for the Arduino COM = "/dev/cu.usbmodem14101" # Importing the stl file for the 3D graph data = mesh.Mesh.from_file('RocketFast.stl') # Fieldnames to be written on the CSV file fieldnames = ["Time", "Yaw", "Pitch", "Roll", "Pressure", "Altitude", "R_Altitude", "B_Temp", "AccelX", "AccelY", "AccelZ", "GyroX", "GyroY", "GyroZ", "A_Temp"] # Creating or opening the data.csv file and writing the fieldnames # If the file existed it will be truncated before being used again. with open('data.csv', 'w', newline='') as csv_file: csv_file.truncate() csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames) csv_writer.writeheader() # Lists all serial ports ports = list_ports.comports() for port in ports: print(port) # Try statement within infinite loop to attempt connection to the Arduino until connection while True: try: serialCom = serial.Serial(port=COM, baudrate=115200, timeout=0.1) break except: print('Could not connect!') print('Retrying...') time.sleep(0.1) # Creating all required empty NumPy arrays time_x = np.empty([1, 1]) ori_x = np.empty([1, 1]) ori_y = np.empty([1, 1]) ori_z = np.empty([1, 1]) accel_x = np.empty([1, 1]) accel_y = np.empty([1, 1]) accel_z = np.empty([1, 1]) gyro_x = np.empty([1, 1]) gyro_y = np.empty([1, 1]) gyro_z = np.empty([1, 1]) alt = np.empty([1, 1]) r_alt = np.empty([1, 1]) a_temp = np.empty([1, 1]) b_temp = np.empty([1, 1]) # Main processing class class Processing(): def animate(self, i): ctr = 0 # Gets data from Arduino try: while serialCom.inWaiting() > 0: # Read the serial line s_bytes = serialCom.readline() # Decode serial data decoded_bytes = s_bytes.decode("utf-8").strip('\r\n') # print(decoded_bytes) # Place serial data in list ori = [float(x) for x in decoded_bytes.split()] # Parse the line # First line may be read in the middle, so the data would be incomplete. 
if ctr == 0: ctr = ctr + 1 else: values = [float(x) for x in decoded_bytes.split()] print(values) # Write to data to CSV with open('data.csv', 'a') as csv_file: csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames) info = { "Time": values[0], "Yaw": values[1], "Pitch": values[2], "Roll": values[3], "Pressure": values[4], "Altitude": values[5], "R_Altitude": values[6], "B_Temp": values[7], "AccelX": values[8], "AccelY": values[9], "AccelZ": values[10], "GyroX": values[11], "GyroY": values[12], "GyroZ": values[13], "A_Temp": values[14], } csv_writer.writerow(info) csv_file.close() except: # Exit program if communication is lost # In real life you would want logic that would reattempt after certain delay. print('Communication lost...') print('Exiting Program...') exit() try: # Gets data for other graphs by appending to numpy arrays from list. np.append(time_x, float(ori[0])/1000) np.append(ori_x, float(ori[1])) np.append(ori_y, float(ori[2])) np.append(ori_z, float(ori[3])) np.append(alt, float(ori[5])) np.append(r_alt, float(ori[6])) np.append(b_temp, float(ori[7])) np.append(accel_x, float(ori[8])) np.append(accel_y, float(ori[9])) np.append(accel_z, float(ori[10])) np.append(gyro_x, float(ori[11])) np.append(gyro_y, float(ori[12])) np.append(gyro_z, float(ori[13])) np.append(a_temp, float(ori[14])) except: return 1 # Checks to see if orientation has changed in any axis # This is as this process would take a few cycles to compute, # if we can skip it when not necessary the program will be faster. if ori_y[ori_y.size-2] != ori[1] or ori_x[ori_x.size-2] != ori[1] or ori_z[ori_z.size-2] != ori[1]: # change the rotation of the 3 sides ax1.clear() data.rotate([1, 0, 0], np.radians(ori_y[ori_y.size-2]-float(ori[2]))) data.rotate([0, 1, 0], np.radians(-ori_x[ori_x.size-2]+float(ori[1]))) data.rotate([0, 0, 1], np.radians(-ori_z[ori_z.size-2]+float(ori[3]))) # Graph the STL file onto the graph. collection = mplot3d.art3d.Poly3DCollection(data.vectors) collection.set_facecolor('#17205B') ax1.add_collection3d(collection) scale = data.points.flatten("A") ax1.auto_scale_xyz(scale, scale, scale) # If the size of the array has become larger than 50, delete the first index if time_x.size > 50: np.resize(time_x, (1, 50)) np.resize(ori_x, (1, 50)) np.resize(ori_y, (1, 50)) np.resize(ori_z, (1, 50)) np.resize(alt, (1, 50)) np.resize(r_alt, (1, 50)) np.resize(accel_x, (1, 50)) np.resize(accel_y, (1, 50)) np.resize(accel_z, (1, 50)) np.resize(gyro_x, (1, 50)) np.resize(gyro_y, (1, 50)) np.resize(gyro_z, (1, 50)) np.resize(a_temp, (1, 50)) np.resize(b_temp, (1, 50)) # Deals with plotting the orientation outputs ax2.clear() ax2.plot(time_x, ori_x, label="X-axis") ax2.plot(time_x, ori_y, label="Y-axis") ax2.plot(time_x, ori_z, label="Z-axis") ax2.set_ylabel("Orientation (deg)") ax2.set_xticklabels([]) ax2.grid(b=True) # Deals with plotting altitude ax3.clear() ax3.plot(time_x, accel_x, label="X") ax3.plot(time_x, accel_y, label="Y") ax3.plot(time_x, accel_z, label="Z") ax3.set_ylabel("Acceleration (m/s^2)") ax3.set_xticklabels([]) ax3.grid(b=True) # Deals with plotting temperature ax4.clear() ax4.plot(time_x, gyro_x, label="X") ax4.plot(time_x, gyro_y, label="Y") ax4.plot(time_x, gyro_z, label="Z") ax4.set_xlabel("Time") ax4.set_ylabel("Angular Rates (deg/s)") ax4.grid(b=True) # Sets the legend to be above the first graph ax2.legend(bbox_to_anchor=[0.5, 1.2], loc='upper center', ncol=3, mode="tight", borderaxespad=0) return 1 # Creates the tkinter window and calls the main procesing class. 
class Window(): def __init__(self): root = tkinter.Tk() root.geometry("1500x735") root.wm_title("Graphical User Interface") canvas = FigureCanvasTkAgg(fig, master=root) canvas.draw() canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1) process = Processing() anim = animation.FuncAnimation(fig, process.animate) root.mainloop() # Run the file while preventing accidental invokes if __name__ == "__main__": win = Window()
[ "csv.DictWriter", "mpl_toolkits.mplot3d.art3d.Poly3DCollection", "serial.tools.list_ports.comports", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "matplotlib.use", "matplotlib.animation.FuncAnimation", "time.sleep", "seaborn.set_style", "matplotlib.pyplot.figure", "tkinter.Tk", "numpy.empty", "serial.Serial", "numpy.resize", "seaborn.axes_style", "matplotlib.pyplot.subplots", "stl.mesh.Mesh.from_file" ]
[((375, 398), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (389, 398), False, 'import matplotlib\n'), ((455, 481), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (468, 481), True, 'import seaborn as sns\n'), ((864, 876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (874, 876), True, 'from matplotlib import pyplot as plt, animation\n'), ((1164, 1201), 'stl.mesh.Mesh.from_file', 'mesh.Mesh.from_file', (['"""RocketFast.stl"""'], {}), "('RocketFast.stl')\n", (1183, 1201), False, 'from stl import mesh\n'), ((1761, 1782), 'serial.tools.list_ports.comports', 'list_ports.comports', ([], {}), '()\n', (1780, 1782), False, 'from serial.tools import list_ports\n'), ((2173, 2189), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2181, 2189), True, 'import numpy as np\n'), ((2198, 2214), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2206, 2214), True, 'import numpy as np\n'), ((2223, 2239), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2231, 2239), True, 'import numpy as np\n'), ((2248, 2264), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2256, 2264), True, 'import numpy as np\n'), ((2275, 2291), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2283, 2291), True, 'import numpy as np\n'), ((2302, 2318), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2310, 2318), True, 'import numpy as np\n'), ((2329, 2345), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2337, 2345), True, 'import numpy as np\n'), ((2355, 2371), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2363, 2371), True, 'import numpy as np\n'), ((2381, 2397), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2389, 2397), True, 'import numpy as np\n'), ((2407, 2423), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2415, 2423), True, 'import numpy as np\n'), ((2430, 2446), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2438, 2446), True, 'import numpy as np\n'), ((2455, 2471), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2463, 2471), True, 'import numpy as np\n'), ((2481, 2497), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2489, 2497), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (2515, 2523), True, 'import numpy as np\n'), ((487, 514), 'seaborn.axes_style', 'sns.axes_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (501, 514), True, 'import seaborn as sns\n'), ((526, 540), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (538, 540), True, 'from matplotlib import pyplot as plt, animation\n'), ((1650, 1697), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'fieldnames'}), '(csv_file, fieldnames=fieldnames)\n', (1664, 1697), False, 'import csv\n'), ((1951, 2004), 'serial.Serial', 'serial.Serial', ([], {'port': 'COM', 'baudrate': '(115200)', 'timeout': '(0.1)'}), '(port=COM, baudrate=115200, timeout=0.1)\n', (1964, 2004), False, 'import serial\n'), ((8295, 8307), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (8305, 8307), False, 'import tkinter\n'), ((8410, 8445), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'root'}), '(fig, master=root)\n', (8427, 8445), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((8599, 8644), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'process.animate'], {}), '(fig, 
process.animate)\n', (8622, 8644), False, 'from matplotlib import pyplot as plt, animation\n'), ((2104, 2119), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2114, 2119), False, 'import time\n'), ((6183, 6227), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mplot3d.art3d.Poly3DCollection', (['data.vectors'], {}), '(data.vectors)\n', (6213, 6227), False, 'from mpl_toolkits import mplot3d\n'), ((6546, 6572), 'numpy.resize', 'np.resize', (['time_x', '(1, 50)'], {}), '(time_x, (1, 50))\n', (6555, 6572), True, 'import numpy as np\n'), ((6585, 6610), 'numpy.resize', 'np.resize', (['ori_x', '(1, 50)'], {}), '(ori_x, (1, 50))\n', (6594, 6610), True, 'import numpy as np\n'), ((6623, 6648), 'numpy.resize', 'np.resize', (['ori_y', '(1, 50)'], {}), '(ori_y, (1, 50))\n', (6632, 6648), True, 'import numpy as np\n'), ((6661, 6686), 'numpy.resize', 'np.resize', (['ori_z', '(1, 50)'], {}), '(ori_z, (1, 50))\n', (6670, 6686), True, 'import numpy as np\n'), ((6699, 6722), 'numpy.resize', 'np.resize', (['alt', '(1, 50)'], {}), '(alt, (1, 50))\n', (6708, 6722), True, 'import numpy as np\n'), ((6735, 6760), 'numpy.resize', 'np.resize', (['r_alt', '(1, 50)'], {}), '(r_alt, (1, 50))\n', (6744, 6760), True, 'import numpy as np\n'), ((6773, 6800), 'numpy.resize', 'np.resize', (['accel_x', '(1, 50)'], {}), '(accel_x, (1, 50))\n', (6782, 6800), True, 'import numpy as np\n'), ((6813, 6840), 'numpy.resize', 'np.resize', (['accel_y', '(1, 50)'], {}), '(accel_y, (1, 50))\n', (6822, 6840), True, 'import numpy as np\n'), ((6853, 6880), 'numpy.resize', 'np.resize', (['accel_z', '(1, 50)'], {}), '(accel_z, (1, 50))\n', (6862, 6880), True, 'import numpy as np\n'), ((6893, 6919), 'numpy.resize', 'np.resize', (['gyro_x', '(1, 50)'], {}), '(gyro_x, (1, 50))\n', (6902, 6919), True, 'import numpy as np\n'), ((6932, 6958), 'numpy.resize', 'np.resize', (['gyro_y', '(1, 50)'], {}), '(gyro_y, (1, 50))\n', (6941, 6958), True, 'import numpy as np\n'), ((6971, 6997), 'numpy.resize', 'np.resize', (['gyro_z', '(1, 50)'], {}), '(gyro_z, (1, 50))\n', (6980, 6997), True, 'import numpy as np\n'), ((7010, 7036), 'numpy.resize', 'np.resize', (['a_temp', '(1, 50)'], {}), '(a_temp, (1, 50))\n', (7019, 7036), True, 'import numpy as np\n'), ((7049, 7075), 'numpy.resize', 'np.resize', (['b_temp', '(1, 50)'], {}), '(b_temp, (1, 50))\n', (7058, 7075), True, 'import numpy as np\n'), ((3500, 3547), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'fieldnames'}), '(csv_file, fieldnames=fieldnames)\n', (3514, 3547), False, 'import csv\n')]
import datetime
import utils
import glob
import os
import numpy as np
import pandas as pd

if __name__ == '__main__':
    loaddir = "E:/Data/h5/"
    labels = ['https', 'netflix']
    max_packet_length = 1514

    for label in labels:
        print("Starting label: " + label)
        savedir = loaddir + label + "/"
        now = datetime.datetime.now()
        savename = "payload_%s-%.2d%.2d_%.2d%.2d" % (label, now.day, now.month, now.hour, now.minute)
        filelist = glob.glob(loaddir + label + '*.h5')

        # Try only one of each file
        fullname = filelist[0]
        # for fullname in filelist:
        load_dir, filename = os.path.split(fullname)
        print("Loading: {0}".format(filename))
        df = utils.load_h5(load_dir, filename)
        packets = df['bytes'].values

        payloads = []
        labels = []
        filenames = []
        for packet in packets:
            if len(packet) == max_packet_length:
                # Extract the payload from the packet should have length 1460
                payload = packet[54:]
                p = np.fromstring(payload, dtype=np.uint8)
                payloads.append(p)
                labels.append(label)
                filenames.append(filename)

        d = {'filename': filenames, 'bytes': payloads, 'label': labels}
        dataframe = pd.DataFrame(data=d)
        key = savename.split('-')[0]
        dataframe.to_hdf(savedir + savename + '.h5', key=key, mode='w')
        # utils.saveextractedheaders(loaddir, savedir, savename, num_headers=headersize)
        print("Done with label: " + label)
[ "os.path.split", "datetime.datetime.now", "pandas.DataFrame", "numpy.fromstring", "utils.load_h5", "glob.glob" ]
[((331, 354), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (352, 354), False, 'import datetime\n'), ((476, 511), 'glob.glob', 'glob.glob', (["(loaddir + label + '*.h5')"], {}), "(loaddir + label + '*.h5')\n", (485, 511), False, 'import glob\n'), ((644, 667), 'os.path.split', 'os.path.split', (['fullname'], {}), '(fullname)\n', (657, 667), False, 'import os\n'), ((728, 761), 'utils.load_h5', 'utils.load_h5', (['load_dir', 'filename'], {}), '(load_dir, filename)\n', (741, 761), False, 'import utils\n'), ((1326, 1346), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (1338, 1346), True, 'import pandas as pd\n'), ((1080, 1118), 'numpy.fromstring', 'np.fromstring', (['payload'], {'dtype': 'np.uint8'}), '(payload, dtype=np.uint8)\n', (1093, 1118), True, 'import numpy as np\n')]
import numpy as np
import matplotlib.pyplot as plt

""" E is in MeV, D in μm, vB in μm/h, τ in h, and k in (MeV)^-1 """

def E(D, τ=5, vB=2.66, k=.8, n=1.2, a=1, z=1):
    return z**2*a*((2*τ*vB/D - 1)/k)**(1/n)

def D(E, τ=5, vB=2.66, k=.8, n=1.2, a=1, z=1):
    return np.where(E > 0, 2*τ*vB/(1 + k*(E/(z**2*a))**n), np.nan)

if __name__ == '__main__':
    plt.rcParams.update({'font.family': 'serif', 'font.size': 14})
    x = np.linspace(1, 16)
    # for k, n in [(.849, .806), (.626, .867), (.651, .830), (.651, .779), (.868, 1.322)]:
    # 	plt.plot(x, D(x, k=k, n=n), '-')
    plt.plot(x, D(x, a=1, z=1), '-k', linewidth=3)
    # print(x.min(), E(3), E(1.7), x.max())
    # plt.fill_between([E(1.7), x.max()], [D(x.max()), D(x.max())], [1.7, 1.7], color='b', alpha=.2)
    # plt.fill_between([E(3), x.min()], [3, 3], [D(x.min()), D(x.min())], color='r', alpha=.2)
    # plt.title("Relationship between incident energy and track diameter")
    plt.xlabel("Energy (MeV)")
    plt.ylabel("Diameter (μm)")
    plt.tight_layout()
    plt.show()
[ "matplotlib.pyplot.ylabel", "numpy.where", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.rcParams.update", "numpy.linspace", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show" ]
[((263, 334), 'numpy.where', 'np.where', (['(E > 0)', '(2 * τ * vB / (1 + k * (E / (z ** 2 * a)) ** n))', 'np.nan'], {}), '(E > 0, 2 * τ * vB / (1 + k * (E / (z ** 2 * a)) ** n), np.nan)\n', (271, 334), True, 'import numpy as np\n'), ((353, 415), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.family': 'serif', 'font.size': 14}"], {}), "({'font.family': 'serif', 'font.size': 14})\n", (372, 415), True, 'import matplotlib.pyplot as plt\n'), ((421, 439), 'numpy.linspace', 'np.linspace', (['(1)', '(16)'], {}), '(1, 16)\n', (432, 439), True, 'import numpy as np\n'), ((917, 943), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy (MeV)"""'], {}), "('Energy (MeV)')\n", (927, 943), True, 'import matplotlib.pyplot as plt\n'), ((945, 972), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Diameter (μm)"""'], {}), "('Diameter (μm)')\n", (955, 972), True, 'import matplotlib.pyplot as plt\n'), ((974, 992), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (990, 992), True, 'import matplotlib.pyplot as plt\n'), ((994, 1004), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n')]
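As a quick sanity check on the two formulas above (a sketch using only the default parameters visible in the source; the test energy is arbitrary), D and E should invert each other:

E_test = 5.0            # MeV, arbitrary test value
d = D(E_test)           # track diameter in μm for that energy (~4.1 μm)
print(d, E(d))          # E(D(E_test)) recovers ~5.0 MeV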
from math import floor import scipy.io as sio from bokeh.plotting import figure, show, output_file, save, ColumnDataSource from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS from bokeh.models.widgets import Button from bokeh.layouts import widgetbox, row, column, gridplot import matplotlib as plt import matplotlib.cm as cm import numpy as np # Different ways of extracting the matlab files - or their varying structure standards def extract_matlab(filename): file = sio.loadmat(filename) wave_key = "" trig_key = "StimTrig" # Due to naming convention variations, it must search for the right keys schmitt = ["Schmitt", "Sch_wav"] for i in schmitt: if i in file.keys(): wave_key = i if wave_key == "": raise KeyError("Can't find the schmitt wave data") # Extract data using keys file_comments = file[wave_key][0][0][1][0] wave_timestamp = file[wave_key][0][0][4] # Neuron pop - milliseconds since trigger stim_timestamp = file[trig_key][0][0][4] # Stimulus time into experiment - seconds stim_amplitude = file[trig_key][0][0][5] # Amplitude of particular stimulus time raw_values = [] assorted_values = [] final_values = [] # Pair the amplitudes with stimulus time # Note: An Amplitude of 62 occurs between sets, null value for i in range(len(stim_timestamp)): raw_values += [[float("%.6f" % stim_timestamp[i][0]), stim_amplitude[i][0]]] # Calculates time difference between stimulus and pops for each group # then sorts them into sorted_values, before moving onto next group index = -1 pops = [] for j in wave_timestamp: if index < len(raw_values) - 1: if j > raw_values[index + 1][0]: # Add set to sorted_values if index > -1: assorted_values.append([raw_values[index][0], raw_values[index][1], pops]) # Move to next set of values index += 1 pops = [] if index > -1: # Compute time difference in ms, add to pops list difference = float("%.3f" % ((j - raw_values[index][0]) * 1000)) if difference <= 50: pops += [difference] # Add final set to sorted_values assorted_values.append([raw_values[index][0], raw_values[index][1], pops]) # Collate and order assorted_values into final_values # Each batch is separated by a None value in the final list batch = [[] for i in range(10)] for i in range(len(assorted_values)): if assorted_values[i][1] == 62: # 62 separator # Append sorted batch, followed by a None to separate batches final_values += batch + [None] else: batch[assorted_values[i][1] - 1] = assorted_values[i] return final_values # Sorts the values in separate sections to list of plot-able coordinates def vals_to_coords(vals): values = [] coords = [] n = [] for i in vals: if not i: # end row values += [n] n = [] else: n += [i] for i in range(len(values)): for j in values[i]: for k in j[2]: coords += [(k, j[1]+(i/len(values)))] return coords # Graphing and plotting functions def generate_graph(extracted_file=None, raw_file="", scatter=False, heatmap=False, hm_width=250, hm_height=125, hm_radius=10, dot_size=0.06, widgets=False): # Initialise basic plot data plot_title = "Plot: " scatter_plot = None heatmap_plot = None toggle_scatter = None toggle_heatmap = None if (extracted_file == None and raw_file != ""): extracted_file = extract_matlab(raw_file) coordinates = vals_to_coords(extracted_file) print("data size: " + str(len(coordinates))) # Process individual data points n = [] x = [] y = [] for i in coordinates: n.append(floor(i[1])) x.append(i[0]) y.append(i[1] - 1) # Configure hovertext for individual data points data_source = ColumnDataSource(data=dict( x=x, y=y, 
time=x, amp=n )) Hover = HoverTool( tooltips=[ ("time", "@time ms"), ("amplitude", "@amp") ], names=["dots"] ) # Determine plot title if (scatter and heatmap): plot_title = "Composite Plot: " elif (scatter): plot_title = "Scatter Plot: " elif (heatmap): plot_title = "Heatmap Plot: " # Initialise plot figure tools = [Hover, CrosshairTool(), PanTool(), WheelZoomTool(), ResetTool(), SaveTool()] p = figure(tools=tools, title=plot_title + raw_file.split("/")[-1], plot_width=50, plot_height=10) p.sizing_mode = "stretch_both" p.border_fill_color = "whitesmoke" p.min_border_left = 40 p.min_border_right = 40 p.min_border_bottom = 50 p.min_border_top = 20 p.xaxis.axis_label = "Time (ms)" p.yaxis.axis_label = "Amplitude" p.width = 160 p.height = 70 # Add graphs to plot -- note: the order is important for layering if heatmap: heatmap_plot = add_heatmap(p, coordinates, w=hm_width, h=hm_height, radius=hm_radius) if scatter: scatter_plot = p.scatter('x', 'y', radius=dot_size, fill_alpha=0.8, line_color=None, color="black", source=data_source, name='dots') # Add amplitude lines to plot for i in range(11): p.line((0, 50), (i, i), color="black", alpha=0.5) # Widgets to toggle visibility of layers if widgets: if scatter: toggle_scatter = Button( label="Toggle Scatter Plot") toggle_scatter.width = 100 toggle_scatter.js_on_click(CustomJS(args=dict(scatter_plot=scatter_plot), code="scatter_plot.visible=!scatter_plot.visible")) if heatmap: toggle_heatmap = Button( label="Toggle Heatmap") toggle_heatmap.width = 100 toggle_heatmap.js_on_click(CustomJS(args=dict(heatmap_plot=heatmap_plot), code="heatmap_plot.visible=!heatmap_plot.visible")) # Return plot w/ widgets return p, toggle_scatter, toggle_heatmap def add_heatmap(p, coordinates, w=500, h=250, radius=10): # TODO: OPTIMISE THE CIRCLE CODE (there has to be a quicker way) raw = np.zeros((h, w)) # Plot circles for pos in coordinates: x_pos = floor((pos[1] - 1) / 10 * h) y_pos = floor(pos[0] / 50 * w) for i in range(-radius, radius + 1): for j in range(-radius, radius + 1): x_pos_2 = x_pos + i y_pos_2 = y_pos + j if x_pos_2 >= 0 and x_pos_2 < h: if y_pos_2 >= 0 and y_pos_2 < w: if i * i + j * j < radius * radius: raw[x_pos_2, y_pos_2] += 1 # Generate colour map colormap = cm.get_cmap("RdPu") bokeh_palette = [plt.colors.rgb2hex(m) for m in colormap(np.arange(colormap.N))] # Render image heatmap = p.image(image=[raw], x=0, y=0, dw=50, dh=10, palette=bokeh_palette) return heatmap # Plotting for the website def graph_single(file_name, widgets=True, width=500, height=250, radius=10, auto_open=False, dir=""): plot = generate_graph(raw_file=file_name, scatter=True, heatmap=True, dot_size=0.07, hm_width=width, hm_height=height, hm_radius=radius, widgets=widgets) output_layout = plot[0] file_dir = file_name.split("/")[0] + "/" file_name = file_name.split("/")[-1] if dir != "": file_dir = dir name = file_dir + file_name.replace('.mat', '') + '.html' title = "Composite Plot: " + file_name output_file(name, title) if widgets: doc_layout = column( [plot[0], row([widgetbox([plot[1], plot[2]], width=10)], height=50, sizing_mode="fixed")], sizing_mode="scale_width") output_layout = doc_layout if auto_open: show(output_layout) else: save(output_layout) def graph_multiple(file_names, width=250, height=100, radius=5, auto_open=False, dir="", ncols=2): file_dir = file_names[0].split("/")[0] + "/" file_name_parts = [] plots = [] if dir != "": file_dir = dir # loop through files, adding to plot list for file in file_names: for part in file.split("/")[-1].replace('.mat','').split('_'): 
if part not in file_name_parts: file_name_parts.append(part) p = generate_graph(raw_file=file, scatter=True, heatmap=True, dot_size=0.11, hm_width=width, hm_height=height, hm_radius=radius, widgets=False)[0] p.min_border_bottom = 20 p.min_border_left = 30 p.min_border_right = 30 p.height = 90 plots.append(p) for i in range(ncols - (len(file_names)%ncols)): plots.append(None) # make into nice grid plots_layout = [] i = 0 while i < len(plots): plots_layout += [[[plots[i:i+ncols]]]] i += ncols # generate final layout file_name = '_'.join(file_name_parts) output_file(file_dir+file_name+".html", file_name) output_layout = gridplot(plots, ncols=ncols, sizing_mode="scale_width", merge_tools=True) if auto_open: show(output_layout) else: save(output_layout) return file_name+".html" # --- TEMPORARY TESTING CODE; REMOVE IN FINAL BUILD --- # if __name__ == '__main__': print("Make sure you're running app.py if you want the web interface") print("This code is just for testing functions\n") #graph_single("temp/659607_rec03_all.mat", widgets=True, width=500, height=200, radius=9) #graph_multiple(["temp/659602_rec03_all.mat", "temp/659602_rec03_f01.mat", "temp/659602_rec03_f02.mat", "temp/659602_rec03_f03.mat"], auto_open=True, ncols=2)
[ "bokeh.plotting.show", "bokeh.layouts.widgetbox", "math.floor", "numpy.arange", "bokeh.models.SaveTool", "scipy.io.loadmat", "bokeh.plotting.save", "bokeh.models.widgets.Button", "bokeh.layouts.gridplot", "numpy.zeros", "matplotlib.colors.rgb2hex", "bokeh.models.WheelZoomTool", "bokeh.models.ResetTool", "bokeh.models.CrosshairTool", "bokeh.models.PanTool", "matplotlib.cm.get_cmap", "bokeh.plotting.output_file", "bokeh.models.HoverTool" ]
[((533, 554), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (544, 554), True, 'import scipy.io as sio\n'), ((4264, 4350), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('time', '@time ms'), ('amplitude', '@amp')]", 'names': "['dots']"}), "(tooltips=[('time', '@time ms'), ('amplitude', '@amp')], names=[\n 'dots'])\n", (4273, 4350), False, 'from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS\n'), ((6545, 6561), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (6553, 6561), True, 'import numpy as np\n'), ((7119, 7138), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""RdPu"""'], {}), "('RdPu')\n", (7130, 7138), True, 'import matplotlib.cm as cm\n'), ((7927, 7951), 'bokeh.plotting.output_file', 'output_file', (['name', 'title'], {}), '(name, title)\n', (7938, 7951), False, 'from bokeh.plotting import figure, show, output_file, save, ColumnDataSource\n'), ((9375, 9429), 'bokeh.plotting.output_file', 'output_file', (["(file_dir + file_name + '.html')", 'file_name'], {}), "(file_dir + file_name + '.html', file_name)\n", (9386, 9429), False, 'from bokeh.plotting import figure, show, output_file, save, ColumnDataSource\n'), ((9446, 9519), 'bokeh.layouts.gridplot', 'gridplot', (['plots'], {'ncols': 'ncols', 'sizing_mode': '"""scale_width"""', 'merge_tools': '(True)'}), "(plots, ncols=ncols, sizing_mode='scale_width', merge_tools=True)\n", (9454, 9519), False, 'from bokeh.layouts import widgetbox, row, column, gridplot\n'), ((4666, 4681), 'bokeh.models.CrosshairTool', 'CrosshairTool', ([], {}), '()\n', (4679, 4681), False, 'from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS\n'), ((4683, 4692), 'bokeh.models.PanTool', 'PanTool', ([], {}), '()\n', (4690, 4692), False, 'from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS\n'), ((4694, 4709), 'bokeh.models.WheelZoomTool', 'WheelZoomTool', ([], {}), '()\n', (4707, 4709), False, 'from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS\n'), ((4711, 4722), 'bokeh.models.ResetTool', 'ResetTool', ([], {}), '()\n', (4720, 4722), False, 'from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS\n'), ((4724, 4734), 'bokeh.models.SaveTool', 'SaveTool', ([], {}), '()\n', (4732, 4734), False, 'from bokeh.models import HoverTool, CrosshairTool, PanTool, WheelZoomTool, ResetTool, SaveTool, CustomJS\n'), ((6626, 6654), 'math.floor', 'floor', (['((pos[1] - 1) / 10 * h)'], {}), '((pos[1] - 1) / 10 * h)\n', (6631, 6654), False, 'from math import floor\n'), ((6671, 6693), 'math.floor', 'floor', (['(pos[0] / 50 * w)'], {}), '(pos[0] / 50 * w)\n', (6676, 6693), False, 'from math import floor\n'), ((7160, 7181), 'matplotlib.colors.rgb2hex', 'plt.colors.rgb2hex', (['m'], {}), '(m)\n', (7178, 7181), True, 'import matplotlib as plt\n'), ((8215, 8234), 'bokeh.plotting.show', 'show', (['output_layout'], {}), '(output_layout)\n', (8219, 8234), False, 'from bokeh.plotting import figure, show, output_file, save, ColumnDataSource\n'), ((8253, 8272), 'bokeh.plotting.save', 'save', (['output_layout'], {}), '(output_layout)\n', (8257, 8272), False, 'from bokeh.plotting import figure, show, output_file, save, ColumnDataSource\n'), ((9547, 9566), 'bokeh.plotting.show', 'show', (['output_layout'], {}), '(output_layout)\n', (9551, 9566), False, 'from bokeh.plotting import figure, show, 
output_file, save, ColumnDataSource\n'), ((9585, 9604), 'bokeh.plotting.save', 'save', (['output_layout'], {}), '(output_layout)\n', (9589, 9604), False, 'from bokeh.plotting import figure, show, output_file, save, ColumnDataSource\n'), ((4026, 4037), 'math.floor', 'floor', (['i[1]'], {}), '(i[1])\n', (4031, 4037), False, 'from math import floor\n'), ((5733, 5768), 'bokeh.models.widgets.Button', 'Button', ([], {'label': '"""Toggle Scatter Plot"""'}), "(label='Toggle Scatter Plot')\n", (5739, 5768), False, 'from bokeh.models.widgets import Button\n'), ((6060, 6090), 'bokeh.models.widgets.Button', 'Button', ([], {'label': '"""Toggle Heatmap"""'}), "(label='Toggle Heatmap')\n", (6066, 6090), False, 'from bokeh.models.widgets import Button\n'), ((7200, 7221), 'numpy.arange', 'np.arange', (['colormap.N'], {}), '(colormap.N)\n', (7209, 7221), True, 'import numpy as np\n'), ((8038, 8077), 'bokeh.layouts.widgetbox', 'widgetbox', (['[plot[1], plot[2]]'], {'width': '(10)'}), '([plot[1], plot[2]], width=10)\n', (8047, 8077), False, 'from bokeh.layouts import widgetbox, row, column, gridplot\n')]
import gpu import numpy from bgl import * from . rectangle import Rectangle from gpu_extras.batch import batch_for_shader shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') class InterpolationPreview: def __init__(self, interpolation, position, width, resolution): self.interpolation = interpolation self.position = position self.width = width self.normalHeight = width self.resolution = resolution self.padding = 5 self.boundary = Rectangle() self.samples = interpolation.sample(amount = resolution) def calculateBoundaries(self): minSample = self.samples.getMinValue() maxSample = self.samples.getMaxValue() bottomOvershoot = abs(min(0, minSample) * self.normalHeight) topOvershoot = abs(max(0, maxSample - 1) * self.normalHeight) x1 = self.position.x x2 = x1 + self.width y1 = self.position.y y2 = y1 - self.normalHeight - bottomOvershoot - topOvershoot self.boundary.resetPosition(x1, y1, x2, y2) self.interpolationLeft = x1 self.interpolationRight = x2 self.interpolationTop = y1 - topOvershoot - self.padding self.interpolationBottom = y2 + bottomOvershoot + self.padding def getHeight(self): return self.boundary.height def draw(self, backgroundColor = (0.9, 0.9, 0.9, 0.6), borderColor = (0.9, 0.76, 0.4, 1.0), borderThickness = -1): self.boundary.draw( color = backgroundColor, borderColor = borderColor, borderThickness = borderThickness ) self.drawInterpolationCurve() self.drawRangeLines() def drawInterpolationCurve(self): left, right = self.interpolationLeft, self.interpolationRight top, bottom = self.interpolationTop, self.interpolationBottom x = numpy.linspace(left, right, self.resolution) y = top + (self.samples.asNumpyArray() - 1) * (top - bottom) points = numpy.stack((x, y), axis = -1).astype(numpy.float32) batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": points}) shader.bind() shader.uniform_float("color", (0.2, 0.2, 0.2, 0.8)) glLineWidth(2) glEnable(GL_BLEND) glEnable(GL_LINE_SMOOTH) batch.draw(shader) glDisable(GL_LINE_SMOOTH) glDisable(GL_BLEND) glLineWidth(1) def drawRangeLines(self): points = ( (self.boundary.left, self.interpolationTop), (self.boundary.right, self.interpolationTop), (self.boundary.left, self.interpolationBottom), (self.boundary.right, self.interpolationBottom)) batch = batch_for_shader(shader, 'LINES', {"pos": points}) shader.bind() shader.uniform_float("color", (0.2, 0.2, 0.2, 0.5)) glLineWidth(1) glEnable(GL_BLEND) batch.draw(shader) glDisable(GL_BLEND)
[ "numpy.stack", "gpu.shader.from_builtin", "gpu_extras.batch.batch_for_shader", "numpy.linspace" ]
[((132, 175), 'gpu.shader.from_builtin', 'gpu.shader.from_builtin', (['"""2D_UNIFORM_COLOR"""'], {}), "('2D_UNIFORM_COLOR')\n", (155, 175), False, 'import gpu\n'), ((1903, 1947), 'numpy.linspace', 'numpy.linspace', (['left', 'right', 'self.resolution'], {}), '(left, right, self.resolution)\n', (1917, 1947), False, 'import numpy\n'), ((2103, 2158), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['shader', '"""LINE_STRIP"""', "{'pos': points}"], {}), "(shader, 'LINE_STRIP', {'pos': points})\n", (2119, 2158), False, 'from gpu_extras.batch import batch_for_shader\n'), ((2740, 2790), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['shader', '"""LINES"""', "{'pos': points}"], {}), "(shader, 'LINES', {'pos': points})\n", (2756, 2790), False, 'from gpu_extras.batch import batch_for_shader\n'), ((2034, 2062), 'numpy.stack', 'numpy.stack', (['(x, y)'], {'axis': '(-1)'}), '((x, y), axis=-1)\n', (2045, 2062), False, 'import numpy\n')]
import cv2 import numpy as np class drawingCanvas(): def __init__(self): self.penrange = np.load('penrange.npy') self.cap = cv2.VideoCapture(0) self.canvas = None self.x1,self.y1=0,0 self.val=1 self.draw() def draw(self): while True: _, self.frame = self.cap.read() self.frame = cv2.flip( self.frame,+1) if self.canvas is None: self.canvas = np.zeros_like(self.frame) mask=self.CreateMask() contours=self.ContourDetect(mask) self.drawLine(contours) self.display() k = cv2.waitKey(1) & 0xFF self.takeAction(k) if k == 27: break def CreateMask(self): hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) lower_range = self.penrange[0] upper_range = self.penrange[1] mask = cv2.inRange(hsv, lower_range, upper_range) return mask def ContourDetect(self,mask): contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) return contours def drawLine(self,contours): if contours and cv2.contourArea(max(contours, key = cv2.contourArea)) > 100: c = max(contours, key = cv2.contourArea) x2,y2,w,h = cv2.boundingRect(c) if self.x1 == 0 and self.y1 == 0: self.x1,self.y1= x2,y2 else: self.canvas = cv2.line(self.canvas, (self.x1,self.y1),(x2,y2), [255*self.val,0,0], 10) self.x1,self.y1= x2,y2 else: self.x1,self.y1 =0,0 def display(self): self.frame = cv2.add(self.frame,self.canvas) cv2.imshow('frame',self.frame) cv2.imshow('canvas',self.canvas) def takeAction(self,k): # When c is pressed clear the entire canvas if k == ord('c'): self.canvas = None #press e to change between eraser mode and writing mode if k==ord('e'): self.val= int(not self.val) if __name__ == '__main__': drawingCanvas() cv2.destroyAllWindows()
[ "cv2.flip", "cv2.inRange", "cv2.boundingRect", "cv2.line", "cv2.imshow", "cv2.waitKey", "cv2.destroyAllWindows", "cv2.VideoCapture", "cv2.cvtColor", "cv2.findContours", "numpy.load", "numpy.zeros_like", "cv2.add" ]
[((2327, 2350), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2348, 2350), False, 'import cv2\n'), ((107, 130), 'numpy.load', 'np.load', (['"""penrange.npy"""'], {}), "('penrange.npy')\n", (114, 130), True, 'import numpy as np\n'), ((151, 170), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (167, 170), False, 'import cv2\n'), ((836, 879), 'cv2.cvtColor', 'cv2.cvtColor', (['self.frame', 'cv2.COLOR_BGR2HSV'], {}), '(self.frame, cv2.COLOR_BGR2HSV)\n', (848, 879), False, 'import cv2\n'), ((977, 1019), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_range', 'upper_range'], {}), '(hsv, lower_range, upper_range)\n', (988, 1019), False, 'import cv2\n'), ((1113, 1179), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1129, 1179), False, 'import cv2\n'), ((1854, 1886), 'cv2.add', 'cv2.add', (['self.frame', 'self.canvas'], {}), '(self.frame, self.canvas)\n', (1861, 1886), False, 'import cv2\n'), ((1899, 1930), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'self.frame'], {}), "('frame', self.frame)\n", (1909, 1930), False, 'import cv2\n'), ((1939, 1972), 'cv2.imshow', 'cv2.imshow', (['"""canvas"""', 'self.canvas'], {}), "('canvas', self.canvas)\n", (1949, 1972), False, 'import cv2\n'), ((384, 408), 'cv2.flip', 'cv2.flip', (['self.frame', '(+1)'], {}), '(self.frame, +1)\n', (392, 408), False, 'import cv2\n'), ((1440, 1459), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1456, 1459), False, 'import cv2\n'), ((479, 504), 'numpy.zeros_like', 'np.zeros_like', (['self.frame'], {}), '(self.frame)\n', (492, 504), True, 'import numpy as np\n'), ((684, 698), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (695, 698), False, 'import cv2\n'), ((1621, 1700), 'cv2.line', 'cv2.line', (['self.canvas', '(self.x1, self.y1)', '(x2, y2)', '[255 * self.val, 0, 0]', '(10)'], {}), '(self.canvas, (self.x1, self.y1), (x2, y2), [255 * self.val, 0, 0], 10)\n', (1629, 1700), False, 'import cv2\n')]
from collections import OrderedDict import numpy as np import math import torch import torch.optim as optim from torch import nn as nn import rlkit.torch.pytorch_util as ptu from rlkit.core.eval_util import create_stats_ordered_dict from rlkit.torch.torch_rl_algorithm import TorchTrainer def kl_divergence(mu, std): kld = -0.5 * (1. + 2. * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2)) return kld def weighted_mse_loss(input, target, weight): return torch.sum(weight * (input - target) ** 2) class IWQTrainer(TorchTrainer): def __init__( self, env, policy, qf, target_qf, num_samples, weighted_mse, beta, discount=0.99, reward_scale=1.0, policy_lr=1e-3, qf_lr=1e-3, optimizer_class=optim.Adam, soft_target_tau=1e-2, target_update_period=1, plotter=None, render_eval_paths=False, use_automatic_entropy_tuning=True, target_entropy=None, ): super().__init__() self.env = env self.policy = policy self.qf = qf self.target_qf = target_qf self.num_samples = num_samples self.weighted_mse = weighted_mse self.beta = beta self.soft_target_tau = soft_target_tau self.target_update_period = target_update_period self.use_automatic_entropy_tuning = use_automatic_entropy_tuning if self.use_automatic_entropy_tuning: if target_entropy: self.target_entropy = target_entropy else: self.target_entropy = -np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas self.log_alpha = ptu.zeros(1, requires_grad=True) self.alpha_optimizer = optimizer_class( [self.log_alpha], lr=policy_lr, ) self.plotter = plotter self.render_eval_paths = render_eval_paths self.qf_criterion = nn.MSELoss() self.vf_criterion = nn.MSELoss() self.policy_optimizer = optimizer_class( self.policy.parameters(), lr=policy_lr, ) self.qf_optimizer = optimizer_class( self.qf.parameters(), lr=qf_lr, ) self.discount = discount self.reward_scale = reward_scale self.eval_statistics = OrderedDict() self._n_train_steps_total = 0 self._need_to_update_eval_statistics = True def train_from_torch(self, batch): rewards = batch['rewards'] terminals = batch['terminals'] obs = batch['observations'] actions = batch['actions'] next_obs = batch['next_observations'] batch_size = obs.size(0) """ Policy and Alpha Loss """ new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy( obs, reparameterize=True, return_log_prob=True, ) if self.use_automatic_entropy_tuning: alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean() self.alpha_optimizer.zero_grad() alpha_loss.backward() self.alpha_optimizer.step() alpha = self.log_alpha.exp() else: alpha_loss = 0 alpha = 1 q_all, _, _ = self.qf(obs, new_obs_actions) q_all = q_all.view(batch_size, self.num_samples, 1) q_new_actions, _ = torch.min(q_all, dim=1) policy_loss = (alpha*log_pi - q_new_actions).mean() """ QF Loss """ q_pred, mu, std = self.qf(obs, actions) print(q_pred) # Make sure policy accounts for squashing functions like tanh correctly! new_next_actions, _, _, new_log_pi, *_ = self.policy( next_obs, reparameterize=True, return_log_prob=True, ) target_all, _, _ = self.target_qf(next_obs, new_next_actions) target_all = target_all.view(self.num_samples, batch_size, 1) print(target_all) print(target_all.size()) target_q_values, _ = torch.min(target_all, dim=1) print(target_q_values) target_q_values = target_q_values - alpha * new_log_pi q_target = self.reward_scale * rewards + (1. 
- terminals) * self.discount * target_q_values if self.weighted_mse: raise NotImplementedError else: q_target = q_target.repeat_interleave(self.num_samples, dim=0) qf_loss = self.qf_criterion(q_pred, q_target.detach()) qf_loss += self.beta * kl_divergence(mu, std) """ Update networks """ self.qf_optimizer.zero_grad() qf_loss.backward() self.qf_optimizer.step() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() # exit() """ Soft Updates """ if self._n_train_steps_total % self.target_update_period == 0: ptu.soft_update_from_to( self.qf, self.target_qf, self.soft_target_tau ) """ Save some statistics for eval """ if self._need_to_update_eval_statistics: self._need_to_update_eval_statistics = False """ Eval should set this to None. This way, these statistics are only computed for one batch. """ policy_loss = (log_pi - q_new_actions).mean() self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss)) self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy( policy_loss )) self.eval_statistics.update(create_stats_ordered_dict( 'Q Predictions', ptu.get_numpy(q_pred), )) self.eval_statistics.update(create_stats_ordered_dict( 'Q Targets', ptu.get_numpy(q_target), )) self.eval_statistics.update(create_stats_ordered_dict( 'Log Pis', ptu.get_numpy(log_pi), )) self.eval_statistics.update(create_stats_ordered_dict( 'Policy mu', ptu.get_numpy(policy_mean), )) self.eval_statistics.update(create_stats_ordered_dict( 'Policy log std', ptu.get_numpy(policy_log_std), )) if self.use_automatic_entropy_tuning: self.eval_statistics['Alpha'] = alpha.item() self.eval_statistics['Alpha Loss'] = alpha_loss.item() self._n_train_steps_total += 1 def get_diagnostics(self): return self.eval_statistics def end_epoch(self, epoch): self._need_to_update_eval_statistics = True @property def networks(self): return [ self.policy, self.qf, self.target_qf, ] def get_snapshot(self): return dict( policy=self.policy, qf1=self.qf, target_qf1=self.qf, )
[ "numpy.prod", "collections.OrderedDict", "math.log", "torch.min", "torch.nn.MSELoss", "rlkit.torch.pytorch_util.get_numpy", "torch.sum", "rlkit.torch.pytorch_util.soft_update_from_to", "rlkit.torch.pytorch_util.zeros" ]
[((489, 530), 'torch.sum', 'torch.sum', (['(weight * (input - target) ** 2)'], {}), '(weight * (input - target) ** 2)\n', (498, 530), False, 'import torch\n'), ((2104, 2116), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2114, 2116), True, 'from torch import nn as nn\n'), ((2145, 2157), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2155, 2157), True, 'from torch import nn as nn\n'), ((2499, 2512), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2510, 2512), False, 'from collections import OrderedDict\n'), ((3574, 3597), 'torch.min', 'torch.min', (['q_all'], {'dim': '(1)'}), '(q_all, dim=1)\n', (3583, 3597), False, 'import torch\n'), ((4219, 4247), 'torch.min', 'torch.min', (['target_all'], {'dim': '(1)'}), '(target_all, dim=1)\n', (4228, 4247), False, 'import torch\n'), ((403, 414), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (411, 414), False, 'import math\n'), ((1829, 1861), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (1838, 1861), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5128, 5198), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf', 'self.target_qf', 'self.soft_target_tau'], {}), '(self.qf, self.target_qf, self.soft_target_tau)\n', (5151, 5198), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5657, 5679), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf_loss'], {}), '(qf_loss)\n', (5670, 5679), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5739, 5765), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (5752, 5765), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5913, 5934), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_pred'], {}), '(q_pred)\n', (5926, 5934), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6063, 6086), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_target'], {}), '(q_target)\n', (6076, 6086), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6213, 6234), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['log_pi'], {}), '(log_pi)\n', (6226, 6234), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6363, 6389), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_mean'], {}), '(policy_mean)\n', (6376, 6389), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6523, 6552), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_log_std'], {}), '(policy_log_std)\n', (6536, 6552), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1725, 1761), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (1732, 1761), True, 'import numpy as np\n')]
import os import utils import torch import torch.nn as nn from torchvision import transforms from torch.utils.data import DataLoader import numpy as np import data import scipy.io as sio from options.training_options import TrainOptions import utils import time from models import AutoEncoderCov3D, AutoEncoderCov3DMem from models import EntropyLossEncap ### opt_parser = TrainOptions() opt = opt_parser.parse(is_print=True) use_cuda = opt.UseCUDA device = torch.device("cuda" if use_cuda else "cpu") ### utils.seed(opt.Seed) if(opt.IsDeter): torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True ###### model_setting = utils.get_model_setting(opt) print('Setting: %s' % (model_setting)) ############ batch_size_in = opt.BatchSize learning_rate = opt.LR max_epoch_num = opt.EpochNum chnum_in_ = opt.ImgChnNum # channel number of the input images framenum_in_ = opt.FrameNum # num of frames in a video clip mem_dim_in = opt.MemDim entropy_loss_weight = opt.EntropyLossWeight sparse_shrink_thres = opt.ShrinkThres img_crop_size = 0 print('bs=%d, lr=%f, entrloss=%f, shr=%f, memdim=%d' % (batch_size_in, learning_rate, entropy_loss_weight, sparse_shrink_thres, mem_dim_in)) ############ ## data path data_root = opt.DataRoot + opt.Dataset + '/' tr_data_frame_dir = data_root + 'Train/' tr_data_idx_dir = data_root + 'Train_idx/' ############ model saving dir path saving_root = opt.ModelRoot saving_model_path = os.path.join(saving_root, 'model_' + model_setting + '/') utils.mkdir(saving_model_path) ### tblog if(opt.IsTbLog): log_path = os.path.join(saving_root, 'log_'+model_setting + '/') utils.mkdir(log_path) tb_logger = utils.Logger(log_path) ## if(chnum_in_==1): norm_mean = [0.5] norm_std = [0.5] elif(chnum_in_==3): norm_mean = (0.5, 0.5, 0.5) norm_std = (0.5, 0.5, 0.5) frame_trans = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(norm_mean, norm_std) ]) unorm_trans = utils.UnNormalize(mean=norm_mean, std=norm_std) ###### data video_dataset = data.VideoDataset(tr_data_idx_dir, tr_data_frame_dir, transform=frame_trans) tr_data_loader = DataLoader(video_dataset, batch_size=batch_size_in, shuffle=True, num_workers=opt.NumWorker ) ###### model if(opt.ModelName=='MemAE'): model = AutoEncoderCov3DMem(chnum_in_, mem_dim_in, shrink_thres=sparse_shrink_thres) else: model = [] print('Wrong model name.') model.apply(utils.weights_init) ######### device = torch.device("cuda" if use_cuda else "cpu") model.to(device) tr_recon_loss_func = nn.MSELoss().to(device) tr_entropy_loss_func = EntropyLossEncap().to(device) tr_optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) ## data_loader_len = len(tr_data_loader) textlog_interval = opt.TextLogInterval snap_save_interval = opt.SnapInterval save_check_interval = opt.SaveCheckInterval tb_img_log_interval = opt.TBImgLogInterval global_ite_idx = 0 # for logging for epoch_idx in range(0, max_epoch_num): for batch_idx, (item, frames) in enumerate(tr_data_loader): frames = frames.to(device) if (opt.ModelName == 'MemAE'): recon_res = model(frames) recon_frames = recon_res['output'] att_w = recon_res['att'] loss = tr_recon_loss_func(recon_frames, frames) recon_loss_val = loss.item() entropy_loss = tr_entropy_loss_func(att_w) entropy_loss_val = entropy_loss.item() loss = loss + entropy_loss_weight * entropy_loss loss_val = loss.item() ## tr_optimizer.zero_grad() loss.backward() tr_optimizer.step() ## ## TB log val if(opt.IsTbLog): tb_info = { 'loss': loss_val, 'recon_loss': recon_loss_val, 'entropy_loss': entropy_loss_val } for tag, 
value in tb_info.items(): tb_logger.scalar_summary(tag, value, global_ite_idx) # TB log img if( (global_ite_idx % tb_img_log_interval)==0 ): frames_vis = utils.vframes2imgs(unorm_trans(frames.data), step=5, batch_idx=0) frames_vis = np.concatenate(frames_vis, axis=-1) frames_vis = frames_vis[None, :, :] * np.ones(3, dtype=int)[:, None, None] frames_recon_vis = utils.vframes2imgs(unorm_trans(recon_frames.data), step=5, batch_idx=0) frames_recon_vis = np.concatenate(frames_recon_vis, axis=-1) frames_recon_vis = frames_recon_vis[None, :, :] * np.ones(3, dtype=int)[:, None, None] tb_info = { 'x': frames_vis, 'x_rec': frames_recon_vis } for tag, imgs in tb_info.items(): tb_logger.image_summary(tag, imgs, global_ite_idx) ## if((batch_idx % textlog_interval)==0): print('[%s, epoch %d/%d, bt %d/%d] loss=%f, rc_losss=%f, ent_loss=%f' % (model_setting, epoch_idx, max_epoch_num, batch_idx, data_loader_len, loss_val, recon_loss_val, entropy_loss_val) ) if((global_ite_idx % snap_save_interval)==0): torch.save(model.state_dict(), '%s/%s_snap.pt' % (saving_model_path, model_setting) ) global_ite_idx += 1 if((epoch_idx % save_check_interval)==0): torch.save(model.state_dict(), '%s/%s_epoch_%04d.pt' % (saving_model_path, model_setting, epoch_idx) ) torch.save(model.state_dict(), '%s/%s_epoch_%04d_final.pt' % (saving_model_path, model_setting, epoch_idx) )
[ "models.EntropyLossEncap", "numpy.ones", "utils.UnNormalize", "utils.get_model_setting", "os.path.join", "options.training_options.TrainOptions", "utils.Logger", "torch.nn.MSELoss", "utils.seed", "utils.mkdir", "torchvision.transforms.Normalize", "torch.utils.data.DataLoader", "numpy.concatenate", "models.AutoEncoderCov3DMem", "data.VideoDataset", "torchvision.transforms.ToTensor", "torch.device" ]
[((373, 387), 'options.training_options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (385, 387), False, 'from options.training_options import TrainOptions\n'), ((458, 501), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (470, 501), False, 'import torch\n'), ((507, 527), 'utils.seed', 'utils.seed', (['opt.Seed'], {}), '(opt.Seed)\n', (517, 527), False, 'import utils\n'), ((658, 686), 'utils.get_model_setting', 'utils.get_model_setting', (['opt'], {}), '(opt)\n', (681, 686), False, 'import utils\n'), ((1458, 1515), 'os.path.join', 'os.path.join', (['saving_root', "('model_' + model_setting + '/')"], {}), "(saving_root, 'model_' + model_setting + '/')\n", (1470, 1515), False, 'import os\n'), ((1516, 1546), 'utils.mkdir', 'utils.mkdir', (['saving_model_path'], {}), '(saving_model_path)\n', (1527, 1546), False, 'import utils\n'), ((1995, 2042), 'utils.UnNormalize', 'utils.UnNormalize', ([], {'mean': 'norm_mean', 'std': 'norm_std'}), '(mean=norm_mean, std=norm_std)\n', (2012, 2042), False, 'import utils\n'), ((2072, 2148), 'data.VideoDataset', 'data.VideoDataset', (['tr_data_idx_dir', 'tr_data_frame_dir'], {'transform': 'frame_trans'}), '(tr_data_idx_dir, tr_data_frame_dir, transform=frame_trans)\n', (2089, 2148), False, 'import data\n'), ((2166, 2262), 'torch.utils.data.DataLoader', 'DataLoader', (['video_dataset'], {'batch_size': 'batch_size_in', 'shuffle': '(True)', 'num_workers': 'opt.NumWorker'}), '(video_dataset, batch_size=batch_size_in, shuffle=True,\n num_workers=opt.NumWorker)\n', (2176, 2262), False, 'from torch.utils.data import DataLoader\n'), ((2607, 2650), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2619, 2650), False, 'import torch\n'), ((1590, 1645), 'os.path.join', 'os.path.join', (['saving_root', "('log_' + model_setting + '/')"], {}), "(saving_root, 'log_' + model_setting + '/')\n", (1602, 1645), False, 'import os\n'), ((1648, 1669), 'utils.mkdir', 'utils.mkdir', (['log_path'], {}), '(log_path)\n', (1659, 1669), False, 'import utils\n'), ((1686, 1708), 'utils.Logger', 'utils.Logger', (['log_path'], {}), '(log_path)\n', (1698, 1708), False, 'import utils\n'), ((2426, 2502), 'models.AutoEncoderCov3DMem', 'AutoEncoderCov3DMem', (['chnum_in_', 'mem_dim_in'], {'shrink_thres': 'sparse_shrink_thres'}), '(chnum_in_, mem_dim_in, shrink_thres=sparse_shrink_thres)\n', (2445, 2502), False, 'from models import AutoEncoderCov3D, AutoEncoderCov3DMem\n'), ((1901, 1922), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1920, 1922), False, 'from torchvision import transforms\n'), ((1932, 1973), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['norm_mean', 'norm_std'], {}), '(norm_mean, norm_std)\n', (1952, 1973), False, 'from torchvision import transforms\n'), ((2689, 2701), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2699, 2701), True, 'import torch.nn as nn\n'), ((2736, 2754), 'models.EntropyLossEncap', 'EntropyLossEncap', ([], {}), '()\n', (2752, 2754), False, 'from models import EntropyLossEncap\n'), ((4347, 4382), 'numpy.concatenate', 'np.concatenate', (['frames_vis'], {'axis': '(-1)'}), '(frames_vis, axis=-1)\n', (4361, 4382), True, 'import numpy as np\n'), ((4616, 4657), 'numpy.concatenate', 'np.concatenate', (['frames_recon_vis'], {'axis': '(-1)'}), '(frames_recon_vis, axis=-1)\n', (4630, 4657), True, 'import numpy as np\n'), ((4437, 4458), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': 'int'}), '(3, 
dtype=int)\n', (4444, 4458), True, 'import numpy as np\n'), ((4724, 4745), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (4731, 4745), True, 'import numpy as np\n')]
import pandas as pd df = pd.read_csv(r'balanced_reviews.csv') df.isnull().any(axis = 0) #handle the missing data df.dropna(inplace = True) #leaving the reviews with rating 3 and collect reviews with #rating 1, 2, 4 and 5 onyl df = df [df['overall'] != 3] import numpy as np #creating a label #based on the values in overall column df['Positivity'] = np.where(df['overall'] > 3 , 1 , 0) #NLP #reviewText - feature - df['reviewText'] #Positivity - label - df['Positivity'] from sklearn.model_selection import train_test_split features_train, features_test, labels_train, labels_test = train_test_split(df['reviewText'], df['Positivity'], random_state = 42 ) from sklearn.feature_extraction.text import TfidfVectorizer vect = TfidfVectorizer(min_df = 5).fit(features_train) features_train_vectorized = vect.transform(features_train) #model building from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(features_train_vectorized, labels_train) predictions = model.predict(vect.transform(features_test)) from sklearn.metrics import confusion_matrix confusion_matrix(labels_test, predictions) from sklearn.metrics import roc_auc_score roc_auc_score(labels_test, predictions) #save - pickle format import pickle file = open("pickle_model.pkl","wb") pickle.dump(model, file) #pickle the vocabulary pickle.dump(vect.vocabulary_, open('features.pkl', 'wb'))
[ "pickle.dump", "pandas.read_csv", "numpy.where", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LogisticRegression", "sklearn.metrics.roc_auc_score", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.metrics.confusion_matrix" ]
[((29, 64), 'pandas.read_csv', 'pd.read_csv', (['"""balanced_reviews.csv"""'], {}), "('balanced_reviews.csv')\n", (40, 64), True, 'import pandas as pd\n'), ((381, 414), 'numpy.where', 'np.where', (["(df['overall'] > 3)", '(1)', '(0)'], {}), "(df['overall'] > 3, 1, 0)\n", (389, 414), True, 'import numpy as np\n'), ((625, 694), 'sklearn.model_selection.train_test_split', 'train_test_split', (["df['reviewText']", "df['Positivity']"], {'random_state': '(42)'}), "(df['reviewText'], df['Positivity'], random_state=42)\n", (641, 694), False, 'from sklearn.model_selection import train_test_split\n'), ((974, 994), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (992, 994), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1164, 1206), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1180, 1206), False, 'from sklearn.metrics import confusion_matrix\n'), ((1253, 1292), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1266, 1292), False, 'from sklearn.metrics import roc_auc_score\n'), ((1379, 1403), 'pickle.dump', 'pickle.dump', (['model', 'file'], {}), '(model, file)\n', (1390, 1403), False, 'import pickle\n'), ((775, 800), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(5)'}), '(min_df=5)\n', (790, 800), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n')]
#basic components: Embedding Layer, Scaled Dot-Product Attention, Dense Layer import numpy as np import torch.nn.functional as F from torch import nn import torch class Embed(nn.Module): def __init__(self, length, emb_dim, embeddings=None, trainable=False, dropout=.1): super(Embed, self).__init__() self.embedding = nn.Embedding(num_embeddings=length, embedding_dim=emb_dim, padding_idx=0) if embeddings is not None: print("Loading pre-trained embeddings!") self.embedding.weight = nn.Parameter(torch.from_numpy(embeddings), requires_grad=trainable) self.dropout = nn.Dropout(dropout) def forward(self, X): embedded = self.embedding(X) embedded = self.dropout(embedded) return embedded class PosEmbed(nn.Module): def __init__(self, length, emb_dim): super(PosEmbed, self).__init__() self.length = length self.emb_dim = emb_dim pos_weight = self.position_encoding_init(n_position=length, emb_dim=emb_dim) self.pos_embedding = nn.Embedding.from_pretrained(pos_weight, freeze=True) def get_pos(self, word_sequences, mode='seq'): batch = [] for word_seq in word_sequences: start_idx = 1.0 word_pos = [] for pos in word_seq: if mode == 'seq': if int(pos) == 0: word_pos.append(0.0) else: word_pos.append(start_idx) start_idx += 1.0 elif mode == 'set': word_pos.append(0.0) else: raise ValueError('Unrecognized position encoding mode! Should be chosen from "seq" or "set"! ') batch.append(torch.from_numpy(np.array(word_pos)).type(torch.LongTensor)) batch = torch.cat(batch).view(-1, self.length) return batch.to('cuda') def forward(self, X, mode='seq'): X = self.get_pos(X, mode=mode) pos_embeded = self.pos_embedding(X) return pos_embeded @staticmethod def position_encoding_init(n_position, emb_dim): ''' Init the sinusoid position encoding table ''' # keep dim 0 for padding token position encoding zero vector n_position += 1 position_enc = np.array([ [pos / np.power(10000, 2 * (j // 2) / emb_dim) for j in range(emb_dim)] if pos != 0 else np.zeros(emb_dim) for pos in range(n_position)]) position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # apply sin on 0th,2nd,4th...emb_dim position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # apply cos on 1st,3rd,5th...emb_dim return torch.from_numpy(position_enc).type(torch.FloatTensor) class ScaledDotProductAttention(nn.Module): def __init__(self, d_k, num_head,dropout=.1): super(ScaledDotProductAttention, self).__init__() self.reg = np.sqrt(d_k) self.num_head = num_head self.dropout = nn.Dropout(dropout) self.softmax = nn.Softmax(dim=2) #input tensor dim: (batch, seq_length, seq_length) def forward(self, q, k, v, pad_mask=None, context_mask=None): attention = torch.bmm(q, k.transpose(1, 2)) #dim of q and k: (batch * n_head, seq_length) attention /= self.reg if pad_mask is not None: attention = attention.masked_fill(pad_mask, -1e9) #see Attention is all you need 3.2.3 attention = self.softmax(attention) attention = self.dropout(attention) if pad_mask is not None: attention = attention.masked_fill(pad_mask, 0) #see Attention is all you need 3.2.3 if context_mask is not None: #context masking attention *= context_mask # attention residual residual = 0 if self.num_head > 1: _length_1 = attention.shape[1] _length_2 = attention.shape[2] _attn = attention.contiguous().view(self.num_head, -1, _length_1, _length_2) for m, left in enumerate(_attn): for n, right in enumerate(_attn): if not m == n: residual += torch.sum(torch.abs(left * right)) / _length_1 residual = residual/self.num_head/self.num_head/2 output = torch.bmm(attention, v) return output, attention, residual class 
MultiHeadAttention(nn.Module): def __init__(self, num_head, d_x, d_k, dropout=.1): super(MultiHeadAttention, self).__init__() self.num_head = num_head self.d_k = d_k self.wq = nn.Linear(d_x, num_head * d_k) self.wk = nn.Linear(d_x, num_head * d_k) self.wv = nn.Linear(d_x, num_head * d_k) nn.init.xavier_normal_(self.wq.weight) nn.init.xavier_normal_(self.wk.weight) nn.init.xavier_normal_(self.wv.weight) self.sdp_attn = ScaledDotProductAttention(d_k=d_k, num_head=num_head, dropout=dropout) self.dropout = nn.Dropout(dropout) self.norm = nn.LayerNorm(d_x) self.wo = nn.Linear(num_head * d_k, d_x) nn.init.xavier_normal_(self.wo.weight) def forward(self, q, k, v, pad_mask=None): X = q #batch * length_q * d_x length_q = q.shape[1] assert v.shape[1] == k.shape[1] length_k = k.shape[1] q = self.wq(q).view(-1, length_q, self.num_head, self.d_k) #batch * length * num_head * d_k k = self.wk(k).view(-1, length_k, self.num_head, self.d_k) v = self.wv(v).view(-1, length_k, self.num_head, self.d_k) q = q.permute(2, 0, 1, 3).contiguous().view(-1, length_q, self.d_k) # (batch * num_head) * length * d_k k = k.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k) v = v.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k) if pad_mask is not None: pad_mask = pad_mask.repeat(self.num_head, 1, 1) # batch * length_q * length_k -> (batch * num_head) * l_q * l_k output, attention, _ = self.sdp_attn(q, k, v, pad_mask=pad_mask) #output: (batch*nh) * length_q * d_k #attention: (batch*nh) * length_q * length_k output = output.view(self.num_head, -1, length_q, self.d_k) # nh * batch * l_q * d_k output = output.permute(1, 2, 0, 3).contiguous().view(-1, length_q, self.num_head * self.d_k) # batch * l_q * (nh * d_k) output = self.norm(self.dropout(self.wo(output)) + X) #batch * l_q * d_x attention = attention.view(self.num_head, -1, length_q, length_k).permute(1, 0, 2, 3) #batch * nh * l_q * l_k return output, attention class LexiconMultiHeadAttention(nn.Module): def __init__(self, num_head, d_x, d_k, d_kl, dropout=.1): super(LexiconMultiHeadAttention, self).__init__() self.num_head = num_head self.d_k = d_k self.d_kl = d_kl self.wq = nn.Linear(d_x, num_head * d_k) self.wk = nn.Linear(d_x, num_head * d_k) self.wv = nn.Linear(d_x, num_head * d_k) self.wkl = nn.Linear(d_x, num_head * d_kl) self.wvl = nn.Linear(d_x, num_head * d_kl) #initialization problems? 
nn.init.xavier_normal_(self.wq.weight) nn.init.xavier_normal_(self.wk.weight) nn.init.xavier_normal_(self.wv.weight) nn.init.xavier_normal_(self.wkl.weight) nn.init.xavier_normal_(self.wvl.weight) self.sdp_attn_context = ScaledDotProductAttention(d_k=d_k, num_head=num_head, dropout=dropout) self.sdp_attn_lex = ScaledDotProductAttention(d_k=d_kl, num_head=num_head, dropout=dropout) self.dropout = nn.Dropout(dropout) self.norm = nn.LayerNorm(d_x) self.wo = nn.Linear(num_head * d_k, d_x) nn.init.xavier_normal_(self.wo.weight) def forward(self, q, k, v, kl, vl, pad_mask=None, pad_mask_l=None, context_mask=None, alpha=0.5): X = q #batch * length_q * d_x length_q = q.shape[1] assert v.shape[1] == k.shape[1] length_k = k.shape[1] assert vl.shape[1] == kl.shape[1] length_kl = kl.shape[1] q = self.wq(q).view(-1, length_q, self.num_head, self.d_k) #batch * length * num_head * d_k k = self.wk(k).view(-1, length_k, self.num_head, self.d_k) v = self.wv(v).view(-1, length_k, self.num_head, self.d_k) kl = self.wkl(kl).view(-1, length_kl, self.num_head, self.d_kl) vl = self.wvl(vl).view(-1, length_kl, self.num_head, self.d_kl) q = q.permute(2, 0, 1, 3).contiguous().view(-1, length_q, self.d_k) # (batch * num_head) * length * d_k k = k.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k) v = v.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k) # value residual residual = 0 # if self.num_head > 1: # # _v = v.contiguous().view(self.num_head, -1, length_k, self.d_k) # _sim = torch.nn.CosineSimilarity(dim=2) # for m, left in enumerate(_v): # for n, right in enumerate(_v): # if not m == n: # residual += (torch.sum(torch.abs(_sim(left, right)))) / left.shape[0] # residual /= 2 # residual = residual/self.num_head/self.num_head kl = kl.permute(2, 0, 1, 3).contiguous().view(-1, length_kl, self.d_kl) vl = vl.permute(2, 0, 1, 3).contiguous().view(-1, length_kl, self.d_kl) if pad_mask is not None: pad_mask = pad_mask.repeat(self.num_head, 1, 1) # batch * length_q * length_k -> (batch * num_head) * l_q * l_k if pad_mask_l is not None: pad_mask_l = pad_mask_l.repeat(self.num_head, 1, 1) if context_mask is not None: context_mask = context_mask.repeat(self.num_head, 1, 1) output_context, attention_context, a_res_context = self.sdp_attn_context(q, k, v, pad_mask=pad_mask) output_lexicon, attention_lexicon, a_res_lexicon = self.sdp_attn_lex(q, kl, vl, pad_mask=pad_mask_l, context_mask=context_mask) output = alpha * output_context + (1 - alpha) * output_lexicon residual += a_res_context #output: (batch*nh) * length_q * d_k #attention: (batch*nh) * length_q * length_k output = output.view(self.num_head, -1, length_q, self.d_k) #nh * batch * l_q * d_k output = output.permute(1, 2, 0, 3).contiguous().view(-1, length_q, self.num_head * self.d_k) #batch * l_q * (nh * d_k) output = self.norm(self.dropout(self.wo(output)) + X) #batch * l_q * d_x attention_context = attention_context.view(self.num_head, -1, length_q, length_k).permute(1, 0, 2, 3)#batch * nh * l_q * l_k attention_lexicon = attention_lexicon.view(self.num_head, -1, length_q, length_kl).permute(1, 0, 2, 3)#batch * nh * l_q * l_k return output, attention_context, attention_lexicon, residual class PointwiseFF(nn.Module): def __init__(self, d_x, d_ff, dropout=.0): super(PointwiseFF, self).__init__() self.w1 = nn.Conv1d(d_x, d_ff, 1) self.w2 = nn.Conv1d(d_ff, d_x, 1) nn.init.xavier_normal_(self.w1.weight) nn.init.xavier_normal_(self.w2.weight) self.dropout = nn.Dropout(dropout) self.norm = nn.LayerNorm(d_x) def forward(self, X): output = 
self.w2(F.relu(self.w1(X.transpose(1, 2)))) #dim of x: (batch, seq_length, d_x) output = self.dropout(output.transpose(1, 2)) output = self.norm(output + X) #batch * seq_length * d_x return output
[ "torch.bmm", "torch.nn.Dropout", "numpy.sqrt", "torch.abs", "torch.nn.Softmax", "numpy.power", "torch.nn.LayerNorm", "torch.from_numpy", "torch.nn.init.xavier_normal_", "torch.cat", "numpy.zeros", "numpy.array", "numpy.cos", "torch.nn.Linear", "numpy.sin", "torch.nn.Conv1d", "torch.nn.Embedding", "torch.nn.Embedding.from_pretrained" ]
[((367, 440), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'length', 'embedding_dim': 'emb_dim', 'padding_idx': '(0)'}), '(num_embeddings=length, embedding_dim=emb_dim, padding_idx=0)\n', (379, 440), False, 'from torch import nn\n'), ((792, 811), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (802, 811), False, 'from torch import nn\n'), ((1348, 1401), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['pos_weight'], {'freeze': '(True)'}), '(pos_weight, freeze=True)\n', (1376, 1401), False, 'from torch import nn\n'), ((2917, 2947), 'numpy.sin', 'np.sin', (['position_enc[1:, 0::2]'], {}), '(position_enc[1:, 0::2])\n', (2923, 2947), True, 'import numpy as np\n'), ((3018, 3048), 'numpy.cos', 'np.cos', (['position_enc[1:, 1::2]'], {}), '(position_enc[1:, 1::2])\n', (3024, 3048), True, 'import numpy as np\n'), ((3352, 3364), 'numpy.sqrt', 'np.sqrt', (['d_k'], {}), '(d_k)\n', (3359, 3364), True, 'import numpy as np\n'), ((3421, 3440), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3431, 3440), False, 'from torch import nn\n'), ((3464, 3481), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (3474, 3481), False, 'from torch import nn\n'), ((4764, 4787), 'torch.bmm', 'torch.bmm', (['attention', 'v'], {}), '(attention, v)\n', (4773, 4787), False, 'import torch\n'), ((5074, 5104), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (5083, 5104), False, 'from torch import nn\n'), ((5123, 5153), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (5132, 5153), False, 'from torch import nn\n'), ((5172, 5202), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (5181, 5202), False, 'from torch import nn\n'), ((5222, 5260), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wq.weight'], {}), '(self.wq.weight)\n', (5244, 5260), False, 'from torch import nn\n'), ((5269, 5307), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wk.weight'], {}), '(self.wk.weight)\n', (5291, 5307), False, 'from torch import nn\n'), ((5316, 5354), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wv.weight'], {}), '(self.wv.weight)\n', (5338, 5354), False, 'from torch import nn\n'), ((5483, 5502), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5493, 5502), False, 'from torch import nn\n'), ((5523, 5540), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_x'], {}), '(d_x)\n', (5535, 5540), False, 'from torch import nn\n'), ((5568, 5598), 'torch.nn.Linear', 'nn.Linear', (['(num_head * d_k)', 'd_x'], {}), '(num_head * d_k, d_x)\n', (5577, 5598), False, 'from torch import nn\n'), ((5607, 5645), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wo.weight'], {}), '(self.wo.weight)\n', (5629, 5645), False, 'from torch import nn\n'), ((7575, 7605), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (7584, 7605), False, 'from torch import nn\n'), ((7624, 7654), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (7633, 7654), False, 'from torch import nn\n'), ((7673, 7703), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (7682, 7703), False, 'from torch import nn\n'), ((7726, 7757), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_kl)'], {}), '(d_x, num_head * d_kl)\n', (7735, 7757), False, 'from torch 
import nn\n'), ((7777, 7808), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_kl)'], {}), '(d_x, num_head * d_kl)\n', (7786, 7808), False, 'from torch import nn\n'), ((7862, 7900), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wq.weight'], {}), '(self.wq.weight)\n', (7884, 7900), False, 'from torch import nn\n'), ((7909, 7947), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wk.weight'], {}), '(self.wk.weight)\n', (7931, 7947), False, 'from torch import nn\n'), ((7956, 7994), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wv.weight'], {}), '(self.wv.weight)\n', (7978, 7994), False, 'from torch import nn\n'), ((8003, 8042), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wkl.weight'], {}), '(self.wkl.weight)\n', (8025, 8042), False, 'from torch import nn\n'), ((8051, 8090), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wvl.weight'], {}), '(self.wvl.weight)\n', (8073, 8090), False, 'from torch import nn\n'), ((8335, 8354), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (8345, 8354), False, 'from torch import nn\n'), ((8375, 8392), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_x'], {}), '(d_x)\n', (8387, 8392), False, 'from torch import nn\n'), ((8420, 8450), 'torch.nn.Linear', 'nn.Linear', (['(num_head * d_k)', 'd_x'], {}), '(num_head * d_k, d_x)\n', (8429, 8450), False, 'from torch import nn\n'), ((8459, 8497), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wo.weight'], {}), '(self.wo.weight)\n', (8481, 8497), False, 'from torch import nn\n'), ((12184, 12207), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_x', 'd_ff', '(1)'], {}), '(d_x, d_ff, 1)\n', (12193, 12207), False, 'from torch import nn\n'), ((12226, 12249), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_ff', 'd_x', '(1)'], {}), '(d_ff, d_x, 1)\n', (12235, 12249), False, 'from torch import nn\n'), ((12259, 12297), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.w1.weight'], {}), '(self.w1.weight)\n', (12281, 12297), False, 'from torch import nn\n'), ((12306, 12344), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.w2.weight'], {}), '(self.w2.weight)\n', (12328, 12344), False, 'from torch import nn\n'), ((12369, 12388), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (12379, 12388), False, 'from torch import nn\n'), ((12409, 12426), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_x'], {}), '(d_x)\n', (12421, 12426), False, 'from torch import nn\n'), ((665, 693), 'torch.from_numpy', 'torch.from_numpy', (['embeddings'], {}), '(embeddings)\n', (681, 693), False, 'import torch\n'), ((2187, 2203), 'torch.cat', 'torch.cat', (['batch'], {}), '(batch)\n', (2196, 2203), False, 'import torch\n'), ((3101, 3131), 'torch.from_numpy', 'torch.from_numpy', (['position_enc'], {}), '(position_enc)\n', (3117, 3131), False, 'import torch\n'), ((2825, 2842), 'numpy.zeros', 'np.zeros', (['emb_dim'], {}), '(emb_dim)\n', (2833, 2842), True, 'import numpy as np\n'), ((2127, 2145), 'numpy.array', 'np.array', (['word_pos'], {}), '(word_pos)\n', (2135, 2145), True, 'import numpy as np\n'), ((2731, 2770), 'numpy.power', 'np.power', (['(10000)', '(2 * (j // 2) / emb_dim)'], {}), '(10000, 2 * (j // 2) / emb_dim)\n', (2739, 2770), True, 'import numpy as np\n'), ((4646, 4669), 'torch.abs', 'torch.abs', (['(left * right)'], {}), '(left * right)\n', (4655, 4669), False, 'import torch\n')]
#!/usr/bin/env python # -*- coding:utf-8 -*- # @FileName : core_recorder.py # @Time : 2020/9/25 12:29 # @Author : 陈嘉昕 # @Demand : 声音复杂记录 import threading import logging import wave from pyaudio import PyAudio, paInt16 import numpy as np import queue import time class CoreRecorder(threading.Thread): def __init__(self, whole_time=None, # How much time to the end sr=20000, # Sample rate batch_num=600, # Batch size (how much data for a single fetch) frames_per_buffer=600 ): threading.Thread.__init__(self) self.time = whole_time self.sr = sr self.batch_num = batch_num self.data_alter = threading.Lock() self.frames_per_buffer = frames_per_buffer self.logger = logging.getLogger(__name__ + '.CoreRecorder') self.buffer = queue.Queue() self.start_time = None self.__running = threading.Event() self.__running.set() def run(self): self.logger.debug("Start to recording...") self.logger.debug(" Time = %s" % self.time) self.logger.debug(" Sample Rate = %s" % self.sr) self.start_time = time.time() pa = PyAudio() stream = pa.open(format=paInt16, channels=1, rate=self.sr, input=True, frames_per_buffer=self.frames_per_buffer) my_buf = [] count = 0 if self.time is None: total_count = 1e10 else: total_count = self.time * self.sr / self.batch_num while count < total_count and self.__running.isSet(): datawav = stream.read(self.batch_num, exception_on_overflow=True) datause = np.fromstring(datawav, dtype=np.short) for w in datause: self.buffer.put(w) count += 1 stream.close() def save_wave_file(self, filename, data): wf = wave.open(filename, 'wb') wf.setnchannels(1) wf.setsampwidth(2) wf.setframerate(self.sr) wf.writeframes(b"".join(data)) wf.close() def stop(self): self.__running.clear()
[ "logging.getLogger", "threading.Thread.__init__", "wave.open", "threading.Lock", "numpy.fromstring", "threading.Event", "queue.Queue", "pyaudio.PyAudio", "time.time" ]
[((591, 622), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (616, 622), False, 'import threading\n'), ((736, 752), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (750, 752), False, 'import threading\n'), ((826, 871), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.CoreRecorder')"], {}), "(__name__ + '.CoreRecorder')\n", (843, 871), False, 'import logging\n'), ((894, 907), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (905, 907), False, 'import queue\n'), ((964, 981), 'threading.Event', 'threading.Event', ([], {}), '()\n', (979, 981), False, 'import threading\n'), ((1219, 1230), 'time.time', 'time.time', ([], {}), '()\n', (1228, 1230), False, 'import time\n'), ((1244, 1253), 'pyaudio.PyAudio', 'PyAudio', ([], {}), '()\n', (1251, 1253), False, 'from pyaudio import PyAudio, paInt16\n'), ((1923, 1948), 'wave.open', 'wave.open', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (1932, 1948), False, 'import wave\n'), ((1713, 1751), 'numpy.fromstring', 'np.fromstring', (['datawav'], {'dtype': 'np.short'}), '(datawav, dtype=np.short)\n', (1726, 1751), True, 'import numpy as np\n')]
import featext.feature_processing from featext.mfcc import Mfcc import numpy as np import system.gmm_em as gmm import system.ivector as ivector import system.backend as backend # UPDATE THIS FOLDER (folder to spoken digit dataset recordings): data_folder = '/home/ville/files/recordings/' speakers = ['jackson', 'nicolas', 'theo'] n_speakers = len(speakers) n_digits = 10 # 0 - 9 n_sessions = 50 # 0 - 49 #### Feature extraction: # Let us train the spoken digit recognition system with speakers Jackson and Nicolas and test with Theo. mfcc = Mfcc() mfcc.frame_duration = 0.025 mfcc.frame_overlap_duration = 0.01 mfcc.sad_threshold = 60 mfcc.include_deltas = 1 mfcc.include_double_deltas = 1 mfcc.include_base_coeffs = 1 mfcc.include_energy = 1 mfcc.n_coeffs = 20 mfcc.rasta_coeff = 0 mfcc.pre_emphasis = 0 mfcc.cmvn = 1 mfcc.initialize() all_features = np.empty((n_speakers, n_digits, n_sessions), dtype=object) for speaker in range(n_speakers): for digit in range(n_digits): for session in range(n_sessions): filename = '{}{}_{}_{}.wav'.format(data_folder, digit, speakers[speaker], session) all_features[speaker, digit, session] = featext.feature_processing.extract_features_from_file(filename, mfcc) feature_dim = all_features[0, 0, 0].shape[0] #### Train GMM for every digit: n_components = 64 digit_models = [] for digit in range(n_digits): model = gmm.GMM(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10, nworkers=2) model.fit(np.reshape(all_features[0:2, digit, :], (-1,))) digit_models.append(model) ### Scoring (Based on GMM log likelihoods): test_features = np.reshape(all_features[2, :, :], (-1)) n_tests = test_features.size true_labels = np.repeat(np.arange(n_digits), n_sessions) scores = np.zeros((n_digits, n_tests)) for test_segment in range(n_tests): for digit in range(n_digits): scores[digit, test_segment] = np.mean(digit_models[digit].compute_log_lik(test_features[test_segment])) classifications = np.argmax(scores, axis=0) n_correct = sum(classifications == true_labels) print('Correct classifications: {} / {} ({:.1f} %)\n'.format(n_correct, n_tests, n_correct / n_tests * 100)) # EXERCISE: Implement GMM-based scoring with universal background model (UBM) #### Universal background model (UBM) training: ubm = gmm.GMM(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10, nworkers=2) ubm.fit(np.reshape(all_features[0:2, :, :], -1)) #### GMM adaptation relevance_factor = 1 #digit_models = np.empty((n_digits,), dtype=object) digit_models = [] for i in range(n_digits): digit_models.append(ubm.adapt(np.reshape(all_features[0:2, i, :], (-1,)), relevance_factor)) #### Scoring trials (all test files vs. 
all models): test_features = np.reshape(all_features[2, :, :], (-1)) n_tests = test_features.size true_labels = np.repeat(np.arange(n_digits), n_sessions) scores = np.zeros((n_digits, n_tests)) for test_segment in range(n_tests): for digit in range(n_digits): scores[digit, test_segment] = np.mean(digit_models[digit].compute_log_lik(test_features[test_segment])) classifications = np.argmax(scores, axis=0) n_correct = sum(classifications == true_labels) print('Correct classifications: {} / {} ({:.1f} %)\n'.format(n_correct, n_tests, n_correct / n_tests * 100)) ###### I-vector / PLDA system #### Sufficient statistics (Baum-Welch statistics) extraction: all_stats = np.empty((n_speakers, n_digits, n_sessions), dtype=object) for speaker in range(n_speakers): for digit in range(n_digits): for session in range(n_sessions): N, F = ubm.compute_centered_stats(all_features[speaker, digit, session]) all_stats[speaker, digit, session] = (N, F) #### Total variability matrix training: ivector_dim = 50; tMatrix = ivector.TMatrix(ivector_dim, feature_dim, n_components, niter=5, nworkers=2) tMatrix.train(np.reshape(all_stats[0:2, :, :], (-1,)), ubm) #### I-vector extraction: extractor = ivector.Ivector(ivector_dim, feature_dim, n_components) extractor.initialize(ubm, tMatrix.Tm) ivectors = np.empty((ivector_dim, n_speakers, n_digits, n_sessions)) for speaker in range(n_speakers): for digit in range(n_digits): for session in range(n_sessions): ivectors[:, speaker, digit, session] = extractor.extract(*all_stats[speaker, digit, session]) #### I-vector processing: training_vectors = np.reshape(ivectors[:, 0:2, :, :], (ivector_dim, -1), order='F') training_labels = np.tile(np.arange(n_digits).repeat(2), n_sessions) model_vectors = np.reshape(np.mean(ivectors[:, 0:2, :, :], (1, 3)), (ivector_dim, -1), order='F') test_vectors = np.reshape(ivectors[:, 2, :, :], (ivector_dim, -1), order='F') true_labels = np.tile(np.arange(n_digits), n_sessions) center = backend.compute_mean(training_vectors) w = backend.calc_white_mat(np.cov(training_vectors)) training_vectors = backend.preprocess(training_vectors, center, w) model_vectors = backend.preprocess(model_vectors, center, w) test_vectors = backend.preprocess(test_vectors, center, w) #### PLDA training: #### (probabilistic linear discriminant analysis) latent_dim = 40; plda = backend.GPLDA(ivector_dim, latent_dim, niter=20) plda.train_em(training_vectors, training_labels) #### Scoring: scores = plda.score_trials(model_vectors, test_vectors) # scores = backend.cosine_similarity(modelVectors, testVectors) classifications = np.argmax(scores, axis=0) n_correct = sum(classifications == true_labels) print('Correct classifications: {} / {} ({:.1f} %)\n'.format(n_correct, n_tests, n_correct / n_tests * 100))
[ "featext.mfcc.Mfcc", "numpy.mean", "numpy.reshape", "system.ivector.TMatrix", "system.backend.GPLDA", "system.gmm_em.GMM", "numpy.argmax", "system.backend.preprocess", "system.backend.compute_mean", "numpy.zeros", "numpy.empty", "numpy.cov", "system.ivector.Ivector", "numpy.arange" ]
[((551, 557), 'featext.mfcc.Mfcc', 'Mfcc', ([], {}), '()\n', (555, 557), False, 'from featext.mfcc import Mfcc\n'), ((863, 921), 'numpy.empty', 'np.empty', (['(n_speakers, n_digits, n_sessions)'], {'dtype': 'object'}), '((n_speakers, n_digits, n_sessions), dtype=object)\n', (871, 921), True, 'import numpy as np\n'), ((1647, 1684), 'numpy.reshape', 'np.reshape', (['all_features[2, :, :]', '(-1)'], {}), '(all_features[2, :, :], -1)\n', (1657, 1684), True, 'import numpy as np\n'), ((1783, 1812), 'numpy.zeros', 'np.zeros', (['(n_digits, n_tests)'], {}), '((n_digits, n_tests))\n', (1791, 1812), True, 'import numpy as np\n'), ((2015, 2040), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (2024, 2040), True, 'import numpy as np\n'), ((2333, 2422), 'system.gmm_em.GMM', 'gmm.GMM', ([], {'ndim': 'feature_dim', 'nmix': 'n_components', 'ds_factor': '(1)', 'final_niter': '(10)', 'nworkers': '(2)'}), '(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10,\n nworkers=2)\n', (2340, 2422), True, 'import system.gmm_em as gmm\n'), ((2776, 2813), 'numpy.reshape', 'np.reshape', (['all_features[2, :, :]', '(-1)'], {}), '(all_features[2, :, :], -1)\n', (2786, 2813), True, 'import numpy as np\n'), ((2912, 2941), 'numpy.zeros', 'np.zeros', (['(n_digits, n_tests)'], {}), '((n_digits, n_tests))\n', (2920, 2941), True, 'import numpy as np\n'), ((3143, 3168), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (3152, 3168), True, 'import numpy as np\n'), ((3435, 3493), 'numpy.empty', 'np.empty', (['(n_speakers, n_digits, n_sessions)'], {'dtype': 'object'}), '((n_speakers, n_digits, n_sessions), dtype=object)\n', (3443, 3493), True, 'import numpy as np\n'), ((3815, 3891), 'system.ivector.TMatrix', 'ivector.TMatrix', (['ivector_dim', 'feature_dim', 'n_components'], {'niter': '(5)', 'nworkers': '(2)'}), '(ivector_dim, feature_dim, n_components, niter=5, nworkers=2)\n', (3830, 3891), True, 'import system.ivector as ivector\n'), ((3993, 4048), 'system.ivector.Ivector', 'ivector.Ivector', (['ivector_dim', 'feature_dim', 'n_components'], {}), '(ivector_dim, feature_dim, n_components)\n', (4008, 4048), True, 'import system.ivector as ivector\n'), ((4098, 4155), 'numpy.empty', 'np.empty', (['(ivector_dim, n_speakers, n_digits, n_sessions)'], {}), '((ivector_dim, n_speakers, n_digits, n_sessions))\n', (4106, 4155), True, 'import numpy as np\n'), ((4419, 4483), 'numpy.reshape', 'np.reshape', (['ivectors[:, 0:2, :, :]', '(ivector_dim, -1)'], {'order': '"""F"""'}), "(ivectors[:, 0:2, :, :], (ivector_dim, -1), order='F')\n", (4429, 4483), True, 'import numpy as np\n'), ((4666, 4728), 'numpy.reshape', 'np.reshape', (['ivectors[:, 2, :, :]', '(ivector_dim, -1)'], {'order': '"""F"""'}), "(ivectors[:, 2, :, :], (ivector_dim, -1), order='F')\n", (4676, 4728), True, 'import numpy as np\n'), ((4794, 4832), 'system.backend.compute_mean', 'backend.compute_mean', (['training_vectors'], {}), '(training_vectors)\n', (4814, 4832), True, 'import system.backend as backend\n'), ((4905, 4952), 'system.backend.preprocess', 'backend.preprocess', (['training_vectors', 'center', 'w'], {}), '(training_vectors, center, w)\n', (4923, 4952), True, 'import system.backend as backend\n'), ((4969, 5013), 'system.backend.preprocess', 'backend.preprocess', (['model_vectors', 'center', 'w'], {}), '(model_vectors, center, w)\n', (4987, 5013), True, 'import system.backend as backend\n'), ((5029, 5072), 'system.backend.preprocess', 'backend.preprocess', (['test_vectors', 'center', 'w'], {}), 
'(test_vectors, center, w)\n', (5047, 5072), True, 'import system.backend as backend\n'), ((5168, 5216), 'system.backend.GPLDA', 'backend.GPLDA', (['ivector_dim', 'latent_dim'], {'niter': '(20)'}), '(ivector_dim, latent_dim, niter=20)\n', (5181, 5216), True, 'import system.backend as backend\n'), ((5420, 5445), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (5429, 5445), True, 'import numpy as np\n'), ((1407, 1496), 'system.gmm_em.GMM', 'gmm.GMM', ([], {'ndim': 'feature_dim', 'nmix': 'n_components', 'ds_factor': '(1)', 'final_niter': '(10)', 'nworkers': '(2)'}), '(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10,\n nworkers=2)\n', (1414, 1496), True, 'import system.gmm_em as gmm\n'), ((1740, 1759), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (1749, 1759), True, 'import numpy as np\n'), ((2427, 2466), 'numpy.reshape', 'np.reshape', (['all_features[0:2, :, :]', '(-1)'], {}), '(all_features[0:2, :, :], -1)\n', (2437, 2466), True, 'import numpy as np\n'), ((2869, 2888), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (2878, 2888), True, 'import numpy as np\n'), ((3907, 3946), 'numpy.reshape', 'np.reshape', (['all_stats[0:2, :, :]', '(-1,)'], {}), '(all_stats[0:2, :, :], (-1,))\n', (3917, 3946), True, 'import numpy as np\n'), ((4580, 4619), 'numpy.mean', 'np.mean', (['ivectors[:, 0:2, :, :]', '(1, 3)'], {}), '(ivectors[:, 0:2, :, :], (1, 3))\n', (4587, 4619), True, 'import numpy as np\n'), ((4751, 4770), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (4760, 4770), True, 'import numpy as np\n'), ((4860, 4884), 'numpy.cov', 'np.cov', (['training_vectors'], {}), '(training_vectors)\n', (4866, 4884), True, 'import numpy as np\n'), ((1507, 1553), 'numpy.reshape', 'np.reshape', (['all_features[0:2, digit, :]', '(-1,)'], {}), '(all_features[0:2, digit, :], (-1,))\n', (1517, 1553), True, 'import numpy as np\n'), ((2642, 2684), 'numpy.reshape', 'np.reshape', (['all_features[0:2, i, :]', '(-1,)'], {}), '(all_features[0:2, i, :], (-1,))\n', (2652, 2684), True, 'import numpy as np\n'), ((4510, 4529), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (4519, 4529), True, 'import numpy as np\n')]
import functools import itertools import operator import numpy as np from qecsim.model import StabilizerCode, cli_description from qecsim.models.rotatedplanar import RotatedPlanarPauli @cli_description('Rotated planar (rows INT >= 3, cols INT >= 3)') class RotatedPlanarCode(StabilizerCode): r""" Implements a rotated planar mixed boundary code defined by its lattice size. In addition to the members defined in :class:`qecsim.model.StabilizerCode`, it provides several lattice methods as described below. Lattice methods: * Get size: :meth:`size`. * Get plaquette type: :meth:`is_x_plaquette`, :meth:`is_z_plaquette`, :meth:`is_virtual_plaquette`. * Get and test bounds: :meth:`site_bounds`, :meth:`is_in_site_bounds`, :meth:`is_in_plaquette_bounds`. * Resolve a syndrome to plaquettes: :meth:`syndrome_to_plaquette_indices`. * Construct a Pauli operator on the lattice: :meth:`new_pauli`. Indices: * Indices are in the format (x, y). * Qubit sites (vertices) are indexed by (x, y) coordinates with the origin at the lower left qubit. * Stabilizer plaquettes are indexed by (x, y) coordinates such that the lower left corner of the plaquette is on the qubit site at (x, y). * X-type stabilizer plaquette indices satisfy (x-y) % 2 == 1. * Z-type stabilizer plaquette indices satisfy (x-y) % 2 == 0. For example, qubit site indices on a 3 x 3 lattice: :: (0,2)-----(1,2)-----(2,2) | | | | | | | | | (0,1)-----(1,1)-----(2,1) | | | | | | | | | (0,0)-----(1,0)-----(2,0) For example, stabilizer plaquette types and indices on a 3 x 3 lattice: :: ------- / Z \ | (0,2) | +---------+---------+----- | X | Z | X \ | (0,1) | (1,1) |(2,1) | | | | / -----+---------+---------+----- / X | Z | X | |(-1,0)| (0,0) | (1,0) | \ | | | -----+---------+---------+ | Z | \ (1,-1)/ ------- """ MIN_SIZE = (3, 3) def __init__(self, rows, columns): """ Initialise new rotated planar code. :param rows: Number of rows in lattice. :type rows: int :param columns: Number of columns in lattice. :type columns: int :raises ValueError: if (rows, columns) smaller than (3, 3) in either dimension. :raises TypeError: if any parameter is of an invalid type. """ min_rows, min_cols = self.MIN_SIZE try: # paranoid checking for CLI. 
(operator.index ensures the parameter can be treated as an int) if operator.index(rows) < min_rows or operator.index(columns) < min_cols: raise ValueError('{} minimum size is {}.'.format(type(self).__name__, self.MIN_SIZE)) except TypeError as ex: raise TypeError('{} invalid parameter type'.format(type(self).__name__)) from ex self._size = rows, columns # < StabilizerCode interface methods > @property @functools.lru_cache() def n_k_d(self): """See :meth:`qecsim.model.StabilizerCode.n_k_d`""" # n = r*c, k = 1, d = min(r, c) rows, cols = self.size return rows * cols, 1, min(rows, cols) @property def label(self): """See :meth:`qecsim.model.StabilizerCode.label`""" return 'Rotated planar {}x{}'.format(*self.size) @property @functools.lru_cache() def stabilizers(self): """See :meth:`qecsim.model.StabilizerCode.stabilizers`""" return np.array([self.new_pauli().plaquette(i).to_bsf() for i in self._plaquette_indices]) @property @functools.lru_cache() def logical_xs(self): """See :meth:`qecsim.model.StabilizerCode.logical_xs`""" return np.array([self.new_pauli().logical_x().to_bsf()]) @property @functools.lru_cache() def logical_zs(self): """See :meth:`qecsim.model.StabilizerCode.logical_zs`""" return np.array([self.new_pauli().logical_z().to_bsf()]) # </ StabilizerCode interface methods > @property def size(self): """ Size of the lattice in format (rows, columns), e.g. (5, 5). :rtype: 2-tuple of int """ return self._size @classmethod def is_x_plaquette(cls, index): """ Return True if the plaquette index specifies an X-type plaquette, irrespective of lattice bounds. :param index: Index in the format (x, y). :type index: 2-tuple of int :return: If the index specifies an X-type plaquette. :rtype: bool """ x, y = index return (x - y) % 2 == 1 @classmethod def is_z_plaquette(cls, index): """ Return True if the plaquette index specifies an Z-type plaquette, irrespective of lattice bounds. :param index: Index in the format (x, y). :type index: 2-tuple of int :return: If the index specifies an Z-type plaquette. :rtype: bool """ return not cls.is_x_plaquette(index) @property def site_bounds(self): """ Maximum x and y value that an index coordinate can take. :rtype: 2-tuple of int """ # max_row, max_col rows, cols = self.size return cols - 1, rows - 1 # max_x, max_y def is_in_site_bounds(self, index): """ Return True if the site index is within lattice bounds inclusive. :param index: Index in the format (x, y). :type index: 2-tuple of int :return: If the index is within lattice bounds inclusive. :rtype: bool """ x, y = index max_site_x, max_site_y = self.site_bounds return 0 <= x <= max_site_x and 0 <= y <= max_site_y @functools.lru_cache(maxsize=2 ** 14) # O(n) per code, so for 101x101 code def is_in_plaquette_bounds(self, index): """ Return True if the plaquette index is within lattice bounds inclusive. :param index: Index in the format (x, y). :type index: 2-tuple of int :return: If the index is within lattice bounds inclusive. :rtype: bool """ x, y = index max_site_x, max_site_y = self.site_bounds # derive min and max x bounds allowing for boundary plaquettes min_x = -1 if y % 2 == 0 else 0 if max_site_x % 2 == 0: # even max_site_x (i.e. odd number of columns) max_x = max_site_x - 1 if y % 2 == 0 else max_site_x else: max_x = max_site_x if y % 2 == 0 else max_site_x - 1 # derive min and max y bounds allowing for boundary plaquettes min_y = 0 if x % 2 == 0 else -1 if max_site_y % 2 == 0: # even max_site_y (i.e. 
odd number of rows) max_y = max_site_y if x % 2 == 0 else max_site_y - 1 else: # odd max_site_y (i.e. even number of rows) max_y = max_site_y - 1 if x % 2 == 0 else max_site_y # evaluate in bounds return min_x <= x <= max_x and min_y <= y <= max_y def is_virtual_plaquette(self, index): """ Return True if the plaquette index specifies a virtual plaquette (i.e. index is on the boundary but not within lattice bounds). :param index: Index in the format (x, y). :type index: 2-tuple of int :return: If the index specifies a virtual plaquette. :rtype: bool """ x, y = index max_site_x, max_site_y = self.site_bounds # index is on boundary but not within lattice bounds. return (x == -1 or x == max_site_x or y == -1 or y == max_site_y) and not self.is_in_plaquette_bounds(index) @property @functools.lru_cache() def _plaquette_indices(self): """ Return a list of the plaquette indices of the lattice. Notes: * Each index is in the format (x, y). * Indices are in order of increasing type, y, x. (Z-type first) :return: List of indices in the format (x, y). :rtype: list of 2-tuple of int """ max_site_x, max_site_y = self.site_bounds z_plaquette_indices, x_plaquette_indices = [], [] for y in range(-1, max_site_y + 2): for x in range(-1, max_site_x + 2): index = x, y if self.is_in_plaquette_bounds(index): if self.is_z_plaquette(index): z_plaquette_indices.append(index) else: x_plaquette_indices.append(index) return list(itertools.chain(z_plaquette_indices, x_plaquette_indices)) def syndrome_to_plaquette_indices(self, syndrome): """ Returns the indices of the plaquettes associated with the non-commuting stabilizers identified by the syndrome. :param syndrome: Binary vector identifying commuting and non-commuting stabilizers by 0 and 1 respectively. :type syndrome: numpy.array (1d) :return: Set of plaquette indices. :rtype: set of 2-tuple of int """ return set(tuple(index) for index in np.array(self._plaquette_indices)[syndrome.nonzero()]) def __eq__(self, other): if type(other) is type(self): return self._size == other._size return NotImplemented def __hash__(self): return hash(self._size) def __repr__(self): return '{}({!r}, {!r})'.format(type(self).__name__, *self.size) def ascii_art(self, syndrome=None, pauli=None, plaquette_labels=None, site_labels=None): """ Return ASCII art style lattice showing primal lattice lines with syndrome bits and Pauli operators as given. Notes: * Optional plaquette_labels override syndrome. * Optional site_labels override pauli. :param syndrome: Syndrome (optional) as binary vector. :type syndrome: numpy.array (1d) :param pauli: Rotated planar Pauli (optional) :type pauli: RotatedPlanarPauli :param plaquette_labels: Dictionary of plaquette indices as (x, y) to single-character labels (optional). :type plaquette_labels: dict of (int, int) to char :param site_labels: Dictionary of site indices as (x, y) to single-character labels (optional). :type site_labels: dict of (int, int) to char :return: ASCII art style lattice. :rtype: str """ # See https://unicode-table.com/en/blocks/box-drawing/ for box-drawing unicode characters max_site_x, max_site_y = self.site_bounds syndrome_indices = set() if syndrome is None else self.syndrome_to_plaquette_indices(syndrome) pauli = self.new_pauli() if pauli is None else pauli plaquette_labels = {} if plaquette_labels is None else plaquette_labels site_labels = {} if site_labels is None else site_labels # Build row templates # e.g. (where @=plaquette, o=site, .=virtual_plaquette): # # . /-@-\ . /-@-\ . . 
:plaquette_row_top_even # o---o---o---o---o-\ :site_row_top_even # . |#@#| @ |#@#| @ |#@ :plaquette_row_odd # /-o---o---o---o---o-/ :site_row_odd # @#| @ |#@#| @ |#@#| . :plaquette_row_even # \-o---o---o---o---o-\ :t_site_row_even # . |#@#| @ |#@#| @ |#@ :plaquette_row_odd # /-o---o---o---o---o-/ :site_row_odd # @#| @ |#@#| @ |#@#| . :plaquette_row_even # \-o---o---o---o---o :site_row_bottom # . . \-@-/ . \-@-/ . :plaquette_row_bottom # # e.g (if top row odd): # # . . /-@-\ . /-@-\ . :plaquette_row_top_odd # /-o---o---o---o---o :site_row_top_odd # # Common chars c_dot = chr(0x00B7) c_dash = chr(0x2500) c_bar = chr(0x2502) c_angle_nw = chr(0x250C) c_angle_ne = chr(0x2510) c_angle_sw = chr(0x2514) c_angle_se = chr(0x2518) c_shade = chr(0x2591) # Common char sequences cs_pn = c_angle_nw + c_dash + '{}' + c_dash + c_angle_ne # '/-{}-\' cs_pnw = c_angle_nw + c_dash # '/-' cs_pw = '{}' + c_shade # ' #' cs_psw = c_angle_sw + c_dash # '\-' cs_pne = c_dash + c_angle_ne # '-\' cs_pe = c_shade + '{}' # '# ' cs_pse = c_dash + c_angle_se # '-/' cs_ps = c_angle_sw + c_dash + '{}' + c_dash + c_angle_se # '\-{}-/' cs_pbulkx = c_bar + c_shade + '{}' + c_shade # '|#{}#' cs_pbulkz = c_bar + ' {} ' # '| {} ' cs_sbulk = '{}' + c_dash * 3 # '{}---' # booleans to control placement of boundary plaquettes odd_rows = max_site_y % 2 == 0 odd_cols = max_site_x % 2 == 0 if odd_rows: # . /-@-\ . /-@-\ . . t_plaquette_row_top = ('{} ' + cs_pn + ' ') * ((max_site_x + 1) // 2) + ('{} {}' if odd_cols else '{}') # o---o---o---o---o-\ t_site_row_top = ' ' + cs_sbulk * max_site_x + '{}' + (cs_pne if odd_cols else ' ') else: # . . /-@-\ . /-@-\ . t_plaquette_row_top = '{} {}' + (' ' + cs_pn + ' {}') * (max_site_x // 2) + ('' if odd_cols else ' {}') # /-o---o---o---o---o t_site_row_top = cs_pnw + cs_sbulk * max_site_x + '{}' + (cs_pne if not odd_cols else ' ') # |#@#| @ |#@#| @ |#@ t_plaquette_row_odd = ('{} ' + ''.join(([cs_pbulkx, cs_pbulkz] * max_site_x)[:max_site_x]) + c_bar + (cs_pe if odd_cols else ' {}')) # /-o---o---o---o---o-/ t_site_row_odd = cs_pnw + cs_sbulk * max_site_x + '{}' + (cs_pse if odd_cols else cs_pne) # @#| @ |#@#| @ |#@#| . t_plaquette_row_even = (cs_pw + ''.join(([cs_pbulkz, cs_pbulkx] * max_site_x)[:max_site_x]) + c_bar + (cs_pe if not odd_cols else ' {}')) # \-o---o---o---o---o-\ t_site_row_even = cs_psw + cs_sbulk * max_site_x + '{}' + (cs_pne if odd_cols else cs_pse) # \-o---o---o---o---o t_site_row_bottom = cs_psw + cs_sbulk * max_site_x + '{}' + (cs_pse if not odd_cols else ' ') # . . \-@-/ . \-@-/ . 
t_plaquette_row_bottom = '{} {}' + (' ' + cs_ps + ' {}') * (max_site_x // 2) + ('' if odd_cols else ' {}') # Parameter extraction functions def _site_parameters(y): indices = [i for i in ((x, y) for x in range(max_site_x + 1))] parameters = [] for i in indices: if i in site_labels: parameters.append(site_labels[i]) else: op = pauli.operator(i) parameters.append(c_dot if op == 'I' else op) return parameters def _plaquette_parameters(y): indices = [i for i in ((x, y) for x in range(-1, max_site_x + 1))] parameters = [] for i in indices: is_z_plaquette = self.is_z_plaquette(i) is_virtual_plaquette = self.is_virtual_plaquette(i) if is_virtual_plaquette: parameters.append(plaquette_labels.get(i, ' ')) elif i in plaquette_labels: parameters.append(plaquette_labels[i]) elif i in syndrome_indices: parameters.append('Z' if is_z_plaquette else 'X') elif i[0] == -1 or i[0] == max_site_x: parameters.append(c_bar) elif i[1] == -1 or i[1] == max_site_y: parameters.append(c_dash) else: parameters.append(' ' if is_z_plaquette else c_shade) return parameters # Append templates to text with parameters text = [] # top rows text.append(t_plaquette_row_top.format(*_plaquette_parameters(max_site_y))) text.append(t_site_row_top.format(*_site_parameters(max_site_y))) # middle rows for y in range(max_site_y - 1, 0, -1): if y % 2 == 0: text.append(t_plaquette_row_even.format(*_plaquette_parameters(y))) text.append(t_site_row_even.format(*_site_parameters(y))) else: text.append(t_plaquette_row_odd.format(*_plaquette_parameters(y))) text.append(t_site_row_odd.format(*_site_parameters(y))) # bottom rows text.append(t_plaquette_row_even.format(*_plaquette_parameters(0))) text.append(t_site_row_bottom.format(*_site_parameters(0))) text.append(t_plaquette_row_bottom.format(*_plaquette_parameters(-1))) return '\n'.join(text) def new_pauli(self, bsf=None): """ Convenience constructor of planar Pauli for this code. Notes: * For performance reasons, the new Pauli is a view of the given bsf. Modifying one will modify the other. :param bsf: Binary symplectic representation of Pauli. (Optional. Defaults to identity.) :type bsf: numpy.array (1d) :return: Rotated planar Pauli :rtype: RotatedPlanarPauli """ return RotatedPlanarPauli(self, bsf)
[ "itertools.chain", "qecsim.model.cli_description", "operator.index", "numpy.array", "functools.lru_cache", "qecsim.models.rotatedplanar.RotatedPlanarPauli" ]
[((190, 254), 'qecsim.model.cli_description', 'cli_description', (['"""Rotated planar (rows INT >= 3, cols INT >= 3)"""'], {}), "('Rotated planar (rows INT >= 3, cols INT >= 3)')\n", (205, 254), False, 'from qecsim.model import StabilizerCode, cli_description\n'), ((3425, 3446), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (3444, 3446), False, 'import functools\n'), ((3819, 3840), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (3838, 3840), False, 'import functools\n'), ((4053, 4074), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4072, 4074), False, 'import functools\n'), ((4251, 4272), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4270, 4272), False, 'import functools\n'), ((6185, 6221), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(2 ** 14)'}), '(maxsize=2 ** 14)\n', (6204, 6221), False, 'import functools\n'), ((8104, 8125), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (8123, 8125), False, 'import functools\n'), ((17584, 17613), 'qecsim.models.rotatedplanar.RotatedPlanarPauli', 'RotatedPlanarPauli', (['self', 'bsf'], {}), '(self, bsf)\n', (17602, 17613), False, 'from qecsim.models.rotatedplanar import RotatedPlanarPauli\n'), ((8974, 9031), 'itertools.chain', 'itertools.chain', (['z_plaquette_indices', 'x_plaquette_indices'], {}), '(z_plaquette_indices, x_plaquette_indices)\n', (8989, 9031), False, 'import itertools\n'), ((3028, 3048), 'operator.index', 'operator.index', (['rows'], {}), '(rows)\n', (3042, 3048), False, 'import operator\n'), ((3063, 3086), 'operator.index', 'operator.index', (['columns'], {}), '(columns)\n', (3077, 3086), False, 'import operator\n'), ((9517, 9550), 'numpy.array', 'np.array', (['self._plaquette_indices'], {}), '(self._plaquette_indices)\n', (9525, 9550), True, 'import numpy as np\n')]
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T

import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import lrn

fX = theano.config.floatX


def ground_truth_normalizer(bc01, k, n, alpha, beta):
    """
    This code is adapted from pylearn2.
    https://github.com/lisa-lab/pylearn2/blob/master/LICENSE.txt
    """

    def ground_truth_normalize_row(row, k, n, alpha, beta):
        assert row.ndim == 1
        out = np.zeros(row.shape)
        for i in range(row.shape[0]):
            s = k
            tot = 0
            for j in range(max(0, i - n // 2),
                           min(row.shape[0], i + n // 2 + 1)):
                tot += 1
                sq = row[j] ** 2.
                assert sq > 0.
                assert s >= k
                assert alpha > 0.
                s += alpha * sq
                assert s >= k
            assert tot <= n
            assert s >= k
            s = s ** beta
            out[i] = row[i] / s
        return out

    c01b = bc01.transpose(1, 2, 3, 0)
    out = np.zeros(c01b.shape)
    for r in range(out.shape[1]):
        for c in range(out.shape[2]):
            for x in range(out.shape[3]):
                out[:, r, c, x] = ground_truth_normalize_row(
                    row=c01b[:, r, c, x], k=k, n=n, alpha=alpha, beta=beta)
    out_bc01 = out.transpose(3, 0, 1, 2)
    return out_bc01


def _test_localresponse_normalization_fn(fn, shape=(3, 4, 5, 6), **kwargs):
    vw = treeano.VariableWrapper("foo", variable=T.tensor4(), shape=shape)
    new_kwargs = dict(
        # use a big value of alpha so mistakes involving alpha show up strong
        alpha=1.5,
        k=2,
        beta=0.75,
        n=5,
    )
    new_kwargs.update(kwargs)
    fn = theano.function([vw.variable], [fn(vw, **new_kwargs)])
    x = np.random.randn(*shape).astype(fX)
    res, = fn(x)
    ans = ground_truth_normalizer(x, **new_kwargs)
    np.testing.assert_allclose(ans, res, rtol=1e-5)


def test_local_response_normalization_2d_v1():
    _test_localresponse_normalization_fn(
        lrn.local_response_normalization_2d_v1)


def test_local_response_normalization_2d_v2():
    _test_localresponse_normalization_fn(
        lrn.local_response_normalization_2d_v2)


def test_local_response_normalization_2d_pool():
    _test_localresponse_normalization_fn(
        lrn.local_response_normalization_2d_pool)


def test_local_response_normalization_pool():
    _test_localresponse_normalization_fn(
        lrn.local_response_normalization_pool)


def test_local_response_normalization_2d_node_shape():
    shape = (3, 4, 5, 6)
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=shape),
         lrn.LocalResponseNormalization2DNode("lrn")]
    ).network()
    fn = network.function(["i"], ["s"])
    x = np.random.randn(*shape).astype(fX)
    res = fn(x)[0].shape
    np.testing.assert_equal(shape, res)


def test_local_response_normalization_node_shape():
    for ndim in [2, 3, 4, 5, 6]:
        shape = (3,) * ndim
        network = tn.SequentialNode(
            "s",
            [tn.InputNode("i", shape=shape),
             lrn.LocalResponseNormalizationNode("lrn")]
        ).network()
        fn = network.function(["i"], ["s"])
        x = np.random.randn(*shape).astype(fX)
        res = fn(x)[0].shape
        np.testing.assert_equal(shape, res)
[ "treeano.nodes.InputNode", "numpy.testing.assert_equal", "numpy.testing.assert_allclose", "numpy.zeros", "treeano.sandbox.nodes.lrn.LocalResponseNormalizationNode", "treeano.sandbox.nodes.lrn.LocalResponseNormalization2DNode", "theano.tensor.tensor4", "numpy.random.randn" ]
[((1075, 1095), 'numpy.zeros', 'np.zeros', (['c01b.shape'], {}), '(c01b.shape)\n', (1083, 1095), True, 'import numpy as np\n'), ((1963, 2011), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ans', 'res'], {'rtol': '(1e-05)'}), '(ans, res, rtol=1e-05)\n', (1989, 2011), True, 'import numpy as np\n'), ((2923, 2958), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['shape', 'res'], {}), '(shape, res)\n', (2946, 2958), True, 'import numpy as np\n'), ((473, 492), 'numpy.zeros', 'np.zeros', (['row.shape'], {}), '(row.shape)\n', (481, 492), True, 'import numpy as np\n'), ((3377, 3412), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['shape', 'res'], {}), '(shape, res)\n', (3400, 3412), True, 'import numpy as np\n'), ((1557, 1568), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (1566, 1568), True, 'import theano.tensor as T\n'), ((1856, 1879), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1871, 1879), True, 'import numpy as np\n'), ((2859, 2882), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2874, 2882), True, 'import numpy as np\n'), ((3305, 3328), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3320, 3328), True, 'import numpy as np\n'), ((2709, 2739), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': 'shape'}), "('i', shape=shape)\n", (2721, 2739), True, 'import treeano.nodes as tn\n'), ((2750, 2793), 'treeano.sandbox.nodes.lrn.LocalResponseNormalization2DNode', 'lrn.LocalResponseNormalization2DNode', (['"""lrn"""'], {}), "('lrn')\n", (2786, 2793), False, 'from treeano.sandbox.nodes import lrn\n'), ((3141, 3171), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': 'shape'}), "('i', shape=shape)\n", (3153, 3171), True, 'import treeano.nodes as tn\n'), ((3186, 3227), 'treeano.sandbox.nodes.lrn.LocalResponseNormalizationNode', 'lrn.LocalResponseNormalizationNode', (['"""lrn"""'], {}), "('lrn')\n", (3220, 3227), False, 'from treeano.sandbox.nodes import lrn\n')]
""" Script to send prediction request. Usage: python predict.py --url=YOUR_KF_HOST/models/coco --input_image=YOUR_LOCAL_IMAGE --output_image=OUTPUT_IMAGE_NAME. This will save the prediction result as OUTPUT_IMAGE_NAME. The output image is the input image with the detected bounding boxes. """ import argparse import json import requests import numpy as np from PIL import Image import visualization_utils as vis_util WIDTH = 1024 HEIGHT = 768 def main(): parser = argparse.ArgumentParser() parser.add_argument("--url", help='The url to send the request') parser.add_argument("--input_image", default='image1.jpg') parser.add_argument("--output_image", default='output.jpg') args = parser.parse_args() img = Image.open(args.input_image) img = img.resize((WIDTH, HEIGHT), Image.ANTIALIAS) img_np = np.array(img) res = requests.post( args.url, data=json.dumps({"instances": [{"inputs": img_np.tolist()}]})) if res.status_code != 200: print('Failed: {}'.format(res.text)) return output_dict = json.loads(res.text).get('predictions')[0] vis_util.visualize_boxes_and_labels_on_image_array( img_np, np.array(output_dict['detection_boxes']), map(int, output_dict['detection_classes']), output_dict['detection_scores'], {}, instance_masks=output_dict.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) output_image = Image.fromarray(img_np) output_image.save(args.output_image) if __name__ == '__main__': main()
[ "PIL.Image.fromarray", "PIL.Image.open", "json.loads", "argparse.ArgumentParser", "numpy.array" ]
[((477, 502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (500, 502), False, 'import argparse\n'), ((731, 759), 'PIL.Image.open', 'Image.open', (['args.input_image'], {}), '(args.input_image)\n', (741, 759), False, 'from PIL import Image\n'), ((824, 837), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (832, 837), True, 'import numpy as np\n'), ((1422, 1445), 'PIL.Image.fromarray', 'Image.fromarray', (['img_np'], {}), '(img_np)\n', (1437, 1445), False, 'from PIL import Image\n'), ((1155, 1195), 'numpy.array', 'np.array', (["output_dict['detection_boxes']"], {}), "(output_dict['detection_boxes'])\n", (1163, 1195), True, 'import numpy as np\n'), ((1041, 1061), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1051, 1061), False, 'import json\n')]
""" Estimate luminosity function in COSMOS from interferometric follow-up of Miettinen+ 2014, Younger+ 2007, and Younger+2009. """ import numpy import matplotlib.pyplot as plt from pylab import savefig from astropy.table import Table import matplotlib def MonteCarloCounts(fluxes, errors): hist890, bin_edges = numpy.histogram(fluxes) nbins = bin_edges.size - 1 nsource = fluxes.size nsim = 1000 obsPDF = numpy.zeros([nsource, nsim]) for i in range(nsource): imean = fluxes[i] irms = errors[i] obsPDF[i, :] = numpy.random.normal(loc=imean, scale=irms, size=nsim) histPDF = numpy.zeros([nbins, nsim]) for isim in range(nsim): hist, bedge = numpy.histogram(obsPDF[:, isim], bins=bin_edges) histPDF[:, isim] = hist histmean = numpy.mean(histPDF, axis=1) histrms = numpy.std(histPDF, axis=1) return histmean, histrms, bin_edges S_1100 = [10.7, 9.0, 7.6, 6.8, 7.6, 7.9, 8.3, 5.5, 5.8, 4.7, 4.7, 4.5, 4.4, 4.3, 4.3, 4.2] S_1100 = numpy.array(S_1100) S_890 = [15.6, 12.4, 8.7, 14.4, 9.3, 8.6, 12.0, 19.7, 9.0, 5.3, 14.4, 13.5, 8.2, 5.0, 3.9, 4.4] e_S_890 = [1.1, 1.0, 1.5, 1.9, 1.3, 1.3, 1.5, 1.8, 2.2, 1.0, 2.9, 1.8, 1.8, 1.0, 1.0, 1.0] S_890 = numpy.array(S_890) e_S_890 = numpy.array(e_S_890) * 1.2 plt.clf() plt.plot(S_1100, S_890/S_1100, 'o') # This plot illustrates that the typical correction factor from total 1.1mm # flux density to total 890um flux density is ~1.5 Oskari1300 = [2.07, 2.15, 1.58, 1.53, 1.78, 1.04, 4.82, 5.72, 1.85, 3.37, 2.19, 1.27, 1.82, 0.99, 1.41, 1.79, 1.72, 2.85, 0.98, 0.90, 3.36, 2.38, 2.45, 9.01, 1.53] e_Oskari1300 = [0.62, 0.63, 0.43, 0.46, 0.54, 0.36, 1.33, 1.85, 0.49, 1.03, 0.83, 0.40, 0.59, 0.29, 0.42, 0.53, 0.53, 0.78, 0.36, 0.28, 0.97, 0.77, 0.67, 2.39, 0.45] Oskari890 = numpy.array(Oskari1300) * 2.5 e_Oskari890 = numpy.array(e_Oskari1300) * 2.5 * 1.5 cosmos890 = numpy.append(S_890, Oskari890) e_cosmos890 = numpy.append(e_S_890, e_Oskari890) #cosmos890 = S_890 #e_cosmos890 = e_S_890 completeness = [0.28, 0.5, 0.8, 0.9, 0.99, 1.0, 1.0, 1.0, 1.0, 1.0] # AzTEC coverage in COSMOS is 0.15 deg^2, centered on z=0.7 overdensity MCresult = MonteCarloCounts(cosmos890, e_cosmos890) hist890mean = MCresult[0] hist890rms = MCresult[1] bin_edges = MCresult[2] bin_width = bin_edges[1] - bin_edges[0] area_aztec = 0.15 norm890 = hist890mean / area_aztec e_norm890 = hist890rms / area_aztec nbins = norm890.size cum890 = numpy.zeros(nbins) bin_centers = numpy.zeros(nbins) for ibin in range(nbins): bin_centers[ibin] = (bin_edges[ibin] + bin_edges[ibin + 1]) / 2 cum890[ibin] = norm890[ibin:].sum() diff890 = norm890 / bin_width / completeness #/ bin_centers e_diff890 = e_norm890 / bin_width / completeness #/ bin_centers # Barger catalog bargerloc = '../Data/barger_catalog.txt' bargercat = Table.read(bargerloc, format='ascii') bargerfluxes = bargercat['S860'] e_bargerfluxes = bargercat['e_S860'] * 1.2 MCresult = MonteCarloCounts(bargerfluxes, e_bargerfluxes) barger890mean = MCresult[0] barger890rms = MCresult[1] bin_edges = MCresult[2] bin_width = bin_edges[1] - bin_edges[0] area_barger = 0.09 barger890 = barger890mean / area_barger e_barger890 = barger890rms / area_barger diffbarger890 = barger890 / bin_width# / completeness #/ bin_centers e_diffbarger890 = e_barger890 / bin_width# / completeness #/ bin_centers nbins = barger890.size cum890 = numpy.zeros(nbins) barger_bin_centers = numpy.zeros(nbins) for ibin in range(nbins): barger_bin_centers[ibin] = (bin_edges[ibin] + bin_edges[ibin + 1]) / 2 # Smolcic catalog smolcicloc = '../Data/smolcic_catalog.txt' smolciccat = Table.read(smolcicloc, 
format='ascii') smolcicfluxes = smolciccat['S1300'] * 2.5 e_smolcicfluxes = smolciccat['e_S1300'] * 2.5 * 1.5 MCresult = MonteCarloCounts(smolcicfluxes, e_smolcicfluxes) smolcic890mean = MCresult[0] smolcic890rms = MCresult[1] bin_edges = MCresult[2] bin_width = bin_edges[1] - bin_edges[0] area_smolcic = 0.7 / 3.5 smolcic890 = smolcic890mean / area_smolcic e_smolcic890 = smolcic890rms / area_smolcic diffsmolcic890 = smolcic890 / bin_width / completeness #/ bin_centers e_diffsmolcic890 = e_smolcic890 / bin_width / completeness #/ bin_centers nbins = smolcic890.size cum890 = numpy.zeros(nbins) smolcic_bin_centers = numpy.zeros(nbins) for ibin in range(nbins): smolcic_bin_centers[ibin] = (bin_edges[ibin] + bin_edges[ibin + 1]) / 2 # ALESS number counts from Karim et al. 2013 alesscounts = [52.3, 32.3, 24.9, 15.6, 1.6]#, 0.0, 0.0] e_alesscounts = [18.2, 13.6, 7.9, 12.2, 7.2]#, 0.0, 0.0] alessfluxes = [4.8, 5.9, 7.5, 8.8, 9.7]#, 11.0, 14.0] #shadescounts = [2506, 844, 362, 150, 68, 33, 15, 7.4, 3.9, 2.0] shadescounts = [831, 240, 106, 41, 17, 8.8, 3.9, 1.8, 1.0, 0.6] shadescounts = numpy.array(shadescounts) shadesfluxes = numpy.array([2.77, 4.87, 6.90, 8.93, 10.94, 12.95, 14.96, 16.96, 18.96, 20.97]) / 1.5 # Aretxaga luminosity function true_centers = [1.41, 2.44, 3.44, 4.45, 5.45, 6.46, 7.46, 8.46, 9.46, 10.46, 11.46] true_centers = numpy.array(true_centers) true_edges = true_centers - 0.5 true_edges = numpy.append(true_edges, true_centers[-1] + 0.5) true_diffaretxaga = [394, 269, 176, 99.5, 49.9, 22.3, 10.3, 5.83, 4.07, 2.94, 1.87] true_diffaretxaga = numpy.array(true_diffaretxaga) aretxaga = Table.read('../Data/aretxagacatalog.fits') aretxaga_S1100 = aretxaga['S1_1mm'] hist_aretxaga, edge_aretxaga = numpy.histogram(aretxaga_S1100, bins=true_edges) nbins = hist_aretxaga.size #cum890 = numpy.zeros(nbins) aretxaga_centers = numpy.zeros(nbins) for ibin in range(nbins): aretxaga_centers[ibin] = (edge_aretxaga[ibin] + edge_aretxaga[ibin + 1]) / 2 #cum890[ibin] = norm890[ibin:].sum() area_aretxaga = 0.71 normaretxaga = hist_aretxaga / area_aretxaga aretxaga_completeness = [0.5, 0.85, 0.92, 0.95, 0.97, 0.98, 0.99, 1.0, 1.0, 1.0, 1.0] aretxaga_completeness = numpy.array(aretxaga_completeness) diffaretxaga = normaretxaga / aretxaga_completeness #/ aretxaga_centers # set font properties font = {'family' : 'Arial', 'weight' : 'normal', 'size' : 12} matplotlib.rc('font', **font) matplotlib.rcParams['axes.linewidth'] = 1.5 fig = plt.figure(figsize=(5.0, 4.5)) plt.clf() # plot the intrinsic luminosity functions used to make predictions shown in # mag-flux.py Sstar = 7. nstar = 424. alpha = 1.9 Svector = numpy.arange(1e3)/10 dndS1 = nstar / Sstar dndS2 = (Svector / Sstar) ** (-alpha) dndS3 = numpy.exp(-Svector / Sstar) dndS = dndS1 * dndS2 * dndS3 #line1, = plt.plot(Svector, dndS, color='blue', label='Schechter') Sstar = 8. Nstar = 20. beta1 = 2.0 beta2 = 6.9 dndS1 = Nstar * (Svector / Sstar) ** (-beta1) dndS2 = Nstar * (Svector / Sstar) ** (-beta2) dndS = dndS1 high = Svector > Sstar dndS[high] = dndS2[high] #line2 = plt.plot(Svector, dndS, color='black', lw=1.5, label='Karim+ 2013') line2 = plt.plot(Svector, dndS, color='magenta', lw=1.5, label='Karim+ 2013') Sstar = 15. Nstar = 5. 
beta1 = 2.0 beta2 = 6.9 dndS1 = Nstar * (Svector / Sstar) ** (-beta1) dndS2 = Nstar * (Svector / Sstar) ** (-beta2) dndS = dndS1 high = Svector > Sstar dndS[high] = dndS2[high] line3, = plt.plot(Svector, dndS, color='blue', lw=1.5, label=r'PL, $S_\star = 15\,{\rm mJy}$') data1, = plt.plot(bin_centers, diff890, 'o', label='COSMOS', color='black') plt.errorbar(bin_centers, diff890, yerr=e_diff890, fmt='o', ecolor='gray', capsize=0, color='black') data2, = plt.plot(alessfluxes, alesscounts, 'D', label='ALESS', color='pink') plt.errorbar(alessfluxes, alesscounts, yerr=e_alesscounts, fmt='D', ecolor='gray', capsize=0, color='pink') #data3, = plt.plot(barger_bin_centers, diffbarger890, 's', label='Barger', # color='orange') #plt.errorbar(barger_bin_centers, diffbarger890, yerr=e_diffbarger890, # fmt='s', ecolor='gray', capsize=0, color='orange') #data4, = plt.plot(smolcic_bin_centers, diffsmolcic890, 's', label='Smolcic', # color='orange') #plt.errorbar(smolcic_bin_centers, diffsmolcic890, yerr=e_diffsmolcic890, # fmt='s', ecolor='gray', capsize=0, color='orange') #plt.plot(shadesfluxes, shadescounts, 's', label='SHADES') #plt.hist(cosmos890, cumulative=-1) #plt.plot(aretxaga_centers, diffaretxaga, '+', label='Aretxaga+ 2011: Me') #plt.plot(true_centers, true_diffaretxaga, 'x', label='Aretxaga+ 2011: True') #plt.loglog() plt.yscale('log', nonposy='clip') plt.xscale('log', nonposy='clip') plt.minorticks_on() plt.tick_params(width=1.2, which='both') plt.tick_params(length=2, which='minor') plt.tick_params(length=4, which='major') plt.axis([01., 120, .001, 300]) first_legend = plt.legend(loc='lower left', numpoints=1, handletextpad=0.35, borderpad=0.4, labelspacing=0.18, handlelength=1.0) leg = plt.gca().get_legend() ltext = leg.get_texts() #ax = plt.gca().add_artist(first_legend) # Create another legend for the second line. #plt.legend(handles=[line2], loc=4) #plt.setp(ltext, fontsize='medium') plt.subplots_adjust(left=0.15, right=0.98, top=0.97, bottom=0.13, wspace=0.39) plt.ylabel(r'$dN/dS\;{\rm (mJy}^{-1} \, {\rm deg}^{-2})$', fontsize='large') plt.xlabel(r'$S_{870}\;{\rm (mJy)}$', fontsize='large') savefig('../Figures/DifferentialNumberCounts.pdf') import pdb; pdb.set_trace()
[ "matplotlib.pyplot.ylabel", "pylab.savefig", "numpy.array", "matplotlib.rc", "matplotlib.pyplot.errorbar", "numpy.arange", "numpy.mean", "numpy.histogram", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.minorticks_on", "numpy.exp", "matplotlib.pyplot.axis", "matplotlib.pyplot.yscale", "numpy.random.normal", "matplotlib.pyplot.gca", "matplotlib.pyplot.tick_params", "numpy.std", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.legend", "astropy.table.Table.read", "matplotlib.pyplot.clf", "numpy.append", "numpy.zeros", "matplotlib.pyplot.figure", "pdb.set_trace", "matplotlib.pyplot.xscale" ]
[((1029, 1048), 'numpy.array', 'numpy.array', (['S_1100'], {}), '(S_1100)\n', (1040, 1048), False, 'import numpy\n'), ((1261, 1279), 'numpy.array', 'numpy.array', (['S_890'], {}), '(S_890)\n', (1272, 1279), False, 'import numpy\n'), ((1318, 1327), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1325, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1365), 'matplotlib.pyplot.plot', 'plt.plot', (['S_1100', '(S_890 / S_1100)', '"""o"""'], {}), "(S_1100, S_890 / S_1100, 'o')\n", (1336, 1365), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1993), 'numpy.append', 'numpy.append', (['S_890', 'Oskari890'], {}), '(S_890, Oskari890)\n', (1975, 1993), False, 'import numpy\n'), ((2008, 2042), 'numpy.append', 'numpy.append', (['e_S_890', 'e_Oskari890'], {}), '(e_S_890, e_Oskari890)\n', (2020, 2042), False, 'import numpy\n'), ((2514, 2532), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (2525, 2532), False, 'import numpy\n'), ((2547, 2565), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (2558, 2565), False, 'import numpy\n'), ((2896, 2933), 'astropy.table.Table.read', 'Table.read', (['bargerloc'], {'format': '"""ascii"""'}), "(bargerloc, format='ascii')\n", (2906, 2933), False, 'from astropy.table import Table\n'), ((3464, 3482), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (3475, 3482), False, 'import numpy\n'), ((3504, 3522), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (3515, 3522), False, 'import numpy\n'), ((3699, 3737), 'astropy.table.Table.read', 'Table.read', (['smolcicloc'], {'format': '"""ascii"""'}), "(smolcicloc, format='ascii')\n", (3709, 3737), False, 'from astropy.table import Table\n'), ((4305, 4323), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (4316, 4323), False, 'import numpy\n'), ((4346, 4364), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (4357, 4364), False, 'import numpy\n'), ((4825, 4850), 'numpy.array', 'numpy.array', (['shadescounts'], {}), '(shadescounts)\n', (4836, 4850), False, 'import numpy\n'), ((5096, 5121), 'numpy.array', 'numpy.array', (['true_centers'], {}), '(true_centers)\n', (5107, 5121), False, 'import numpy\n'), ((5167, 5215), 'numpy.append', 'numpy.append', (['true_edges', '(true_centers[-1] + 0.5)'], {}), '(true_edges, true_centers[-1] + 0.5)\n', (5179, 5215), False, 'import numpy\n'), ((5328, 5358), 'numpy.array', 'numpy.array', (['true_diffaretxaga'], {}), '(true_diffaretxaga)\n', (5339, 5358), False, 'import numpy\n'), ((5371, 5413), 'astropy.table.Table.read', 'Table.read', (['"""../Data/aretxagacatalog.fits"""'], {}), "('../Data/aretxagacatalog.fits')\n", (5381, 5413), False, 'from astropy.table import Table\n'), ((5481, 5529), 'numpy.histogram', 'numpy.histogram', (['aretxaga_S1100'], {'bins': 'true_edges'}), '(aretxaga_S1100, bins=true_edges)\n', (5496, 5529), False, 'import numpy\n'), ((5606, 5624), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (5617, 5624), False, 'import numpy\n'), ((5958, 5992), 'numpy.array', 'numpy.array', (['aretxaga_completeness'], {}), '(aretxaga_completeness)\n', (5969, 5992), False, 'import numpy\n'), ((6168, 6197), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (6181, 6197), False, 'import matplotlib\n'), ((6249, 6279), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.0, 4.5)'}), '(figsize=(5.0, 4.5))\n', (6259, 6279), True, 'import matplotlib.pyplot as plt\n'), ((6281, 6290), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6288, 6290), True, 'import 
matplotlib.pyplot as plt\n'), ((6518, 6545), 'numpy.exp', 'numpy.exp', (['(-Svector / Sstar)'], {}), '(-Svector / Sstar)\n', (6527, 6545), False, 'import numpy\n'), ((6930, 6999), 'matplotlib.pyplot.plot', 'plt.plot', (['Svector', 'dndS'], {'color': '"""magenta"""', 'lw': '(1.5)', 'label': '"""Karim+ 2013"""'}), "(Svector, dndS, color='magenta', lw=1.5, label='Karim+ 2013')\n", (6938, 6999), True, 'import matplotlib.pyplot as plt\n'), ((7211, 7303), 'matplotlib.pyplot.plot', 'plt.plot', (['Svector', 'dndS'], {'color': '"""blue"""', 'lw': '(1.5)', 'label': '"""PL, $S_\\\\star = 15\\\\,{\\\\rm mJy}$"""'}), "(Svector, dndS, color='blue', lw=1.5, label=\n 'PL, $S_\\\\star = 15\\\\,{\\\\rm mJy}$')\n", (7219, 7303), True, 'import matplotlib.pyplot as plt\n'), ((7315, 7381), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_centers', 'diff890', '"""o"""'], {'label': '"""COSMOS"""', 'color': '"""black"""'}), "(bin_centers, diff890, 'o', label='COSMOS', color='black')\n", (7323, 7381), True, 'import matplotlib.pyplot as plt\n'), ((7382, 7486), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['bin_centers', 'diff890'], {'yerr': 'e_diff890', 'fmt': '"""o"""', 'ecolor': '"""gray"""', 'capsize': '(0)', 'color': '"""black"""'}), "(bin_centers, diff890, yerr=e_diff890, fmt='o', ecolor='gray',\n capsize=0, color='black')\n", (7394, 7486), True, 'import matplotlib.pyplot as plt\n'), ((7500, 7568), 'matplotlib.pyplot.plot', 'plt.plot', (['alessfluxes', 'alesscounts', '"""D"""'], {'label': '"""ALESS"""', 'color': '"""pink"""'}), "(alessfluxes, alesscounts, 'D', label='ALESS', color='pink')\n", (7508, 7568), True, 'import matplotlib.pyplot as plt\n'), ((7569, 7681), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['alessfluxes', 'alesscounts'], {'yerr': 'e_alesscounts', 'fmt': '"""D"""', 'ecolor': '"""gray"""', 'capsize': '(0)', 'color': '"""pink"""'}), "(alessfluxes, alesscounts, yerr=e_alesscounts, fmt='D', ecolor=\n 'gray', capsize=0, color='pink')\n", (7581, 7681), True, 'import matplotlib.pyplot as plt\n'), ((8417, 8450), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'nonposy': '"""clip"""'}), "('log', nonposy='clip')\n", (8427, 8450), True, 'import matplotlib.pyplot as plt\n'), ((8451, 8484), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {'nonposy': '"""clip"""'}), "('log', nonposy='clip')\n", (8461, 8484), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8505), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (8503, 8505), True, 'import matplotlib.pyplot as plt\n'), ((8506, 8546), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'width': '(1.2)', 'which': '"""both"""'}), "(width=1.2, which='both')\n", (8521, 8546), True, 'import matplotlib.pyplot as plt\n'), ((8547, 8587), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(2)', 'which': '"""minor"""'}), "(length=2, which='minor')\n", (8562, 8587), True, 'import matplotlib.pyplot as plt\n'), ((8588, 8628), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(4)', 'which': '"""major"""'}), "(length=4, which='major')\n", (8603, 8628), True, 'import matplotlib.pyplot as plt\n'), ((8630, 8662), 'matplotlib.pyplot.axis', 'plt.axis', (['[1.0, 120, 0.001, 300]'], {}), '([1.0, 120, 0.001, 300])\n', (8638, 8662), True, 'import matplotlib.pyplot as plt\n'), ((8677, 8794), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""', 'numpoints': '(1)', 'handletextpad': '(0.35)', 'borderpad': '(0.4)', 'labelspacing': '(0.18)', 'handlelength': '(1.0)'}), 
"(loc='lower left', numpoints=1, handletextpad=0.35, borderpad=0.4,\n labelspacing=0.18, handlelength=1.0)\n", (8687, 8794), True, 'import matplotlib.pyplot as plt\n'), ((9015, 9093), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'right': '(0.98)', 'top': '(0.97)', 'bottom': '(0.13)', 'wspace': '(0.39)'}), '(left=0.15, right=0.98, top=0.97, bottom=0.13, wspace=0.39)\n', (9034, 9093), True, 'import matplotlib.pyplot as plt\n'), ((9095, 9174), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$dN/dS\\\\;{\\\\rm (mJy}^{-1} \\\\, {\\\\rm deg}^{-2})$"""'], {'fontsize': '"""large"""'}), "('$dN/dS\\\\;{\\\\rm (mJy}^{-1} \\\\, {\\\\rm deg}^{-2})$', fontsize='large')\n", (9105, 9174), True, 'import matplotlib.pyplot as plt\n'), ((9172, 9228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$S_{870}\\\\;{\\\\rm (mJy)}$"""'], {'fontsize': '"""large"""'}), "('$S_{870}\\\\;{\\\\rm (mJy)}$', fontsize='large')\n", (9182, 9228), True, 'import matplotlib.pyplot as plt\n'), ((9228, 9278), 'pylab.savefig', 'savefig', (['"""../Figures/DifferentialNumberCounts.pdf"""'], {}), "('../Figures/DifferentialNumberCounts.pdf')\n", (9235, 9278), False, 'from pylab import savefig\n'), ((9291, 9306), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (9304, 9306), False, 'import pdb\n'), ((321, 344), 'numpy.histogram', 'numpy.histogram', (['fluxes'], {}), '(fluxes)\n', (336, 344), False, 'import numpy\n'), ((432, 460), 'numpy.zeros', 'numpy.zeros', (['[nsource, nsim]'], {}), '([nsource, nsim])\n', (443, 460), False, 'import numpy\n'), ((633, 659), 'numpy.zeros', 'numpy.zeros', (['[nbins, nsim]'], {}), '([nbins, nsim])\n', (644, 659), False, 'import numpy\n'), ((810, 837), 'numpy.mean', 'numpy.mean', (['histPDF'], {'axis': '(1)'}), '(histPDF, axis=1)\n', (820, 837), False, 'import numpy\n'), ((852, 878), 'numpy.std', 'numpy.std', (['histPDF'], {'axis': '(1)'}), '(histPDF, axis=1)\n', (861, 878), False, 'import numpy\n'), ((1290, 1310), 'numpy.array', 'numpy.array', (['e_S_890'], {}), '(e_S_890)\n', (1301, 1310), False, 'import numpy\n'), ((1868, 1891), 'numpy.array', 'numpy.array', (['Oskari1300'], {}), '(Oskari1300)\n', (1879, 1891), False, 'import numpy\n'), ((4866, 4944), 'numpy.array', 'numpy.array', (['[2.77, 4.87, 6.9, 8.93, 10.94, 12.95, 14.96, 16.96, 18.96, 20.97]'], {}), '([2.77, 4.87, 6.9, 8.93, 10.94, 12.95, 14.96, 16.96, 18.96, 20.97])\n', (4877, 4944), False, 'import numpy\n'), ((6429, 6449), 'numpy.arange', 'numpy.arange', (['(1000.0)'], {}), '(1000.0)\n', (6441, 6449), False, 'import numpy\n'), ((564, 617), 'numpy.random.normal', 'numpy.random.normal', ([], {'loc': 'imean', 'scale': 'irms', 'size': 'nsim'}), '(loc=imean, scale=irms, size=nsim)\n', (583, 617), False, 'import numpy\n'), ((713, 761), 'numpy.histogram', 'numpy.histogram', (['obsPDF[:, isim]'], {'bins': 'bin_edges'}), '(obsPDF[:, isim], bins=bin_edges)\n', (728, 761), False, 'import numpy\n'), ((1912, 1937), 'numpy.array', 'numpy.array', (['e_Oskari1300'], {}), '(e_Oskari1300)\n', (1923, 1937), False, 'import numpy\n'), ((8806, 8815), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8813, 8815), True, 'import matplotlib.pyplot as plt\n')]
#!/usr/bin/env python

"""
Analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# plotting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator

# Global variables: levels on the spatial and temporal axes
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']


def elasticsearch_query(query_fname, logger):
    """
    Build an ES query and return a default dict with the results
    :return: tweetsByCityAndDate
    """
    # Elasticsearch credentials
    client = Elasticsearch("http://localhost:9200")
    es_logger.setLevel(logging.WARNING)
    index = "twitter"
    # Define a Query
    query = open(query_fname, "r").read()
    result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
    # Append all pages from the scroll search: avoids the 10k limitation of Elasticsearch
    results = avoid10kquerylimitation(result, client, logger)
    # Initiate a dict for each city and append all tweet contents
    tweetsByCityAndDate = defaultdict(list)
    for hits in results:
        # parse Java date : EEE MMM dd HH:mm:ss Z yyyy
        inDate = hits["_source"]["created_at"]
        parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
        try:  # geocoding may be bad
            geocoding = hits["_source"]["rest"]["features"][0]["properties"]
        except:
            continue  # skip this iteration
        if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
            # localities do not necessarily have an associated state
            try:
                cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
                                   str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
                                   str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
            except:  # there is no state in geocoding
                try:
                    logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
                    cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
                                       str("none") + "_" + \
                                       str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
                except:  # there is no city as well : only country
                    # print(json.dumps(hits["_source"], indent=4))
                    try:
                        cityStateCountry = str("none") + "_" + \
                                           str("none") + "_" + \
                                           str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
                    except:
                        cityStateCountry = str("none") + "_" + \
                                           str("none") + "_" + \
                                           str("none")
        try:
            tweetsByCityAndDate[cityStateCountry].append(
                {
                    "tweet": preprocessTweets(hits["_source"]["full_text"]),
                    "created_at": parseDate
                }
            )
        except:
            print(json.dumps(hits["_source"], indent=4))
    # biotexInputBuilder(tweetsByCityAndDate)
    # pprint(tweetsByCityAndDate)
    return tweetsByCityAndDate


def avoid10kquerylimitation(result, client, logger):
    """
    Elasticsearch limits query results to 10 000. To avoid this limit, we need to paginate the results and scroll.
    This method appends all pages from the scroll search.
    :param result: a result of an Elasticsearch query
    :return:
    """
    scroll_size = result['hits']['total']["value"]
    logger.info("Number of elasticsearch scroll: " + str(scroll_size))
    results = []
    # Progress bar
    pbar = tqdm(total=scroll_size)
    while (scroll_size > 0):
        try:
            scroll_id = result['_scroll_id']
            res = client.scroll(scroll_id=scroll_id, scroll='60s')
            results += res['hits']['hits']
            scroll_size = len(res['hits']['hits'])
            pbar.update(scroll_size)
        except:
            pbar.close()
            logger.error("elasticsearch search scroll failed")
            break
    pbar.close()
    return results


def preprocessTweets(text):
    """
    1 - Clean up tweet text, cf. https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1
    2 - Detect language
    3 - remove stopwords ??
    :param text:
    :return: textclean, and the language detected
    """
    ## 1 clean up tweets
    # remove URLs
    textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text)
    textclean = re.sub(r'http\S+', '', textclean)
    # remove usernames
    # textclean = re.sub('@[^\s]+', '', textclean)
    # remove the # in #hashtag
    # textclean = re.sub(r'#([^\s]+)', r'\1', textclean)
    return textclean


def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
    """
    Create a matrix of :
        - line : (city, day)
        - column : terms
        - value of cells : TF (term frequency)
    Help found here :
    http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
    https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
    :param tweetsofcity:
    :param matrixAggDay_fout: file to save
    :param matrixOccurence_fout: file to save
    :return:
    """
    # initiate matrix of tweets aggregated by day
    # col = ['city', 'day', 'tweetsList', 'bow']
    col = ['city', 'day', 'tweetsList']
    matrixAggDay = pd.DataFrame(columns=col)
    cityDayList = []
    logger.info("start full_text concatenation for city & day")
    pbar = tqdm(total=len(tweetsofcity))
    for city in tweetsofcity:
        # create a table with 2 columns : tweet and created_at for a specific city
        matrix = pd.DataFrame(tweetsofcity[city])
        # Aggregate the list of tweets by single day for specific cities
        ## Loop on days for a city
        period = matrix['created_at'].dt.date
        period = period.unique()
        period.sort()
        for day in period:
            # aggregate city and date documents
            document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
            # Bag of Words and preprocess
            # preproccesFullText = preprocessTerms(document)
            tweetsOfDayAndCity = {
                'city': city,
                'day': day,
                'tweetsList': document
            }
            cityDayList.append(city + "_" + str(day))
            try:
                matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True)
            except:
                print("full_text empty after pre-process: "+document)
                continue
        pbar.update(1)
    pbar.close()
    if save_intermediaire_files:
        logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
        matrixAggDay.to_csv(matrixAggDay_fout)
    # Count terms with scikit-learn
    cd = CountVectorizer(
        stop_words='english',
        # preprocessor=sklearn_vectorizer_no_number_preprocessor,
        # min_df=2,  # token present in at least 2 cities : reduces the size of the matrix
        max_features=25000,
        ngram_range=(1, 1),
        token_pattern='[a-zA-Z0-9#@]+',  # remove user names, i.e. terms starting with @, for personal-data reasons
        # strip_accents="ascii"  # remove tokens with special characters (trying to keep only English words)
    )
    cd.fit(matrixAggDay['tweetsList'])
    res = cd.transform(matrixAggDay["tweetsList"])
    countTerms = res.todense()
    # create matrix
    ## get terms :
    # voc = cd.vocabulary_
    # listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
    listOfTerms = cd.get_feature_names()
    ## initiate matrix with counts for each term
    matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
    # save to file
    if save_intermediaire_files:
        logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
        matrixOccurence.to_csv(matrixOccurence_fout)
    return matrixOccurence


def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
    """
    Filter matrix with a list of cities and a period
    :param matrix:
    :param listOfcities:
    :param spatialLevel:
    :param period:
    :param temporalLevel:
    :return: matrix filtered
    """
    if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
        print("wrong level, please double check")
        return 1
    # Extract cities and period
    ## cities
    if listOfcities != 'all':  ### we need to filter
        ### Initiate a numpy array of False
        filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
        for city in listOfcities:
            ### edit filter if index contains the city (for each city of the list)
            filter += matrix.index.str.startswith(str(city) + "_")
        matrix = matrix.loc[filter]
    ## period
    if str(period) != 'all':  ### we need a filter on date
        datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
        for date in period:
            datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
        matrix = matrix.loc[datefilter]
    return matrix


def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
           period='all', temporalLevel='day'):
    """
    Aggregate on the spatial and temporal levels and then compute TF-IDF
    :param matrixOcc: Matrix with TF already computed
    :param listOfcities: filter on these cities
    :param spatialLevel: city / state / country / world
    :param period: Filter on this period
    :param temporalLevel: day / week (month has to be implemented)
    :return:
    """
    matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
                                     spatialLevel='state', period=period)
    # Aggregate by level
    ## Create 4 new columns : city, State, Country and date
    def splitindex(row):
        return row.split("_")

    matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
        zip(*matrixOcc.index.map(splitindex))

    if temporalLevel == 'day':
        ## 
In space if spatialLevel == 'city': # do nothing pass elif spatialLevel == 'state' and temporalLevel == 'day': matrixOcc = matrixOcc.groupby("state").sum() elif spatialLevel == 'country' and temporalLevel == 'day': matrixOcc = matrixOcc.groupby("country").sum() elif temporalLevel == "week": matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime ## in space and time if spatialLevel == 'country': matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum() elif spatialLevel == 'state': matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum() elif spatialLevel == 'city': matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum() # Compute TF-IDF ## compute TF : for each doc, devide count by Sum of all count ### Sum fo all count by row matrixOcc['sumCount'] = matrixOcc.sum(axis=1) ### Devide each cell by these sums listOfTerms = matrixOcc.keys() matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0) ## Compute IDF : create a vector of length = nb of termes with IDF value idf = pd.Series(index=matrixOcc.keys(), dtype=float) ### N : nb of doucments <=> nb of rows : N = matrixOcc.shape[0] ### DFt : nb of document that contains the term DFt = matrixOcc.astype(bool).sum(axis=0) # Tip : convert all value in boolean. float O,O will be False, other True #### Not a Number when value 0 because otherwise log is infinite DFt.replace(0, np.nan, inplace=True) ### compute log(N/DFt) idf = np.log10(N / (DFt)) # idf = np.log10( N / (DFt * 10)) ## compute TF-IDF matrixTFIDF = matrixOcc * idf # matrixTFIDF = matrixOcc * idf * idf ## remove terms if for all documents value are Nan matrixTFIDF.dropna(axis=1, how='all', inplace=True) # Save file matrixTFIDF.to_csv(matrixHTFIDF_fname) # Export N biggest TF-IDF score: top_n = 500 extractBiggest = pd.DataFrame(index=matrixTFIDF.index, columns=range(0, top_n)) for row in matrixTFIDF.index: try: row_without_zero = matrixTFIDF.loc[row]# we remove term with a score = 0 row_without_zero = row_without_zero[ row_without_zero !=0 ] try: extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys() except: extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys() except: logger.debug("H-TFIDF: city "+str(matrixTFIDF.loc[row].name)+ "not enough terms") extractBiggest.to_csv(biggestHTFIDFscore_fname+".old.csv") # Transpose this table in order to share the same structure with TF-IDF classifical biggest score : hbt = pd.DataFrame() extractBiggest = extractBiggest.reset_index() for index, row in extractBiggest.iterrows(): hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"]) hbtrow[spatialLevel] = row[spatialLevel] hbtrow["date"] = row["date"] hbt = hbt.append(hbtrow, ignore_index=True) hbt.to_csv(biggestHTFIDFscore_fname) def TFIDF_TF_with_corpus_state(elastic_query_fname, logger, save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved="./", spatial_hiearchy="country", temporal_period='all', listOfCities='all'): """ Compute TFIDF and TF from an elastic query file 1 doc = 1 tweet Corpus = by hiearchy level, i.e. 
: state or country :param elastic_query_fname: filename and path of the elastic query :param logger: logger of the main program :param nb_biggest_terms: How many biggest term are to keep :param spatial_hiearchy: define the size of the corpus : state or country :param temporal_period: :param listOfCities: If you want to filter out some cities, you can :return: """ # tfidfStartDate = date(2020, 1, 23) # tfidfEndDate = date(2020, 1, 30) # temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate) # listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff'] # listOfState = ["England", "Scotland", "Northern Ireland", "Wales"] tweets = elasticsearch_query(elastic_query_fname, logger) if listOfCities == 'all': listOfCities = [] listOfStates = [] listOfCountry = [] for triple in tweets: splitted = triple.split("_") listOfCities.append(splitted[0]) listOfStates.append(splitted[1]) listOfCountry.append(splitted[2]) listOfCities = list(set(listOfCities)) listOfStates = list(set(listOfStates)) listOfCountry = list(set(listOfCountry)) # reorganie tweets (dict : tweets by cities) into dataframe (city and date) matrixAllTweets = pd.DataFrame() for tweetByCity in tweets.keys(): # Filter cities : city = str(tweetByCity).split("_")[0] state = str(tweetByCity).split("_")[1] country = str(tweetByCity).split("_")[2] if city in listOfCities: matrix = pd.DataFrame(tweets[tweetByCity]) matrix['city'] = city matrix['state'] = state matrix['country'] = country matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True) # Split datetime into date and time matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']] matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']] # Filter by a period if temporal_period != "all": mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max())) matrixAllTweets = matrixAllTweets.loc[mask] # Compute TF-IDF and TF by state extractBiggestTF_allstates = pd.DataFrame() extractBiggestTFIDF_allstates = pd.DataFrame() if spatial_hiearchy == "country": listOfLocalities = listOfCountry elif spatial_hiearchy == "state": listOfLocalities = listOfStates elif spatial_hiearchy == "city": listOfLocalities = listOfCities for locality in listOfLocalities: matrix_by_locality = matrixAllTweets[matrixAllTweets[spatial_hiearchy] == locality] vectorizer = TfidfVectorizer( stop_words='english', min_df=0.001, # max_features=50000, ngram_range=(1, 1), token_pattern='[<KEY>', ) # logger.info("Compute TF-IDF on corpus = "+spatial_hiearchy) try: vectors = vectorizer.fit_transform(matrix_by_locality['tweet']) feature_names = vectorizer.get_feature_names() dense = vectors.todense() denselist = dense.tolist() except: logger.info("Impossible to compute TF-IDF on: "+locality) continue ## matrixTFIDF TFIDFClassical = pd.DataFrame(denselist, columns=feature_names) locality_format = locality.replace("/", "_") locality_format = locality_format.replace(" ", "_") if save_intermediaire_files: logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv") TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv") ## Extract N TOP ranking score extractBiggest = TFIDFClassical.max().nlargest(nb_biggest_terms) extractBiggest = extractBiggest.to_frame() extractBiggest = extractBiggest.reset_index() extractBiggest.columns = ['terms', 'score'] extractBiggest[spatial_hiearchy] = locality extractBiggestTFIDF_allstates = extractBiggestTFIDF_allstates.append(extractBiggest, 
ignore_index=True) """ # Compute TF tf = CountVectorizer( stop_words='english', min_df=2, ngram_range=(1,2), token_pattern='[a-zA-Z0-9@#]+', ) try: tf.fit(matrix_by_locality['tweet']) tf_res = tf.transform(matrix_by_locality['tweet']) listOfTermsTF = tf.get_feature_names() countTerms = tf_res.todense() except:# locality does not have enough different term logger.info("Impossible to compute TF on: "+locality) continue ## matrixTF TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF) ### save in file logger.info("saving TF File: "+path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv") TFClassical.to_csv(path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv") ## Extract N TOP ranking score extractBiggestTF = TFClassical.max().nlargest(nb_biggest_terms) extractBiggestTF = extractBiggestTF.to_frame() extractBiggestTF = extractBiggestTF.reset_index() extractBiggestTF.columns = ['terms', 'score'] extractBiggestTF[spatial_hiearchy] = locality extractBiggestTF_allstates = extractBiggestTF_allstates.append(extractBiggestTF, ignore_index=True) """ logger.info("saving TF and TF-IDF top"+str(nb_biggest_terms)+" biggest score") extractBiggestTF_allstates.to_csv(path_for_filesaved+"/TF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv") extractBiggestTFIDF_allstates.to_csv(path_for_filesaved+"/TF-IDF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv") def TFIDF_TF_on_whole_corpus(elastic_query_fname, logger, save_intermediaire_files, path_for_filesaved="./", temporal_period='all', listOfCities='all'): """ Compute TFIDF and TF from an elastic query file 1 doc = 1 tweet Corpus = on the whole elastic query (with filter out cities that are not in listOfCities :param elastic_query_fname: filename and path of the elastic query :param logger: logger of the main program :param nb_biggest_terms: How many biggest term are to keep. 
It has to be greater than H-TF-IDF or TF-IDF classical on corpus = localité because a lot of temrs have 1.0 has the score :param spatial_hiearchy: define the size of the corpus : state or country :param temporal_period: :param listOfCities: If you want to filter out some cities, you can :return: """ # tfidfStartDate = date(2020, 1, 23) # tfidfEndDate = date(2020, 1, 30) # temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate) # listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff'] # listOfState = ["England", "Scotland", "Northern Ireland", "Wales"] # Query Elasticsearch to get all tweets from UK tweets = elasticsearch_query(elastic_query_fname, logger) if listOfCities == 'all': listOfCities = [] listOfStates = [] listOfCountry = [] for triple in tweets: splitted = triple.split("_") listOfCities.append(splitted[0]) listOfStates.append(splitted[1]) listOfCountry.append(splitted[2]) listOfCities = list(set(listOfCities)) listOfStates = list(set(listOfStates)) listOfCountry = list(set(listOfCountry)) # reorganie tweets (dict : tweets by cities) into dataframe (city and date) matrixAllTweets = pd.DataFrame() for tweetByCity in tweets.keys(): # Filter cities : city = str(tweetByCity).split("_")[0] state = str(tweetByCity).split("_")[1] country = str(tweetByCity).split("_")[2] if city in listOfCities: matrix = pd.DataFrame(tweets[tweetByCity]) matrix["country"] = country matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True) # Split datetime into date and time matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']] matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']] # Filter by a period if temporal_period != "all": mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max())) matrixAllTweets = matrixAllTweets.loc[mask] vectorizer = TfidfVectorizer( stop_words='english', min_df=0.001, # max_features=50000, ngram_range=(1, 1), token_pattern='[a-zA-Z0-9#]+', #remove user name, i.e term starting with @ for personnal data issue ) try: vectors = vectorizer.fit_transform(matrixAllTweets['tweet']) feature_names = vectorizer.get_feature_names() dense = vectors.todense() denselist = dense.tolist() except: logger.info("Impossible to compute TF-IDF") exit(-1) ## matrixTFIDF TFIDFClassical = pd.DataFrame(denselist, columns=feature_names) TFIDFClassical["country"] = matrixAllTweets["country"] if save_intermediaire_files: logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_whole_corpus.csv") TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_whole_corpus.csv") extractBiggest = pd.DataFrame() for term in TFIDFClassical.keys(): try: index = TFIDFClassical[term].idxmax() score = TFIDFClassical[term].max() country = TFIDFClassical.iloc[index]["country"] row = { 'terms': term, 'score': score, 'country': country } extractBiggest = extractBiggest.append(row, ignore_index=True) except: logger.info(term+' : '+str(index)+" : "+str(score)+" : "+country) ## Extract N TOP ranking score # extractBiggest = TFIDFClassical.max() extractBiggest = extractBiggest[extractBiggest['score'] == 1] # we keep only term with high score TF-IDF, i.e 1.0 # extractBiggest = extractBiggest.to_frame() # extractBiggest = extractBiggest.reset_index() # extractBiggest.columns = ['terms', 'score', 'country'] logger.info("saving TF-IDF top"+str(extractBiggest['terms'].size)+" biggest score") extractBiggest.to_csv(path_for_filesaved+"/TFIDF_BiggestScore_on_whole_corpus.csv") def logsetup(log_fname): """ Initiate a logger 
object : - Log in file : collectweets.log - also print on screen :return: logger object """ logger = logging.getLogger() logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s') now = datetime.now() file_handler = RotatingFileHandler(log_fname + "_" + now.strftime("%Y-%m-%d_%H-%M-%S") + ".log", 'a', 1000000, 1) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(formatter) logger.addHandler(file_handler) stream_handler = logging.StreamHandler() # Only display on screen INFO stream_handler.setLevel(logging.INFO) logger.addHandler(stream_handler) return logger def t_SNE_bert_embedding_visualization(biggest_score, logger, listOfLocalities="all", spatial_hieararchy="country", plotname="colored by country", paht2save="./"): """ Plot t-SNE representation of terms by country ressources: + https://colab.research.google.com/drive/1FmREx0O4BDeogldyN74_7Lur5NeiOVye?usp=sharing#scrollTo=Fbq5MAv0jkft + https://github.com/UKPLab/sentence-transformers :param biggest_score: :param listOfLocalities: :param spatial_hieararchy: :param plotname: :param paht2save: :return: """ modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens') # filter by localities for locality in biggest_score[spatial_hieararchy].unique(): if locality not in listOfLocalities: biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index) embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True) # embeddings.tofile(paht2save+"/tsne_bert-embeddings_"+plotname+"_matrix-embeddig") modelTSNE = TSNE(n_components=2) # n_components means the lower dimension low_dim_data = modelTSNE.fit_transform(embeddings) label_tsne = biggest_score[spatial_hieararchy] # Style Plots a bit sns.set_style('darkgrid') sns.set_palette('muted') sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5}) plt.rcParams['figure.figsize'] = (20, 14) tsne_df = pd.DataFrame(low_dim_data, label_tsne) tsne_df.columns = ['x', 'y'] ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index) plt.setp(ax.get_legend().get_texts(), fontsize='40') # for legend text plt.setp(ax.get_legend().get_title(), fontsize='50') # for legend title plt.ylim(-100,100) plt.xlim(-100, 100) #ax.set_title('T-SNE BERT Sentence Embeddings for '+plotname) plt.savefig(paht2save+"/tsne_bert-embeddings_"+plotname) logger.info("file: "+paht2save+"/tsne_bert-embeddings_"+plotname+" has been saved.") #plt.show() plt.close() # Perform kmean clustering # num_clusters = 5 # clustering_model = KMeans(n_clusters=num_clusters) # clustering_model.fit(embeddings) # cluster_assignment = clustering_model.labels_ # Normalize the embeddings to unit length corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True) # Perform kmean clustering clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4) clustering_model.fit(corpus_embeddings) cluster_assignment = clustering_model.labels_ # clustered_sentences = [[] for i in range(num_clusters)] # for sentence_id, cluster_id in enumerate(cluster_assignment): # clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id]) clustered_sentences = {} for sentence_id, cluster_id in enumerate(cluster_assignment): if cluster_id not in clustered_sentences: clustered_sentences[cluster_id] = [] 
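        # append the term to its cluster's bucket: clustered_sentences maps agglomerative-clustering label -> [terms]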
clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id]) #for i, cluster in enumerate(clustered_sentences): # for i, cluster in clustered_sentences.items(): # print("Cluster ", i+1) # print(cluster) # print("") def bert_embedding_filtred(biggest_score, listOfLocalities="all", spatial_hieararchy="country"): """ Retrieve embedding of a matrix of terms (possibility of filtring by a list of locality) :param biggest_score: pd.Datraframe with columns : [terms, country/state/city] :param listOfLocalities: :param spatial_hieararchy: :return: """ modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens') # filter by localities if listOfLocalities != "all": for locality in biggest_score[spatial_hieararchy].unique(): if locality not in listOfLocalities: biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index) embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True) return embeddings def similarity_intra_matrix_pairwise(matrix): """ Compute pairwise cosine similarity on the rows of a Matrix and retrieve unique score by pair. indeed, cosine_similarity pairwise retrive a matrix with duplication : let's take an exemple : Number of terms : 4, cosine similarity : w1 w2 w3 w4 +---+---+----+--+ w1 | 1 | | | | w2 | | 1 | | | w3 | | | 1 | | w4 | | | | 1 | +---+---+----+--+ (w1, w2) = (w2, w1), so we have to keep only : (number_of_terms)^2/2 - (number_of_terms)/2 for nb_term = 4 : 4*4/2 - 4/2 = 16/2 - 4/2 = 6 => we have 6 unique scores :param matrix: :return: list of unique similarity score """ similarity = cosine_similarity(sparse.csr_matrix(matrix)) similarity_1D = np.array([]) for i, row in enumerate(similarity): similarity_1D = np.append(similarity_1D, row[i+1:]) # We remove duplicate pairwise value return similarity_1D def similarity_inter_matrix(matrix1, matrix2): """ :param matrix1: :param matrix2: :return: """ similarity = 1 - sp.distance.cdist(matrix1, matrix2, 'cosine') return similarity def clustering_terms(biggest, logger, cluster_f_out, listOfLocalities="all", spatial_hieararchy="country", method="kmeans"): """ :param biggest: :param method: :return: """ method_list = ["kmeans", "agglomerative_clustering"] if method not in method_list: logger.error("This method is not implemented for clustering: "+str(method)) return -1 # filter by localities if listOfLocalities != "all": for locality in biggest[spatial_hieararchy].unique(): if locality not in listOfLocalities: biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index) embeddings = bert_embedding_filtred(biggest) if method == "kmeans": # Perform kmean clustering num_clusters = 5 clustering_model = KMeans(n_clusters=num_clusters) clustering_model.fit(embeddings) cluster_assignment = clustering_model.labels_ elif method == "agglomerative_clustering": # Normalize the embeddings to unit length corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True) # Perform Agglomerative clustering clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4) clustering_model.fit(corpus_embeddings) cluster_assignment = clustering_model.labels_ clustered_sentences = {} for sentence_id, cluster_id in enumerate(cluster_assignment): if str(cluster_id) not in clustered_sentences: clustered_sentences[str(cluster_id)] = [] clustered_sentences[str(cluster_id)].append(biggest['terms'].iloc[sentence_id]) with 
open(cluster_f_out, "w") as outfile: json.dump(clustered_sentences, outfile) logger.info("file " + cluster_f_out + " has been saved") def geocoding_token(biggest, listOfLocality, spatial_hieararchy, logger): """ Find and geocode Spatial entity with OSM data (nominatim) Respect terms and use of OSM and Nomitim : - Specify a name for the application, Ie.e user agent - add delay between each query : min_delay_seconds = 1. See : https://geopy.readthedocs.io/en/stable/#module-geopy.extra.rate_limiter - define a time out for waiting nomatim answer : to 10 seconds :param biggest: :return: biggest with geocoding information """ try: if listOfLocality != "all": for locality in biggest[spatial_hieararchy].unique(): if locality not in listOfLocality: biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index) except: logger.info("could not filter, certainly because there is no spatial hiearchy on biggest score") geolocator = Nominatim(user_agent="h-tfidf-evaluation", timeout=10) geocoder = RateLimiter(geolocator.geocode, min_delay_seconds=1) tqdm.pandas() biggest["geocode"] = biggest["terms"].progress_apply(geocoder) return biggest def post_traitement_flood(biggest, logger, spatialLevel, ratio_of_flood=0.5): """ Remove terms from people flooding : return same dataframe with 1 more column : user_flooding With default ratio_of_flood : If an twitter.user use a term in more than 50% of occurence of this terms, we consider this user is flooding :param biggest: File of terms to process :param logger: :param: spatialLevel : work on Country / State / City :param: ratio_of_flood :return: return same dataframe with 1 more column : user_flooding """ ratio_of_flood_global = ratio_of_flood es_logger.setLevel(logging.WARNING) # pre-build elastic query for spatialLevel : rest_user_osm_level = "" if spatialLevel == "country": rest_user_osm_level = "rest_user_osm.country" elif spatialLevel == "state": rest_user_osm_level = "rest.features.properties.state" elif spatialLevel == "city": rest_user_osm_level = "rest.features.properties.city" def is_an_user_flooding(term, locality): client = Elasticsearch("http://localhost:9200") index = "twitter" # Query : ## Retrieve only user name where in full_text = term and rest_user_osm.country = locality if term is not np.NAN: query = {"_source": "user.name","query":{"bool":{"filter":[{"bool":{"should":[{"match_phrase":{"full_text":term}}],"minimum_should_match":1}}, {"bool":{"should":[{"match_phrase":{rest_user_osm_level:locality}}],"minimum_should_match":1}}]}}} try: result = Elasticsearch.search(client, index=index, body=query) list_of_user = [] if len(result["hits"]["hits"]) != 0: for hit in result["hits"]["hits"]: user = hit["_source"]["user"]["name"] list_of_user.append(user) dict_user_nbtweet = dict(Counter(list_of_user)) d = dict((k, v) for k, v in dict_user_nbtweet.items() if v >= (ratio_of_flood_global * len(list_of_user))) if len(d) > 0 : # there is a flood on this term: return 1 else: return 0 else: # not found in ES why ? 
return "not_in_es" except: logger.info("There is a trouble with this term: " + str(term)) return np.NAN else: return 0 logger.debug("start remove terms if they coming from a flooding user, ie, terms in "+str(ratio_of_flood_global*100)+"% of tweets from an unique user over tweets with this words") tqdm.pandas() biggest["user_flooding"] = biggest.progress_apply(lambda t: is_an_user_flooding(t.terms, t[spatialLevel]), axis=1) return biggest def venn(biggest, logger, spatial_level, result_path, locality): """ Build Venn diagramm in word_cloud Save fig in result_path Discussion about font size : In each subset (common or specific), the font size of term is related with the H-TFIDF Rank inside the subset :param biggest: :param logger: :param spatialLevel: :return: """ # Post-traitement biggest = biggest[biggest["user_flooding"] == "0"] # Select locality biggest = biggest[biggest[spatial_level] == locality] # select week weeks = biggest['date'].unique() if len(weeks) == 2: sets = [] weeks_list = [] for week in weeks: sets.append(set(biggest[biggest["date"] == week].terms[0:100])) weeks_list.append(week) try: venn = venn2_wordcloud(sets, set_labels=weeks_list, wordcloud_kwargs=dict(min_font_size=10),) except: logger.info("Can't build venn for: "+locality) elif len(weeks) == 3 or len(weeks) > 3: sets = [] weeks_list = [] word_frequency = {} # for font-size of wordcloud : based on H-TFIDF Rank for nb, week in enumerate(weeks[-3:]): sets.append(set(biggest[biggest["date"] == week].terms[0:100])) weeks_list.append(week) for rank, term in enumerate(biggest[biggest["date"] == week].terms[0:100]): if term not in word_frequency: word_frequency[term] = (100 - rank) try: venn = venn3_wordcloud(sets, set_labels=weeks_list, word_to_frequency=word_frequency, wordcloud_kwargs=dict(min_font_size=4,),) except: logger.info("Can't build venn for: "+locality) sorted_word_frequency = dict(sorted(word_frequency.items(), key=operator.itemgetter(1),reverse=True)) logger.info(locality + ": " + str(sorted_word_frequency)) plt.savefig(result_path + "/venn_" + locality) def frequent_terms_by_level(matrixOcc, logger, most_frequent_terms_fpath, listOfLocalities='all', spatialLevel='country'): """ :param matrixOcc: :param most_frequent_terms_fpath: :param listOfLocalities: :param spatialLevel: :return: """ #matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfLocalities, # spatialLevel=spatialLevel, period='all') # Aggregate by level ## Create 4 new columns : city, State, Country and date def splitindex(row): return row.split("_") matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \ zip(*matrixOcc.index.map(splitindex)) matrixOcc.date = pd.to_datetime((matrixOcc.date)) # convert date into datetime if spatialLevel == 'city': matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="Y")]).sum() elif spatialLevel == 'state': matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="Y")]).sum() elif spatialLevel == 'country': matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="Y")]).sum() # Export N biggest TF-IDF score: top_n = 500 extractBiggest = pd.DataFrame(index=matrixOcc.index, columns=range(0, top_n)) for row in matrixOcc.index: try: row_without_zero = matrixOcc.loc[row]# we remove term with a score = 0 row_without_zero = row_without_zero[ row_without_zero !=0 ] try: extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys() except: extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys() except: 
logger.debug("H-TFIDF: city " + str(matrixOcc.loc[row].name) + "not enough terms") # Transpose this table in order to share the same structure with TF-IDF classifical biggest score : hbt = pd.DataFrame() extractBiggest = extractBiggest.reset_index() for index, row in extractBiggest.iterrows(): hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"]) hbtrow[spatialLevel] = row[spatialLevel] hbtrow["date"] = row["date"] hbt = hbt.append(hbtrow, ignore_index=True) # save file logger.info("saving file: "+most_frequent_terms_fpath) hbt.to_csv(most_frequent_terms_fpath) return hbt def comparison_htfidf_tfidf_frequentterms(htfidf_f, tfidf_corpus_country_f, frequent_terms, logger, plot_f_out, listOfCountries="all"): # Open dataframes htfidf = pd.read_csv(htfidf_f, index_col=0) tfidf = pd.read_csv(tfidf_corpus_country_f, index_col=0) for nb_terms in [100, 200, 500]: # barchart building barchart_df_col = ["country", "h-tfidf", "tf-idf"] barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries))) # loop on countries for country in listOfCountries: htfidf_country = htfidf[htfidf["country"] == country] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] # loop on weeks htfidf_overlap_per_week_df = pd.DataFrame(index=range(1)) for week in htfidf_country.date.unique(): htfidf_country_week = htfidf_country[htfidf_country["date"] == week] # build on venn comparison H-TFIDF with Frequent terms sets = [] sets.append(set(htfidf_country_week.terms[0:nb_terms])) sets.append(set(frequent_terms_country.terms[0:nb_terms])) try: venn_htfidf = venn2_wordcloud(sets) htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11')) except: htfidf_overlap_per_week_df[week] = np.NAN # mean value for all weeks : mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms # Compute TF-IDF overlap with Frequent termes sets = [] sets.append(set(tfidf_country.terms[0:nb_terms])) sets.append(set(frequent_terms_country.terms[0:nb_terms])) logger.info(country) venn_tfidf = venn2_wordcloud(sets) plt.close('all') # barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11')) tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms # build the row for barchart if country == "Ἑλλάς": country = "Greece" row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap} barchart_df = barchart_df.append(row, ignore_index=True) # Plot bar chart barchart_df = barchart_df.set_index("country") barchart_df = barchart_df.dropna() barchart_df.plot.bar(figsize=(8,6)) plt.subplots_adjust(bottom=0.27) plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms") plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png") # build venn diagramm ## Choose a country country = "United Kingdom" nb_terms = 100 week = "2020-01-26" ## Filtering matrix to keep TOP15 terms without term with 1 caracter or digital number htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3] tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3] frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3] ### Remove number htfidf_country_terms = 
htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) columns_name = [] latex_table_nb_terms = 30 for i in range(latex_table_nb_terms): columns_name.append("rank "+str(i)) latex_table = pd.DataFrame(index=range(3), columns=columns_name) latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False)) sets = [] sets.append(set(htfidf_country_terms)) sets.append(set(tfidf_country_terms)) sets.append(set(frequent_terms_country_terms)) fig, ax = plt.subplots(figsize=(8, 6)) venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax) plt.savefig(plot_f_out + "_"+ country + "venn3.png") plt.show() def comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_whole_f, frequent_terms, logger, plot_f_out, listOfCountries="all"): # Open dataframes htfidf = pd.read_csv(htfidf_f, index_col=0) tfidf = pd.read_csv(tfidf_whole_f, index_col=0) for nb_terms in [100, 200, 500]: # barchart building barchart_df_col = ["country", "h-tfidf", "tf-idf"] barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries))) # loop on countries for country in listOfCountries: # build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"] if country == "Ἑλλάς": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Greece")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Greece")] elif country == "Deutschland": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Germany")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Germany")] elif country == "España": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Spain")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Spain")] elif country == "Italia": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Italy")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Italy")] else: htfidf_country = htfidf[htfidf["country"] == country] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] # loop on weeks htfidf_overlap_per_week_df = pd.DataFrame(index=range(1)) for week in htfidf_country.date.unique(): htfidf_country_week = htfidf_country[htfidf_country["date"] == week] # build on venn comparison H-TFIDF with Frequent terms sets = [] sets.append(set(htfidf_country_week.terms[0:nb_terms])) sets.append(set(frequent_terms_country.terms[0:nb_terms])) try: venn_htfidf = venn2_wordcloud(sets) htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11')) except: htfidf_overlap_per_week_df[week] = np.NAN # mean value for all weeks : mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms # Compute TF-IDF overlap with Frequent termes sets = [] sets.append(set(tfidf_country.terms[0:nb_terms])) 
sets.append(set(frequent_terms_country.terms[0:nb_terms])) logger.info(country) try : venn_tfidf = venn2_wordcloud(sets) plt.close('all') # barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11')) tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms except: logger.info("No terms in biggest score for TF-IDF - country: " + country) tfidf_overlap = 0.0 # build the row for barchart if country == "Ἑλλάς": country = "Greece" row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap} barchart_df = barchart_df.append(row, ignore_index=True) # Plot bar chart barchart_df = barchart_df.set_index("country") barchart_df = barchart_df.dropna() barchart_df.plot.bar(figsize=(8,6)) plt.subplots_adjust(bottom=0.27) plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms") plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png") # build venn diagramm ## Choose a country country = "Germany" nb_terms = 100 week = "2020-01-26" ## Filtering matrix to keep TOP15 terms without term with 1 caracter or digital number htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3] tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3] frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3] ### Remove number htfidf_country_terms = htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) columns_name = [] latex_table_nb_terms = 15 for i in range(latex_table_nb_terms): columns_name.append("rank "+str(i)) latex_table = pd.DataFrame(index=range(3), columns=columns_name) latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False)) sets = [] sets.append(set(htfidf_country_terms)) sets.append(set(tfidf_country_terms)) sets.append(set(frequent_terms_country_terms)) fig, ax = plt.subplots(figsize=(8, 6)) venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax) plt.savefig(plot_f_out + "_"+ country + "venn3.png") plt.show() if __name__ == '__main__': # Global parameters : ## Spatial level hierarchie : # spatialLevels = ['country', 'state', 'city'] spatialLevels = ['country', 'state'] ## Time level hierarchie : timeLevel = "week" ## List of country to work on : listOfLocalities = ["Deutschland", "España", "France", "Italia", "United Kingdom"] ## elastic query : query_fname = "elasticsearch/analyse/nldb21/elastic-query/nldb21_europeBySpatialExtent_en_february.txt" ## Path to results : period_extent = "feb_tfidf_whole" f_path_result = "elasticsearch/analyse/nldb21/results/" + period_extent + "_" + timeLevel if not os.path.exists(f_path_result): os.makedirs(f_path_result) # Workflow parameters : ## Rebuild H-TFIDF (with Matrix Occurence) build_htfidf = False build_htfidf_save_intermediaire_files = True ## eval 1 : 
Comparison with classical TF-IDf build_classical_tfidf = False build_classical_tfidf_save_intermediaire_files = False ## evla 2 : Use word_embedding with t-SNE build_tsne = False build_tsne_spatial_level = "country" ## eval 3 : Use word_embedding with box plot to show disparity build_boxplot = False build_boxplot_spatial_level = "country" ## eval 4 : Compare H-TFIDF and TF-IDF with most frequent terms by level build_compare_measures = True build_compare_measures_build_intermedate_files = False build_compare_measures_level = "country" build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"] ## post-traitement 1 : geocode term build_posttraitement_geocode = False ## post-traitement 2 : remove terms form a flooding user build_posttraitement_flooding = False build_posttraitement_flooding_spatial_levels = spatialLevels ## Analyse H-TFIDF for epidemiology 1 : clustering build_clustering = False build_clustering_spatial_levels = ['country', 'state'] build_clustering_list_hierachical_locality = { "country": ["France", "Deutschland", "España", "Italia", "United Kingdom"], 'state': ["Lombardia", "Lazio"], # "city": ["London"] } ## Venn diagramm build_venn = False build_venn_spatial_level = "country" # initialize a logger : log_fname = "elasticsearch/analyse/nldb21/logs/nldb21_" logger = logsetup(log_fname) logger.info("H-TFIDF expirements starts") if build_htfidf: # start the elastic query query = open(query_fname, "r").read() logger.debug("elasticsearch : start quering") tweetsByCityAndDate = elasticsearch_query(query_fname, logger) logger.debug("elasticsearch : stop quering") # Build a matrix of occurence for each terms in document aggregate by city and day ## prepare tree for file in commun for all spatial level : f_path_result_common = f_path_result+"/common" if not os.path.exists(f_path_result_common): os.makedirs(f_path_result_common) ## Define file path matrixAggDay_fpath = f_path_result_common + "/matrixAggDay.csv" matrixOccurence_fpath = f_path_result_common + "/matrixOccurence.csv" logger.debug("Build matrix of occurence : start") matrixOccurence = matrixOccurenceBuilder(tweetsByCityAndDate, matrixAggDay_fpath, matrixOccurence_fpath, build_htfidf_save_intermediaire_files, logger) logger.debug("Build matrix of occurence : stop") ## import matrixOccurence if you don't want to re-build it # matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0) for spatialLevel in spatialLevels: logger.info("H-TFIDF on: "+spatialLevel) f_path_result_level = f_path_result+"/"+spatialLevel if not os.path.exists(f_path_result_level): os.makedirs(f_path_result_level) ## Compute H-TFIDF matrixHTFIDF_fname = f_path_result_level + "/matrix_H-TFIDF.csv" biggestHTFIDFscore_fname = f_path_result_level + "/h-tfidf-Biggest-score.csv" logger.debug("H-TFIDF : start to compute") HTFIDF(matrixOcc=matrixOccurence, matrixHTFIDF_fname=matrixHTFIDF_fname, biggestHTFIDFscore_fname=biggestHTFIDFscore_fname, spatialLevel=spatialLevel, temporalLevel=timeLevel, ) logger.info("H-TFIDF : stop to compute for all spatial levels") ## Comparison with TF-IDF f_path_result_tfidf = f_path_result + "/tf-idf-classical" f_path_result_tfidf_by_locality = f_path_result_tfidf + "/tfidf-tf-corpus-country" if build_classical_tfidf : if not os.path.exists(f_path_result_tfidf): os.makedirs(f_path_result_tfidf) if not os.path.exists(f_path_result_tfidf_by_locality): os.makedirs(f_path_result_tfidf_by_locality) ### On whole corpus 
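        # TF-IDF with the whole elastic query as corpus (1 doc = 1 tweet); results saved under f_path_result_tfidf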
TFIDF_TF_on_whole_corpus(elastic_query_fname=query_fname, logger=logger, save_intermediaire_files=build_classical_tfidf_save_intermediaire_files, path_for_filesaved=f_path_result_tfidf) ### By Country TFIDF_TF_with_corpus_state(elastic_query_fname=query_fname, logger=logger, save_intermediaire_files=build_classical_tfidf_save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved=f_path_result_tfidf_by_locality, spatial_hiearchy="country", temporal_period='all') if build_compare_measures: f_path_result_compare_meassures_dir = f_path_result+"/common" f_path_result_compare_meassures_file = \ f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level + ".csv" f_path_result_compare_meassures_plot = \ f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level if not os.path.exists(f_path_result_compare_meassures_dir): os.makedirs(f_path_result_compare_meassures_dir) # open Matrix of occurence: try: matrixOccurence = pd.read_csv(f_path_result_compare_meassures_dir + '/matrixOccurence.csv', index_col=0) except: logger.error("File: " + f_path_result_compare_meassures_dir + '/matrixOccurence.csv' + "doesn't exist. You may need to save intermediate file for H-TFIDF") logger.info("Retrieve frequent terms per country") if build_compare_measures_build_intermedate_files: ft = frequent_terms_by_level(matrixOccurence, logger, f_path_result_compare_meassures_file, build_compare_measures_localities, build_compare_measures_level) else: ft = pd.read_csv(f_path_result_compare_meassures_file) # files_path htfidf_f = f_path_result + "/country/h-tfidf-Biggest-score.csv" tfidf_corpus_whole_f = f_path_result + "/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv" comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_corpus_whole_f, ft, logger, f_path_result_compare_meassures_plot, listOfCountries=build_compare_measures_localities) if build_tsne : f_path_result_tsne = f_path_result+"/tsne" if not os.path.exists(f_path_result_tsne): os.makedirs(f_path_result_tsne) biggest_TFIDF_country = pd.read_csv(f_path_result+"/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0) biggest_TFIDF_whole = pd.read_csv(f_path_result+"/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv") biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_tsne_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0) # t_SNE visulation t_SNE_bert_embedding_visualization(biggest_TFIDF_country, logger, listOfLocalities=listOfLocalities, plotname="TF-IDF on corpus by Country", paht2save=f_path_result_tsne) t_SNE_bert_embedding_visualization(biggest_H_TFIDF, logger, listOfLocalities=listOfLocalities, plotname="H-TFIDF", paht2save=f_path_result_tsne) if build_boxplot : # dir path to save : f_path_result_boxplot = f_path_result+"/pairwise-similarity-boxplot" if not os.path.exists(f_path_result_boxplot): os.makedirs(f_path_result_boxplot) # open result from mesures : biggest_TFIDF_country = pd.read_csv(f_path_result_tfidf_by_locality+"/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0) biggest_TFIDF_whole = pd.read_csv(f_path_result_tfidf+"/TFIDF_BiggestScore_on_whole_corpus.csv") biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_boxplot_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0) # Retrieve embedding : htfidf_embeddings = bert_embedding_filtred(biggest_H_TFIDF, listOfLocalities=listOfLocalities) tfidf_country_embeddings = bert_embedding_filtred(biggest_TFIDF_country, 
listOfLocalities=listOfLocalities) tfidf_whole_embeddings = bert_embedding_filtred(biggest_TFIDF_whole) # Compute similarity : ## Distribution of similarities between terms extracted from a measure htidf_similarity = similarity_intra_matrix_pairwise(htfidf_embeddings) tfidf_country_similarity = similarity_intra_matrix_pairwise(tfidf_country_embeddings) tfidf_whole_similarity = similarity_intra_matrix_pairwise(tfidf_whole_embeddings) plt.subplot(131) plt.boxplot(htidf_similarity) plt.title("H-TFIDF") plt.ylim(0,1) plt.subplot(132) plt.boxplot(tfidf_country_similarity) plt.title("TFIDF with corpus by country") plt.ylim(0, 1) plt.subplot(133) plt.boxplot(tfidf_whole_similarity) plt.title("TFIDF on the whole corpus") plt.ylim(0, 1) plt.tight_layout() plt.subplots_adjust(wspace=0.3) plt.suptitle("Distribution of similarity values among the extracted terms pairs of a measure") plt.savefig(f_path_result_boxplot+"/pairwise-similarity-boxplot.png") # plt.show() plt.close() ## Distribution of similarities between the terms of a country extracted from a measure ### H-TFIDF fig2, axs2 = plt.subplots(1, 5) for i, country in enumerate(listOfLocalities): axs2[i].boxplot(similarity_intra_matrix_pairwise(htfidf_embeddings[i*500:(i+1)*500-1])) axs2[i].set_title(country, fontsize=40) axs2[i].set_ylim(0, 1) # fig2.suptitle("Distribution of similarity by pairs for H-TF-IDF") plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_HTFIDF-country.png") # plt.show() plt.close(fig2) ### TF-IDF by corpus = country fig3, axs3 = plt.subplots(1, 5) for i, country in enumerate(listOfLocalities): axs3[i].boxplot(similarity_intra_matrix_pairwise(tfidf_country_embeddings[i*500:(i+1)*500-1])) axs3[i].set_title(country, fontsize=40) axs3[i].set_ylim(0, 1) # fig3.suptitle("Distribution of similarity by pairs for TF-IDF focus on each country") plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_TFIDF-country.png") # plt.show() plt.close(fig3) ## Distribution of similarities between the set of terms of 2 measures ### H-TF-IDF with TF-IDF on whole corpus and TF-IDF country with TF-IDF on whole corpus fig_compare_TFIDF_whole, ax4 = plt.subplots(1,2) similarity_between_htfidf_tfidf_whole = similarity_inter_matrix(htfidf_embeddings, tfidf_whole_embeddings) similarity_between_tfidfcountry_tfidf_whole = similarity_inter_matrix(tfidf_country_embeddings, tfidf_whole_embeddings) similarity_between_htfidf_tfidf_whole_1D = np.array([]) similarity_between_tfidfcountry_tfidf_whole_1D = np.array([]) for i, row in enumerate(similarity_between_htfidf_tfidf_whole): similarity_between_htfidf_tfidf_whole_1D = np.append(similarity_between_htfidf_tfidf_whole_1D, row[i+1:]) # We remove duplicate pairwise value for i, row in enumerate(similarity_between_tfidfcountry_tfidf_whole): similarity_between_tfidfcountry_tfidf_whole_1D = np.append(similarity_between_tfidfcountry_tfidf_whole_1D, row[i + 1:]) ax4[0].boxplot(similarity_between_htfidf_tfidf_whole_1D) ax4[0].set_ylim(0, 1) ax4[0].set_title("H-TFIDF") ax4[1].boxplot(similarity_between_tfidfcountry_tfidf_whole_1D) ax4[1].set_ylim(0, 1) ax4[1].set_title("TFIDF on country") fig_compare_TFIDF_whole.suptitle("Distribution of similarity between H-TFIDF and TF-IDF on whole corpus") plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_between_TFIDF-whole.png") # plt.show() plt.close(fig_compare_TFIDF_whole) ## Distribution of similarities between sub-set terms by country compared by country pair if build_posttraitement_geocode: # Geocode terms : ## Comments : over 
geocode even on non spatial entities spatial_level = "country" listOfLocalities = ["France", "Deutschland", "España", "Italia", "United Kingdom"] f_path_result = "elasticsearch/analyse/nldb21/results/4thfeb_country" biggest_TFIDF_country = pd.read_csv( f_path_result+"/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_"+spatial_level+"_corpus.csv", index_col=0) biggest_TFIDF_whole = pd.read_csv(f_path_result+"/TFIDF_BiggestScore_on_whole_corpus.csv") biggest_H_TFIDF = pd.read_csv(f_path_result+'/h-tfidf-Biggest-score.csv', index_col=0) biggest_H_TFIDF_gepocode = geocoding_token(biggest_H_TFIDF, listOfLocality=listOfLocalities, spatial_hieararchy=spatial_level, logger=logger) biggest_H_TFIDF_gepocode.to_csv(f_path_result+"/h-tfidf-Biggest-score-geocode.csv") biggest_TFIDF_country_gepocode = geocoding_token(biggest_TFIDF_country, listOfLocality=listOfLocalities, spatial_hieararchy=spatial_level, logger=logger) biggest_TFIDF_country_gepocode.to_csv(f_path_result+"/TF-IDF_BiggestScore_on_"+spatial_level+"_corpus_geocode.csv") biggest_TFIDF_whole_gepocode = geocoding_token(biggest_TFIDF_whole, listOfLocality=listOfLocalities, spatial_hieararchy=spatial_level, logger=logger) biggest_TFIDF_whole_gepocode.to_csv(f_path_result+"/TFIDF_BiggestScore_on_whole_corpus_geocode.csv") if build_posttraitement_flooding: # Post traitement : remove terms coming from user who flood for spatial_level_flood in build_posttraitement_flooding_spatial_levels: logger.info("post-traitement flooding on: " + spatial_level_flood) f_path_result_flood = f_path_result + "/" + spatial_level_flood biggest_H_TFIDF = pd.read_csv(f_path_result_flood + '/h-tfidf-Biggest-score.csv', index_col=0) biggest_H_TFIDF_with_flood = post_traitement_flood(biggest_H_TFIDF, logger, spatialLevel=spatial_level_flood) biggest_H_TFIDF_with_flood.to_csv(f_path_result_flood + "/h-tfidf-Biggest-score-flooding.csv") if build_clustering: # Create clustering # method="agglomerative_clustering" method_list = ["kmeans", "agglomerative_clustering"] for spatial_level in build_clustering_spatial_levels: f_path_result_flood = f_path_result + "/" + spatial_level f_path_result_clustering = f_path_result + "/" + spatial_level + "/clustering" if not os.path.exists(f_path_result_clustering): os.makedirs(f_path_result_clustering) # open result post_traited try: biggest_H_TFIDF = pd.read_csv(f_path_result_flood + "/h-tfidf-Biggest-score-flooding.csv", index_col=0) except: logger.error("Clustering: file biggest score doesn't exist") # drop token from flooding user and drop ngram not in the same sentence (see post_traitement) biggest = biggest_H_TFIDF[biggest_H_TFIDF["user_flooding"] == str(0)] for method in method_list: for locality in build_clustering_list_hierachical_locality[spatial_level]: f_path = f_path_result_clustering + "/" + locality + "_" + method + ".json" try: clustering_terms(biggest, logger, f_path, listOfLocalities=locality, spatial_hieararchy=spatial_level, method=method) except: logger.error("Impossible to cluster for " + spatial_level + "with method: "+method) if build_venn: f_path_result_venn = f_path_result + "/venn" if not os.path.exists(f_path_result_venn): os.makedirs(f_path_result_venn) # open result post_traited try: biggest_H_TFIDF = pd.read_csv(f_path_result + "/" + build_venn_spatial_level + "/h-tfidf-Biggest-score-flooding.csv", index_col=0) except: logger.error("Venn: file biggest score doesn't exist") for locality in listOfLocalities: venn(biggest_H_TFIDF, logger, build_venn_spatial_level, f_path_result_venn, locality) logger.info("H-TFIDF 
expirements stops")
[ "logging.getLogger", "matplotlib.pyplot.boxplot", "numpy.log10", "logging.StreamHandler", "geopy.extra.rate_limiter.RateLimiter", "pandas.read_csv", "matplotlib.pyplot.ylabel", "pandas.to_timedelta", "pandas.Grouper", "seaborn.set_style", "numpy.array", "seaborn.scatterplot", "numpy.linalg.norm", "matplotlib_venn_wordcloud.venn2_wordcloud", "operator.itemgetter", "pandas.to_datetime", "os.path.exists", "sklearn.cluster.AgglomerativeClustering", "elasticsearch.Elasticsearch.search", "sklearn.feature_extraction.text.CountVectorizer", "elasticsearch.Elasticsearch", "json.dumps", "sklearn.manifold.TSNE", "matplotlib.pyplot.close", "matplotlib_venn_wordcloud.venn3_wordcloud", "pandas.DataFrame", "elasticsearch.logger.setLevel", "matplotlib.pyplot.ylim", "scipy.sparse.csr_matrix", "matplotlib.pyplot.savefig", "seaborn.set_context", "geopy.geocoders.Nominatim", "re.sub", "matplotlib.pyplot.xlim", "tqdm.tqdm.pandas", "matplotlib.pyplot.title", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "sklearn.cluster.KMeans", "sentence_transformers.SentenceTransformer", "seaborn.set_palette", "os.makedirs", "datetime.datetime.strptime", "logging.Formatter", "scipy.spatial.distance.cdist", "tqdm.tqdm", "numpy.append", "datetime.datetime.now", "sklearn.feature_extraction.text.TfidfVectorizer", "collections.Counter", "collections.defaultdict", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots", "json.dump" ]
[((1424, 1462), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['"""http://localhost:9200"""'], {}), "('http://localhost:9200')\n", (1437, 1462), False, 'from elasticsearch import Elasticsearch\n'), ((1467, 1502), 'elasticsearch.logger.setLevel', 'es_logger.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (1485, 1502), True, 'from elasticsearch import logger as es_logger\n'), ((1601, 1678), 'elasticsearch.Elasticsearch.search', 'Elasticsearch.search', (['client'], {'index': 'index', 'body': 'query', 'scroll': '"""2m"""', 'size': '(5000)'}), "(client, index=index, body=query, scroll='2m', size=5000)\n", (1621, 1678), False, 'from elasticsearch import Elasticsearch\n'), ((1917, 1934), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1928, 1934), False, 'from collections import defaultdict, Counter\n'), ((4781, 4804), 'tqdm.tqdm', 'tqdm', ([], {'total': 'scroll_size'}), '(total=scroll_size)\n', (4785, 4804), False, 'from tqdm import tqdm\n'), ((5598, 5672), 're.sub', 're.sub', (['"""((www\\\\.[^\\\\s]+)|(https?://[^\\\\s]+)|(http?://[^\\\\s]+))"""', '""""""', 'text'], {}), "('((www\\\\.[^\\\\s]+)|(https?://[^\\\\s]+)|(http?://[^\\\\s]+))', '', text)\n", (5604, 5672), False, 'import re\n'), ((5685, 5718), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'textclean'], {}), "('http\\\\S+', '', textclean)\n", (5691, 5718), False, 'import re\n'), ((6676, 6701), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col'}), '(columns=col)\n', (6688, 6701), True, 'import pandas as pd\n'), ((8174, 8288), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""', 'max_features': '(25000)', 'ngram_range': '(1, 1)', 'token_pattern': '"""[a-zA-Z0-9#@]+"""'}), "(stop_words='english', max_features=25000, ngram_range=(1, 1\n ), token_pattern='[a-zA-Z0-9#@]+')\n", (8189, 8288), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((9033, 9110), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'countTerms[0:, 0:]', 'index': 'cityDayList', 'columns': 'listOfTerms'}), '(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)\n', (9045, 9110), True, 'import pandas as pd\n'), ((13276, 13293), 'numpy.log10', 'np.log10', (['(N / DFt)'], {}), '(N / DFt)\n', (13284, 13293), True, 'import numpy as np\n'), ((14450, 14464), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14462, 14464), True, 'import pandas as pd\n'), ((16494, 16508), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16506, 16508), True, 'import pandas as pd\n'), ((17497, 17511), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17509, 17511), True, 'import pandas as pd\n'), ((17548, 17562), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17560, 17562), True, 'import pandas as pd\n'), ((22919, 22933), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (22931, 22933), True, 'import pandas as pd\n'), ((23799, 23905), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': '"""english"""', 'min_df': '(0.001)', 'ngram_range': '(1, 1)', 'token_pattern': '"""[a-zA-Z0-9#]+"""'}), "(stop_words='english', min_df=0.001, ngram_range=(1, 1),\n token_pattern='[a-zA-Z0-9#]+')\n", (23814, 23905), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((24363, 24409), 'pandas.DataFrame', 'pd.DataFrame', (['denselist'], {'columns': 'feature_names'}), '(denselist, columns=feature_names)\n', (24375, 24409), True, 'import pandas as pd\n'), ((24695, 
24709), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (24707, 24709), True, 'import pandas as pd\n'), ((25943, 25962), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (25960, 25962), False, 'import logging\n'), ((26013, 26101), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s"""'], {}), "(\n '%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s')\n", (26030, 26101), False, 'import logging\n'), ((26107, 26121), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26119, 26121), False, 'from datetime import datetime\n'), ((26379, 26402), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (26400, 26402), False, 'import logging\n'), ((27171, 27225), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""distilbert-base-nli-mean-tokens"""'], {}), "('distilbert-base-nli-mean-tokens')\n", (27190, 27225), False, 'from sentence_transformers import SentenceTransformer\n'), ((27691, 27711), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (27695, 27711), False, 'from sklearn.manifold import TSNE\n'), ((27890, 27915), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (27903, 27915), True, 'import seaborn as sns\n'), ((27920, 27944), 'seaborn.set_palette', 'sns.set_palette', (['"""muted"""'], {}), "('muted')\n", (27935, 27944), True, 'import seaborn as sns\n'), ((27949, 28019), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1)', 'rc': "{'lines.linewidth': 2.5}"}), "('notebook', font_scale=1, rc={'lines.linewidth': 2.5})\n", (27964, 28019), True, 'import seaborn as sns\n'), ((28082, 28120), 'pandas.DataFrame', 'pd.DataFrame', (['low_dim_data', 'label_tsne'], {}), '(low_dim_data, label_tsne)\n', (28094, 28120), True, 'import pandas as pd\n'), ((28163, 28225), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'tsne_df', 'x': '"""x"""', 'y': '"""y"""', 'hue': 'tsne_df.index'}), "(data=tsne_df, x='x', y='y', hue=tsne_df.index)\n", (28178, 28225), True, 'import seaborn as sns\n'), ((28382, 28401), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-100)', '(100)'], {}), '(-100, 100)\n', (28390, 28401), True, 'import matplotlib.pyplot as plt\n'), ((28405, 28424), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-100)', '(100)'], {}), '(-100, 100)\n', (28413, 28424), True, 'import matplotlib.pyplot as plt\n'), ((28495, 28555), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(paht2save + '/tsne_bert-embeddings_' + plotname)"], {}), "(paht2save + '/tsne_bert-embeddings_' + plotname)\n", (28506, 28555), True, 'import matplotlib.pyplot as plt\n'), ((28661, 28672), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28670, 28672), True, 'import matplotlib.pyplot as plt\n'), ((29065, 29129), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'distance_threshold': '(1.5)'}), '(n_clusters=None, distance_threshold=1.5)\n', (29088, 29129), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((30425, 30479), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""distilbert-base-nli-mean-tokens"""'], {}), "('distilbert-base-nli-mean-tokens')\n", (30444, 30479), False, 'from sentence_transformers import SentenceTransformer\n'), ((31854, 31866), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (31862, 31866), True, 'import numpy as np\n'), ((35127, 35181), 'geopy.geocoders.Nominatim', 'Nominatim', ([], 
{'user_agent': '"""h-tfidf-evaluation"""', 'timeout': '(10)'}), "(user_agent='h-tfidf-evaluation', timeout=10)\n", (35136, 35181), False, 'from geopy.geocoders import Nominatim\n'), ((35197, 35249), 'geopy.extra.rate_limiter.RateLimiter', 'RateLimiter', (['geolocator.geocode'], {'min_delay_seconds': '(1)'}), '(geolocator.geocode, min_delay_seconds=1)\n', (35208, 35249), False, 'from geopy.extra.rate_limiter import RateLimiter\n'), ((35255, 35268), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (35266, 35268), False, 'from tqdm import tqdm\n'), ((35964, 35999), 'elasticsearch.logger.setLevel', 'es_logger.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (35982, 35999), True, 'from elasticsearch import logger as es_logger\n'), ((38102, 38115), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (38113, 38115), False, 'from tqdm import tqdm\n'), ((40199, 40245), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(result_path + '/venn_' + locality)"], {}), "(result_path + '/venn_' + locality)\n", (40210, 40245), True, 'import matplotlib.pyplot as plt\n'), ((40977, 41007), 'pandas.to_datetime', 'pd.to_datetime', (['matrixOcc.date'], {}), '(matrixOcc.date)\n', (40991, 41007), True, 'import pandas as pd\n'), ((42187, 42201), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (42199, 42201), True, 'import pandas as pd\n'), ((42835, 42869), 'pandas.read_csv', 'pd.read_csv', (['htfidf_f'], {'index_col': '(0)'}), '(htfidf_f, index_col=0)\n', (42846, 42869), True, 'import pandas as pd\n'), ((42882, 42930), 'pandas.read_csv', 'pd.read_csv', (['tfidf_corpus_country_f'], {'index_col': '(0)'}), '(tfidf_corpus_country_f, index_col=0)\n', (42893, 42930), True, 'import pandas as pd\n'), ((47247, 47275), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (47259, 47275), True, 'import matplotlib.pyplot as plt\n'), ((47289, 47374), 'matplotlib_venn_wordcloud.venn3_wordcloud', 'venn3_wordcloud', (['sets'], {'set_labels': "['H-TFIDF', 'TF-IDF', 'Frequent terms']", 'ax': 'ax'}), "(sets, set_labels=['H-TFIDF', 'TF-IDF', 'Frequent terms'], ax=ax\n )\n", (47304, 47374), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((47374, 47427), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plot_f_out + '_' + country + 'venn3.png')"], {}), "(plot_f_out + '_' + country + 'venn3.png')\n", (47385, 47427), True, 'import matplotlib.pyplot as plt\n'), ((47431, 47441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47439, 47441), True, 'import matplotlib.pyplot as plt\n'), ((47611, 47645), 'pandas.read_csv', 'pd.read_csv', (['htfidf_f'], {'index_col': '(0)'}), '(htfidf_f, index_col=0)\n', (47622, 47645), True, 'import pandas as pd\n'), ((47658, 47697), 'pandas.read_csv', 'pd.read_csv', (['tfidf_whole_f'], {'index_col': '(0)'}), '(tfidf_whole_f, index_col=0)\n', (47669, 47697), True, 'import pandas as pd\n'), ((53332, 53360), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (53344, 53360), True, 'import matplotlib.pyplot as plt\n'), ((53374, 53459), 'matplotlib_venn_wordcloud.venn3_wordcloud', 'venn3_wordcloud', (['sets'], {'set_labels': "['H-TFIDF', 'TF-IDF', 'Frequent terms']", 'ax': 'ax'}), "(sets, set_labels=['H-TFIDF', 'TF-IDF', 'Frequent terms'], ax=ax\n )\n", (53389, 53459), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((53459, 53512), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plot_f_out + '_' + country + 'venn3.png')"], {}), 
"(plot_f_out + '_' + country + 'venn3.png')\n", (53470, 53512), True, 'import matplotlib.pyplot as plt\n'), ((53516, 53526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (53524, 53526), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2134), 'datetime.datetime.strptime', 'datetime.strptime', (['inDate', '"""%a %b %d %H:%M:%S %z %Y"""'], {}), "(inDate, '%a %b %d %H:%M:%S %z %Y')\n", (2099, 2134), False, 'from datetime import datetime\n'), ((6959, 6991), 'pandas.DataFrame', 'pd.DataFrame', (['tweetsofcity[city]'], {}), '(tweetsofcity[city])\n', (6971, 6991), True, 'import pandas as pd\n'), ((17950, 18049), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': '"""english"""', 'min_df': '(0.001)', 'ngram_range': '(1, 1)', 'token_pattern': '"""[<KEY>"""'}), "(stop_words='english', min_df=0.001, ngram_range=(1, 1),\n token_pattern='[<KEY>')\n", (17965, 18049), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((18589, 18635), 'pandas.DataFrame', 'pd.DataFrame', (['denselist'], {'columns': 'feature_names'}), '(denselist, columns=feature_names)\n', (18601, 18635), True, 'import pandas as pd\n'), ((28960, 29009), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings'], {'axis': '(1)', 'keepdims': '(True)'}), '(embeddings, axis=1, keepdims=True)\n', (28974, 29009), True, 'import numpy as np\n'), ((31807, 31832), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['matrix'], {}), '(matrix)\n', (31824, 31832), False, 'from scipy import sparse\n'), ((31932, 31969), 'numpy.append', 'np.append', (['similarity_1D', 'row[i + 1:]'], {}), '(similarity_1D, row[i + 1:])\n', (31941, 31969), True, 'import numpy as np\n'), ((32169, 32214), 'scipy.spatial.distance.cdist', 'sp.distance.cdist', (['matrix1', 'matrix2', '"""cosine"""'], {}), "(matrix1, matrix2, 'cosine')\n", (32186, 32214), True, 'import scipy.spatial as sp\n'), ((33058, 33089), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters'}), '(n_clusters=num_clusters)\n', (33064, 33089), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((34075, 34114), 'json.dump', 'json.dump', (['clustered_sentences', 'outfile'], {}), '(clustered_sentences, outfile)\n', (34084, 34114), False, 'import json\n'), ((36422, 36460), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['"""http://localhost:9200"""'], {}), "('http://localhost:9200')\n", (36435, 36460), False, 'from elasticsearch import Elasticsearch\n'), ((45281, 45313), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.27)'}), '(bottom=0.27)\n', (45300, 45313), True, 'import matplotlib.pyplot as plt\n'), ((45322, 45395), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% overlap between H-TFIDF / TF-IDF with most frequent terms"""'], {}), "('% overlap between H-TFIDF / TF-IDF with most frequent terms')\n", (45332, 45395), True, 'import matplotlib.pyplot as plt\n'), ((51373, 51405), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.27)'}), '(bottom=0.27)\n', (51392, 51405), True, 'import matplotlib.pyplot as plt\n'), ((51414, 51487), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% overlap between H-TFIDF / TF-IDF with most frequent terms"""'], {}), "('% overlap between H-TFIDF / TF-IDF with most frequent terms')\n", (51424, 51487), True, 'import matplotlib.pyplot as plt\n'), ((54184, 54213), 'os.path.exists', 'os.path.exists', (['f_path_result'], {}), '(f_path_result)\n', (54198, 54213), False, 'import os\n'), ((54223, 54249), 
'os.makedirs', 'os.makedirs', (['f_path_result'], {}), '(f_path_result)\n', (54234, 54249), False, 'import os\n'), ((61285, 61421), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result +\n '/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv'\n )"], {'index_col': '(0)'}), "(f_path_result +\n '/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv'\n , index_col=0)\n", (61296, 61421), True, 'import pandas as pd\n'), ((61441, 61532), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv')"], {}), "(f_path_result +\n '/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv')\n", (61452, 61532), True, 'import pandas as pd\n'), ((61553, 61660), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/' + build_tsne_spatial_level + '/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result + '/' + build_tsne_spatial_level +\n '/h-tfidf-Biggest-score.csv', index_col=0)\n", (61564, 61660), True, 'import pandas as pd\n'), ((62439, 62547), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_tfidf_by_locality + '/TF-IDF_BiggestScore_on_country_corpus.csv'\n )"], {'index_col': '(0)'}), "(f_path_result_tfidf_by_locality +\n '/TF-IDF_BiggestScore_on_country_corpus.csv', index_col=0)\n", (62450, 62547), True, 'import pandas as pd\n'), ((62572, 62648), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_tfidf + '/TFIDF_BiggestScore_on_whole_corpus.csv')"], {}), "(f_path_result_tfidf + '/TFIDF_BiggestScore_on_whole_corpus.csv')\n", (62583, 62648), True, 'import pandas as pd\n'), ((62673, 62783), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/' + build_boxplot_spatial_level +\n '/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result + '/' + build_boxplot_spatial_level +\n '/h-tfidf-Biggest-score.csv', index_col=0)\n", (62684, 62783), True, 'import pandas as pd\n'), ((63483, 63499), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (63494, 63499), True, 'import matplotlib.pyplot as plt\n'), ((63508, 63537), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['htidf_similarity'], {}), '(htidf_similarity)\n', (63519, 63537), True, 'import matplotlib.pyplot as plt\n'), ((63546, 63566), 'matplotlib.pyplot.title', 'plt.title', (['"""H-TFIDF"""'], {}), "('H-TFIDF')\n", (63555, 63566), True, 'import matplotlib.pyplot as plt\n'), ((63575, 63589), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (63583, 63589), True, 'import matplotlib.pyplot as plt\n'), ((63597, 63613), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (63608, 63613), True, 'import matplotlib.pyplot as plt\n'), ((63622, 63659), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['tfidf_country_similarity'], {}), '(tfidf_country_similarity)\n', (63633, 63659), True, 'import matplotlib.pyplot as plt\n'), ((63668, 63709), 'matplotlib.pyplot.title', 'plt.title', (['"""TFIDF with corpus by country"""'], {}), "('TFIDF with corpus by country')\n", (63677, 63709), True, 'import matplotlib.pyplot as plt\n'), ((63718, 63732), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (63726, 63732), True, 'import matplotlib.pyplot as plt\n'), ((63741, 63757), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (63752, 63757), True, 'import matplotlib.pyplot as plt\n'), ((63766, 63801), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['tfidf_whole_similarity'], {}), '(tfidf_whole_similarity)\n', (63777, 63801), True, 
'import matplotlib.pyplot as plt\n'), ((63810, 63848), 'matplotlib.pyplot.title', 'plt.title', (['"""TFIDF on the whole corpus"""'], {}), "('TFIDF on the whole corpus')\n", (63819, 63848), True, 'import matplotlib.pyplot as plt\n'), ((63857, 63871), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (63865, 63871), True, 'import matplotlib.pyplot as plt\n'), ((63880, 63898), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (63896, 63898), True, 'import matplotlib.pyplot as plt\n'), ((63907, 63938), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.3)'}), '(wspace=0.3)\n', (63926, 63938), True, 'import matplotlib.pyplot as plt\n'), ((63947, 64051), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Distribution of similarity values among the extracted terms pairs of a measure"""'], {}), "(\n 'Distribution of similarity values among the extracted terms pairs of a measure'\n )\n", (63959, 64051), True, 'import matplotlib.pyplot as plt\n'), ((64050, 64121), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot.png')"], {}), "(f_path_result_boxplot + '/pairwise-similarity-boxplot.png')\n", (64061, 64121), True, 'import matplotlib.pyplot as plt\n'), ((64149, 64160), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (64158, 64160), True, 'import matplotlib.pyplot as plt\n'), ((64298, 64316), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {}), '(1, 5)\n', (64310, 64316), True, 'import matplotlib.pyplot as plt\n'), ((64643, 64733), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot_HTFIDF-country.png')"], {}), "(f_path_result_boxplot +\n '/pairwise-similarity-boxplot_HTFIDF-country.png')\n", (64654, 64733), True, 'import matplotlib.pyplot as plt\n'), ((64759, 64774), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (64768, 64774), True, 'import matplotlib.pyplot as plt\n'), ((64835, 64853), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {}), '(1, 5)\n', (64847, 64853), True, 'import matplotlib.pyplot as plt\n'), ((65207, 65296), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot_TFIDF-country.png')"], {}), "(f_path_result_boxplot +\n '/pairwise-similarity-boxplot_TFIDF-country.png')\n", (65218, 65296), True, 'import matplotlib.pyplot as plt\n'), ((65322, 65337), 'matplotlib.pyplot.close', 'plt.close', (['fig3'], {}), '(fig3)\n', (65331, 65337), True, 'import matplotlib.pyplot as plt\n'), ((65552, 65570), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (65564, 65570), True, 'import matplotlib.pyplot as plt\n'), ((65864, 65876), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (65872, 65876), True, 'import numpy as np\n'), ((65934, 65946), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (65942, 65946), True, 'import numpy as np\n'), ((66848, 66943), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot_between_TFIDF-whole.png'\n )"], {}), "(f_path_result_boxplot +\n '/pairwise-similarity-boxplot_between_TFIDF-whole.png')\n", (66859, 66943), True, 'import matplotlib.pyplot as plt\n'), ((66969, 67003), 'matplotlib.pyplot.close', 'plt.close', (['fig_compare_TFIDF_whole'], {}), '(fig_compare_TFIDF_whole)\n', (66978, 67003), True, 'import matplotlib.pyplot as plt\n'), ((67465, 67597), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + 
'/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_' +\n spatial_level + '_corpus.csv')"], {'index_col': '(0)'}), "(f_path_result +\n '/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_' + spatial_level +\n '_corpus.csv', index_col=0)\n", (67476, 67597), True, 'import pandas as pd\n'), ((67627, 67697), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/TFIDF_BiggestScore_on_whole_corpus.csv')"], {}), "(f_path_result + '/TFIDF_BiggestScore_on_whole_corpus.csv')\n", (67638, 67697), True, 'import pandas as pd\n'), ((67722, 67792), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result + '/h-tfidf-Biggest-score.csv', index_col=0)\n", (67733, 67792), True, 'import pandas as pd\n'), ((16769, 16802), 'pandas.DataFrame', 'pd.DataFrame', (['tweets[tweetByCity]'], {}), '(tweets[tweetByCity])\n', (16781, 16802), True, 'import pandas as pd\n'), ((23194, 23227), 'pandas.DataFrame', 'pd.DataFrame', (['tweets[tweetByCity]'], {}), '(tweets[tweetByCity])\n', (23206, 23227), True, 'import pandas as pd\n'), ((33443, 33507), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'distance_threshold': '(1.5)'}), '(n_clusters=None, distance_threshold=1.5)\n', (33466, 33507), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((44598, 44619), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (44613, 44619), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((44632, 44648), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (44641, 44648), True, 'import matplotlib.pyplot as plt\n'), ((56496, 56532), 'os.path.exists', 'os.path.exists', (['f_path_result_common'], {}), '(f_path_result_common)\n', (56510, 56532), False, 'import os\n'), ((56546, 56579), 'os.makedirs', 'os.makedirs', (['f_path_result_common'], {}), '(f_path_result_common)\n', (56557, 56579), False, 'import os\n'), ((58301, 58336), 'os.path.exists', 'os.path.exists', (['f_path_result_tfidf'], {}), '(f_path_result_tfidf)\n', (58315, 58336), False, 'import os\n'), ((58350, 58382), 'os.makedirs', 'os.makedirs', (['f_path_result_tfidf'], {}), '(f_path_result_tfidf)\n', (58361, 58382), False, 'import os\n'), ((58398, 58445), 'os.path.exists', 'os.path.exists', (['f_path_result_tfidf_by_locality'], {}), '(f_path_result_tfidf_by_locality)\n', (58412, 58445), False, 'import os\n'), ((58459, 58503), 'os.makedirs', 'os.makedirs', (['f_path_result_tfidf_by_locality'], {}), '(f_path_result_tfidf_by_locality)\n', (58470, 58503), False, 'import os\n'), ((59779, 59830), 'os.path.exists', 'os.path.exists', (['f_path_result_compare_meassures_dir'], {}), '(f_path_result_compare_meassures_dir)\n', (59793, 59830), False, 'import os\n'), ((59844, 59892), 'os.makedirs', 'os.makedirs', (['f_path_result_compare_meassures_dir'], {}), '(f_path_result_compare_meassures_dir)\n', (59855, 59892), False, 'import os\n'), ((59972, 60062), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_compare_meassures_dir + '/matrixOccurence.csv')"], {'index_col': '(0)'}), "(f_path_result_compare_meassures_dir + '/matrixOccurence.csv',\n index_col=0)\n", (59983, 60062), True, 'import pandas as pd\n'), ((60561, 60610), 'pandas.read_csv', 'pd.read_csv', (['f_path_result_compare_meassures_file'], {}), '(f_path_result_compare_meassures_file)\n', (60572, 60610), True, 'import pandas as pd\n'), ((61173, 61207), 'os.path.exists', 'os.path.exists', (['f_path_result_tsne'], 
{}), '(f_path_result_tsne)\n', (61187, 61207), False, 'import os\n'), ((61221, 61252), 'os.makedirs', 'os.makedirs', (['f_path_result_tsne'], {}), '(f_path_result_tsne)\n', (61232, 61252), False, 'import os\n'), ((62284, 62321), 'os.path.exists', 'os.path.exists', (['f_path_result_boxplot'], {}), '(f_path_result_boxplot)\n', (62298, 62321), False, 'import os\n'), ((62335, 62369), 'os.makedirs', 'os.makedirs', (['f_path_result_boxplot'], {}), '(f_path_result_boxplot)\n', (62346, 62369), False, 'import os\n'), ((66074, 66138), 'numpy.append', 'np.append', (['similarity_between_htfidf_tfidf_whole_1D', 'row[i + 1:]'], {}), '(similarity_between_htfidf_tfidf_whole_1D, row[i + 1:])\n', (66083, 66138), True, 'import numpy as np\n'), ((66313, 66383), 'numpy.append', 'np.append', (['similarity_between_tfidfcountry_tfidf_whole_1D', 'row[i + 1:]'], {}), '(similarity_between_tfidfcountry_tfidf_whole_1D, row[i + 1:])\n', (66322, 66383), True, 'import numpy as np\n'), ((69428, 69504), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_flood + '/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result_flood + '/h-tfidf-Biggest-score.csv', index_col=0)\n", (69439, 69504), True, 'import pandas as pd\n'), ((71432, 71466), 'os.path.exists', 'os.path.exists', (['f_path_result_venn'], {}), '(f_path_result_venn)\n', (71446, 71466), False, 'import os\n'), ((71480, 71511), 'os.makedirs', 'os.makedirs', (['f_path_result_venn'], {}), '(f_path_result_venn)\n', (71491, 71511), False, 'import os\n'), ((71590, 71706), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/' + build_venn_spatial_level +\n '/h-tfidf-Biggest-score-flooding.csv')"], {'index_col': '(0)'}), "(f_path_result + '/' + build_venn_spatial_level +\n '/h-tfidf-Biggest-score-flooding.csv', index_col=0)\n", (71601, 71706), True, 'import pandas as pd\n'), ((11910, 11940), 'pandas.to_datetime', 'pd.to_datetime', (['matrixOcc.date'], {}), '(matrixOcc.date)\n', (11924, 11940), True, 'import pandas as pd\n'), ((11945, 11973), 'pandas.to_timedelta', 'pd.to_timedelta', (['(7)'], {'unit': '"""d"""'}), "(7, unit='d')\n", (11960, 11973), True, 'import pandas as pd\n'), ((33323, 33372), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings'], {'axis': '(1)', 'keepdims': '(True)'}), '(embeddings, axis=1, keepdims=True)\n', (33337, 33372), True, 'import numpy as np\n'), ((37001, 37054), 'elasticsearch.Elasticsearch.search', 'Elasticsearch.search', (['client'], {'index': 'index', 'body': 'query'}), '(client, index=index, body=query)\n', (37021, 37054), False, 'from elasticsearch import Elasticsearch\n'), ((50532, 50553), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (50547, 50553), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((50570, 50586), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (50579, 50586), True, 'import matplotlib.pyplot as plt\n'), ((57379, 57414), 'os.path.exists', 'os.path.exists', (['f_path_result_level'], {}), '(f_path_result_level)\n', (57393, 57414), False, 'import os\n'), ((57432, 57464), 'os.makedirs', 'os.makedirs', (['f_path_result_level'], {}), '(f_path_result_level)\n', (57443, 57464), False, 'import os\n'), ((70135, 70175), 'os.path.exists', 'os.path.exists', (['f_path_result_clustering'], {}), '(f_path_result_clustering)\n', (70149, 70175), False, 'import os\n'), ((70193, 70230), 'os.makedirs', 'os.makedirs', (['f_path_result_clustering'], {}), '(f_path_result_clustering)\n', (70204, 70230), False, 'import 
os\n'), ((70321, 70410), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_flood + '/h-tfidf-Biggest-score-flooding.csv')"], {'index_col': '(0)'}), "(f_path_result_flood + '/h-tfidf-Biggest-score-flooding.csv',\n index_col=0)\n", (70332, 70410), True, 'import pandas as pd\n'), ((4161, 4198), 'json.dumps', 'json.dumps', (["hits['_source']"], {'indent': '(4)'}), "(hits['_source'], indent=4)\n", (4171, 4198), False, 'import json\n'), ((43973, 43994), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (43988, 43994), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((49885, 49906), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (49900, 49906), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((37354, 37375), 'collections.Counter', 'Counter', (['list_of_user'], {}), '(list_of_user)\n', (37361, 37375), False, 'from collections import defaultdict, Counter\n'), ((40091, 40113), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (40110, 40113), False, 'import operator\n'), ((41118, 41150), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""Y"""'}), "(key='date', freq='Y')\n", (41128, 41150), True, 'import pandas as pd\n'), ((41241, 41273), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""Y"""'}), "(key='date', freq='Y')\n", (41251, 41273), True, 'import pandas as pd\n'), ((12123, 12155), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""W"""'}), "(key='date', freq='W')\n", (12133, 12155), True, 'import pandas as pd\n'), ((41368, 41400), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""Y"""'}), "(key='date', freq='Y')\n", (41378, 41400), True, 'import pandas as pd\n'), ((12254, 12286), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""W"""'}), "(key='date', freq='W')\n", (12264, 12286), True, 'import pandas as pd\n'), ((12383, 12415), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""W"""'}), "(key='date', freq='W')\n", (12393, 12415), True, 'import pandas as pd\n')]
""" Utility functions for working with DataFrames """ import pandas import numpy as np TEST_DF = pandas.DataFrame([1,2,3]) class O: """ A square shaped block for my PyTetris game. """ def __init__(self): self.type = "O" self.color = (255, 255, 0) mold = np.zeros([24, 10]) # framework for falling piece mold[1, 4:6] = 1 # placing 1s where the piece begins mold[2, 4:6] = 1 self.position = mold self.position = [self.position, self.position, self.position, self.position]
[ "pandas.DataFrame", "numpy.zeros" ]
[((99, 126), 'pandas.DataFrame', 'pandas.DataFrame', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (115, 126), False, 'import pandas\n'), ((297, 315), 'numpy.zeros', 'np.zeros', (['[24, 10]'], {}), '([24, 10])\n', (305, 315), True, 'import numpy as np\n')]
from numpy import log, pi, arange, exp
from scipy.optimize import brentq
import matplotlib.pyplot as plot
from matplotlib import rc

import equation


def diagram_sum(x, d):
    return 4.*pi/log(d**2 *2.*x)


def diagram_sum_3body(x, d):
    point=equation.equation(3.*x,'2D',20.,0.1,d)
    point.solve()
    g3=point.g3
    del point
    return 4.*pi/log(d**2 *2.*x) + g3


drange=arange(0.6,5.,0.05)
xx=[d for d in drange]
ee=[1/d**2 for d in drange]
yy=[brentq(lambda mu:mu - diagram_sum(mu,d),(0.5+0.01)/(d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-3) for d in drange]

drange=arange(0.6,5.6,1.0)
zx=[d for d in drange]
ze=[1/d**2 for d in drange]
zz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange]

drange=arange(0.7,1.5,0.1)
wx=[d for d in drange]
we=[1/d**2 for d in drange]
wz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange]

drange=arange(0.6,0.7,0.025)
fx=[d for d in drange]
fe=[1/d**2 for d in drange]
fz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange]

plot.plot(xx,yy)
plot.plot(zx,zz,'o')
plot.plot(wx,wz,'o')
plot.plot(fx,fz,'o')
plot.xlabel('d, bound state size parameter')
plot.ylabel(r'$\mu$, self-consistent potential')
plot.savefig('results/potential_self-consistent.pdf')
plot.close()

plot.plot(ee,yy)
plot.plot(ze,zz,'o')
plot.plot(we,wz,'o')
plot.plot(fe,fz,'o')
rc('text', usetex=True)
plot.xlabel(r'$\frac{1}{d^2}$, bound state energy')
plot.ylabel(r'$\mu$, self-consistent potential')
plot.savefig('results/potential_energy_parameter.pdf')
[ "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.log", "equation.equation", "matplotlib.pyplot.close", "numpy.exp", "matplotlib.rc", "numpy.arange" ]
[((379, 401), 'numpy.arange', 'arange', (['(0.6)', '(5.0)', '(0.05)'], {}), '(0.6, 5.0, 0.05)\n', (385, 401), False, 'from numpy import log, pi, arange, exp\n'), ((578, 599), 'numpy.arange', 'arange', (['(0.6)', '(5.6)', '(1.0)'], {}), '(0.6, 5.6, 1.0)\n', (584, 599), False, 'from numpy import log, pi, arange, exp\n'), ((784, 805), 'numpy.arange', 'arange', (['(0.7)', '(1.5)', '(0.1)'], {}), '(0.7, 1.5, 0.1)\n', (790, 805), False, 'from numpy import log, pi, arange, exp\n'), ((990, 1013), 'numpy.arange', 'arange', (['(0.6)', '(0.7)', '(0.025)'], {}), '(0.6, 0.7, 0.025)\n', (996, 1013), False, 'from numpy import log, pi, arange, exp\n'), ((1192, 1209), 'matplotlib.pyplot.plot', 'plot.plot', (['xx', 'yy'], {}), '(xx, yy)\n', (1201, 1209), True, 'import matplotlib.pyplot as plot\n'), ((1209, 1231), 'matplotlib.pyplot.plot', 'plot.plot', (['zx', 'zz', '"""o"""'], {}), "(zx, zz, 'o')\n", (1218, 1231), True, 'import matplotlib.pyplot as plot\n'), ((1230, 1252), 'matplotlib.pyplot.plot', 'plot.plot', (['wx', 'wz', '"""o"""'], {}), "(wx, wz, 'o')\n", (1239, 1252), True, 'import matplotlib.pyplot as plot\n'), ((1251, 1273), 'matplotlib.pyplot.plot', 'plot.plot', (['fx', 'fz', '"""o"""'], {}), "(fx, fz, 'o')\n", (1260, 1273), True, 'import matplotlib.pyplot as plot\n'), ((1272, 1316), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""d, bound state size parameter"""'], {}), "('d, bound state size parameter')\n", (1283, 1316), True, 'import matplotlib.pyplot as plot\n'), ((1317, 1365), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""$\\\\mu$, self-consistent potential"""'], {}), "('$\\\\mu$, self-consistent potential')\n", (1328, 1365), True, 'import matplotlib.pyplot as plot\n'), ((1366, 1419), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""results/potential_self-consistent.pdf"""'], {}), "('results/potential_self-consistent.pdf')\n", (1378, 1419), True, 'import matplotlib.pyplot as plot\n'), ((1420, 1432), 'matplotlib.pyplot.close', 'plot.close', ([], {}), '()\n', (1430, 1432), True, 'import matplotlib.pyplot as plot\n'), ((1434, 1451), 'matplotlib.pyplot.plot', 'plot.plot', (['ee', 'yy'], {}), '(ee, yy)\n', (1443, 1451), True, 'import matplotlib.pyplot as plot\n'), ((1451, 1473), 'matplotlib.pyplot.plot', 'plot.plot', (['ze', 'zz', '"""o"""'], {}), "(ze, zz, 'o')\n", (1460, 1473), True, 'import matplotlib.pyplot as plot\n'), ((1472, 1494), 'matplotlib.pyplot.plot', 'plot.plot', (['we', 'wz', '"""o"""'], {}), "(we, wz, 'o')\n", (1481, 1494), True, 'import matplotlib.pyplot as plot\n'), ((1493, 1515), 'matplotlib.pyplot.plot', 'plot.plot', (['fe', 'fz', '"""o"""'], {}), "(fe, fz, 'o')\n", (1502, 1515), True, 'import matplotlib.pyplot as plot\n'), ((1514, 1537), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1516, 1537), False, 'from matplotlib import rc\n'), ((1538, 1589), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""$\\\\frac{1}{d^2}$, bound state energy"""'], {}), "('$\\\\frac{1}{d^2}$, bound state energy')\n", (1549, 1589), True, 'import matplotlib.pyplot as plot\n'), ((1590, 1638), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""$\\\\mu$, self-consistent potential"""'], {}), "('$\\\\mu$, self-consistent potential')\n", (1601, 1638), True, 'import matplotlib.pyplot as plot\n'), ((1639, 1693), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""results/potential_energy_parameter.pdf"""'], {}), "('results/potential_energy_parameter.pdf')\n", (1651, 1693), True, 'import matplotlib.pyplot as plot\n'), ((245, 291), 'equation.equation', 
'equation.equation', (['(3.0 * x)', '"""2D"""', '(20.0)', '(0.1)', 'd'], {}), "(3.0 * x, '2D', 20.0, 0.1, d)\n", (262, 291), False, 'import equation\n'), ((189, 210), 'numpy.log', 'log', (['(d ** 2 * 2.0 * x)'], {}), '(d ** 2 * 2.0 * x)\n', (192, 210), False, 'from numpy import log, pi, arange, exp\n'), ((349, 370), 'numpy.log', 'log', (['(d ** 2 * 2.0 * x)'], {}), '(d ** 2 * 2.0 * x)\n', (352, 370), False, 'from numpy import log, pi, arange, exp\n'), ((522, 542), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (525, 542), False, 'from numpy import log, pi, arange, exp\n'), ((728, 748), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (731, 748), False, 'from numpy import log, pi, arange, exp\n'), ((934, 954), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (937, 954), False, 'from numpy import log, pi, arange, exp\n'), ((1142, 1162), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (1145, 1162), False, 'from numpy import log, pi, arange, exp\n')]
# Simulates a network with nodes, where each node can be either a
# transmitter or receiver (but not both) at any time step. The simulation
# examines the coverage based on the signal-to-interference ratio (SINR).
# The network has a random medium access control (MAC) scheme based on a
# determinantal point process, as outlined in the paper[1] by
# B\laszczyszyn, Brochard and Keeler. This code validates by simulation
# Propositions IV.1 and IV.2 in the paper[1]. This result gives the
# probability of coverage based on the SINR value of a transmitter-receiver
# pair in a non-random network of transmitter-or-receiver nodes such as a
# realization of a random point process.
#
# More specifically, the code estimates the empirical probability of x and y being
# connected (ie SINR(x,y)>tau)given that x is transmitting and
# y isn't.
#
# The simulation section estimates the empirical probability of SINR-based
# coverage. For a large enough number of simulations, this empirical result
# will agree with the analytic results given in the paper[2].
#
# By coverage, it is assumed that the SINR of the transmitter is larger
# than some threshold at the corresponding receiver.
#
# Probabilities for other events are calculated/estimated including:
#
# Event A=SINR(x,y) > tau
# Event B=Transmitter exists
# Event C=Receiver exists
#
# This code was originally written by <NAME> for the paper by
# B\laszczyszyn, Brochard and Keeler[1].
#
# If you use this code in published research, please cite paper[1].
#
# References:
#
# [1] B\laszczyszyn, Brochard and Keeler, "Coverage probability in
# wireless networks with determinantal scheduling", 2020.
#
# Author: <NAME>, 2020.

from funProbCovTXRXDet import funProbCovTXRXDet

import numpy as np # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # for plotting

# simulate determintal point process
from funSimSimpleDPP import funSimSimpleDPP
from funPalmK import funPalmK # find Palm distribution (for a single point)
from funLtoK import funLtoK # convert L kernel to a (normalized) K kernel

plt.close("all") # close all figures

#set random seed for reproducibility
np.random.seed(1)

###START -- Parameters -- START###
choiceExample = 1 # 1 or 2 for a random (uniform) or deterministic example
numbSim = 10**4 # number of simulations
numbNodes = 10 # number of pairs

indexTrans = 0 # index for transmitter
indexRec = 1 # index for receiver
#above indices are bounded by numbNodes

#fading model
muFading = 1/3 # Rayleigh fading average

#path loss model
betaPath = 2 # pathloss exponent
kappaPath = 1 # rescaling constant for pathloss function

thresholdSINR = 0.1 # SINR threshold value
constNoise = 0 # noise constant

#Determinantal kernel parameters
choiceKernel = 1 # 1 for Gaussian (ie squared exponetial );2 for Cauchy
#3 for independent (ie binomial) model
sigma = 1 # parameter for Gaussian and Cauchy kernel
alpha = 1 # parameter for Cauchy kernel
pAloha = 0.5 # parameter for independent kernel (ie proportion transmitting)

#Simulation window parameters
xMin = -1
xMax = 1 # x dimensions
yMin = -1
yMax = 1 # y dimensions
xDelta = xMax-xMin # rectangle width
yDelta = yMax-yMin # rectangle height
###END -- Parameters -- END###

#Simulate a random point process for the network configuration
#interferer section
if (choiceExample == 1):
    #random (uniform) x/y coordinates
    #transmitters or receivers
    xx = xDelta*(np.random.rand(numbNodes))+xMin
    yy = yDelta*(np.random.rand(numbNodes))+yMin
else:
    #non-random x/y coordinates
    #transmitters or receivers
    t = 2*np.pi*np.linspace(0, (numbNodes-1)/numbNodes, numbNodes)
    xx = (1+np.cos(5*t+1))/2
    yy = (1+np.sin(3*t+2))/2

#transmitter location
xxTX = xx[indexTrans]
yyTX = yy[indexTrans]

#Receiver location
xxRX = xx[indexRec]
yyRX = yy[indexRec]

# START -- CREATE L matrix -- START
sizeL = numbNodes
#Calculate Gaussian or Cauchy kernel based on grid x/y values
#all squared distances of x/y difference pairs
xxDiff = np.outer(xx, np.ones((sizeL,)))-np.outer(np.ones((sizeL,)), xx)
yyDiff = np.outer(yy, np.ones((sizeL,)))-np.outer(np.ones((sizeL,)), yy)
rrDiffSquared = (xxDiff**2+yyDiff**2)

if choiceKernel == 1:
    #Gaussian/squared exponential kernel
    L = np.exp(-(rrDiffSquared)/sigma**2)
elif choiceKernel == 2:
    #Cauchy kernel
    L = 1/(1+rrDiffSquared/sigma**2)**(alpha+1/2)
else:
    raise Exception('choiceKernel has to be equal to 1 or 2.')

L = 10*L # scale matrix up (increases the eigenvalues ie number of points)
# END-- CREATE L matrix -- # END

#Eigen decomposition
eigenValL, eigenVecL = np.linalg.eig(L)

#Helper functions
def funPathloss(r):
    return (kappaPath*(1+r))**(-betaPath) # pathloss function

#Functions for the proability of being connected
def fun_h(s, r):
    return (1/(thresholdSINR*(funPathloss(s)/funPathloss(r))+1))

def fun_w(r):
    return (np.exp(-(thresholdSINR/muFading)*constNoise/funPathloss(r)))

#initialize boolean vectors/arrays for collecting statistics
booleA = np.zeros(numbSim, dtype=bool) # transmitter is connected
booleB = np.zeros(numbSim, dtype=bool) # transmitter exists
booleC = np.zeros(numbSim, dtype=bool) # receiver exists

#loop through all simulations
for ss in range(numbSim):
    #DPP for active transmitter nodes
    indexDPP = funSimSimpleDPP(eigenVecL, eigenValL)

    booleB[ss] = any(indexDPP == indexTrans) # if transmitter is in subset
    booleC[ss] = all(indexDPP != indexRec) # if receiver is not in subset

    #if transmitter is in the determinantal subset, calculate its SINR
    if booleB[ss]:
        #create Boolean variable for active interferers
        booleInter = np.zeros(numbNodes, dtype=bool)
        booleInter[indexDPP] = True
        booleInter[indexTrans] = False # exclude transmitter

        #x/y values of interfering nodes
        xxInter = xx[booleInter]
        yyInter = yy[booleInter]

        #number of interferers
        numbInter = np.sum(booleInter)

        #simulate signal for interferers
        fadeRandInter = np.random.exponential(muFading, numbInter) # fading
        distPathInter = np.hypot(xxInter-xxRX, yyInter-yyRX) # path distance
        proplossInter = fadeRandInter*funPathloss(distPathInter) # pathloss

        #simulate signal for transmitter
        fadeRandSig = np.random.exponential(muFading) # fading
        distPathSig = np.hypot(xxTX-xxRX, yyTX-yyRX) # path distance
        proplossSig = fadeRandSig*funPathloss(distPathSig) # pathloss

        #Calculate the SINR
        SINR = proplossSig/(np.sum(proplossInter)+constNoise)

        #see if transmitter is connected
        booleA[ss] = (SINR > thresholdSINR)

booleBandC = booleB & booleC # transmitter-receiver pair exists
booleNotC = ~booleC # receiver does not exist
booleBandNotC = booleB & booleNotC # transmitter exists, receiver does not

###START Create kernels and Palm kernels START###
K = funLtoK(L) # caclulate K kernel from kernel L
sizeK = K.shape[0] # number of columns/rows in kernel matrix K

#Calculate all respective distances (based on random network configuration)
#from all transmitters to receiver
dist_ji_xx = np.outer(xx, np.ones((sizeK,)))-np.outer(np.ones((sizeK,)), xxRX)
dist_ji_yy = np.outer(yy, np.ones((sizeK,)))-np.outer(np.ones((sizeK,)), yyRX)
dist_ji = np.hypot(dist_ji_xx, dist_ji_yy) # Euclidean distances

#transmitters to receivers
dist_ii_xx = xxTX-xxRX
dist_ii_yy = yyTX-yyRX
dist_ii = np.hypot(dist_ii_xx, dist_ii_yy) # Euclidean distances
# repeat cols for element-wise evaluation
dist_ii = np.tile(dist_ii, (sizeK, 1))

#apply functions
hMatrix = fun_h(dist_ji, dist_ii) # matrix H for all h_{x_i}(x_j) values
W_x = fun_w(np.hypot(xx-xxRX, yy-yyRX)) # noise factor

##create h matrix corresponding to transmitter
booleAll = np.ones(sizeK, dtype=bool)
booleReduced = booleAll
booleReduced[indexTrans] = False # remove transmitter
#choose transmitter-receiver row
hVectorReduced = hMatrix[booleReduced, indexTrans]
#repeat vector hVectorReduced as rows
hMatrixReduced = np.tile(hVectorReduced, (sizeK-1, 1))
hMatrixReduced = hMatrixReduced.transpose()

#create Palm kernels conditioned on transmitter existing
KPalmReducedTX, KPalmTX = funPalmK(K, indexTrans)
#create Palm kernels conditioned on receiver existing
KPalmRXReduced, KPalmRX = funPalmK(K, indexRec)
#create Palm kernels conditioned on transmitter AND receiver existing
_, KPalmTXRX = funPalmK(KPalmTX, indexRec)

#create reduced (by transmitter) Palm kernel conditioned on transmitter
#AND receiver existing
indexReduced = np.arange(sizeK)[booleReduced]
KPalmSemiReducedTXRX = np.eye(sizeK-1)
for i in range(KPalmTXRX.shape[0]-1):
    KPalmSemiReducedTXRX[:, i] = KPalmTXRX[indexReduced, indexReduced[i]]

#calculate final kernels
#for transmitter
KReduced_hTX = np.sqrt(1-hMatrixReduced.transpose()) * \
    KPalmReducedTX*np.sqrt(1-hMatrixReduced)
##for reciever and transmitter
KReduced_hRX = np.sqrt(1-hMatrixReduced.transpose()) * \
    KPalmSemiReducedTXRX*np.sqrt(1-hMatrixReduced)
###END Create kernels and Palm kernels END###

###START Connection Proability (ie SINR>thresholdConst) START###
#calculate probabiliity for the event that transmitter's
#signal at the receiver has an SINR>thresholdConst, given the pair is
# active (ie trasnmitting and receiving); see Section IV in paper[1].

#probability transmitter exists (ie transmitter at indexTrans) - event B
probB = K[indexTrans, indexTrans]
probB_Emp = np.mean(booleB)

#probability receiver exists (ie no transmitter at indexRec) - event C
probC = 1-K[indexRec, indexRec]
probC_Emp = np.mean(booleC)

#probability transmitter but no receiver
indexPair = np.array([indexTrans, indexRec])
probBNotC = np.linalg.det(K[indexPair, :][:, indexPair])
probBNotC_Emp = np.mean(booleBandNotC)
#
#probability transmitter and receiver existing
probBandC = probB-probBNotC
probBandC_Emp = np.mean(booleBandC)

#probability of SINR>threshold (ie transmiter is connected ) given B
probA_GivenB = np.linalg.det(np.eye(sizeK-1)-KReduced_hTX)*W_x[indexTrans]
probA_GivenB_Emp = np.mean(booleA[booleB])

#probability of SINR>threshold (ie transmiter is connected ) given B and C
probA_GivenBNotC = np.linalg.det(np.eye(sizeK-1)-KReduced_hRX)*W_x[indexTrans]
probA_GivenBNotC_Emp = np.mean(booleA[booleNotC])

#probability B given NOT C (ie a transmitter exists at indexRec)
probB_GivenNotC = KPalmRX[indexTrans, indexTrans]
probB_GivenNotC_Emp = np.mean(booleB[booleNotC])

#probability B given C
probB_GivenC = (probB-(1-probC)*probB_GivenNotC)/probC
probB_GivenC_Emp = np.mean(booleB[booleC])

#probability NOT C (ie a transmitter exists at indexRec) given B
probNotC_GivenB = KPalmTX[indexRec, indexRec]
probNotC_GivenB_Emp = np.mean(booleNotC[booleB])

#probability C given B
probC_GivenB_Emp = np.mean(booleC[booleB])
probC_GivenB = 1-probNotC_GivenB

print('Conditional coverage probability (ie A given B and C).')
#coverage probability ie probability of A given B and C
probA_GivenBandC = (probA_GivenB-probNotC_GivenB*probA_GivenBNotC)/probC_GivenB
print('probA_GivenBandC = ', probA_GivenBandC)

#Estimate empirical probability two different ways
#Directly
probA_GivenBandC_Emp1 = np.mean(booleA[booleBandC])
print('probA_GivenBandC_Emp1 = ', probA_GivenBandC_Emp1)
#Indirectly
probA_GivenBandC_Emp2 = (probA_GivenB_Emp-probNotC_GivenB_Emp*probA_GivenBNotC_Emp)\
    / probC_GivenB_Emp

print('Coverage probability (ie A given B and C).')
#connection probability
probCov = probA_GivenBandC*probBandC
print('probCov = ', probCov)
probCov_Emp1 = np.mean(booleA & booleB & booleC)
print('probCov_Emp1 = ', probCov_Emp1)
#probCov_Emp2=probA_GivenBandC_Emp2*probBandC_Emp

#probCovCond=probA_GivenBandC #conditional coverage probability
#probTXRX=probBandC #probability of pair existing
#connection probability
#probCov=probCovCond*probTXRX
###END Connection Proability (ie SINR>thresholdConst) END###

#TEST
probCov, probTXRX, probCovCond = funProbCovTXRXDet(
    xx, yy, fun_h, fun_w, L, indexTrans, indexRec)

if indexDPP.size > 0:
    ### START -- Plotting -- START ###
    markerSize = 13
    #random color vector
    vectorColor = np.random.rand(3) # random vector for colors of marker

    #Plot point process
    plt.plot(xx, yy, 'ko', markerfacecolor="None", markersize=markerSize)
    #Plot determinantally-thinned point process
    plt.plot(xx[indexDPP], yy[indexDPP], 'k.', markerfacecolor=vectorColor, markersize=1.1*markerSize, markeredgecolor='none')
    plt.axis('equal')
    plt.axis('off')
    plt.legend(('Original point process', 'Determinantal subset'))
    ### END -- Plotting -- END ###
#end
[ "numpy.sqrt", "numpy.random.rand", "numpy.random.exponential", "numpy.array", "funPalmK.funPalmK", "numpy.sin", "numpy.arange", "numpy.mean", "funProbCovTXRXDet.funProbCovTXRXDet", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "numpy.exp", "numpy.linspace", "numpy.random.seed", "numpy.hypot", "matplotlib.pyplot.axis", "numpy.tile", "numpy.eye", "numpy.linalg.eig", "numpy.ones", "funLtoK.funLtoK", "numpy.cos", "funSimSimpleDPP.funSimSimpleDPP", "matplotlib.pyplot.legend", "numpy.linalg.det", "numpy.sum", "numpy.zeros" ]
[((2079, 2095), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2088, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2172), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2169, 2172), True, 'import numpy as np\n'), ((4619, 4635), 'numpy.linalg.eig', 'np.linalg.eig', (['L'], {}), '(L)\n', (4632, 4635), True, 'import numpy as np\n'), ((5032, 5061), 'numpy.zeros', 'np.zeros', (['numbSim'], {'dtype': 'bool'}), '(numbSim, dtype=bool)\n', (5040, 5061), True, 'import numpy as np\n'), ((5099, 5128), 'numpy.zeros', 'np.zeros', (['numbSim'], {'dtype': 'bool'}), '(numbSim, dtype=bool)\n', (5107, 5128), True, 'import numpy as np\n'), ((5160, 5189), 'numpy.zeros', 'np.zeros', (['numbSim'], {'dtype': 'bool'}), '(numbSim, dtype=bool)\n', (5168, 5189), True, 'import numpy as np\n'), ((6928, 6938), 'funLtoK.funLtoK', 'funLtoK', (['L'], {}), '(L)\n', (6935, 6938), False, 'from funLtoK import funLtoK\n'), ((7319, 7351), 'numpy.hypot', 'np.hypot', (['dist_ji_xx', 'dist_ji_yy'], {}), '(dist_ji_xx, dist_ji_yy)\n', (7327, 7351), True, 'import numpy as np\n'), ((7458, 7490), 'numpy.hypot', 'np.hypot', (['dist_ii_xx', 'dist_ii_yy'], {}), '(dist_ii_xx, dist_ii_yy)\n', (7466, 7490), True, 'import numpy as np\n'), ((7566, 7594), 'numpy.tile', 'np.tile', (['dist_ii', '(sizeK, 1)'], {}), '(dist_ii, (sizeK, 1))\n', (7573, 7594), True, 'import numpy as np\n'), ((7802, 7828), 'numpy.ones', 'np.ones', (['sizeK'], {'dtype': 'bool'}), '(sizeK, dtype=bool)\n', (7809, 7828), True, 'import numpy as np\n'), ((8048, 8087), 'numpy.tile', 'np.tile', (['hVectorReduced', '(sizeK - 1, 1)'], {}), '(hVectorReduced, (sizeK - 1, 1))\n', (8055, 8087), True, 'import numpy as np\n'), ((8214, 8237), 'funPalmK.funPalmK', 'funPalmK', (['K', 'indexTrans'], {}), '(K, indexTrans)\n', (8222, 8237), False, 'from funPalmK import funPalmK\n'), ((8318, 8339), 'funPalmK.funPalmK', 'funPalmK', (['K', 'indexRec'], {}), '(K, indexRec)\n', (8326, 8339), False, 'from funPalmK import funPalmK\n'), ((8426, 8453), 'funPalmK.funPalmK', 'funPalmK', (['KPalmTX', 'indexRec'], {}), '(KPalmTX, indexRec)\n', (8434, 8453), False, 'from funPalmK import funPalmK\n'), ((8618, 8635), 'numpy.eye', 'np.eye', (['(sizeK - 1)'], {}), '(sizeK - 1)\n', (8624, 8635), True, 'import numpy as np\n'), ((9459, 9474), 'numpy.mean', 'np.mean', (['booleB'], {}), '(booleB)\n', (9466, 9474), True, 'import numpy as np\n'), ((9591, 9606), 'numpy.mean', 'np.mean', (['booleC'], {}), '(booleC)\n', (9598, 9606), True, 'import numpy as np\n'), ((9661, 9693), 'numpy.array', 'np.array', (['[indexTrans, indexRec]'], {}), '([indexTrans, indexRec])\n', (9669, 9693), True, 'import numpy as np\n'), ((9706, 9750), 'numpy.linalg.det', 'np.linalg.det', (['K[indexPair, :][:, indexPair]'], {}), '(K[indexPair, :][:, indexPair])\n', (9719, 9750), True, 'import numpy as np\n'), ((9767, 9789), 'numpy.mean', 'np.mean', (['booleBandNotC'], {}), '(booleBandNotC)\n', (9774, 9789), True, 'import numpy as np\n'), ((9883, 9902), 'numpy.mean', 'np.mean', (['booleBandC'], {}), '(booleBandC)\n', (9890, 9902), True, 'import numpy as np\n'), ((10067, 10090), 'numpy.mean', 'np.mean', (['booleA[booleB]'], {}), '(booleA[booleB])\n', (10074, 10090), True, 'import numpy as np\n'), ((10269, 10295), 'numpy.mean', 'np.mean', (['booleA[booleNotC]'], {}), '(booleA[booleNotC])\n', (10276, 10295), True, 'import numpy as np\n'), ((10434, 10460), 'numpy.mean', 'np.mean', (['booleB[booleNotC]'], {}), '(booleB[booleNotC])\n', (10441, 10460), True, 'import numpy as np\n'), ((10559, 
10582), 'numpy.mean', 'np.mean', (['booleB[booleC]'], {}), '(booleB[booleC])\n', (10566, 10582), True, 'import numpy as np\n'), ((10717, 10743), 'numpy.mean', 'np.mean', (['booleNotC[booleB]'], {}), '(booleNotC[booleB])\n', (10724, 10743), True, 'import numpy as np\n'), ((10787, 10810), 'numpy.mean', 'np.mean', (['booleC[booleB]'], {}), '(booleC[booleB])\n', (10794, 10810), True, 'import numpy as np\n'), ((11178, 11205), 'numpy.mean', 'np.mean', (['booleA[booleBandC]'], {}), '(booleA[booleBandC])\n', (11185, 11205), True, 'import numpy as np\n'), ((11542, 11575), 'numpy.mean', 'np.mean', (['(booleA & booleB & booleC)'], {}), '(booleA & booleB & booleC)\n', (11549, 11575), True, 'import numpy as np\n'), ((11936, 12000), 'funProbCovTXRXDet.funProbCovTXRXDet', 'funProbCovTXRXDet', (['xx', 'yy', 'fun_h', 'fun_w', 'L', 'indexTrans', 'indexRec'], {}), '(xx, yy, fun_h, fun_w, L, indexTrans, indexRec)\n', (11953, 12000), False, 'from funProbCovTXRXDet import funProbCovTXRXDet\n'), ((4266, 4301), 'numpy.exp', 'np.exp', (['(-rrDiffSquared / sigma ** 2)'], {}), '(-rrDiffSquared / sigma ** 2)\n', (4272, 4301), True, 'import numpy as np\n'), ((5318, 5355), 'funSimSimpleDPP.funSimSimpleDPP', 'funSimSimpleDPP', (['eigenVecL', 'eigenValL'], {}), '(eigenVecL, eigenValL)\n', (5333, 5355), False, 'from funSimSimpleDPP import funSimSimpleDPP\n'), ((7699, 7729), 'numpy.hypot', 'np.hypot', (['(xx - xxRX)', '(yy - yyRX)'], {}), '(xx - xxRX, yy - yyRX)\n', (7707, 7729), True, 'import numpy as np\n'), ((8564, 8580), 'numpy.arange', 'np.arange', (['sizeK'], {}), '(sizeK)\n', (8573, 8580), True, 'import numpy as np\n'), ((8865, 8892), 'numpy.sqrt', 'np.sqrt', (['(1 - hMatrixReduced)'], {}), '(1 - hMatrixReduced)\n', (8872, 8892), True, 'import numpy as np\n'), ((9004, 9031), 'numpy.sqrt', 'np.sqrt', (['(1 - hMatrixReduced)'], {}), '(1 - hMatrixReduced)\n', (9011, 9031), True, 'import numpy as np\n'), ((12131, 12148), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (12145, 12148), True, 'import numpy as np\n'), ((12215, 12284), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""ko"""'], {'markerfacecolor': '"""None"""', 'markersize': 'markerSize'}), "(xx, yy, 'ko', markerfacecolor='None', markersize=markerSize)\n", (12223, 12284), True, 'import matplotlib.pyplot as plt\n'), ((12337, 12465), 'matplotlib.pyplot.plot', 'plt.plot', (['xx[indexDPP]', 'yy[indexDPP]', '"""k."""'], {'markerfacecolor': 'vectorColor', 'markersize': '(1.1 * markerSize)', 'markeredgecolor': '"""none"""'}), "(xx[indexDPP], yy[indexDPP], 'k.', markerfacecolor=vectorColor,\n markersize=1.1 * markerSize, markeredgecolor='none')\n", (12345, 12465), True, 'import matplotlib.pyplot as plt\n'), ((12477, 12494), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (12485, 12494), True, 'import matplotlib.pyplot as plt\n'), ((12499, 12514), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12507, 12514), True, 'import matplotlib.pyplot as plt\n'), ((12519, 12581), 'matplotlib.pyplot.legend', 'plt.legend', (["('Original point process', 'Determinantal subset')"], {}), "(('Original point process', 'Determinantal subset'))\n", (12529, 12581), True, 'import matplotlib.pyplot as plt\n'), ((3610, 3664), 'numpy.linspace', 'np.linspace', (['(0)', '((numbNodes - 1) / numbNodes)', 'numbNodes'], {}), '(0, (numbNodes - 1) / numbNodes, numbNodes)\n', (3621, 3664), True, 'import numpy as np\n'), ((4032, 4049), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4039, 4049), True, 'import numpy 
as np\n'), ((4060, 4077), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4067, 4077), True, 'import numpy as np\n'), ((4105, 4122), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4112, 4122), True, 'import numpy as np\n'), ((4133, 4150), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4140, 4150), True, 'import numpy as np\n'), ((5676, 5707), 'numpy.zeros', 'np.zeros', (['numbNodes'], {'dtype': 'bool'}), '(numbNodes, dtype=bool)\n', (5684, 5707), True, 'import numpy as np\n'), ((5966, 5984), 'numpy.sum', 'np.sum', (['booleInter'], {}), '(booleInter)\n', (5972, 5984), True, 'import numpy as np\n'), ((6051, 6093), 'numpy.random.exponential', 'np.random.exponential', (['muFading', 'numbInter'], {}), '(muFading, numbInter)\n', (6072, 6093), True, 'import numpy as np\n'), ((6128, 6168), 'numpy.hypot', 'np.hypot', (['(xxInter - xxRX)', '(yyInter - yyRX)'], {}), '(xxInter - xxRX, yyInter - yyRX)\n', (6136, 6168), True, 'import numpy as np\n'), ((6323, 6354), 'numpy.random.exponential', 'np.random.exponential', (['muFading'], {}), '(muFading)\n', (6344, 6354), True, 'import numpy as np\n'), ((6387, 6421), 'numpy.hypot', 'np.hypot', (['(xxTX - xxRX)', '(yyTX - yyRX)'], {}), '(xxTX - xxRX, yyTX - yyRX)\n', (6395, 6421), True, 'import numpy as np\n'), ((7177, 7194), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7184, 7194), True, 'import numpy as np\n'), ((7205, 7222), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7212, 7222), True, 'import numpy as np\n'), ((7256, 7273), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7263, 7273), True, 'import numpy as np\n'), ((7284, 7301), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7291, 7301), True, 'import numpy as np\n'), ((3443, 3468), 'numpy.random.rand', 'np.random.rand', (['numbNodes'], {}), '(numbNodes)\n', (3457, 3468), True, 'import numpy as np\n'), ((3492, 3517), 'numpy.random.rand', 'np.random.rand', (['numbNodes'], {}), '(numbNodes)\n', (3506, 3517), True, 'import numpy as np\n'), ((3673, 3690), 'numpy.cos', 'np.cos', (['(5 * t + 1)'], {}), '(5 * t + 1)\n', (3679, 3690), True, 'import numpy as np\n'), ((3702, 3719), 'numpy.sin', 'np.sin', (['(3 * t + 2)'], {}), '(3 * t + 2)\n', (3708, 3719), True, 'import numpy as np\n'), ((10002, 10019), 'numpy.eye', 'np.eye', (['(sizeK - 1)'], {}), '(sizeK - 1)\n', (10008, 10019), True, 'import numpy as np\n'), ((10200, 10217), 'numpy.eye', 'np.eye', (['(sizeK - 1)'], {}), '(sizeK - 1)\n', (10206, 10217), True, 'import numpy as np\n'), ((6563, 6584), 'numpy.sum', 'np.sum', (['proplossInter'], {}), '(proplossInter)\n', (6569, 6584), True, 'import numpy as np\n')]
""" Tests for functions in imaging module Run at the project directory with: nosetests code/utils/tests/test_imaging.py """ # Loading modules. from __future__ import absolute_import, division, print_function import numpy as np import nibabel as nib import os import sys from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal # Add path to functions to the system path. sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/")) # Load our visualization functions. from Image_Visualizing import present_3d, make_mask,present_3d_options # all tests of present are looking at the output sizes of the 2d arrays def test_present(): # Read in the image data. data = np.arange(100000) data = data.reshape((100,100,10)) full=present_3d(data) assert full.shape == (400,300) def test_present_options_2(): data = np.arange(100000) data = data.reshape((100,100,10)) full=present_3d_options(data,axis=2) first=np.ceil(np.sqrt(10)) second=np.ceil(10/first) assert full.shape == (100*first,100*second) def test_present_options_1(): data = np.arange(100000) data = data.reshape((100,100,10)) full=present_3d_options(data,axis=1) assert full.shape == (10*10,100*10) def test_present_options_0(): data = np.arange(100000) data = data.reshape((100,100,10)) full=present_3d_options(data,axis=0) assert full.shape == (10*10,100*10) def test_mask(): # example from http://www.jarrodmillman.com/rcsds/lectures/glm_intro.html # it should be pointed out that hypothesis just looks at simple linear regression data = np.arange(1000000) data = data.reshape((100,100,100)) mask1 = np.ones((100,100,100)) mask2 = np.zeros((100,100,100)) mask3 = np.ones((200,200,100)) assert_equal(make_mask(data, mask1), data) assert_equal(make_mask(data,mask2), mask2) assert_equal(make_mask(data,mask3,fit=True).shape, data.shape) x= False try: make_mask(data,mask3,fit=False) except ValueError: x=True assert(x==True)
[ "Image_Visualizing.present_3d", "numpy.ceil", "Image_Visualizing.make_mask", "numpy.sqrt", "numpy.ones", "os.path.dirname", "numpy.zeros", "Image_Visualizing.present_3d_options", "numpy.arange" ]
[((717, 734), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (726, 734), True, 'import numpy as np\n'), ((787, 803), 'Image_Visualizing.present_3d', 'present_3d', (['data'], {}), '(data)\n', (797, 803), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((886, 903), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (895, 903), True, 'import numpy as np\n'), ((956, 988), 'Image_Visualizing.present_3d_options', 'present_3d_options', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (974, 988), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1035, 1054), 'numpy.ceil', 'np.ceil', (['(10 / first)'], {}), '(10 / first)\n', (1042, 1054), True, 'import numpy as np\n'), ((1144, 1161), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (1153, 1161), True, 'import numpy as np\n'), ((1214, 1246), 'Image_Visualizing.present_3d_options', 'present_3d_options', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (1232, 1246), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1328, 1345), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (1337, 1345), True, 'import numpy as np\n'), ((1398, 1430), 'Image_Visualizing.present_3d_options', 'present_3d_options', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1416, 1430), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1670, 1688), 'numpy.arange', 'np.arange', (['(1000000)'], {}), '(1000000)\n', (1679, 1688), True, 'import numpy as np\n'), ((1740, 1764), 'numpy.ones', 'np.ones', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (1747, 1764), True, 'import numpy as np\n'), ((1775, 1800), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (1783, 1800), True, 'import numpy as np\n'), ((1811, 1835), 'numpy.ones', 'np.ones', (['(200, 200, 100)'], {}), '((200, 200, 100))\n', (1818, 1835), True, 'import numpy as np\n'), ((429, 454), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (444, 454), False, 'import os\n'), ((1011, 1022), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (1018, 1022), True, 'import numpy as np\n'), ((1856, 1878), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask1'], {}), '(data, mask1)\n', (1865, 1878), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1904, 1926), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask2'], {}), '(data, mask2)\n', (1913, 1926), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((2036, 2069), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask3'], {'fit': '(False)'}), '(data, mask3, fit=False)\n', (2045, 2069), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1951, 1983), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask3'], {'fit': '(True)'}), '(data, mask3, fit=True)\n', (1960, 1983), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n')]
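The shape assertions in the test file above reduce to a small tiling calculation. As an illustrative sketch (separate from the dataset row, using only NumPy and a hypothetical helper name), this is the arithmetic behind the expected mosaic sizes:

# Illustrative sketch of the grid-size arithmetic the shape assertions rely on.
import numpy as np

def mosaic_shape(h, w, n_slices):
    # rows of tiles = ceil(sqrt(n)), columns = ceil(n / rows), canvas = (h*rows, w*cols)
    rows = int(np.ceil(np.sqrt(n_slices)))
    cols = int(np.ceil(n_slices / rows))
    return h * rows, w * cols

# 10 slices of a 100x100 volume -> a 4x3 grid of tiles -> a (400, 300) canvas,
# matching the (400, 300) shape the tests above expect.
assert mosaic_shape(100, 100, 10) == (400, 300)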
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import h5py import json import os import scipy.misc import sys import re import fnmatch import datetime from PIL import Image import numpy as np ''' srun --mem 10000 python lib/datasets/wider/convert_face_to_coco.py --dataset cs6-train-det ''' def add_path(path): if path not in sys.path: sys.path.insert(0, path) this_dir = os.path.dirname(__file__) add_path(this_dir) # print(this_dir) add_path(os.path.join(this_dir, '..', '..')) import utils import utils.boxes as bboxs_util import utils.face_utils as face_util def parse_args(): parser = argparse.ArgumentParser(description='Convert dataset') parser.add_argument( '--dataset', help="wider", default='wider', type=str) parser.add_argument( '--outdir', help="output dir for json files", default='', type=str) parser.add_argument( '--datadir', help="data dir for annotations to be converted", default='', type=str) parser.add_argument( '--imdir', help="root directory for loading dataset images", default='', type=str) parser.add_argument( '--annotfile', help="directly specify the annotations file", default='', type=str) parser.add_argument( '--thresh', help="specify the confidence threshold on detections", default=-1, type=float) # if len(sys.argv) == 1: # parser.print_help() # sys.exit(1) return parser.parse_args() def convert_wider_annots(data_dir, out_dir, data_set='WIDER', conf_thresh=0.5): """Convert from WIDER FDDB-style format to COCO bounding box""" # http://cocodataset.org/#format-data: [x,w,width,height] json_name = 'wider_face_train_annot_coco_style.json' img_id = 0 ann_id = 0 cat_id = 1 print('Starting %s' % data_set) ann_dict = {} categories = [{"id": 1, "name": 'face'}] images = [] annotations = [] ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt') wider_annot_dict = face_util.parse_wider_gt(ann_file) # [im-file] = [[x,y,w,h], ...] 
for filename in wider_annot_dict.keys(): if len(images) % 50 == 0: print("Processed %s images, %s annotations" % ( len(images), len(annotations))) image = {} image['id'] = img_id img_id += 1 im = Image.open(os.path.join(data_dir, filename)) image['width'] = im.height image['height'] = im.width image['file_name'] = filename images.append(image) for gt_bbox in wider_annot_dict[filename]: ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = [] ann['category_id'] = cat_id # 1:"face" for WIDER ann['iscrowd'] = 0 ann['area'] = gt_bbox[2] * gt_bbox[3] ann['bbox'] = gt_bbox annotations.append(ann) ann_dict['images'] = images ann_dict['categories'] = categories ann_dict['annotations'] = annotations print("Num categories: %s" % len(categories)) print("Num images: %s" % len(images)) print("Num annotations: %s" % len(annotations)) with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile: outfile.write(json.dumps(ann_dict)) def convert_cs6_annots(ann_file, im_dir, out_dir, data_set='CS6-subset', conf_thresh=0.5): """Convert from WIDER FDDB-style format to COCO bounding box""" # cs6 subsets if data_set=='CS6-subset': json_name = 'cs6-subset_face_train_annot_coco_style.json' elif data_set=='CS6-subset-score': # include "scores" as soft-labels json_name = 'cs6-subset_face_train_score-annot_coco_style.json' elif data_set=='CS6-subset-gt': json_name = 'cs6-subset-gt_face_train_annot_coco_style.json' elif data_set=='CS6-train-gt': # full train set of CS6 (86 videos) json_name = 'cs6-train-gt.json' elif data_set=='CS6-train-det-score': # soft-labels used in distillation json_name = 'cs6-train-det-score_face_train_annot_coco_style.json' elif data_set=='CS6-train-det-score-0.5': # soft-labels used in distillation, keeping dets with score > 0.5 json_name = 'cs6-train-det-score-0.5_face_train_annot_coco_style.json' conf_thresh = 0.5 elif data_set=='CS6-train-det': json_name = 'cs6-train-det_face_train_annot_coco_style.json' elif data_set=='CS6-train-det-0.5': json_name = 'cs6-train-det-0.5_face_train_annot_coco_style.json' elif data_set=='CS6-train-easy-hp': json_name = 'cs6-train-easy-hp.json' elif data_set=='CS6-train-easy-gt': json_name = 'cs6-train-easy-gt.json' elif data_set=='CS6-train-easy-det': json_name = 'cs6-train-easy-det.json' elif data_set=='CS6-train-hp': json_name = 'cs6-train-hp.json' else: raise NotImplementedError img_id = 0 ann_id = 0 cat_id = 1 print('Starting %s' % data_set) ann_dict = {} categories = [{"id": 1, "name": 'face'}] images = [] annotations = [] wider_annot_dict = face_util.parse_wider_gt(ann_file) # [im-file] = [[x,y,w,h], ...] 
for filename in wider_annot_dict.keys(): if len(images) % 50 == 0: print("Processed %s images, %s annotations" % ( len(images), len(annotations))) if 'score' in data_set: dets = np.array(wider_annot_dict[filename]) if not any(dets[:,4] > conf_thresh): continue image = {} image['id'] = img_id img_id += 1 im = Image.open(os.path.join(im_dir, filename)) image['width'] = im.height image['height'] = im.width image['file_name'] = filename images.append(image) for gt_bbox in wider_annot_dict[filename]: ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = [] ann['category_id'] = cat_id # 1:"face" for WIDER ann['iscrowd'] = 0 ann['area'] = gt_bbox[2] * gt_bbox[3] ann['bbox'] = gt_bbox[:4] ann['dataset'] = data_set score = gt_bbox[4] if score < conf_thresh: continue if 'hp' in data_set: ann['score'] = score # for soft-label distillation ann['source'] = gt_bbox[5] # annot source: {1: detection, 2:tracker} if data_set=='CS6-train-easy-det': if gt_bbox[5] != 1: continue # ignore if annot source is not detection (i.e. skip HP) annotations.append(ann) ann_dict['images'] = images ann_dict['categories'] = categories ann_dict['annotations'] = annotations print("Num categories: %s" % len(categories)) print("Num images: %s" % len(images)) print("Num annotations: %s" % len(annotations)) with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile: outfile.write(json.dumps(ann_dict, indent=2)) if __name__ == '__main__': args = parse_args() if args.dataset == "wider": convert_wider_annots(args.datadir, args.outdir) # -------------------------------------------------------------------------- # CS6 Train GT # -------------------------------------------------------------------------- elif args.dataset == "cs6-subset": convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-subset') elif args.dataset == "cs6-subset-score": convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-subset-score') elif args.dataset == "cs6-subset-gt": convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-subset-gt') elif args.dataset == "cs6-train-gt": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_gt_annot_train.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-gt') # Distillation scores for CS6-Train detections (conf 0.25) elif args.dataset == "cs6-train-det-score": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_det_annot_train_scores.txt' # -------------------------------------------------------------------------- # CS6 Train unlabeled # -------------------------------------------------------------------------- # Pseudo-labels from CS6-Train elif args.dataset == "cs6-train-det": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_det_annot_train_conf-0.25.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-det') elif args.dataset == "cs6-train-det-0.5": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_det_annot_train_conf-0.50.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: 
args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-det-0.5') # Hard positives from CS6-Train elif args.dataset == "cs6-train-hp": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'Outputs/tracklets/hp-res-cs6/hp_cs6_train.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-hp', conf_thresh=0.5) # -------------------------------------------------------------------------- # CS6 "EASY" set # -------------------------------------------------------------------------- elif args.dataset == "cs6-train-easy-hp": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'Outputs/tracklets/hp-res-cs6/hp_cs6_easy.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-easy-hp') elif args.dataset == "cs6-train-easy-gt": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_gt_annot_train-easy.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-easy-gt') elif args.dataset == "cs6-train-easy-det": # set defaults if inputs args are empty if not args.annotfile: args.annotfile = 'Outputs/tracklets/hp-res-cs6/hp_cs6_train_easy.txt' if not args.imdir: args.imdir = 'data/CS6_annot' if not args.outdir: args.outdir = 'data/CS6_annot' convert_cs6_annots(args.annotfile, args.imdir, args.outdir, data_set='CS6-train-easy-det') else: print("Dataset not supported: %s" % args.dataset)
[ "sys.path.insert", "argparse.ArgumentParser", "json.dumps", "os.path.join", "utils.face_utils.parse_wider_gt", "os.path.dirname", "numpy.array" ]
[((512, 537), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (527, 537), False, 'import os\n'), ((584, 618), 'os.path.join', 'os.path.join', (['this_dir', '""".."""', '""".."""'], {}), "(this_dir, '..', '..')\n", (596, 618), False, 'import os\n'), ((738, 792), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert dataset"""'}), "(description='Convert dataset')\n", (761, 792), False, 'import argparse\n'), ((2075, 2127), 'os.path.join', 'os.path.join', (['data_dir', '"""wider_face_train_annot.txt"""'], {}), "(data_dir, 'wider_face_train_annot.txt')\n", (2087, 2127), False, 'import os\n'), ((2151, 2185), 'utils.face_utils.parse_wider_gt', 'face_util.parse_wider_gt', (['ann_file'], {}), '(ann_file)\n', (2175, 2185), True, 'import utils.face_utils as face_util\n'), ((5343, 5377), 'utils.face_utils.parse_wider_gt', 'face_util.parse_wider_gt', (['ann_file'], {}), '(ann_file)\n', (5367, 5377), True, 'import utils.face_utils as face_util\n'), ((475, 499), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (490, 499), False, 'import sys\n'), ((2498, 2530), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (2510, 2530), False, 'import os\n'), ((3362, 3394), 'os.path.join', 'os.path.join', (['out_dir', 'json_name'], {}), '(out_dir, json_name)\n', (3374, 3394), False, 'import os\n'), ((3452, 3472), 'json.dumps', 'json.dumps', (['ann_dict'], {}), '(ann_dict)\n', (3462, 3472), False, 'import json\n'), ((5653, 5689), 'numpy.array', 'np.array', (['wider_annot_dict[filename]'], {}), '(wider_annot_dict[filename])\n', (5661, 5689), True, 'import numpy as np\n'), ((5859, 5889), 'os.path.join', 'os.path.join', (['im_dir', 'filename'], {}), '(im_dir, filename)\n', (5871, 5889), False, 'import os\n'), ((7221, 7253), 'os.path.join', 'os.path.join', (['out_dir', 'json_name'], {}), '(out_dir, json_name)\n', (7233, 7253), False, 'import os\n'), ((7311, 7341), 'json.dumps', 'json.dumps', (['ann_dict'], {'indent': '(2)'}), '(ann_dict, indent=2)\n', (7321, 7341), False, 'import json\n')]
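The converters above all build the same COCO-style dictionaries. As an illustrative sketch (with made-up values, independent of the actual WIDER/CS6 annotation files), this is the target layout: bounding boxes are [x, y, width, height] in pixels and area is width * height:

# Illustrative sketch of the COCO-style structures the converters emit.
import json

image = {"id": 0, "file_name": "example_frame.jpg", "width": 1024, "height": 768}
x, y, w, h = 449.0, 330.0, 122.0, 149.0
annotation = {"id": 0, "image_id": image["id"], "category_id": 1,  # 1 == "face"
              "bbox": [x, y, w, h], "area": w * h,
              "segmentation": [], "iscrowd": 0}
coco = {"images": [image], "annotations": [annotation],
        "categories": [{"id": 1, "name": "face"}]}
print(json.dumps(coco, indent=2)[:120])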
""" ================================================= Deterministic Tracking with EuDX on Tensor Fields ================================================= In this example we do deterministic fiber tracking on Tensor fields with EuDX [Garyfallidis12]_. This example requires to import example `reconst_dti.py` to run. EuDX was primarily made with cpu efficiency in mind. Therefore, it should be useful to give you a quick overview of your reconstruction results with the help of tracking. """ import os import numpy as np import nibabel as nib if not os.path.exists('tensor_fa.nii.gz'): import reconst_dti """ EuDX will use the directions (eigen vectors) of the Tensors to propagate streamlines from voxel to voxel and fractional anisotropy to stop tracking. """ fa_img = nib.load('tensor_fa.nii.gz') FA = fa_img.get_data() evecs_img = nib.load('tensor_evecs.nii.gz') evecs = evecs_img.get_data() """ In the background of the image the fitting will not be accurate because there all measured signal is mostly noise and possibly we will find FA values with nans (not a number). We can easily remove these in the following way. """ FA[np.isnan(FA)] = 0 """ EuDX takes as input discretized voxel directions on a unit sphere. Therefore, it is necessary to discretize the eigen vectors before feeding them in EuDX. For the discretization procedure we use an evenly distributed sphere of 724 points which we can access using the get_sphere function. """ from dipy.data import get_sphere sphere = get_sphere('symmetric724') """ We use quantize_evecs (evecs here stands for eigen vectors) to apply the discretization. """ from dipy.reconst.dti import quantize_evecs peak_indices = quantize_evecs(evecs, sphere.vertices) """ EuDX is the fiber tracking algorithm that we use in this example. The most important parameters are the first one which represents the magnitude of the peak of a scalar anisotropic function, the second which represents the indices of the discretized directions of the peaks and odf_vertices are the vertices of the input sphere. """ from dipy.tracking.eudx import EuDX from dipy.tracking.streamline import Streamlines eu = EuDX(FA.astype('f8'), peak_indices, seeds=50000, odf_vertices=sphere.vertices, a_low=0.2) tensor_streamlines = Streamlines(eu) """ We can now save the results in the disk. For this purpose we can use the TrackVis format (``*.trk``). First, we need to import ``save_trk`` function. """ from dipy.io.streamline import save_trk """ Save the streamlines. """ ten_sl_fname = 'tensor_streamlines.trk' save_trk(ten_sl_fname, tensor_streamlines, affine=np.eye(4), vox_size=fa_img.header.get_zooms()[:3], shape=FA.shape) """ If you don't want to use Trackvis to visualize the file you can use our lightweight `dipy.viz` module. """ try: from dipy.viz import window, actor except ImportError: raise ImportError('Python fury module is not installed') import sys sys.exit() """ Create a scene. """ ren = window.Renderer() """ Every streamline will be coloured according to its orientation """ from dipy.viz import colormap as cmap """ `actor.line` creates a streamline actor for streamline visualization and `ren.add` adds this actor to the scene """ ren.add(actor.streamtube(tensor_streamlines, cmap.line_colors(tensor_streamlines))) print('Saving illustration as tensor_tracks.png') ren.SetBackground(1, 1, 1) window.record(ren, out_path='tensor_tracks.png', size=(600, 600)) # Enables/disables interactive visualization interactive = False if interactive: window.show(ren) """ .. 
figure:: tensor_tracks.png :align: center Deterministic streamlines with EuDX on a Tensor Field. References ---------- .. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography", PhD thesis, University of Cambridge, 2012. .. include:: ../links_names.inc """
[ "dipy.reconst.dti.quantize_evecs", "os.path.exists", "numpy.eye", "dipy.tracking.streamline.Streamlines", "nibabel.load", "dipy.data.get_sphere", "numpy.isnan", "dipy.viz.colormap.line_colors", "sys.exit", "dipy.viz.window.show", "dipy.viz.window.record", "dipy.viz.window.Renderer" ]
[((782, 810), 'nibabel.load', 'nib.load', (['"""tensor_fa.nii.gz"""'], {}), "('tensor_fa.nii.gz')\n", (790, 810), True, 'import nibabel as nib\n'), ((846, 877), 'nibabel.load', 'nib.load', (['"""tensor_evecs.nii.gz"""'], {}), "('tensor_evecs.nii.gz')\n", (854, 877), True, 'import nibabel as nib\n'), ((1506, 1532), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (1516, 1532), False, 'from dipy.data import get_sphere\n'), ((1692, 1730), 'dipy.reconst.dti.quantize_evecs', 'quantize_evecs', (['evecs', 'sphere.vertices'], {}), '(evecs, sphere.vertices)\n', (1706, 1730), False, 'from dipy.reconst.dti import quantize_evecs\n'), ((2283, 2298), 'dipy.tracking.streamline.Streamlines', 'Streamlines', (['eu'], {}), '(eu)\n', (2294, 2298), False, 'from dipy.tracking.streamline import Streamlines\n'), ((3016, 3033), 'dipy.viz.window.Renderer', 'window.Renderer', ([], {}), '()\n', (3031, 3033), False, 'from dipy.viz import window, actor\n'), ((3455, 3520), 'dipy.viz.window.record', 'window.record', (['ren'], {'out_path': '"""tensor_tracks.png"""', 'size': '(600, 600)'}), "(ren, out_path='tensor_tracks.png', size=(600, 600))\n", (3468, 3520), False, 'from dipy.viz import window, actor\n'), ((555, 589), 'os.path.exists', 'os.path.exists', (['"""tensor_fa.nii.gz"""'], {}), "('tensor_fa.nii.gz')\n", (569, 589), False, 'import os\n'), ((1145, 1157), 'numpy.isnan', 'np.isnan', (['FA'], {}), '(FA)\n', (1153, 1157), True, 'import numpy as np\n'), ((3606, 3622), 'dipy.viz.window.show', 'window.show', (['ren'], {}), '(ren)\n', (3617, 3622), False, 'from dipy.viz import window, actor\n'), ((2631, 2640), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2637, 2640), True, 'import numpy as np\n'), ((2973, 2983), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2981, 2983), False, 'import sys\n'), ((3337, 3373), 'dipy.viz.colormap.line_colors', 'cmap.line_colors', (['tensor_streamlines'], {}), '(tensor_streamlines)\n', (3353, 3373), True, 'from dipy.viz import colormap as cmap\n')]
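The EuDX walkthrough above hinges on quantize_evecs mapping each principal eigenvector to one of the discrete sphere directions. As a heavily simplified sketch of that idea (a toy three-vertex "sphere" instead of symmetric724, and not the dipy implementation), the lookup is essentially a nearest-direction search, ignoring sign because fiber directions are axial:

# Toy nearest-direction lookup in the spirit of quantize_evecs (illustration only).
import numpy as np

vertices = np.array([[1.0, 0.0, 0.0],   # stand-in for sphere.vertices
                     [0.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]])
evec = np.array([0.9, 0.1, 0.05])
evec = evec / np.linalg.norm(evec)
peak_index = int(np.argmax(np.abs(vertices @ evec)))
print(peak_index)  # -> 0, i.e. the x-axis direction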
import pandas as pd import numpy as np import seaborn as sns import os import matplotlib.pyplot as plt df = pd.read_csv( os.path.join( "fairness-2021", "simple-rank-res.csv" ) ) df["pp"] = [ "fs" if x == 'fairsmote' else x for x in df["pp"] ] df["pt"] = [ "rs" if x == 'random' else x for x in df["pt"] ] df["tech"] = [ x + "+rf+" + y for x, y in zip( df["pp"], df["pt"] ) ] df["tech"] = [ x.replace("none+","").upper() for x in df["tech"] ] df["tech"] = [ x.replace("+DEFAULT","").upper() for x in df["tech"] ] df = df[ [x in ["d2h1", "d2h2", "d2h3"] for x in df["m"]] ] df["d2h"] = [ "overall" if x == "d2h1" else "classification" if x == "d2h2" else "fairness" for x in df["m"] ] ds_list = df["ds"].unique() m_list = ["d2h1", "d2h2", "d2h3"] mn_list = ["Prediction", "Fairness", "Overall"] tech_list = df["tech"].unique() for ds in ds_list: top, bottom = [], [] for m in m_list: for t in tech_list: sub_df = df[ np.logical_and(np.logical_and(df["ds"]==ds, df["m"]==m), df["tech"]==t) ] val = sub_df["val"] iqr = val.quantile(0.75) - val.quantile(0.25) bottom += [val.quantile(0.25) - 1.5*iqr] top += [val.quantile(0.75) + 1.5*iqr] top, bottom = max(top), min(bottom) for m, mn in zip( m_list, mn_list ): sub_df = df[ np.logical_and(df["ds"]==ds, df["m"]==m) ] ranks = pd.DataFrame([ sub_df[ sub_df["tech"] == x ].iloc[0][["tech", "rank"]] for x in tech_list ]) ranks = ranks.sort_values(by=['rank']) plt.clf() fig, ax = plt.subplots(figsize=(5, 8.5)) plt.ylim((bottom, top)) plt.tight_layout(pad=2.25) ax.tick_params(axis='x', rotation=25) g = sns.boxplot( x = "tech", y = "val", hue = "rank", data = sub_df, dodge = False, order = list(ranks["tech"]), showfliers = False, palette = sns.cubehelix_palette(start=1.5, rot=0.4, dark=0.35, light=1, reverse=True) ).set( xlabel='Model', ylabel='Distance to heaven', title=f'{mn}', ) ax.get_legend().remove() fig.savefig( os.path.join( "fairness-2021", "box", f"box-{ds}-{m}.png" ) ) plt.close()
[ "seaborn.cubehelix_palette", "numpy.logical_and", "matplotlib.pyplot.clf", "os.path.join", "matplotlib.pyplot.close", "matplotlib.pyplot.tight_layout", "pandas.DataFrame", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots" ]
[((122, 174), 'os.path.join', 'os.path.join', (['"""fairness-2021"""', '"""simple-rank-res.csv"""'], {}), "('fairness-2021', 'simple-rank-res.csv')\n", (134, 174), False, 'import os\n'), ((1416, 1508), 'pandas.DataFrame', 'pd.DataFrame', (["[sub_df[sub_df['tech'] == x].iloc[0][['tech', 'rank']] for x in tech_list]"], {}), "([sub_df[sub_df['tech'] == x].iloc[0][['tech', 'rank']] for x in\n tech_list])\n", (1428, 1508), True, 'import pandas as pd\n'), ((1573, 1582), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1580, 1582), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1631), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 8.5)'}), '(figsize=(5, 8.5))\n', (1613, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1663), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(bottom, top)'], {}), '((bottom, top))\n', (1648, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1698), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(2.25)'}), '(pad=2.25)\n', (1688, 1698), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2362), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1392), 'numpy.logical_and', 'np.logical_and', (["(df['ds'] == ds)", "(df['m'] == m)"], {}), "(df['ds'] == ds, df['m'] == m)\n", (1362, 1392), True, 'import numpy as np\n'), ((2281, 2338), 'os.path.join', 'os.path.join', (['"""fairness-2021"""', '"""box"""', 'f"""box-{ds}-{m}.png"""'], {}), "('fairness-2021', 'box', f'box-{ds}-{m}.png')\n", (2293, 2338), False, 'import os\n'), ((975, 1019), 'numpy.logical_and', 'np.logical_and', (["(df['ds'] == ds)", "(df['m'] == m)"], {}), "(df['ds'] == ds, df['m'] == m)\n", (989, 1019), True, 'import numpy as np\n'), ((1957, 2032), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'start': '(1.5)', 'rot': '(0.4)', 'dark': '(0.35)', 'light': '(1)', 'reverse': '(True)'}), '(start=1.5, rot=0.4, dark=0.35, light=1, reverse=True)\n', (1978, 2032), True, 'import seaborn as sns\n')]
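The top/bottom limits computed in the plotting script above are Tukey-style whisker bounds, Q1 - 1.5*IQR and Q3 + 1.5*IQR, taken over each metric/technique group. A small numeric illustration with made-up values:

# Illustrative sketch of the whisker-bound arithmetic used for the y-axis limits.
import pandas as pd

val = pd.Series([0.10, 0.12, 0.15, 0.18, 0.20, 0.40])
q1, q3 = val.quantile(0.25), val.quantile(0.75)
iqr = q3 - q1
bottom, top = q1 - 1.5 * iqr, q3 + 1.5 * iqr
print(round(bottom, 3), round(top, 3))  # limits used to clip the boxplot y-axis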
import warnings import numpy as np import pandas as pd import sklearn from sklearn import metrics class MetricCatalog: catalog_dict = { 'accuracy': { 'func': metrics.accuracy_score, 'params': {}, 'require_score': False, 'binary': True, 'multi': True}, # AP is not straightfoward to apply to multiclass 'average_precision': { 'func': metrics.average_precision_score, 'params': {}, 'require_score': True, 'binary': True, 'multi': False}, # Default configuration only handles binary classification 'f1': { 'func': metrics.f1_score, 'params': {'average': 'macro'}, 'require_score': False, 'binary': True, 'multi': True}, 'f1_micro': { 'func': metrics.f1_score, 'params': {'average': 'micro'}, 'require_score': False, 'binary': True, 'multi': True}, 'f1_macro': { 'func': metrics.f1_score, 'params': {'average': 'macro'}, 'require_score': False, 'binary': True, 'multi': True}, # Note: log_loss returns "loss" value 'neg_log_loss': { 'func': lambda y_true, y_pred: - metrics.log_loss(y_true, y_pred), 'params': {}, 'require_score': True, 'binary': True, 'multi': True}, # Same problem as f1_score 'precision': { 'func': metrics.precision_score, 'params': {'average': 'macro'}, 'require_score': False, 'binary': True, 'multi': True}, 'precision_micro': { 'func': metrics.precision_score, 'params': {'average': 'micro'}, 'require_score': False, 'binary': True, 'multi': True}, 'precision_macro': { 'func': metrics.precision_score, 'params': {'average': 'macro'}, 'require_score': False, 'binary': True, 'multi': True}, # Same problem as f1_score 'recall': { 'func': metrics.recall_score, 'params': {'average': 'macro'}, 'require_score': False, 'binary': True, 'multi': True}, 'recall_micro': { 'func': metrics.recall_score, 'params': {'average': 'micro'}, 'require_score': False, 'binary': True, 'multi': True}, 'recall_macro': { 'func': metrics.recall_score, 'params': {'average': 'macro'}, 'require_score': False, 'binary': True, 'multi': True}, 'roc_auc': { 'func': metrics.roc_auc_score, 'params': {}, 'require_score': True, 'binary': True, 'multi': False}, # Regression metrics 'explained_variance': { 'func': metrics.explained_variance_score, 'params': {}, 'require_score': False, 'regression': True}, 'neg_mean_absolute_error': { 'func': lambda y_true, y_pred: - metrics.mean_absolute_error( y_true, y_pred), 'params': {}, 'require_score': False, 'regression': True}, 'neg_mean_squared_error': { 'func': lambda y_true, y_pred: - metrics.mean_squared_error( y_true, y_pred), 'params': {}, 'require_score': False, 'regression': True}, 'neg_median_absolute_error': { 'func': lambda y_true, y_pred: - metrics.median_absolute_error( y_true, y_pred), 'params': {}, 'require_score': False, 'regression': True}, 'r2': { 'func': metrics.r2_score, 'params': {}, 'require_score': False, 'regression': True}} @classmethod def get_basic_metrics(cls, task_type="classification"): if task_type in ["classification", "binary", "multi"]: return dict( filter(lambda x: x[0] in ["accuracy", "precision", "recall"], cls.catalog_dict.items())) elif task_type in ["regression", "reg"]: return dict( filter(lambda x: x[0] in ["neg_mean_absolute_error", "neg_mean_squared_error", "r2"], cls.catalog_dict.items())) class ErrorSummary(object): """Error Analysis summary class.""" def __init__(self, error_dist=None, diversity=None, errors=None): """Initialization Args: error_dist (pd.DataFrame): Error distribution table diversity (pd.DataFrame): Diversity metric table errors (pd.DataFrame): Misclassified examples """ self.error_dist = error_dist self.diversity = diversity 
self.errors = errors class Evaluate(): def __init__(self, alearn, ac=None, feature_names=None, random_state=7): """Data evaluation class Args: alearn (AutoLearn or sklearn classifier instance): Trained model instance ac (AutoConverter instance): Autoconverter for converting column data to feature matrix feature_names (list): List of feature names (str) If ac is given, the parameter will be disregarded. If not, feature_names becomes mandatory. random_state (int): random seed for pandas.sample. Default: 7 """ if ac is None: if feature_names is None: raise ValueError("Either AutoConverter or feature_names must", "be given.") self.feature_names = feature_names self.ac = None else: self.ac = ac if feature_names is not None: warnings.warn("AutoConverter instance is given so", "feature_names will be discarded.") self.feature_names = None # TODO(Yoshi): Need to modify when it incorporates regression type assert hasattr(alearn, "predict") assert hasattr(alearn, "predict_proba") if alearn.__class__.__name__ == "AutoLearn": assert alearn.trained else: # scikit-learn classifiers do not have "fitted" flag # A solution would be calling predict()/predict_proba() # to see if it returns exception. pass self.alearn = alearn self.rs = random_state self.orig_eval_s = None def _task_type(self): """Extract task_type from alearn (could be sklearn clf) instance.""" if hasattr(self.alearn, 'task'): # AutoLearn instance passed if self.alearn.task == 'regression': task_type = 'regression' elif hasattr(self.alearn.learner, "task_type"): task_type = self.alearn.learner.task_type else: raise ValueError("wrong task_type passed to evaluate") else: # in this case we have scikit-learn classifier passed if isinstance(self.alearn, sklearn.base.ClassifierMixin): if len(self.alearn.classes_) == 2: task_type = "binary" else: task_type = "multi" elif isinstance(self.alearn, sklearn.base.RegressorMixin): task_type = "regression" else: raise ValueError("Unknown instance type: {}".format( type(self.alearn))) return task_type def _pos_label(self): if hasattr(self.alearn, "pos_label"): return self.alearn.pos_label else: # Assume that the second index is positive return 1 def get_feature_indexes(self): """Returns di Returns: table_colname_pos_dict = {"main..Ticket": [0, 20], "main..Age": [21, 30], ...} """ if self.ac is not None: all_feature_names = self.ac.feature_names else: all_feature_names = self.feature_names # table_feature_names_cols = # ["main..Ticket", "main..Ticket", ...] table_feature_name_cols = list(map( lambda x: x.split('..')[0] + ".." 
+ x.split('..')[1].split('.')[0], all_feature_names)) table_colname_pos_dict = {} begin = 0 table_colname = table_feature_name_cols[0] counter = 0 for i, feature_name in enumerate(table_feature_name_cols): if feature_name == table_colname: counter += 1 else: # end is not included to the interval table_colname_pos_dict[table_colname] = [begin, i] begin = i counter = 1 table_colname = feature_name table_colname_pos_dict[table_colname] = [begin, len(table_feature_name_cols)] return table_colname_pos_dict @classmethod def run_metric_functions(cls, y, y_pred, y_prob, metric_func_dict, task_type): """Run metric functions Args: y (np.ndarray): True label vector y_pred (np.ndarray): Predicted label vector y_prob (np.ndarray): Probability vector None if task_type == "regression" metric_func_dict (dict): metric func dictionary see MetricCatalog for details task_type (str): task type {"binary", "multi", "regression"} Returns: orig_eval_s (pd.Series) """ if task_type not in ["binary", "multi", "regression"]: raise ValueError('task_type must be {"binary", "multi",' '"regression"}') if task_type == "regression" and y_prob is not None: warnings.warn("y_prob will be disregarded for" "task_type=regression") # Only use evaluation metric that supports task_type sorted_metric_names = sorted( filter(lambda x: (task_type in metric_func_dict[x] and metric_func_dict[x][task_type]), metric_func_dict.keys())) # Evaluate prediction eval_list = [] for metric_name in sorted_metric_names: metric_info = metric_func_dict[metric_name] metric_func = metric_info['func'] metric_params = metric_info['params'] assert metric_info[task_type] if metric_info["require_score"]: score = metric_func(y, y_prob, **metric_params) else: # Evaluation metrics for regression use y_pred score = metric_func(y, y_pred, **metric_params) eval_list.append(score) orig_eval_s = pd.Series(eval_list, index=sorted_metric_names) return orig_eval_s def evaluate_performance(self, X=None, y=None, metric_func_dict=None): """Evaluate prediction performance. Args: df (pd.DataFrame): Main table X (np.array): Test feature matrix y (np.array): Test label vector metric_func_dict (dict): if None, it will use MetricCatalog {"metric_name": {"func": func, "params": {}, "require_score": True, "binary": True, "multi": True}} Returns: orig_eval_s (pd.Series): Evaluation values """ if metric_func_dict is None: metric_func_dict = MetricCatalog.catalog_dict if (X is None) or (y is None): if self.ac is None: raise ValueError( "X and y are missing since AutoConverter instance was not", "given.") if not self.ac.hasdata: raise RuntimeError( "AutoConverter instance does not store X and y.") X = self.ac.X y = self.ac.y # 1. 
pure prediction y_pred = self.alearn.predict(X) if self._task_type() in ["binary", "multi"]: y_prob = self.alearn.predict_proba(X) if self._task_type() == "binary": y_prob = y_prob[:, self._pos_label()] else: # y_prob is empty for regression y_prob = None # y_pred, y_prob, metric_func_dict self.orig_eval_s = Evaluate.run_metric_functions(y, y_pred, y_prob, metric_func_dict, self._task_type()) return self.orig_eval_s def calculate_column_importance(self, X=None, y=None, target=None, metric_func_dict=None): """Evaluate column importance scores Args: X (np.array): Test feature matrix y (np.array): Test label vector column_importance (bool): Calculate column importance if True Default=True, metric_func_dict (dict): if None, it will use MetricCatalog {"metric_name": {"func": func, "params": {}, "require_score": True, "binary": True, "multi": True}} Returns: col_imp_df (pd.DataFrame): accuracy average_precision f1 ... tablename colname main Age 0.012240 0.007844 0.013407 ... Cabin 0.040392 0.024465 0.044803 ... Embarked 0.008568 0.006306 0.009215 ... Fare 0.009792 0.002827 0.010472 ... Name 0.046512 0.057124 0.050983 ... Parch 0.000000 0.000600 0.000127 ... Pclass 0.029376 0.027463 0.031666 ... Sex 0.227662 0.236873 0.244964 ... SibSp 0.006120 0.006541 0.006973 ... Ticket 0.055080 0.072796 0.058413 ... """ if metric_func_dict is None: metric_func_dict = MetricCatalog.catalog_dict if (X is None) or (y is None): if self.ac is None: raise ValueError( "X and y must be given since it has no AutoConverter", "instance.") if not self.ac.hasdata: raise RuntimeError( "AutoConverter instance does not store X and y.") X = self.ac.X y = self.ac.y if self.ac is None: if target is None: raise ValueError("target parameter must be given since", "it has no AutoConverter instance.") else: target = self.ac.target if target is not None: warnings.warn("Give target will be discarded.") if self.orig_eval_s is None: self.evaluate_performance(X=X, y=y, metric_func_dict=metric_func_dict) assert self.orig_eval_s is not None # feature_indexes_dict[table_colname] = [begin, end] feature_indexes_dict = self.get_feature_indexes() # Only use evaluation metric that supports task_type sorted_metric_names = sorted( filter(lambda x: (self._task_type() in metric_func_dict[x] and metric_func_dict[x][self._task_type()]), metric_func_dict.keys())) # Column importance col_importance_list = [] col_imp_index_list = [] for table_colname in sorted(feature_indexes_dict.keys()): tablename, colname = table_colname.split('..') if tablename == 'main' and colname == target: continue col_imp_index_list.append(table_colname) # Get needed feature columns range and spoil them beg_idx, end_idx = feature_indexes_dict[table_colname] X_shuf = X.copy() np.random.shuffle(X_shuf[:, beg_idx:end_idx]) # Permuted prediction y_shuf_pred = self.alearn.predict(X_shuf) if self._task_type() in ["binary", "multi"]: y_shuf_prob = self.alearn.predict_proba(X_shuf) if self._task_type() == 'binary': y_shuf_prob = y_shuf_prob[:, self._pos_label()] # Calculate evaluation metric_list = [] for metric_name in sorted_metric_names: metric_info = metric_func_dict[metric_name] metric_func = metric_info['func'] metric_params = metric_info['params'] assert metric_info[self._task_type()] if metric_info["require_score"]: # orig_score = metric_func(y, y_prob) orig_score = self.orig_eval_s[metric_name] shuf_score = metric_func(y, y_shuf_prob, **metric_params) else: # orig_score = metric_func(y, y_pred) orig_score = self.orig_eval_s[metric_name] shuf_score = metric_func(y, y_shuf_pred, 
**metric_params) # TODO(Yoshi): Double check if there is no problem # for neg_log_loss if orig_score == 0: metric_list.append(0.0) else: metric_list.append((orig_score - shuf_score) / orig_score) col_importance_list.append(metric_list) col_imp_df = pd.DataFrame(col_importance_list) col_imp_df.columns = sorted_metric_names tablename_list = list(map(lambda x: x.split('..')[0], col_imp_index_list)) colname_list = list(map(lambda x: x.split('..')[1], col_imp_index_list)) assert len(tablename_list) == len(col_imp_df) assert len(tablename_list) == len(colname_list) assert "tablename" not in sorted_metric_names assert "colname" not in sorted_metric_names col_imp_df["tablename"] = tablename_list col_imp_df["colname"] = colname_list col_imp_df.set_index(["tablename", "colname"], inplace=True) return col_imp_df def get_top_columns(self, n=3): """Returns n most important columns in the DataFrame Args: n (integer): number of columns returned Returns: list of [tablename..columname, ...] of most important columns, sorted in descending order """ col_imp_df = self.calculate_column_importance() if self._task_type() == 'binary': metric = 'roc_auc' else: metric = 'neg_log_loss' new_df = col_imp_df[metric].sort_values(ascending=False).head(n) return list(map(lambda x: x[0] + '..' + x[1], new_df.index.values)) def get_mispredictions(self, df): """Get mispredicted examples based on the classifier Args: df (pd.DateFrame): dataset to evaluate. Returns: mispred_df (pd.DataFrame): TODO(Yoshi): subtable support """ # Assume AutoConverter is mandatory for the function if self.ac is None: raise ValueError("AutoConverter instance is required to call", "get_mispredictions()") # TODO(Yoshi): This is not accurate. # AutoConverter also should have "fitted" flag or something like that. assert self.ac.hasdata X, y = self.ac.transform(df) pred_y = self.alearn.predict(X) # TODO(Yoshi): Add some columns such as ==prediction== column, # ==confidence==. To be disccused and will be another ticket. return df.ix[y != pred_y] def stratify_errors(self, df, max_numcat=5): """Stratify mispredicted examples. TODO(Yoshi): Will avoid hand-crafted configuration Args: df (pd.DataFrame): Returns: es (ErrorSummary) """ # Assume AutoConverter is mandatory for the function if self.ac is None: raise ValueError("AutoConverter instance is required to call", "stratify_errors()") def calc_diversity(s): """Calculate entropy as a diversity metric.""" probs = s / s.sum() return (probs * np.log(1.0 / probs)).sum() assert self.ac.hasdata error_df = self.get_mispredictions(df) # Conduct for loop for each column colname_list = [] error_dist_df_list = [] diversity_list = [] sorted_colnames = sorted(error_df.columns.tolist()) for colname in sorted_colnames: if colname not in self.ac.colname_type_dict: continue error_count_s = error_df[colname].value_counts() total_count_s = df[colname].value_counts() error_dist_df = pd.concat([error_count_s, total_count_s], axis=1) error_dist_df.columns = ["error_count", "total_count"] error_dist_df["error_rate"] = (error_dist_df["error_count"] / error_dist_df["total_count"]) if len(error_dist_df) > max_numcat: continue error_dist_df.index.name = "group" error_dist_df = error_dist_df.reset_index() # Calculate diversity score diversity_score = calc_diversity(error_dist_df["error_rate"]) error_dist_df.loc[:, 'colname'] = colname error_dist_df_list.append(error_dist_df) diversity_list.append(diversity_score) colname_list.append(colname) if len(error_dist_df_list) < 1: # No grouped result found # TODO(Yoshi): Output any message? 
return None error_dist_concat_df = pd.concat(error_dist_df_list, axis=0) error_dist_concat_df.set_index(["colname", "group"], inplace=True) diversity_df = pd.DataFrame({"diversity": diversity_list}, index=colname_list) return ErrorSummary(error_dist=error_dist_concat_df, diversity=diversity_df, errors=error_df) def get_explanations(self, test_df, X=None, topk=3, max_candidates=10, num_sampling=10, spoil_method='random'): """Returns explanations (previously known as reason codes) V1 simply calculates the average difference of class probabilities no matter whether binary or multiclass Args: test_df (pd.DataFrame): Original DataFrame X (np.array): Test feature matrix topk (int): select top-k colnames for explanations max_candidates (int): At most <max_candidates> columns will be used for explanations (Default 10) num_sampling (int): Number of sampling iterations (Default 10) spoil_method (str): {"random"} Returns: """ # Assume AutoConverter is mandatory for the function if self.ac is None: raise ValueError("AutoConverter instance is required to call", "get_explanations()") # TODO(Yoshi): spoil_method should be improved top_colnames = self.get_top_columns(n=max_candidates) # TODO(Yoshi): it's not straightforward to visualize representative # values for subtables. Only focus on main table for now top_colnames = list(filter(lambda x: x.split('..')[0] == 'main', top_colnames)) assert len(top_colnames) > 0 table_colname_feature_pos_dict = self.get_feature_indexes() if X is None: assert self.ac.hasdata X = self.ac.X all_pred = self.alearn.predict_proba(X) table_colname_impact_dict = {} for table_colname in top_colnames: abs_diff_probs = np.zeros_like(all_pred) beg_idx, end_idx = table_colname_feature_pos_dict[table_colname] for _ in range(num_sampling): X_shuf = X.copy() np.random.shuffle(X_shuf[:, beg_idx:end_idx]) all_pred_shuf = self.alearn.predict_proba(X_shuf) abs_diff_probs += np.abs(all_pred - all_pred_shuf) # <num_sample>-dimensional vector impact_scores = np.mean(abs_diff_probs, axis=1) table_colname_impact_dict[table_colname] = impact_scores impact_df = pd.DataFrame(table_colname_impact_dict) assert len(impact_df) == len(test_df) impact_df.index = test_df.index all_explanation_list = [] for index, row in impact_df.iterrows(): top_s = row.sort_values(ascending=False).head(topk) top_colnames = top_s.index.tolist() cur_explanation_list = [] for table_colname in top_colnames: # split colanme in to tablename and colname tablename, colname = table_colname.split("..") val = test_df.ix[index][colname] cur_explanation_list.append((colname, val)) all_explanation_list.append(cur_explanation_list) explain_df = pd.DataFrame({"explanations": all_explanation_list}) assert len(explain_df) == len(test_df) explain_df.index = test_df.index return explain_df
[ "pandas.Series", "numpy.mean", "numpy.abs", "pandas.DataFrame", "sklearn.metrics.median_absolute_error", "numpy.log", "numpy.zeros_like", "sklearn.metrics.mean_squared_error", "sklearn.metrics.log_loss", "warnings.warn", "sklearn.metrics.mean_absolute_error", "pandas.concat", "numpy.random.shuffle" ]
[((11681, 11728), 'pandas.Series', 'pd.Series', (['eval_list'], {'index': 'sorted_metric_names'}), '(eval_list, index=sorted_metric_names)\n', (11690, 11728), True, 'import pandas as pd\n'), ((19146, 19179), 'pandas.DataFrame', 'pd.DataFrame', (['col_importance_list'], {}), '(col_importance_list)\n', (19158, 19179), True, 'import pandas as pd\n'), ((23608, 23645), 'pandas.concat', 'pd.concat', (['error_dist_df_list'], {'axis': '(0)'}), '(error_dist_df_list, axis=0)\n', (23617, 23645), True, 'import pandas as pd\n'), ((23744, 23807), 'pandas.DataFrame', 'pd.DataFrame', (["{'diversity': diversity_list}"], {'index': 'colname_list'}), "({'diversity': diversity_list}, index=colname_list)\n", (23756, 23807), True, 'import pandas as pd\n'), ((26494, 26533), 'pandas.DataFrame', 'pd.DataFrame', (['table_colname_impact_dict'], {}), '(table_colname_impact_dict)\n', (26506, 26533), True, 'import pandas as pd\n'), ((27215, 27267), 'pandas.DataFrame', 'pd.DataFrame', (["{'explanations': all_explanation_list}"], {}), "({'explanations': all_explanation_list})\n", (27227, 27267), True, 'import pandas as pd\n'), ((10700, 10767), 'warnings.warn', 'warnings.warn', (['"""y_prob will be disregarded fortask_type=regression"""'], {}), "('y_prob will be disregarded fortask_type=regression')\n", (10713, 10767), False, 'import warnings\n'), ((17609, 17654), 'numpy.random.shuffle', 'np.random.shuffle', (['X_shuf[:, beg_idx:end_idx]'], {}), '(X_shuf[:, beg_idx:end_idx])\n', (17626, 17654), True, 'import numpy as np\n'), ((22671, 22720), 'pandas.concat', 'pd.concat', (['[error_count_s, total_count_s]'], {'axis': '(1)'}), '([error_count_s, total_count_s], axis=1)\n', (22680, 22720), True, 'import pandas as pd\n'), ((25927, 25950), 'numpy.zeros_like', 'np.zeros_like', (['all_pred'], {}), '(all_pred)\n', (25940, 25950), True, 'import numpy as np\n'), ((26373, 26404), 'numpy.mean', 'np.mean', (['abs_diff_probs'], {'axis': '(1)'}), '(abs_diff_probs, axis=1)\n', (26380, 26404), True, 'import numpy as np\n'), ((6420, 6511), 'warnings.warn', 'warnings.warn', (['"""AutoConverter instance is given so"""', '"""feature_names will be discarded."""'], {}), "('AutoConverter instance is given so',\n 'feature_names will be discarded.')\n", (6433, 6511), False, 'import warnings\n'), ((16382, 16429), 'warnings.warn', 'warnings.warn', (['"""Give target will be discarded."""'], {}), "('Give target will be discarded.')\n", (16395, 16429), False, 'import warnings\n'), ((26120, 26165), 'numpy.random.shuffle', 'np.random.shuffle', (['X_shuf[:, beg_idx:end_idx]'], {}), '(X_shuf[:, beg_idx:end_idx])\n', (26137, 26165), True, 'import numpy as np\n'), ((26266, 26298), 'numpy.abs', 'np.abs', (['(all_pred - all_pred_shuf)'], {}), '(all_pred - all_pred_shuf)\n', (26272, 26298), True, 'import numpy as np\n'), ((1353, 1385), 'sklearn.metrics.log_loss', 'metrics.log_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1369, 1385), False, 'from sklearn import metrics\n'), ((3278, 3321), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3305, 3321), False, 'from sklearn import metrics\n'), ((3516, 3558), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3542, 3558), False, 'from sklearn import metrics\n'), ((3756, 3801), 'sklearn.metrics.median_absolute_error', 'metrics.median_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3785, 3801), False, 'from sklearn import metrics\n'), ((22109, 22128), 
'numpy.log', 'np.log', (['(1.0 / probs)'], {}), '(1.0 / probs)\n', (22115, 22128), True, 'import numpy as np\n')]
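calculate_column_importance in the class above scores each source column by shuffling its feature block and measuring the relative drop in each metric, (orig - shuf) / orig. A minimal, self-contained sketch of that permutation-importance idea on synthetic data (toy model and features, not the AutoConverter feature blocks):

# Illustrative permutation-importance sketch mirroring the procedure above.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

rng = np.random.RandomState(0)
X = rng.randn(500, 3)
y = (X[:, 0] + 0.1 * rng.randn(500) > 0).astype(int)   # only feature 0 is informative
clf = LogisticRegression().fit(X, y)
orig = accuracy_score(y, clf.predict(X))
for col in range(X.shape[1]):
    X_shuf = X.copy()
    rng.shuffle(X_shuf[:, col])                        # spoil one column, as in the class
    shuf = accuracy_score(y, clf.predict(X_shuf))
    print(col, round((orig - shuf) / orig, 3))         # feature 0 should dominate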
from abc import ABC, abstractmethod import numpy as np from .activations import Identity class Layer(ABC): def __init__(self, size, input_shape=(None, None)): ''' Params ------ size : int number of neurons (size) of output layer input_shape : (int, int) (number of input features, number of samples) *only required for first layer* ''' self.size = size self.IN, self.n = self.input_shape = input_shape def compile(self, input_shape, output_shape): ''' Notes ----- IN - number of neurons (size) of input layer OUT - number of neurons (size) of output layer n - number of samples ''' assert len(input_shape)== 2 IN, n = input_shape self.input_shape = input_shape self.output_shape = output_shape self.IN = IN self.OUT = output_shape[0] self.n = n @abstractmethod def forward(self, X): pass @abstractmethod def backward(self, dA): pass class Dense(Layer): def __init__(self, size, input_shape=(None, None), activation=Identity): super().__init__(size, input_shape) self.activation = activation() def compile(self, input_shape, output_shape=(1,)): ''' W is the weights matrix | [in x out] Z is Sum(w_i * x_i) | [out x n] A is activation.apply(Z)| [out x n] ''' super().compile(input_shape, output_shape) #self.W = np.random.rand(self.OUT, self.IN) self.W = np.random.randn(self.OUT, self.IN) * np.sqrt(2 / (self.IN + self.OUT)) # Important note: for tanh: 1/self.IN, Relu: 2/self.IN. Instead, I'm using new theory self.alpha = 0.1 # Place holder for optimizer def forward(self, X): '''Applies forward propagation to inputs X, i.e. self.Z = W * X self.A = a(Z) ''' assert X.ndim == 2 and X.shape[0] == self.input_shape[0] self.X = X self.Z = np.dot(self.W, self.X) self.A = self.activation.apply(self.Z) assert self.A.shape == self.Z.shape # Sanity check return self.A def backward(self, dA): '''Given derivatives of next layer, adjust the weights Math: dZ = dA .* a'(Z), .* - element wise multiplication dW = dZ dot X.T dX = dW.T dot dZ Params: dA := partial derivative dJ / dA Notes: dX is dA of left layer ''' assert dA.shape == self.Z.shape dZ = dA * self.activation.derivative(self.Z, A=self.A) assert dZ.shape == dA.shape dW = np.dot(dZ, self.X.transpose()) / self.n assert dW.shape == self.W.shape dX = np.dot(self.W.transpose(), dZ) self.W = self.W - self.alpha * dW return dX, dW class Lambda(Layer): '''Gotta think about this one''' def __init__(self, function): self.function = function def compile(self): pass def forward(self, X): return def backward(self, dA): pass
[ "numpy.sqrt", "numpy.dot", "numpy.random.randn" ]
[((2107, 2129), 'numpy.dot', 'np.dot', (['self.W', 'self.X'], {}), '(self.W, self.X)\n', (2113, 2129), True, 'import numpy as np\n'), ((1633, 1667), 'numpy.random.randn', 'np.random.randn', (['self.OUT', 'self.IN'], {}), '(self.OUT, self.IN)\n', (1648, 1667), True, 'import numpy as np\n'), ((1670, 1703), 'numpy.sqrt', 'np.sqrt', (['(2 / (self.IN + self.OUT))'], {}), '(2 / (self.IN + self.OUT))\n', (1677, 1703), True, 'import numpy as np\n')]
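The Dense layer above combines a Glorot-style initialisation, a W·X forward pass, and the usual backprop algebra. A shape-level sanity check of that algebra (identity activation, random data, hypothetical sizes):

# Illustrative shape check for the Dense layer's forward/backward algebra.
import numpy as np

IN, OUT, n = 4, 3, 5
W = np.random.randn(OUT, IN) * np.sqrt(2.0 / (IN + OUT))   # same scale as Dense.compile
X = np.random.randn(IN, n)
Z = W @ X              # (OUT, n) pre-activations
dZ = np.ones_like(Z)   # pretend upstream gradient dJ/dZ
dW = dZ @ X.T / n      # (OUT, IN), same shape as W
dX = W.T @ dZ          # (IN, n), gradient passed to the previous layer
print(Z.shape, dW.shape, dX.shape)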
__copyright__ = """ Copyright (c) 2018 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numbers import threading from queue import Queue import numpy as np import math import pickle import os class SharedNoiseTable(object): def __init__(self): import ctypes, multiprocessing seed = 123 count = 250000000 # 1 gigabyte of 32-bit numbers. Will actually sample 2 gigabytes below. print('Sampling {} random numbers with seed {}'.format(count, seed)) self._shared_mem = multiprocessing.Array(ctypes.c_float, count) self.noise = np.ctypeslib.as_array(self._shared_mem.get_obj()) assert self.noise.dtype == np.float32 sharednoisetablefile = "/tmp/sharednoisetable" if False: #os.path.isfile(sharednoisetablefile): print("Loading shared noise from {}".format(sharednoisetablefile)) with open(sharednoisetablefile, 'rb') as fh: self.noise[:] = pickle.load(fh) else: self.noise[:] = np.random.RandomState(seed).randn(count) # 64-bit to 32-bit conversion here print('Sampled {} bytes'.format(self.noise.size * 4)) with open(sharednoisetablefile, 'wb') as fh: print("Saving shared noise table to {}".format(sharednoisetablefile)) pickle.dump(self.noise, fh) def get(self, i, dim): return self.noise[i:i + dim] def sample_index(self, stream, dim): return stream.randint(0, len(self.noise) - dim + 1) class ConstantSchedule(object): def __init__(self, value): self._value = value def value(self, **kwargs): return self._value class LinearSchedule(object): def __init__(self, schedule, final_p, initial_p, field): self.schedule = schedule self.field = field self.final_p = final_p self.initial_p = initial_p def value(self, **kwargs): assert self.field in kwargs, "Argument {} not provided to scheduler Available: {}".format(self.field, kwargs) fraction = min(float(kwargs[self.field]) / self.schedule, 1.0) return self.initial_p + fraction * (self.final_p - self.initial_p) class ExponentialSchedule(object): def __init__(self, initial_p, final_p, schedule, field): self.initial_p = initial_p self.final_p = final_p self.schedule = schedule self.field = field self.linear = LinearSchedule( initial_p=math.log(self.initial_p), final_p=math.log(self.final_p), schedule=self.schedule, field=self.field) def value(self, **kwargs): return math.exp(self.linear(**kwargs)) def make_schedule(args): if isinstance(args, numbers.Number): return ConstantSchedule(args) else: return globals()[args['type']](**{key: value for key, value in args.items() if key != 'type'})
[ "pickle.dump", "multiprocessing.Array", "pickle.load", "math.log", "numpy.random.RandomState" ]
[((1518, 1562), 'multiprocessing.Array', 'multiprocessing.Array', (['ctypes.c_float', 'count'], {}), '(ctypes.c_float, count)\n', (1539, 1562), False, 'import ctypes, multiprocessing\n'), ((1960, 1975), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (1971, 1975), False, 'import pickle\n'), ((2320, 2347), 'pickle.dump', 'pickle.dump', (['self.noise', 'fh'], {}), '(self.noise, fh)\n', (2331, 2347), False, 'import pickle\n'), ((3472, 3496), 'math.log', 'math.log', (['self.initial_p'], {}), '(self.initial_p)\n', (3480, 3496), False, 'import math\n'), ((3522, 3544), 'math.log', 'math.log', (['self.final_p'], {}), '(self.final_p)\n', (3530, 3544), False, 'import math\n'), ((2018, 2045), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2039, 2045), True, 'import numpy as np\n')]
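The schedule classes above interpolate a hyperparameter from initial_p to final_p over `schedule` steps, clipping once the step count passes `schedule`; the exponential variant performs the same interpolation in log space. A small standalone sketch with made-up numbers:

# Illustrative sketch of the linear and log-space schedule arithmetic.
import math

def linear(t, schedule=100, initial_p=1.0, final_p=0.1):
    frac = min(float(t) / schedule, 1.0)
    return initial_p + frac * (final_p - initial_p)

def exponential(t, schedule=100, initial_p=1.0, final_p=0.1):
    return math.exp(linear(t, schedule, math.log(initial_p), math.log(final_p)))

print(round(linear(50), 4), round(exponential(50), 4))   # halfway: 0.55 (linear) vs ~0.3162 (log-space)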
import random import glob import torch from torch import nn import numpy as np from torch.nn import init import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter as P from torchvision import transforms, set_image_backend # set_image_backend('accimage') from models.pytorch_biggan.datasets import default_loader from pytorch_pretrained_biggan import one_hot_from_names alpha = np.concatenate([np.linspace(0, 1, 256), np.linspace(1, 0, 256)]) alpha = torch.from_numpy(alpha).to('cuda', dtype=torch.float32) Sx = torch.Tensor([[[[1, 0, -1], [2, 0, -2], [1, 0, -1]]]]).to('cuda', dtype=torch.float32) Sy = torch.transpose(Sx, 1, 0) data_dir = 'data/processed/vase_fragment_dataset/' full_img = lambda img_id: f'{data_dir}/full_{img_id}.jpg' frag_img = lambda img_id, n_frag: f'{data_dir}/frag_{img_id}_{n_frag}.jpg' def vase_vector(batch_size): return one_hot_from_names(['vase'], batch_size=batch_size) def gather_ids(): img_ids = list() for f in glob.glob(f'{data_dir}/full_*.jpg'): img_id = f.split('_')[-1].split('.')[0] img_ids.append(int(img_id)) assert img_ids # print('num frags', n_frags) # print('num ids', len(img_ids)) # print(img_ids[:10]) n_frags = len(glob.glob(f'{data_dir}/frag_{img_ids[0]}_*.jpg')) return img_ids, n_frags def loss_fn_scaled_mse(x, y): loss = (x-y)**2 n_terms = np.product(loss.shape) # print(loss.shape) loss = torch.einsum('bcmn,n->bcm', loss, alpha) # print(loss.shape) loss = torch.einsum('bcm,m->bc', loss, alpha) # print(loss.shape) loss = torch.mean(loss) / n_terms # print(loss.shape) # input() return loss def loss_fn_scaled_mae(x, y): loss = torch.abs(x-y) n_terms = np.product(loss.shape) # print(loss.shape) loss = torch.einsum('bcmn,n->bcm', loss, alpha) # print(loss.shape) loss = torch.einsum('bcm,m->bc', loss, alpha) # print(loss.shape) loss = torch.mean(loss) / n_terms # print(loss.shape) # input() return loss def sobel(img): # print(img.shape) gray = torch.sum(img, keepdim=True, dim=1) # print(gray.shape) edge_x = torch.conv2d(gray, Sx, padding=1) # print(edge_x.shape) edge_y = torch.conv2d(gray, Sy, padding=1) # input() return edge_x**2 + edge_y**2 # return torch.sqrt(edge_x**2 + edge_y**2) class FragmentDataset: def __init__(self): img_ids, n_frags = gather_ids() self.to_tensor = transforms.ToTensor() self.data_dir = data_dir self.img_ids = img_ids self.n_frags = n_frags self.loader = default_loader def take(self, N, batch_size=1): for _ in range(N): imgs, frags = [], [] for _ in range(batch_size): img_id = random.choice(self.img_ids) # print(img_id) n_frag = random.randint(0, self.n_frags-1) img = self.loader(full_img(img_id)) frag = self.loader(frag_img(img_id, n_frag)) imgs += [self.to_tensor(img).unsqueeze(0)] frags += [self.to_tensor(frag).unsqueeze(0)] imgs = torch.cat(imgs, axis=0) frags = torch.cat(frags, axis=0) yield frags, imgs class PreGAN(nn.Module): def __init__(self): super(PreGAN, self).__init__() # 1 input image channel, 6 output channels, 3x3 square convolution # kernel # self.conv1 = nn.Conv2d(3, 16, 3) # self.conv2 = nn.Conv2d(16, 16, 3) # an affine operation: y = Wx + b # self.fc1 = nn.Linear(120*120, 128) # 6*6 from image dimension # self.fc2 = nn.Linear(128, 128) # self.fc3 = nn.Linear(128, 128) # # mods = [ # self.conv1, # self.conv2, # self.fc1, # self.fc2, # self.fc3, # ] # self.layers = nn.ModuleList(mods) # OLD FORWARD # Max pooling over a (2, 2) window # x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you can only specify a single number # x = F.max_pool2d(F.relu(self.conv2(x)), 
2) # x = x.view(-1, self.num_flat_features(x)) # x = F.relu(self.fc1(x)) # x = F.relu(self.fc2(x)) # x = self.fc3(x) self.main = nn.Sequential( nn.Conv2d(3, 16, 3), nn.ReLU(), nn.MaxPool2d(2), nn.Conv2d(16, 16, 3), nn.ReLU(), nn.MaxPool2d(2), nn.Flatten(), # nn.Flatten(), nn.Linear(120 * 120, 128), nn.ReLU(), nn.Linear(128, 128), nn.ReLU(), # nn.Linear(128, 128), ) # added custom self.init = 'ortho' self.param_count = 0 def forward(self, x): return self.main(x) def init_weights(self): for module in self.modules(): if (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Embedding)): if self.init == 'ortho': init.orthogonal_(module.weight) elif self.init == 'N02': init.normal_(module.weight, 0, 0.02) elif self.init in ['glorot', 'xavier']: init.xavier_uniform_(module.weight) else: print('Init style not recognized...') self.param_count += sum([p.data.nelement() for p in module.parameters()]) print('Param count for PreGAN initialized parameters: %d' % self.param_count) class BothGAN(nn.Module): def __init__(self, pregan, biggan, lr=1e-4): super(BothGAN, self).__init__() self.pregan = pregan self.biggan = biggan self.vase_vec = torch.from_numpy(vase_vector(1)) self.add_module('pregan', self.pregan) self.add_module('biggan', self.biggan) # optim called last # for k, v in self.named_parameters(): # print('BothGAN parameter', k) self.optim = optim.Adam(self.parameters(), lr=lr) # self.optim = optim.Adam(params=self.parameters(), lr=self.lr, # betas=(self.B1, self.B2), weight_decay=0, # eps=self.adam_eps) def forward(self, frag): noise = self.pregan(frag) vase_vec = torch.cat([self.vase_vec]*noise.shape[0], dim=0) return self.biggan(noise, vase_vec, 1.0) def to(self, *args, **kwargs): super().to(*args, **kwargs) self.vase_vec = self.vase_vec.to(*args, **kwargs) class View(nn.Module): def __init__(self, shape): super(View, self).__init__() self.shape = shape def forward(self, x): return x.view(*self.shape) class ScratchGAN(nn.Module): def __init__(self): super(ScratchGAN, self).__init__() # 1 input image channel, 6 output channels, 3x3 square convolution # kernel # self.conv1 = nn.Conv2d(3, 16, 3) # self.conv2 = nn.Conv2d(16, 16, 3) # an affine operation: y = Wx + b # self.fc1 = nn.Linear(120*120, 128) # 6*6 from image dimension # self.fc2 = nn.Linear(128, 128) # self.fc3 = nn.Linear(128, 128) # # mods = [ # self.conv1, # self.conv2, # self.fc1, # self.fc2, # self.fc3, # ] # self.layers = nn.ModuleList(mods) # OLD FORWARD # Max pooling over a (2, 2) window # x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you can only specify a single number # x = F.max_pool2d(F.relu(self.conv2(x)), 2) # x = x.view(-1, self.num_flat_features(x)) # x = F.relu(self.fc1(x)) # x = F.relu(self.fc2(x)) # x = self.fc3(x) self.main = nn.Sequential( # nn.Conv2d(3, 8, 3, padding=1), # nn.ReLU(), nn.Flatten(), nn.Linear(128*128*3, 32*32*4), View((-1, 4, 32, 32)), nn.Conv2d(4, 4, 3, padding=1), nn.ReLU(), nn.Upsample(scale_factor=2), nn.Conv2d(4, 4, 3, padding=1), nn.ReLU(), nn.Upsample(scale_factor=2), # nn.MaxPool2d(2), # nn.Conv2d(4, 4, 3, padding=1), # nn.ReLU(), nn.Conv2d(4, 4, 3, padding=1), nn.ReLU(), nn.Upsample(scale_factor=2), # nn.Conv2d(4, 4, 3, padding=1), # nn.ReLU(), nn.Conv2d(4, 3, 3, padding=1), nn.ReLU(), nn.Upsample(scale_factor=2), nn.Conv2d(3, 3, 3, padding=1), # nn.MaxPool2d(2), # nn.Flatten(), # nn.Linear(675, 512*512*3), # nn.ReLU(), # nn.Linear(512*512*3, 512*512*3), # nn.ReLU(), # nn.Linear(512*512*3, 512*512*3), # nn.Sigmoid(), ) # added 
custom self.init = 'ortho' self.param_count = 0 # optim called last self.optim = optim.Adam(self.parameters()) # self.optim = optim.Adam(params=self.parameters(), lr=self.lr, # betas=(self.B1, self.B2), weight_decay=0, # eps=self.adam_eps) def forward(self, x): return self.main(x).view(-1, 3, 512, 512) def init_weights(self): for module in self.modules(): if (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Embedding)): if self.init == 'ortho': init.orthogonal_(module.weight) elif self.init == 'N02': init.normal_(module.weight, 0, 0.02) elif self.init in ['glorot', 'xavier']: init.xavier_uniform_(module.weight) else: print('Init style not recognized...') self.param_count += sum([p.data.nelement() for p in module.parameters()]) print('Param count for PreGAN initialized parameters: %d' % self.param_count)
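The classes above are only building blocks; a minimal training-loop sketch follows to show how they plug together. This is an editor's illustration, not code from the original file: it assumes a pretrained BigGAN generator compatible with BothGAN.forward (128-d latent, class vector from one_hot_from_names, truncation argument), full images at 512x512 so they match the generator output and the 512-wide alpha weighting in loss_fn_scaled_mse, fragment crops sized so that PreGAN's Flatten yields 120*120 features, and an available CUDA device (alpha, Sx and Sy are created on 'cuda' at import time).

# Editor's sketch: wire FragmentDataset, PreGAN and BothGAN into a simple
# reconstruction loop. Names such as train_sketch and n_steps are illustrative.
def train_sketch(biggan, n_steps=100, batch_size=4):
    dataset = FragmentDataset()
    model = BothGAN(PreGAN(), biggan, lr=1e-4)
    model.pregan.init_weights()
    model.to('cuda')
    for frags, imgs in dataset.take(n_steps, batch_size=batch_size):
        frags, imgs = frags.to('cuda'), imgs.to('cuda')
        recon = model(frags)                    # fragment -> 128-d latent -> generated vase
        loss = loss_fn_scaled_mse(recon, imgs)  # centre-weighted reconstruction loss
        model.optim.zero_grad()
        loss.backward()
        model.optim.step()
    return model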
[ "numpy.product", "torch.nn.ReLU", "torch.from_numpy", "torch.nn.init.orthogonal_", "torch.sum", "torch.conv2d", "torch.mean", "torch.nn.Flatten", "torch.nn.init.xavier_uniform_", "numpy.linspace", "torchvision.transforms.ToTensor", "random.randint", "glob.glob", "torch.abs", "random.choice", "torch.Tensor", "torch.transpose", "torch.einsum", "torch.nn.Upsample", "torch.cat", "torch.nn.init.normal_", "torch.nn.Conv2d", "pytorch_pretrained_biggan.one_hot_from_names", "torch.nn.MaxPool2d", "torch.nn.Linear" ]
#!/usr/bin/python3 import math import numpy as np import pdb import time import torch from mseg_semantic.domain_generalization.ccsa_utils import ( contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor ) """ For sake of unit tests, pretend we have the following categories: Let 0 = Sky 1 = Mountain 2 = Road 3 = Person 4 = Vegetation """ def test_contrastive_loss1(): """ Should be no loss here (zero from pull term, and zero from push term) """ # which pairs share the same semantic class label y_c = torch.tensor([ 1., 0., 0., 0., 1.], dtype=torch.float32) # distances between pairs pred_dists = torch.tensor([0, 1.1, 1.1, 1.1, 0], dtype=torch.float32) loss = contrastive_loss(y_c, pred_dists) gt_loss = torch.tensor([0]) assert torch.allclose(loss, gt_loss) def test_contrastive_loss2(): """ There should be more loss here (coming only from push term) """ # which pairs share the same semantic class label y_c = torch.tensor([ 1., 0., 0., 0., 1.], dtype=torch.float32) # distances between pairs pred_dists = torch.tensor([0, 0.2, 0.3, 0.1, 0], dtype=torch.float32) loss = contrastive_loss(y_c, pred_dists) gt_loss = torch.tensor([0.3880]) assert torch.allclose(loss, gt_loss, atol=1e-3) def test_contrastive_loss3(): """ There should be the most loss here (some from pull term, and some from push term also) """ # which pairs share the same semantic class label y_c = torch.tensor([ 1., 0., 0., 0., 1.], dtype=torch.float32) # distances between pairs pred_dists = torch.tensor([2.0, 0.2, 0.3, 0.1, 4.0], dtype=torch.float32) loss = contrastive_loss(y_c, pred_dists) gt_loss = torch.tensor([4.3880]) assert torch.allclose(loss, gt_loss, atol=1e-3) def test_paired_euclidean_distance(): """ """ X = torch.tensor( [ [3,0], [4,0], [1,1] ], dtype=torch.float32) Y = torch.tensor( [ [1,1], [0,3], [0,4] ], dtype=torch.float32) dists = paired_euclidean_distance(X, Y) gt_dists = torch.tensor( [ [ math.sqrt(2*2 + 1) ], # (3,0) vs. (1,1) [ math.sqrt(3*3 + 4*4) ], # (4,0) vs. (0,3) [ math.sqrt(3*3 + 1) ] # (1,1) vs. (0,4) ]) torch.allclose(gt_dists.squeeze(), dists, atol=1e-3) def test_downsample_label_map(): """ Downsample two label maps "Y" """ labelmap_1 = torch.tensor( [ [0,0,0,0,0,0,0,0], [4,4,0,0,0,0,4,4], [4,3,2,2,2,2,3,4], [4,2,2,2,2,2,2,4] ]) labelmap_2 = torch.tensor( [ [1,1,1,1,0,0,0,0], [1,1,1,1,2,2,2,4], [4,4,4,4,2,2,2,4], [4,4,4,3,2,2,2,4] ]) Y = torch.stack([labelmap_1, labelmap_2]) Y = Y.type(torch.float32) assert Y.shape == (2,4,8) dY = downsample_label_map(Y, d=2) assert dY.shape == (2,2,4) gt_dY = torch.tensor( [ [[0., 0., 0., 0.], [4., 2., 2., 3.]], [[1., 1., 0., 0.], [4., 4., 2., 2.]] ]) dY = downsample_label_map(Y, d=4) gt_dY = torch.tensor( [ [[0., 0.]], [[1., 0.]] ]) assert dY.shape == (2,1,2) def test_sample_pair_indices1(): """ Given labels for 3 images, sample corresponding pixels that are known positives and that are known negatives. Suppose images 0 and 2 come from Domain-0, and image 1 comes from Domain-1. 
""" labelmap_0 = torch.tensor( [ [0,0,0,0,0,0,0,0], [4,4,0,0,0,0,4,4], [4,3,2,2,2,2,3,4], [4,2,2,2,2,2,2,4] ], dtype=torch.float32) labelmap_1 = torch.tensor( [ [1,1,1,1,0,0,0,0], [1,1,1,1,2,2,2,4], [4,4,4,4,2,2,2,4], [4,4,4,3,2,2,2,4] ], dtype=torch.float32) labelmap_2 = torch.tensor( [ [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4] ], dtype=torch.float32) Y = torch.stack([labelmap_0, labelmap_1, labelmap_2]) assert Y.shape == (3,4,8) batch_domain_indices = torch.tensor([0,1,0], dtype=torch.int32) pos_pair_info, neg_pair_info = sample_pair_indices(Y, batch_domain_indices, num_pos_pairs=30000, neg_to_pos_ratio=3, downsample_factor=1) for (bi, hi, wi, bj, hj, wj) in pos_pair_info: assert Y[bi,hi,wi] == Y[bj,hj,wj] # is same class assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain for (bi, hi, wi, bj, hj, wj) in neg_pair_info: assert Y[bi,hi,wi] != Y[bj,hj,wj] # is different class assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain def test_sample_pair_indices2(): """ Given labels for 3 images, sample corresponding pixels that are known positives and that are known negatives. Suppose images 0 and 2 come from Domain-0, and image 1 comes from Domain-1. """ labelmap_0 = torch.tensor( [ [0,0,0,0,1,1,1,1], [0,0,0,0,1,1,1,1], [2,2,2,2,4,4,4,4], [2,2,2,2,4,4,4,4] ], dtype=torch.float32) labelmap_1 = torch.tensor( [ [1,1,1,1,0,0,0,0], [1,1,1,1,0,0,0,0], [4,4,4,4,2,2,2,2], [4,4,4,4,2,2,2,2] ], dtype=torch.float32) labelmap_2 = torch.tensor( [ [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4] ], dtype=torch.float32) Y = torch.stack([labelmap_0, labelmap_1, labelmap_2]) assert Y.shape == (3,4,8) batch_domain_indices = torch.tensor([0,1,0], dtype=torch.int32) pos_pair_info, neg_pair_info = sample_pair_indices(Y, batch_domain_indices, num_pos_pairs=3000, neg_to_pos_ratio=3, downsample_factor=2) for (bi, hi, wi, bj, hj, wj) in pos_pair_info: assert Y[:,::2,::2][bi,hi,wi] == Y[:,::2,::2][bj,hj,wj] # is same class assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain for (bi, hi, wi, bj, hj, wj) in neg_pair_info: assert Y[:,::2,::2][bi,hi,wi] != Y[:,::2,::2][bj,hj,wj] # is different class assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain def test_remove_pairs_from_same_domain(): """ Consider a minibatch of size 5 (examples). Suppose we have sampled 4 pairs of pixel locations. In training, we want only pairs from different domains. We enforce that their feature embeddings are similar. We could have 1 million sampled pairs from a minibatch of size 5. (Number of elements in batch (batch_domain_indices) need not agree with number of sampled pairs!) """ # show which minibatch examples belong to which domain batch_domain_indices = torch.tensor([0,1,2,1,0]) # sampled pairs (a,b) are enumerated here. 
a_info_ = torch.tensor( [ [0, 1, 2], # Belongs to domain 0 (will be removed) [0, 1, 2], # Belongs to domain 0 [2, 1, 2], # Belongs to domain 2 [3, 1, 2] # Belongs to domain 1 (will be removed) ]) b_info_ = torch.tensor( [ [4, 3, 4], # Belongs to domain 0 (will be removed) [1, 3, 4], # Belongs to domain 1 [3, 3, 4], # Belongs to domain 1 [1, 3, 4] # Belongs to domain 1 (will be removed) ]) a_pair_info, b_pair_info = remove_pairs_from_same_domain(batch_domain_indices, a_info_, b_info_) gt_a_pair_info = torch.tensor( [ [0, 1, 2], [2, 1, 2] ]) assert torch.allclose(gt_a_pair_info, a_pair_info) gt_b_pair_info = torch.tensor( [ [1, 3, 4], [3, 3, 4] ]) assert torch.allclose(gt_b_pair_info, b_pair_info) def test_form_pair_info_tensor(): """ Ensure hstacking of 3 length-N 1d arrays into a (N,3) array is successful. Given batch_dim_idxs (representing indices of examples in a minibatch), and px_1d_y (representing row indices) and px_1d_x (representing column indices), stack them along axis-0 (row dimension). """ batch_dim_idxs = torch.tensor([5,6,7,8,9], dtype=torch.int32) px_1d_y = torch.tensor([4,3,2,1,0], dtype=torch.int32) px_1d_x = torch.tensor([0,2,4,6,8], dtype=torch.int32) pair_info = form_pair_info_tensor(batch_dim_idxs, px_1d_y, px_1d_x) gt_pair_info = torch.tensor( [ [5,4,0], [6,3,2], [7,2,4], [8,1,6], [9,0,8] ], dtype=torch.int32) assert torch.allclose(pair_info, gt_pair_info) def test_find_matching_pairs(): """ Given a batch of ground truth label maps, and sampled pixel pair locations (pairs are across label maps), identify which pairs are matching vs. non-matching and return corresponding metadata (basically, partition them). Get back pos_pair_info -- Pytorch tensor containing info about each positive pair (a,b). Contains (a batch_idx, a row, a col, b batch_idx, b row, b col) Also get back neg_pair_info -- same as above, but for negative pairs. 
""" labelmap_0 = torch.tensor( [ [0,0,0,0,0,0,0,0], [4,4,0,0,0,0,4,4], [4,3,2,2,2,2,3,4], [4,2,2,2,2,2,2,4] ]) labelmap_1 = torch.tensor( [ [1,1,1,1,0,0,0,0], [1,1,1,1,2,2,2,4], [4,4,4,4,2,2,2,4], [4,4,4,3,2,2,2,4] ]) labelmap_2 = torch.tensor( [ [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4] ]) Y = torch.stack([labelmap_0, labelmap_1, labelmap_2]) assert Y.shape == (3,4,8) a_pair_info = torch.tensor( [ [0,1,1], # pos [2,1,4], # neg [1,1,7], # pos [0,2,2] # neg ]) b_pair_info = torch.tensor( [ [2,3,7], # pos [0,1,4], # neg [2,3,0], # pos [1,3,3] # neg ]) pos_pair_info, neg_pair_info = find_matching_pairs(Y, a_pair_info, b_pair_info) gt_pos_pair_info = torch.tensor( [ [0, 1, 1, 2, 3, 7], # pos pairs [1, 1, 7, 2, 3, 0] ]) assert torch.allclose(pos_pair_info, gt_pos_pair_info) gt_neg_pair_info = torch.tensor( [ [2, 1, 4, 0, 1, 4], # neg pairs [0, 2, 2, 1, 3, 3] ]) assert torch.allclose(neg_pair_info, gt_neg_pair_info) def test_sample_crossdomain_pos_neg_pairs(): """ """ labelmap_0 = torch.tensor( [ [0,0,0,0,0,0,0,0], [4,4,0,0,0,0,4,4], [4,3,2,2,2,2,3,4], [4,2,2,2,2,2,2,4] ]) labelmap_1 = torch.tensor( [ [1,1,1,1,0,0,0,0], [1,1,1,1,2,2,2,4], [4,4,4,4,2,2,2,4], [4,4,4,3,2,2,2,4] ]) labelmap_2 = torch.tensor( [ [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4] ]) Y = torch.stack([labelmap_0, labelmap_1, labelmap_2]) assert Y.shape == (3,4,8) # here, domain 1 would be sampled more than others batch_domain_indices = torch.tensor([0,1,0], dtype=torch.int64) _, unique_domain_idxs = count_per_domain_statistics(batch_domain_indices) b, h, w = Y.shape INITIAL_SAMPLE_NUM = int(1e4) pos_pair_info, neg_pair_info = sample_crossdomain_pos_neg_pairs(Y, batch_domain_indices, unique_domain_idxs, w, h, INITIAL_SAMPLE_NUM) for (bi, hi, wi, bj, hj, wj) in pos_pair_info: assert Y[bi,hi,wi] == Y[bj,hj,wj] # is same class assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain for (bi, hi, wi, bj, hj, wj) in neg_pair_info: assert Y[bi,hi,wi] != Y[bj,hj,wj] # is different class assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain def test_count_per_domain_statistics(): """ """ domain_idxs = torch.tensor([0,1,0,1,4]) examples_per_domain, unique_domain_idxs = count_per_domain_statistics(domain_idxs) gt_examples_per_domain = np.array([2., 2., 0., 0., 1.], dtype=np.int32) gt_unique_domain_idxs = np.array([0, 1, 4]) assert np.allclose(examples_per_domain, gt_examples_per_domain) assert np.allclose(unique_domain_idxs, gt_unique_domain_idxs) assert examples_per_domain.dtype == np.int64 def test_sample_px_locations_uniformly(): """ Let 0 = Sky 1 = Mountain 2 = Road 3 = Person 4 = Vegetation In expectation, minibatch examples from less common domains should be sampled more often, if domains sampled uniformly. 
""" labelmap_1 = torch.tensor( [ [0,0,0,0,0,0,0,0], [4,4,0,0,0,0,4,4], [4,3,2,2,2,2,3,4], [4,2,2,2,2,2,2,4] ]) labelmap_2 = torch.tensor( [ [1,1,1,1,0,0,0,0], [1,1,1,1,2,2,2,4], [4,4,4,4,2,2,2,4], [4,4,4,3,2,2,2,4] ]) labelmap_3 = torch.tensor( [ [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4], [4,4,4,4,4,4,4,4] ]) Y = torch.stack([labelmap_1, labelmap_2, labelmap_3]) assert Y.shape == (3,4,8) # here, domain 1 would be sampled more than others (sampled twice as often) domain_indices = torch.tensor([0,1,0], dtype=torch.int64) # unique domain indices would be [0,1] _, unique_domain_idxs = count_per_domain_statistics(domain_indices) b, h, w = Y.shape INITIAL_SAMPLE_NUM = int(1e6) b_idxs, w_idxs, h_idxs = sample_px_locations_uniformly( domain_indices, unique_domain_idxs, w, h, INITIAL_SAMPLE_NUM ) # Verify expected value vs. empirical. Allow for some margin of error. # Less common domain (minibatch example 1) should be sampled roughly # 2x as often, since it appears less often. assert 245000 < (b_idxs == 0).sum() and (b_idxs == 0).sum() < 255000 assert 495000 < (b_idxs == 1).sum() and (b_idxs == 1).sum() < 505000 assert 245000 < (b_idxs == 2).sum() and (b_idxs == 2).sum() < 255000 # Sample minibatch indices should lie in [0,b) assert (b_idxs >= 0).sum() == INITIAL_SAMPLE_NUM assert (b_idxs < b).sum() == INITIAL_SAMPLE_NUM # Sampled pixel rows should lie in [0,h) assert (h_idxs >= 0).sum() == INITIAL_SAMPLE_NUM assert (h_idxs < h).sum() == INITIAL_SAMPLE_NUM # Sampled pixel columns should lie in [0,w) assert (w_idxs >= 0).sum() == INITIAL_SAMPLE_NUM assert (w_idxs < w).sum() == INITIAL_SAMPLE_NUM def test_shuffle_pytorch_tensor(): """ Given all possible permutations, ensure that the shuffling that was executed corresponds to any valid permutation. """ t = torch.tensor( [ [1,2], [3,4], [5,6] ]) shuffled = shuffle_pytorch_tensor(t) gt_permutations = torch.tensor( [ [[1,2], [3,4], [5,6]], [[1,2], [5,6], [3,4]], [[3,4], [5,6], [1,2]], [[5,6], [3,4], [1,2]], [[3,4], [1,2], [5,6]], [[5,6], [1,2], [3,4]] ]) assert any([torch.allclose(gt_permutations[i], shuffled) for i in range(6)]) def test_pytorch_random_choice(): """ Ensure that sampling with replacement returns values that are found in original array, and of correct shape. 
""" x = np.array([0,2,4,5,6]) vals = pytorch_random_choice(x, num_samples=10) for val in list(torch.unique(vals).cpu().numpy()): assert val in list(x) assert vals.shape == (10,) x = np.array([0,2,4,5,6]) vals = pytorch_random_choice(x, num_samples=3) for val in list(torch.unique(vals).cpu().numpy()): assert val in list(x) assert vals.shape == (3,) x = np.array([0,2]) vals = pytorch_random_choice(x, num_samples=10) for val in list(torch.unique(vals).cpu().numpy()): assert val in list(x) assert vals.shape == (10,) def test_get_merged_pair_embeddings(): """ """ pos_pair_info = torch.tensor( [ [0,1,1,1,2,2], [1,3,4,2,0,0] ]) neg_pair_info = torch.tensor( [ [0,1,1,1,2,2], [1,3,4,2,0,0] ]) resnet_embedding = torch.arange(2*3*4*5).reshape(3,2,4,5) y_c, a_embedding, b_embedding = get_merged_pair_embeddings( pos_pair_info, neg_pair_info, resnet_embedding ) gt_y_c = torch.tensor([1,1,0,0], dtype=torch.float32) gt_a_embedding = torch.tensor( [ [ 6, 26], [59, 79], [ 6, 26], [59, 79] ]) gt_b_embedding = torch.tensor( [ [ 52, 72], [ 80, 100], [ 52, 72], [ 80, 100] ]) assert torch.allclose(a_embedding, gt_a_embedding) assert torch.allclose(b_embedding, gt_b_embedding) assert torch.allclose(y_c, gt_y_c) def test_get_pair_embedding(): """ """ pair_info = torch.tensor( [ # (bi,hi,wi,bj,hj,wj) [0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0] ]) embedding = torch.arange(2*3*4*5).reshape(3,2,4,5) a_embedding, b_embedding = get_pair_embedding(pair_info, embedding) gt_a_embedding = torch.tensor( [ [ 6, 26], [59, 79] ]) gt_b_embedding = torch.tensor( [ [ 52, 72], [ 80, 100] ]) assert torch.allclose(a_embedding, gt_a_embedding) assert torch.allclose(b_embedding, gt_b_embedding) def time_sample_pair_indices(): """ Count how long it takes to sample pairs. Suppose we have a batch size of 128 images, and 194 possible classes. Suppose the 128 minibatch examples come from 7 different domains. Takes around 0.5 sec on Macbook Pro to sample pair indices each time. """ for _ in range(10): batch_domain_idxs = torch.randint(low=0, high=7, size=(128,)) Y = torch.randint(low=0, high=194, size=(128,201,201)) start = time.time() out = sample_pair_indices( Y.type(torch.float32), batch_domain_idxs, num_pos_pairs=int(1e3), neg_to_pos_ratio=3, downsample_factor=8 ) end = time.time() duration = end - start print(f'Duration was {duration}') if __name__ == '__main__': """ """ test_contrastive_loss1() test_contrastive_loss2() test_contrastive_loss3() test_paired_euclidean_distance() test_downsample_label_map() test_shuffle_pytorch_tensor() test_pytorch_random_choice() test_count_per_domain_statistics() test_sample_px_locations_uniformly() test_form_pair_info_tensor() test_remove_pairs_from_same_domain() test_find_matching_pairs() test_sample_crossdomain_pos_neg_pairs() test_sample_pair_indices1() test_sample_pair_indices2() test_get_pair_embedding() test_get_merged_pair_embeddings() time_sample_pair_indices()
[ "mseg_semantic.domain_generalization.ccsa_utils.sample_pair_indices", "math.sqrt", "mseg_semantic.domain_generalization.ccsa_utils.pytorch_random_choice", "numpy.array", "torch.arange", "mseg_semantic.domain_generalization.ccsa_utils.paired_euclidean_distance", "torch.unique", "mseg_semantic.domain_generalization.ccsa_utils.get_merged_pair_embeddings", "mseg_semantic.domain_generalization.ccsa_utils.form_pair_info_tensor", "torch.randint", "mseg_semantic.domain_generalization.ccsa_utils.get_pair_embedding", "mseg_semantic.domain_generalization.ccsa_utils.contrastive_loss", "mseg_semantic.domain_generalization.ccsa_utils.sample_crossdomain_pos_neg_pairs", "numpy.allclose", "mseg_semantic.domain_generalization.ccsa_utils.sample_px_locations_uniformly", "mseg_semantic.domain_generalization.ccsa_utils.count_per_domain_statistics", "mseg_semantic.domain_generalization.ccsa_utils.shuffle_pytorch_tensor", "time.time", "mseg_semantic.domain_generalization.ccsa_utils.remove_pairs_from_same_domain", "mseg_semantic.domain_generalization.ccsa_utils.find_matching_pairs", "torch.stack", "torch.tensor", "torch.allclose", "mseg_semantic.domain_generalization.ccsa_utils.downsample_label_map" ]
from src.modeling import util
import numpy as np
from dskc import dskc_modeling


def test(model):
    x_test, y_test, _ = util.read_test_data()

    # predict
    y_pred = model.predict(x_test)

    # clean
    y_pred = np.asarray([x[0] for x in y_pred])

    # evaluate
    report = dskc_modeling.EvaluationReport(y_test, y_pred, name="Neural Network")

    return report


def predict(model, data):
    data = np.array(data, ndmin=2)

    y_pred = model.predict(data)

    value = y_pred[0][0]

    return value
[ "dskc.dskc_modeling.EvaluationReport", "numpy.array", "src.modeling.util.read_test_data", "numpy.asarray" ]
[((123, 144), 'src.modeling.util.read_test_data', 'util.read_test_data', ([], {}), '()\n', (142, 144), False, 'from src.modeling import util\n'), ((221, 255), 'numpy.asarray', 'np.asarray', (['[x[0] for x in y_pred]'], {}), '([x[0] for x in y_pred])\n', (231, 255), True, 'import numpy as np\n'), ((285, 354), 'dskc.dskc_modeling.EvaluationReport', 'dskc_modeling.EvaluationReport', (['y_test', 'y_pred'], {'name': '"""Neural Network"""'}), "(y_test, y_pred, name='Neural Network')\n", (315, 354), False, 'from dskc import dskc_modeling\n'), ((411, 434), 'numpy.array', 'np.array', (['data'], {'ndmin': '(2)'}), '(data, ndmin=2)\n', (419, 434), True, 'import numpy as np\n')]
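A minimal usage sketch for the predict() helper above; _FakeModel is a hypothetical
stand-in for whatever trained estimator the project actually loads, and the feature
values are made up purely for illustration.

import numpy as np

class _FakeModel:
    def predict(self, x):
        # mimic a Keras-style (n_samples, 1) output
        return np.sum(x, axis=1, keepdims=True)

model = _FakeModel()
value = predict(model, [0.2, 1.5, 3.0])  # ndmin=2 turns the list into shape (1, 3)
print(value)                             # ~4.7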
""" Derived from keras-yolo3 train.py (https://github.com/qqwweee/keras-yolo3), with additions from https://github.com/AntonMu/TrainYourOwnYOLO. """ import os import sys import argparse import pickle import numpy as np import keras.backend as K from keras.layers import Input, Lambda from keras.models import Model from keras.optimizers import Adam from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from PIL import Image from time import time from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss from yolo3.utils import get_random_data def get_curr_dir(): return os.path.dirname(os.path.abspath(__file__)) def get_parent_dir(n=1): """ returns the n-th parent dicrectory of the current working directory """ current_path = get_curr_dir() for k in range(n): current_path = os.path.dirname(current_path) return current_path # --- global constants EXPORT_DIR = os.path.join(get_parent_dir(), 'for_yolo', 'vott', 'vott-export') ANNOT_FILE = os.path.join(EXPORT_DIR, 'yolo_annotations.txt') WEIGHTS_DIR = os.path.join(get_curr_dir(), 'model_data') YOLO_CLASSES = os.path.join(EXPORT_DIR, 'classes.names') LOG_DIR = 'logs/000/' ANCHORS_PATH = os.path.join(WEIGHTS_DIR, 'yolo_anchors.txt') WEIGHTS_PATH = os.path.join(WEIGHTS_DIR, 'yolo_weights.h5') VAL_SPLIT = 0.1 # 10% validation data EPOCHS = 102 # number of epochs to train; 50% transfer, 50% fine-tuning def _main(): class_names = get_classes(YOLO_CLASSES) num_classes = len(class_names) anchors = get_anchors(ANCHORS_PATH) input_shape = (416, 416) # multiple of 32, height, width epoch1, epoch2 = EPOCHS // 2, EPOCHS // 2 model = create_model(input_shape, anchors, num_classes, freeze_body=2, weights_path=WEIGHTS_PATH) # make sure you know what you freeze logging = TensorBoard(log_dir=LOG_DIR) checkpoint = ModelCheckpoint(LOG_DIR + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor='val_loss', save_weights_only=True, save_best_only=True, period=3) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1) early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1) with open(ANNOT_FILE) as f: lines = f.readlines() np.random.seed(10101) np.random.shuffle(lines) num_val = int(len(lines) * VAL_SPLIT) num_train = len(lines) - num_val # Train with frozen layers first, to get a stable loss. # Adjust num epochs to your dataset. This step is enough to obtain a decent model. if True: model.compile(optimizer=Adam(lr=1e-3), loss={ # use custom yolo_loss Lambda layer. 
'yolo_loss': lambda y_true, y_pred: y_pred}) batch_size = 32 print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) history = model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), epochs=epoch1, initial_epoch=0, callbacks=[logging, checkpoint]) model.save_weights(os.path.join(LOG_DIR, 'trained_weights_stage_1.h5')) step1_train_loss = history.history['loss'] with open(os.path.join(log_dir_time,'step1_loss.npy'), 'w') as f: for item in step1_train_loss: f.write("%s\n" % item) step1_val_loss = np.array(history.history['val_loss']) with open(os.path.join(log_dir_time,'step1_val_loss.npy'), 'w') as f: for item in step1_val_loss: f.write("%s\n" % item) # Unfreeze and continue training, to fine-tune. # Train longer if the result is not good. if True: for i in range(len(model.layers)): model.layers[i].trainable = True model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change print('Unfreeze all layers.') batch_size = 4 # note that more GPU memory is required after unfreezing the body print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) history=model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), epochs=epoch1+epoch2, initial_epoch=epoch1, callbacks=[logging, checkpoint, reduce_lr, early_stopping]) model.save_weights(os.path.join(LOG_DIR, 'trained_weights_final.h5')) step2_train_loss = history.history['loss'] with open(os.path.join(log_dir_time,'step2_loss.npy'), 'w') as f: for item in step2_train_loss: f.write("%s\n" % item) step2_val_loss = np.array(history.history['val_loss']) with open(os.path.join(log_dir_time,'step2_val_loss.npy'), 'w') as f: for item in step2_val_loss: f.write("%s\n" % item) # --- HELPER FUNCS def get_classes(classes_path): """ loads the classes """ with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def get_anchors(anchors_path): '''loads the anchors from a file''' with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, weights_path='keras_yolo3/model_data/yolo_weights.h5'): '''create the training model''' K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \ num_anchors//3, num_classes+5)) for l in range(3)] model_body = yolo_body(image_input, num_anchors//3, num_classes) print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) if load_pretrained: model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) print('Load weights {}.'.format(weights_path)) if freeze_body in [1, 2]: # Freeze darknet53 body or freeze all but 3 output layers. 
num = (185, len(model_body.layers)-3)[freeze_body-1] for i in range(num): model_body.layers[i].trainable = False print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})( [*model_body.output, *y_true]) model = Model([model_body.input, *y_true], model_loss) return model def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes): '''data generator for fit_generator''' n = len(annotation_lines) i = 0 while True: image_data = [] box_data = [] for b in range(batch_size): if i==0: np.random.shuffle(annotation_lines) image, box = get_random_data(annotation_lines[i], input_shape, random=True) image_data.append(image) box_data.append(box) i = (i+1) % n image_data = np.array(image_data) box_data = np.array(box_data) y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes) yield [image_data, *y_true], np.zeros(batch_size) def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes): n = len(annotation_lines) if n==0 or batch_size<=0: return None return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes) # ---- if __name__ == '__main__': _main()
[ "yolo3.model.preprocess_true_boxes", "numpy.array", "yolo3.model.yolo_body", "numpy.random.seed", "keras.callbacks.EarlyStopping", "keras.backend.clear_session", "keras.models.Model", "keras.optimizers.Adam", "keras.callbacks.ReduceLROnPlateau", "os.path.dirname", "yolo3.utils.get_random_data", "keras.callbacks.ModelCheckpoint", "keras.layers.Lambda", "os.path.join", "keras.callbacks.TensorBoard", "keras.layers.Input", "numpy.zeros", "os.path.abspath", "numpy.random.shuffle" ]
[((1060, 1108), 'os.path.join', 'os.path.join', (['EXPORT_DIR', '"""yolo_annotations.txt"""'], {}), "(EXPORT_DIR, 'yolo_annotations.txt')\n", (1072, 1108), False, 'import os\n'), ((1182, 1223), 'os.path.join', 'os.path.join', (['EXPORT_DIR', '"""classes.names"""'], {}), "(EXPORT_DIR, 'classes.names')\n", (1194, 1223), False, 'import os\n'), ((1262, 1307), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""yolo_anchors.txt"""'], {}), "(WEIGHTS_DIR, 'yolo_anchors.txt')\n", (1274, 1307), False, 'import os\n'), ((1323, 1367), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""yolo_weights.h5"""'], {}), "(WEIGHTS_DIR, 'yolo_weights.h5')\n", (1335, 1367), False, 'import os\n'), ((1911, 1939), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'LOG_DIR'}), '(log_dir=LOG_DIR)\n', (1922, 1939), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((1957, 2124), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(LOG_DIR + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5')"], {'monitor': '"""val_loss"""', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'period': '(3)'}), "(LOG_DIR +\n 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor=\n 'val_loss', save_weights_only=True, save_best_only=True, period=3)\n", (1972, 2124), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2140, 2212), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(3)', 'verbose': '(1)'}), "(monitor='val_loss', factor=0.1, patience=3, verbose=1)\n", (2157, 2212), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2234, 2304), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='val_loss', min_delta=0, patience=10, verbose=1)\n", (2247, 2304), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2373, 2394), 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), '(10101)\n', (2387, 2394), True, 'import numpy as np\n'), ((2399, 2423), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (2416, 2423), True, 'import numpy as np\n'), ((6205, 6222), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6220, 6222), True, 'import keras.backend as K\n'), ((6261, 6289), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)'}), '(shape=(None, None, 3))\n', (6266, 6289), False, 'from keras.layers import Input, Lambda\n'), ((6501, 6554), 'yolo3.model.yolo_body', 'yolo_body', (['image_input', '(num_anchors // 3)', 'num_classes'], {}), '(image_input, num_anchors // 3, num_classes)\n', (6510, 6554), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((7372, 7418), 'keras.models.Model', 'Model', (['[model_body.input, *y_true]', 'model_loss'], {}), '([model_body.input, *y_true], model_loss)\n', (7377, 7418), False, 'from keras.models import Model\n'), ((662, 687), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (677, 687), False, 'import os\n'), ((889, 918), 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), '(current_path)\n', (904, 918), False, 'import os\n'), ((3767, 3804), 'numpy.array', 'np.array', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", 
(3775, 3804), True, 'import numpy as np\n'), ((5351, 5388), 'numpy.array', 'np.array', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (5359, 5388), True, 'import numpy as np\n'), ((6359, 6484), 'keras.layers.Input', 'Input', ([], {'shape': '(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2): 8}[l], \n num_anchors // 3, num_classes + 5)'}), '(shape=(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2\n ): 8}[l], num_anchors // 3, num_classes + 5))\n', (6364, 6484), False, 'from keras.layers import Input, Lambda\n'), ((7175, 7315), 'keras.layers.Lambda', 'Lambda', (['yolo_loss'], {'output_shape': '(1,)', 'name': '"""yolo_loss"""', 'arguments': "{'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5}"}), "(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors':\n anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})\n", (7181, 7315), False, 'from keras.layers import Input, Lambda\n'), ((7982, 8002), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (7990, 8002), True, 'import numpy as np\n'), ((8022, 8040), 'numpy.array', 'np.array', (['box_data'], {}), '(box_data)\n', (8030, 8040), True, 'import numpy as np\n'), ((8058, 8124), 'yolo3.model.preprocess_true_boxes', 'preprocess_true_boxes', (['box_data', 'input_shape', 'anchors', 'num_classes'], {}), '(box_data, input_shape, anchors, num_classes)\n', (8079, 8124), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((3480, 3531), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""trained_weights_stage_1.h5"""'], {}), "(LOG_DIR, 'trained_weights_stage_1.h5')\n", (3492, 3531), False, 'import os\n'), ((5066, 5115), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""trained_weights_final.h5"""'], {}), "(LOG_DIR, 'trained_weights_final.h5')\n", (5078, 5115), False, 'import os\n'), ((5974, 5991), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (5982, 5991), True, 'import numpy as np\n'), ((7802, 7864), 'yolo3.utils.get_random_data', 'get_random_data', (['annotation_lines[i]', 'input_shape'], {'random': '(True)'}), '(annotation_lines[i], input_shape, random=True)\n', (7817, 7864), False, 'from yolo3.utils import get_random_data\n'), ((2696, 2710), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (2700, 2710), False, 'from keras.optimizers import Adam\n'), ((3603, 3647), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step1_loss.npy"""'], {}), "(log_dir_time, 'step1_loss.npy')\n", (3615, 3647), False, 'import os\n'), ((3823, 3871), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step1_val_loss.npy"""'], {}), "(log_dir_time, 'step1_val_loss.npy')\n", (3835, 3871), False, 'import os\n'), ((4195, 4210), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (4199, 4210), False, 'from keras.optimizers import Adam\n'), ((5187, 5231), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step2_loss.npy"""'], {}), "(log_dir_time, 'step2_loss.npy')\n", (5199, 5231), False, 'import os\n'), ((5407, 5455), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step2_val_loss.npy"""'], {}), "(log_dir_time, 'step2_val_loss.npy')\n", (5419, 5455), False, 'import os\n'), ((7741, 7776), 'numpy.random.shuffle', 'np.random.shuffle', (['annotation_lines'], {}), '(annotation_lines)\n', (7758, 7776), True, 'import numpy as np\n'), ((8162, 8182), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (8170, 8182), True, 'import numpy as np\n')]
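A hedged sketch of the anchor-file format that get_anchors() above expects: a single
line of comma-separated floats, reshaped into (N, 2) width/height pairs. The file
name here is hypothetical; the values are the common YOLOv3 defaults.

anchor_line = "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326"
with open("yolo_anchors.txt", "w") as f:
    f.write(anchor_line)

anchors = get_anchors("yolo_anchors.txt")
print(anchors.shape)  # (9, 2)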
#!/usr/bin/env python ''' Created by Seria at 02/11/2018 3:38 PM Email: <EMAIL> _ooOoo_ o888888888o o88`_ . _`88o (| 0 0 |) O \ 。 / O _____/`-----‘\_____ .’ \|| _ _ ||/ `. | _ ||| | ||| _ | | | \\ // | | | | \-----/ | | \ .\ ___/- -\___ /. / ,--- / ___\<|>/___ \ ---, | |: \ \ / / :| | `\--\_ -. ___ .- _/--/‘ =========== \__ NOBUG __/ =========== ''' # -*- coding:utf-8 -*- import numpy as np import random as rand from collections import abc from torchvision.transforms import * F = functional from PIL import Image from ..toolkit import byte2arr __all__ = ('Comburant', 'HWC2CHW', 'Random', 'NEAREST', 'LINEAR', 'CUBIC', 'HORIZONTAL', 'VERTICAL', 'Resize', 'Crop', 'Flip', 'Rotate', 'Brighten', 'Contrast', 'Saturate', 'Hue') NEAREST = 0 LINEAR = 1 CUBIC = 2 PIL_INTERP = {NEAREST: Image.NEAREST, LINEAR: Image.BILINEAR, CUBIC: Image.BICUBIC} HORIZONTAL = 10 VERTICAL = 11 class Comburant(object): def __init__(self, *args, is_encoded=False): if isinstance(args[-1], HWC2CHW): ls_args = list(args[:-1]) self.cvt_form = args[-1] else: ls_args = list(args) self.comburant = Compose(ls_args) self.is_encoded = is_encoded def __call__(self, imgs): if self.is_encoded: if isinstance(imgs, abc.Sequence): img = [] for i in imgs: img.append(byte2arr(i, as_np=False)) else: img = byte2arr(imgs, as_np=False) imgs = img imgs = self.comburant(imgs) if isinstance(imgs, abc.Sequence): img = [] for i in imgs: i = np.array(i) i = i.astype(np.float32) / 255 if hasattr(self, 'cvt_form'): i = self.cvt_form(i) img.append(i) else: img = np.array(imgs) img = img.astype(np.float32) / 255 if hasattr(self, 'cvt_form'): img= self.cvt_form(img) return img class ABC(object): def __init__(self): pass def call(self, *args, **kwargs): raise NotImplementedError def __call__(self, imgs): if isinstance(imgs, abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i)) else: ret = self.call(imgs) return ret class HWC2CHW(ABC): def __init__(self): super(HWC2CHW, self).__init__() def __call__(self, img): return np.transpose(img, (2, 0, 1)) class Random(ABC): def __init__(self, p, comburant): super(Random, self).__init__() self.p = p self.cbr = comburant def call(self, img, conduct): if conduct: return self.cbr(img) else: return img def __call__(self, imgs): if rand.random() < self.p: conduct = True else: conduct = False if isinstance(imgs, abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i, conduct)) else: ret = self.call(imgs, conduct) return ret class Resize(ABC): def __init__(self, size, interp=LINEAR): # size: (height, width) super(Resize, self).__init__() self.size = size self.interp = interp def call(self, img): return img.resize((self.size[1], self.size[0]), PIL_INTERP[self.interp]) class Crop(ABC): def __init__(self, size, padding=(0, 0, 0, 0), area_ratio=(1, 1), aspect_ratio=(1, 1), interp=LINEAR, scale=()): # size: (height, width) # padding: (left, top, right, bottom) super(Crop, self).__init__() self.size = size self.padding = padding self.area_ratio = area_ratio self.aspect_ratio = aspect_ratio self.scale = scale if area_ratio == aspect_ratio == (1,1): self.reshape = False self.comburant = RandomCrop(size) else: self.reshape = True self.comburant = RandomResizedCrop(size, area_ratio, aspect_ratio, PIL_INTERP[interp]) def call(self, img, param, t=1): param = [p * t for p in param] y, x, h, w = param padding = tuple([p * t for p in self.padding]) size = tuple([s * t for s in self.size]) img = F.pad(img, padding, 0, 'constant') # pad the width if needed if img.size[0] < size[1]: img = F.pad(img, (size[1] - img.size[0], 
0), 0, 'constant') # pad the height if needed if img.size[1] < size[0]: img = F.pad(img, (0, size[0] - img.size[1]), 0, 'constant') if self.reshape: return F.resized_crop(img, y, x, h, w, size, self.comburant.interpolation) else: return F.crop(img, y, x, h, w) def __call__(self, imgs): if len(self.scale) == 0: self.scale = len(imgs) * [1] img = F.pad(imgs[0], self.padding, 0, 'constant') # pad the width if needed if img.size[0] < self.size[1]: img = F.pad(img, (self.size[1] - img.size[0], 0), 0, 'constant') # pad the height if needed if img.size[1] < self.size[0]: img = F.pad(img, (0, self.size[0] - img.size[1]), 0, 'constant') if self.reshape: param = self.comburant.get_params(img, self.comburant.scale, self.comburant.ratio) else: param = self.comburant.get_params(img, self.comburant.size) if isinstance(imgs, abc.Sequence): ret = [] for i, v in enumerate(imgs): ret.append(self.call(v, param, self.scale[i])) else: ret = self.call(imgs, param) return ret class Flip(ABC): def __init__(self, axial): super(Flip, self).__init__() if axial == HORIZONTAL: self.comburant = RandomVerticalFlip(1) elif axial == VERTICAL: self.comburant = RandomHorizontalFlip(1) else: raise Exception('NEBULAE ERROR ⨷ the invoked flip type is not defined or supported.') def call(self, img): return self.comburant(img) class Rotate(ABC): def __init__(self, degree, intact=False, interp=NEAREST): ''' Args intact: whether to keep image intact which might enlarge the output size ''' super(Rotate, self).__init__() self.comburant = RandomRotation(degree, PIL_INTERP[interp], intact) def call(self, img, angle): return F.rotate(img, angle, self.comburant.resample, self.comburant.expand, self.comburant.center, self.comburant.fill) def __call__(self, imgs): angle = self.comburant.get_params(self.comburant.degrees) if isinstance(imgs, abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i, angle)) else: ret = self.call(imgs, angle) return ret class Brighten(ABC): def __init__(self, range): super(Brighten, self).__init__() self.comburant = ColorJitter(brightness=range) def call(self, img, factor): return F.adjust_brightness(img, factor) def __call__(self, imgs): factor = rand.uniform(self.comburant.brightness[0], self.comburant.brightness[1]) if isinstance(imgs, abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i, factor)) else: ret = self.call(imgs, factor) return ret class Contrast(ABC): def __init__(self, range): super(Contrast, self).__init__() self.comburant = ColorJitter(contrast=range) def call(self, img, factor): return F.adjust_contrast(img, factor) def __call__(self, imgs): factor = rand.uniform(self.comburant.contrast[0], self.comburant.contrast[1]) if isinstance(imgs, abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i, factor)) else: ret = self.call(imgs, factor) return ret class Saturate(ABC): def __init__(self, range): super(Saturate, self).__init__() self.comburant = ColorJitter(saturation=range) def call(self, img, factor): return F.adjust_saturation(img, factor) def __call__(self, imgs): factor = rand.uniform(self.comburant.saturation[0], self.comburant.saturation[1]) if isinstance(imgs, abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i, factor)) else: ret = self.call(imgs, factor) return ret class Hue(ABC): def __init__(self, range): super(Hue, self).__init__() self.comburant = ColorJitter(hue=range) def call(self, img, factor): return F.adjust_hue(img, factor) def __call__(self, imgs): factor = rand.uniform(self.comburant.hue[0], self.comburant.hue[1]) if isinstance(imgs, 
abc.Sequence): ret = [] for i in imgs: ret.append(self.call(i, factor)) else: ret = self.call(imgs, factor) return ret
[ "random.random", "numpy.array", "random.uniform", "numpy.transpose" ]
[((2822, 2850), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (2834, 2850), True, 'import numpy as np\n'), ((7607, 7679), 'random.uniform', 'rand.uniform', (['self.comburant.brightness[0]', 'self.comburant.brightness[1]'], {}), '(self.comburant.brightness[0], self.comburant.brightness[1])\n', (7619, 7679), True, 'import random as rand\n'), ((8173, 8241), 'random.uniform', 'rand.uniform', (['self.comburant.contrast[0]', 'self.comburant.contrast[1]'], {}), '(self.comburant.contrast[0], self.comburant.contrast[1])\n', (8185, 8241), True, 'import random as rand\n'), ((8739, 8811), 'random.uniform', 'rand.uniform', (['self.comburant.saturation[0]', 'self.comburant.saturation[1]'], {}), '(self.comburant.saturation[0], self.comburant.saturation[1])\n', (8751, 8811), True, 'import random as rand\n'), ((9285, 9343), 'random.uniform', 'rand.uniform', (['self.comburant.hue[0]', 'self.comburant.hue[1]'], {}), '(self.comburant.hue[0], self.comburant.hue[1])\n', (9297, 9343), True, 'import random as rand\n'), ((2165, 2179), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (2173, 2179), True, 'import numpy as np\n'), ((3165, 3178), 'random.random', 'rand.random', ([], {}), '()\n', (3176, 3178), True, 'import random as rand\n'), ((1957, 1968), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (1965, 1968), True, 'import numpy as np\n')]
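A hedged usage sketch of the augmentation classes above, chained in the order the
Comburant constructor expects (HWC2CHW last); it assumes the classes are in scope,
and the synthetic image only serves to show the output shape.

from PIL import Image

aug = Comburant(
    Resize((256, 256)),             # (height, width)
    Random(0.5, Flip(HORIZONTAL)),  # apply the flip with probability 0.5
    HWC2CHW(),                      # layout conversion, must come last
)
img = Image.new("RGB", (320, 240), color=(120, 60, 30))
out = aug(img)
print(out.shape, out.dtype, out.max())  # (3, 256, 256) float32, values in [0, 1]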
import obspy import unittest import numpy as np import madpy.checks as ch import madpy.tests.testdata.config as cfg class TestChecks(unittest.TestCase): def test_check_config(self): class Measurements: pass self.assertRaises(AttributeError, ch.check_config, Measurements()) self.assertIsNone(ch.check_config(cfg.Amplitude())) cfg.Amplitude.noise_phase = 'Pn' self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude()) cfg.Amplitude.noise_phase = 'P' cfg.Amplitude.amp_factor = -2. self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude()) cfg.Amplitude.plot = 'Yes' self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude()) cfg.Amplitude.plot = False cfg.Amplitude.signal_window_begin = 50. self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude()) cfg.Amplitude.signal_window_begin = -1 cfg.Amplitude.save_figure = True self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude()) cfg.Amplitude.save_figure = False self.assertIsNone(ch.check_config(cfg.Duration())) cfg.Duration.signal_phase = 'Sg' self.assertRaises(AssertionError, ch.check_config, cfg.Duration()) cfg.Duration.signal_phase = 'S' cfg.Duration.moving_average_window = -2 self.assertRaises(AssertionError, ch.check_config, cfg.Duration()) cfg.moving_average_window = 2 cfg.threshold_type = 'pre-p noise' self.assertRaises(AssertionError, ch.check_config, cfg.Duration()) cfg.threshold_type = 'noise' cfg.plot = True cfg.save_figure = True cfg.figure_path = '' self.assertRaises(AssertionError, ch.check_config, cfg.Duration()) cfg.plot = False cfg.save_figure = False def test_check_waveform(self): st = obspy.read('testdata/*.mseed') for tr in st: tr.stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00') tr.stats.p = 10. self.assertRaises(AttributeError, ch.check_config, tr) tr.stats.s = 20. 
self.assertIsNone(ch.check_stats(tr)) tr.stats.o = '2020-10-10T13:05:00.00' self.assertRaises(AssertionError, ch.check_stats, tr) tr.stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00') def test_check_datagaps(self): st = obspy.read('testdata/*.mseed') for tr in st: self.assertIsNone(ch.check_datagaps(tr)) n = int(len(tr.data) * 0.25) tr.data = tr.data[0:n] self.assertRaises(AssertionError, ch.check_datagaps, tr) def test_check_window(self): st = obspy.read('testdata/*.mseed') for tr in st: starttime = obspy.UTCDateTime('2020-10-10T13:05:00.00') endtime = obspy.UTCDateTime('2020-10-10T13:07:00.00') self.assertIsNone(ch.check_window(tr, starttime, endtime)) starttime = obspy.UTCDateTime('2020-10-10T13:04:00.00') self.assertRaises(AssertionError, ch.check_window, tr, starttime, endtime) endtime = obspy.UTCDateTime('2020-10-10T13:08:00.00') self.assertRaises(AssertionError, ch.check_window, tr, starttime, endtime) def test_check_amplitude(self): self.assertIsNone(ch.check_amplitude(0.5)) self.assertRaises(ValueError, ch.check_amplitude, np.nan) self.assertRaises(ValueError, ch.check_amplitude, np.inf) self.assertRaises(ValueError, ch.check_amplitude, -np.inf) self.assertRaises(ValueError, ch.check_amplitude, -0.5) self.assertRaises(ValueError, ch.check_amplitude, None) self.assertRaises(ValueError, ch.check_amplitude, True) self.assertRaises(ValueError, ch.check_amplitude, {'test': 'dict'}) self.assertRaises(ValueError, ch.check_amplitude, ['list', 5]) def test_check_fitting_window_end(self): i_max0 = 20000 i_end0 = np.arange(500, 5005) dt = 0.01 sp = 10 self.assertIsNone(ch.check_fitting_window_end(i_end0, i_max0, dt, sp)) i_end1 = [] self.assertRaises(AssertionError, ch.check_fitting_window_end, i_end1, i_max0, dt, sp) i_max1 = 2 self.assertRaises(AssertionError, ch.check_fitting_window_end, i_end0, i_max1, dt, sp) def test_check_plottype(self): self.assertIsNone(ch.check_plottype('linear')) self.assertIsNone(ch.check_plottype('log')) self.assertRaises(AssertionError, ch.check_plottype, 2) self.assertRaises(AssertionError, ch.check_plottype, 'fourier') def test_check_duration_index(self): cross = np.arange(0, 10, dtype=float) self.assertIsNone(ch.check_duration_index(cross)) self.assertRaises(AssertionError, ch.check_duration_index, []) def test_check_cc(self): cc = np.array([ [0.1, 0.8, 0.5, 0.9], [0.9, 0.1, 0.8, 0.5], [0.5, 0.9, 0.1, 0.8], [0.8, 0.5, 0.9, 0.1] ]) self.assertIsNone(ch.check_cc(cc, 1, 2)) self.assertRaises(AssertionError, ch.check_cc, cc.astype(int), 1, 2) self.assertRaises(AssertionError, ch.check_cc, cc[0:3, :], 1, 2) self.assertRaises(AssertionError, ch.check_cc, cc[:, 0:3], 1, 2) cc[1, 2] = np.nan self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2) cc[1, 2] = np.inf self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2) cc[1, 2] = -np.inf self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2) cc[1, 2] = 0 self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2) cc[1, 2] = -10 self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2) cc[1, 2] = 25 self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2) def test_check_coda(self): x0 = np.arange(0, 100) y0 = np.arange(0, 100) x1 = np.arange(0, 100, dtype=float) y1 = np.arange(0, 100, dtype=float) self.assertRaises(AssertionError, ch.check_coda, x0, y1) self.assertRaises(AssertionError, ch.check_coda, x1, y0) self.assertRaises(AssertionError, ch.check_coda, x0, y0) x2, y2 = ch.check_coda(x1, y1) self.assertEqual(len(x2), 100) self.assertEqual(len(y2), 100) x2[5:10] = np.nan self.assertRaises(AssertionError, 
ch.check_coda, x2, y1) y2[60:72] = np.nan x3, y3 = ch.check_coda(x1, y2) self.assertEqual(len(x3), 88) self.assertEqual(len(y3), 88) if __name__ == '__main__': unittest.main()
[ "obspy.read", "madpy.checks.check_amplitude", "numpy.arange", "madpy.checks.check_stats", "madpy.checks.check_duration_index", "madpy.checks.check_cc", "obspy.UTCDateTime", "madpy.checks.check_window", "madpy.checks.check_datagaps", "numpy.array", "madpy.checks.check_plottype", "unittest.main", "madpy.checks.check_fitting_window_end", "madpy.tests.testdata.config.Duration", "madpy.tests.testdata.config.Amplitude", "madpy.checks.check_coda" ]
[((6909, 6924), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6922, 6924), False, 'import unittest\n'), ((1949, 1979), 'obspy.read', 'obspy.read', (['"""testdata/*.mseed"""'], {}), "('testdata/*.mseed')\n", (1959, 1979), False, 'import obspy\n'), ((2506, 2536), 'obspy.read', 'obspy.read', (['"""testdata/*.mseed"""'], {}), "('testdata/*.mseed')\n", (2516, 2536), False, 'import obspy\n'), ((2827, 2857), 'obspy.read', 'obspy.read', (['"""testdata/*.mseed"""'], {}), "('testdata/*.mseed')\n", (2837, 2857), False, 'import obspy\n'), ((4165, 4185), 'numpy.arange', 'np.arange', (['(500)', '(5005)'], {}), '(500, 5005)\n', (4174, 4185), True, 'import numpy as np\n'), ((4909, 4938), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {'dtype': 'float'}), '(0, 10, dtype=float)\n', (4918, 4938), True, 'import numpy as np\n'), ((5135, 5237), 'numpy.array', 'np.array', (['[[0.1, 0.8, 0.5, 0.9], [0.9, 0.1, 0.8, 0.5], [0.5, 0.9, 0.1, 0.8], [0.8, \n 0.5, 0.9, 0.1]]'], {}), '([[0.1, 0.8, 0.5, 0.9], [0.9, 0.1, 0.8, 0.5], [0.5, 0.9, 0.1, 0.8],\n [0.8, 0.5, 0.9, 0.1]])\n', (5143, 5237), True, 'import numpy as np\n'), ((6170, 6187), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (6179, 6187), True, 'import numpy as np\n'), ((6201, 6218), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (6210, 6218), True, 'import numpy as np\n'), ((6232, 6262), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {'dtype': 'float'}), '(0, 100, dtype=float)\n', (6241, 6262), True, 'import numpy as np\n'), ((6276, 6306), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {'dtype': 'float'}), '(0, 100, dtype=float)\n', (6285, 6306), True, 'import numpy as np\n'), ((6519, 6540), 'madpy.checks.check_coda', 'ch.check_coda', (['x1', 'y1'], {}), '(x1, y1)\n', (6532, 6540), True, 'import madpy.checks as ch\n'), ((6754, 6775), 'madpy.checks.check_coda', 'ch.check_coda', (['x1', 'y2'], {}), '(x1, y2)\n', (6767, 6775), True, 'import madpy.checks as ch\n'), ((478, 493), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (491, 493), True, 'import madpy.tests.testdata.config as cfg\n'), ((633, 648), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (646, 648), True, 'import madpy.tests.testdata.config as cfg\n'), ((744, 759), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (757, 759), True, 'import madpy.tests.testdata.config as cfg\n'), ((903, 918), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (916, 918), True, 'import madpy.tests.testdata.config as cfg\n'), ((1067, 1082), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (1080, 1082), True, 'import madpy.tests.testdata.config as cfg\n'), ((1294, 1308), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1306, 1308), True, 'import madpy.tests.testdata.config as cfg\n'), ((1457, 1471), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1469, 1471), True, 'import madpy.tests.testdata.config as cfg\n'), ((1613, 1627), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1625, 1627), True, 'import madpy.tests.testdata.config as cfg\n'), ((1809, 1823), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1821, 1823), True, 'import madpy.tests.testdata.config as cfg\n'), ((2027, 2070), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:05:00.00"""'], {}), "('2020-10-10T13:05:00.00')\n", (2044, 2070), False, 'import obspy\n'), ((2387, 
2430), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:05:00.00"""'], {}), "('2020-10-10T13:05:00.00')\n", (2404, 2430), False, 'import obspy\n'), ((2904, 2947), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:05:00.00"""'], {}), "('2020-10-10T13:05:00.00')\n", (2921, 2947), False, 'import obspy\n'), ((2970, 3013), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:07:00.00"""'], {}), "('2020-10-10T13:07:00.00')\n", (2987, 3013), False, 'import obspy\n'), ((3109, 3152), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:04:00.00"""'], {}), "('2020-10-10T13:04:00.00')\n", (3126, 3152), False, 'import obspy\n'), ((3262, 3305), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:08:00.00"""'], {}), "('2020-10-10T13:08:00.00')\n", (3279, 3305), False, 'import obspy\n'), ((3482, 3505), 'madpy.checks.check_amplitude', 'ch.check_amplitude', (['(0.5)'], {}), '(0.5)\n', (3500, 3505), True, 'import madpy.checks as ch\n'), ((4246, 4297), 'madpy.checks.check_fitting_window_end', 'ch.check_fitting_window_end', (['i_end0', 'i_max0', 'dt', 'sp'], {}), '(i_end0, i_max0, dt, sp)\n', (4273, 4297), True, 'import madpy.checks as ch\n'), ((4608, 4635), 'madpy.checks.check_plottype', 'ch.check_plottype', (['"""linear"""'], {}), "('linear')\n", (4625, 4635), True, 'import madpy.checks as ch\n'), ((4663, 4687), 'madpy.checks.check_plottype', 'ch.check_plottype', (['"""log"""'], {}), "('log')\n", (4680, 4687), True, 'import madpy.checks as ch\n'), ((4965, 4995), 'madpy.checks.check_duration_index', 'ch.check_duration_index', (['cross'], {}), '(cross)\n', (4988, 4995), True, 'import madpy.checks as ch\n'), ((5318, 5339), 'madpy.checks.check_cc', 'ch.check_cc', (['cc', '(1)', '(2)'], {}), '(cc, 1, 2)\n', (5329, 5339), True, 'import madpy.checks as ch\n'), ((360, 375), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (373, 375), True, 'import madpy.tests.testdata.config as cfg\n'), ((1177, 1191), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1189, 1191), True, 'import madpy.tests.testdata.config as cfg\n'), ((2226, 2244), 'madpy.checks.check_stats', 'ch.check_stats', (['tr'], {}), '(tr)\n', (2240, 2244), True, 'import madpy.checks as ch\n'), ((2589, 2610), 'madpy.checks.check_datagaps', 'ch.check_datagaps', (['tr'], {}), '(tr)\n', (2606, 2610), True, 'import madpy.checks as ch\n'), ((3044, 3083), 'madpy.checks.check_window', 'ch.check_window', (['tr', 'starttime', 'endtime'], {}), '(tr, starttime, endtime)\n', (3059, 3083), True, 'import madpy.checks as ch\n')]
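A small sketch of the check_coda behaviour exercised by test_check_coda above
(assuming the madpy package under test is installed): samples that are NaN in the
dependent array are dropped from both returned arrays.

import numpy as np
import madpy.checks as ch

x = np.arange(0, 20, dtype=float)
y = np.arange(0, 20, dtype=float)
y[3:5] = np.nan
x2, y2 = ch.check_coda(x, y)
print(len(x2), len(y2))  # 18 18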
""" Plot gas, tar, char, water, and water vapor from primary and secondary reactions based on Blasi / Chan / Liden kinetic schemes for biomass pyrolysis. This combined scheme is referred to as the Cpc 2016 kinetic scheme. A similar scheme but without water reaction was proposed in Papadikis 2010 paper. References: Blasi, 1993. Combustion Science and Technology, 90, pp 315–340. <NAME>, Krieger, 1985. Fuel, 64, pp 1505–1513. <NAME>, Scott, 1988. Chem. Eng. Comm., 65, pp 207-221. <NAME>, 2010. Fuel Processing Technology, 91, pp 68–79. """ import numpy as np import matplotlib.pyplot as py # Parameters # ------------------------------------------------------------------------------ T = 773 # temperature for rate constants, K mc = 0.20 # moisture content as weight fraction, (-) dt = 0.01 # time step, delta t tmax = 25 # max time, s t = np.linspace(0, tmax, num=tmax/dt) # time vector nt = len(t) # total number of time steps # Function for Cpc 2016 Kinetic Scheme # ------------------------------------------------------------------------------ def cpc(wood, gas, tar, char, water, vapor, T, dt, s=1): """ Primary and secondary kinetic reactions for Cpc 2016 scheme based on Blasi 1993, Chan 1985, and Liden 1988 kinetics. Same scheme as presented in Papadikis 2010 but with the addition of the water reaction. Parameters ---------- wood = wood concentration, kg/m^3 gas = gas concentation, kg/m^3 tar = tar concentation, kg/m^3 char = char concentation, kg/m^3 water = water concentration based on moisture content, kg/m^3 vapor = water vapor concentration, kg/m^3 T = temperature, K dt = time step, s s = 1 primary reactions only, 2 primary and secondary reactions Returns ------- nwood = new wood concentration, kg/m^3 ngas = new gas concentration, kg/m^3 ntar = new tar concentration, kg/m^3 nchar = new char concentration, kg/m^3 nwater = new water concentration, kg/m^3 nvapor = new water vapor concentration, kg/m^3 """ # A = pre-factor (1/s) and E = activation energy (kJ/mol) A1 = 1.3e8; E1 = 140 # wood -> gas from Chan 1985 A2 = 2.0e8; E2 = 133 # wood -> tar from Chan 1985 A3 = 1.08e7; E3 = 121 # wood -> char from Chan 1985 A4 = 4.28e6; E4 = 107.5 # tar -> gas from Liden 1988 A5 = 1.0e6; E5 = 108 # tar -> char from Blasi 1993 Aw = 5.13e6; Ew = 87.9 # water -> water vapor from Chan 1985 R = 0.008314 # universal gas constant, kJ/mol*K # reaction rate constant for each reaction, 1/s K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char K4 = A4 * np.exp(-E4 / (R * T)) # tar -> gas K5 = A5 * np.exp(-E5 / (R * T)) # tar -> char Kw = Aw * np.exp(-Ew / (R * T)) # water -> vapor if s == 1: # primary reactions only rw = -(K1+K2+K3)*wood # wood rate rg = K1*wood # gas rate rt = K2*wood # tar rate rc = K3*wood # char rate rwt = -Kw*water # moisture content rate rwv = Kw*water # water vapor rate nwood = wood + rw*dt # update wood concentration ngas = gas + rg*dt # update gas concentration ntar = tar + rt*dt # update tar concentration nchar = char + rc*dt # update char concentration nwater = water + rwt*dt # update water concentration nvapor = vapor + rwv*dt # update water vapor concentation elif s == 2: # primary and secondary reactions rw = -(K1+K2+K3)*wood # wood rate rg = K1*wood + K4*tar # gas rate rt = K2*wood - (K4+K5)*tar # tar rate rc = K3*wood + K5*tar # char rate rwt = -Kw*water # moisture content rate rwv = Kw*water # water vapor rate nwood = wood + rw*dt # update wood concentration ngas = gas + rg*dt # update gas 
concentration ntar = tar + rt*dt # update tar concentration nchar = char + rc*dt # update char concentration nwater = water + rwt*dt # update water concentration nvapor = vapor + rwv*dt # update water vapor concentation # return new mass concentrations of products, kg/m^3 return nwood, ngas, ntar, nchar, nwater, nvapor # Products from Kinetic Scheme # ------------------------------------------------------------------------------ # store concentrations from primary reactions at each time step # concentrations calculated on a mass basis such as kg/m^3 wood = np.ones(nt) * (1-mc) # wood concentration vector gas = np.zeros(nt) # gas concentration vector tar = np.zeros(nt) # tar concentration vector char = np.zeros(nt) # char concentration vector water = np.ones(nt) * mc # water concentration vector vapor = np.zeros(nt) # water vapor concentration vector # products from primary reactions only for i in range(1, nt): wood[i], gas[i], tar[i], char[i], water[i], vapor[i] = cpc(wood[i-1], gas[i-1], tar[i-1], char[i-1], water[i-1], vapor[i-1], T, dt) # store concentrations from primary and secondary reactions at each time step # concentrations calculated on a mass basis such as kg/m^3 wood2 = np.ones(nt)*(1-mc) # wood concentration vector gas2 = np.zeros(nt) # gas concentration vector tar2 = np.zeros(nt) # tar concentration vector char2 = np.zeros(nt) # char concentration vector water2 = np.ones(nt)*mc # water concentration vector vapor2 = np.zeros(nt) # water vapor concentration vector # products from primary and secondary reactions only for i in range(1, nt): wood2[i], gas2[i], tar2[i], char2[i], water2[i], vapor2[i] = cpc(wood2[i-1], gas2[i-1], tar2[i-1], char2[i-1], water2[i-1], vapor2[i-1], T, dt, s=2) # Print Mass Balances # ------------------------------------------------------------------------------ # check mass balance at each time step tot1 = wood + gas + tar + char + water + vapor print('total mass fraction (primary) \n', tot1) tot2 = wood2 + gas2 + tar2 + char2 + water2 + vapor2 print('total mass fraction (pri+sec) \n', tot2) # Plot Results # ------------------------------------------------------------------------------ py.ion() py.close('all') py.figure(1) py.plot(t, wood, lw=2, label='wood') py.plot(t, gas, lw=2, label='gas') py.plot(t, tar, lw=2, label='tar') py.plot(t, char, lw=2, label='char') py.plot(t, water, lw=2, label='water') py.plot(t, vapor, lw=2, label='vapor') py.title('Cpc 2016 primary reactions at T = {} K'.format(T)) py.xlabel('Time (s)') py.ylabel('Concentration (m.f. basis)') py.legend(loc='best', numpoints=1, fontsize=12) py.grid() py.figure(2) py.plot(t, wood2, lw=2, label='wood') py.plot(t, gas2, lw=2, label='gas') py.plot(t, tar2, lw=2, label='tar') py.plot(t, char2, lw=2, label='char') py.plot(t, water2, lw=2, label='water') py.plot(t, vapor2, lw=2, label='vapor') py.title('Cpc 2016 primary and secondary reactions at T = {} K'.format(T)) py.xlabel('Time (s)') py.ylabel('Concentration (m.f. basis)') py.legend(loc='best', numpoints=1, fontsize=12) py.grid()
[ "matplotlib.pyplot.grid", "numpy.ones", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "numpy.exp", "numpy.linspace", "matplotlib.pyplot.figure", "numpy.zeros", "matplotlib.pyplot.ion", "matplotlib.pyplot.legend" ]
[((913, 948), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax'], {'num': '(tmax / dt)'}), '(0, tmax, num=tmax / dt)\n', (924, 948), True, 'import numpy as np\n'), ((4914, 4926), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (4922, 4926), True, 'import numpy as np\n'), ((4971, 4983), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (4979, 4983), True, 'import numpy as np\n'), ((5029, 5041), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5037, 5041), True, 'import numpy as np\n'), ((5147, 5159), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5155, 5159), True, 'import numpy as np\n'), ((5606, 5618), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5614, 5618), True, 'import numpy as np\n'), ((5663, 5675), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5671, 5675), True, 'import numpy as np\n'), ((5721, 5733), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5729, 5733), True, 'import numpy as np\n'), ((5839, 5851), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5847, 5851), True, 'import numpy as np\n'), ((6564, 6572), 'matplotlib.pyplot.ion', 'py.ion', ([], {}), '()\n', (6570, 6572), True, 'import matplotlib.pyplot as py\n'), ((6573, 6588), 'matplotlib.pyplot.close', 'py.close', (['"""all"""'], {}), "('all')\n", (6581, 6588), True, 'import matplotlib.pyplot as py\n'), ((6590, 6602), 'matplotlib.pyplot.figure', 'py.figure', (['(1)'], {}), '(1)\n', (6599, 6602), True, 'import matplotlib.pyplot as py\n'), ((6603, 6639), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'wood'], {'lw': '(2)', 'label': '"""wood"""'}), "(t, wood, lw=2, label='wood')\n", (6610, 6639), True, 'import matplotlib.pyplot as py\n'), ((6640, 6674), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'gas'], {'lw': '(2)', 'label': '"""gas"""'}), "(t, gas, lw=2, label='gas')\n", (6647, 6674), True, 'import matplotlib.pyplot as py\n'), ((6675, 6709), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'tar'], {'lw': '(2)', 'label': '"""tar"""'}), "(t, tar, lw=2, label='tar')\n", (6682, 6709), True, 'import matplotlib.pyplot as py\n'), ((6710, 6746), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'char'], {'lw': '(2)', 'label': '"""char"""'}), "(t, char, lw=2, label='char')\n", (6717, 6746), True, 'import matplotlib.pyplot as py\n'), ((6747, 6785), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'water'], {'lw': '(2)', 'label': '"""water"""'}), "(t, water, lw=2, label='water')\n", (6754, 6785), True, 'import matplotlib.pyplot as py\n'), ((6786, 6824), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'vapor'], {'lw': '(2)', 'label': '"""vapor"""'}), "(t, vapor, lw=2, label='vapor')\n", (6793, 6824), True, 'import matplotlib.pyplot as py\n'), ((6886, 6907), 'matplotlib.pyplot.xlabel', 'py.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (6895, 6907), True, 'import matplotlib.pyplot as py\n'), ((6908, 6947), 'matplotlib.pyplot.ylabel', 'py.ylabel', (['"""Concentration (m.f. basis)"""'], {}), "('Concentration (m.f. 
basis)')\n", (6917, 6947), True, 'import matplotlib.pyplot as py\n'), ((6948, 6995), 'matplotlib.pyplot.legend', 'py.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fontsize': '(12)'}), "(loc='best', numpoints=1, fontsize=12)\n", (6957, 6995), True, 'import matplotlib.pyplot as py\n'), ((6996, 7005), 'matplotlib.pyplot.grid', 'py.grid', ([], {}), '()\n', (7003, 7005), True, 'import matplotlib.pyplot as py\n'), ((7007, 7019), 'matplotlib.pyplot.figure', 'py.figure', (['(2)'], {}), '(2)\n', (7016, 7019), True, 'import matplotlib.pyplot as py\n'), ((7020, 7057), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'wood2'], {'lw': '(2)', 'label': '"""wood"""'}), "(t, wood2, lw=2, label='wood')\n", (7027, 7057), True, 'import matplotlib.pyplot as py\n'), ((7058, 7093), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'gas2'], {'lw': '(2)', 'label': '"""gas"""'}), "(t, gas2, lw=2, label='gas')\n", (7065, 7093), True, 'import matplotlib.pyplot as py\n'), ((7094, 7129), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'tar2'], {'lw': '(2)', 'label': '"""tar"""'}), "(t, tar2, lw=2, label='tar')\n", (7101, 7129), True, 'import matplotlib.pyplot as py\n'), ((7130, 7167), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'char2'], {'lw': '(2)', 'label': '"""char"""'}), "(t, char2, lw=2, label='char')\n", (7137, 7167), True, 'import matplotlib.pyplot as py\n'), ((7168, 7207), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'water2'], {'lw': '(2)', 'label': '"""water"""'}), "(t, water2, lw=2, label='water')\n", (7175, 7207), True, 'import matplotlib.pyplot as py\n'), ((7208, 7247), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'vapor2'], {'lw': '(2)', 'label': '"""vapor"""'}), "(t, vapor2, lw=2, label='vapor')\n", (7215, 7247), True, 'import matplotlib.pyplot as py\n'), ((7323, 7344), 'matplotlib.pyplot.xlabel', 'py.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (7332, 7344), True, 'import matplotlib.pyplot as py\n'), ((7345, 7384), 'matplotlib.pyplot.ylabel', 'py.ylabel', (['"""Concentration (m.f. basis)"""'], {}), "('Concentration (m.f. 
basis)')\n", (7354, 7384), True, 'import matplotlib.pyplot as py\n'), ((7385, 7432), 'matplotlib.pyplot.legend', 'py.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fontsize': '(12)'}), "(loc='best', numpoints=1, fontsize=12)\n", (7394, 7432), True, 'import matplotlib.pyplot as py\n'), ((7433, 7442), 'matplotlib.pyplot.grid', 'py.grid', ([], {}), '()\n', (7440, 7442), True, 'import matplotlib.pyplot as py\n'), ((4857, 4868), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (4864, 4868), True, 'import numpy as np\n'), ((5088, 5099), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (5095, 5099), True, 'import numpy as np\n'), ((5549, 5560), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (5556, 5560), True, 'import numpy as np\n'), ((5780, 5791), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (5787, 5791), True, 'import numpy as np\n'), ((2706, 2727), 'numpy.exp', 'np.exp', (['(-E1 / (R * T))'], {}), '(-E1 / (R * T))\n', (2712, 2727), True, 'import numpy as np\n'), ((2758, 2779), 'numpy.exp', 'np.exp', (['(-E2 / (R * T))'], {}), '(-E2 / (R * T))\n', (2764, 2779), True, 'import numpy as np\n'), ((2810, 2831), 'numpy.exp', 'np.exp', (['(-E3 / (R * T))'], {}), '(-E3 / (R * T))\n', (2816, 2831), True, 'import numpy as np\n'), ((2863, 2884), 'numpy.exp', 'np.exp', (['(-E4 / (R * T))'], {}), '(-E4 / (R * T))\n', (2869, 2884), True, 'import numpy as np\n'), ((2914, 2935), 'numpy.exp', 'np.exp', (['(-E5 / (R * T))'], {}), '(-E5 / (R * T))\n', (2920, 2935), True, 'import numpy as np\n'), ((2966, 2987), 'numpy.exp', 'np.exp', (['(-Ew / (R * T))'], {}), '(-Ew / (R * T))\n', (2972, 2987), True, 'import numpy as np\n')]
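A quick sanity sketch for the cpc() scheme above: one explicit-Euler step of the
primary-only reactions starting from dry wood. The step should conserve total mass
to within floating-point error.

step = cpc(1.0, 0.0, 0.0, 0.0, 0.0, 0.0, T=773, dt=0.01, s=1)
print(sum(step))  # ~1.0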
'''
original implementation credit: https://github.com/openai/baselines
heavily adapted to suit our needs.
'''
import argparse
import tempfile
import os.path as osp
import gym
import logging
from tqdm import tqdm

import tensorflow as tf
import numpy as np
import os
import sys
import glob

file_path = os.path.dirname(os.path.realpath(__file__))
src_path = os.path.abspath(os.path.join(file_path, os.pardir))
root_path = os.path.abspath(os.path.join(src_path, os.pardir))
sys.path.insert(0, src_path)

from contrib.baselines.gail import mlp_policy
from contrib.baselines import bench
from contrib.baselines import logger
from contrib.baselines.common import set_global_seeds, tf_util as U
from contrib.baselines.common.misc_util import boolean_flag
from contrib.baselines.common.mpi_adam import MpiAdam

from core.data_util import GymDataset, SepsisDataset
from core.run_gym import run_gym


def learn_original(pi,
                   dataset,
                   env_name,
                   n_action,
                   prefix,
                   traj_lim,
                   seed,
                   optim_batch_size=128,
                   max_iters=5e3,
                   adam_epsilon=1e-4,
                   optim_stepsize=1e-4,
                   ckpt_dir=None,
                   plot_dir=None,
                   task_name=None,
                   verbose=False):
    """
    learn without regularization
    """
    # custom hyperparams
    seed = 0
    max_iters = 5e4
    val_per_iter = int(max_iters / 10)

    # placeholder
    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])
    stochastic = U.get_placeholder_cached(name="stochastic")

    loss = tf.reduce_mean(tf.square(tf.to_float(ac - pi.ac)))
    var_list = pi.get_trainable_variables()
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function([ob, ac, stochastic], [loss] + [U.flatgrad(loss, var_list)])

    U.initialize()
    adam.sync()
    logger.log("Training a policy with Behavior Cloning")
    logger.log("with {} trajs, {} steps".format(dataset.num_traj, dataset.num_transition))

    loss_history = {}
    loss_history["train_action_loss"] = []
    loss_history["val_action_loss"] = []

    for iter_so_far in tqdm(range(int(max_iters))):
        ob_expert, ac_expert, _, _ = dataset.get_next_batch(optim_batch_size, 'train')
        train_loss, g = lossandgrad(ob_expert, ac_expert, True)
        adam.update(g, optim_stepsize)

        if verbose and iter_so_far % val_per_iter == 0:
            ob_expert, ac_expert, _, _ = dataset.get_next_batch(-1, 'val')
            val_loss, _ = lossandgrad(ob_expert, ac_expert, True)
            logger.log("Training loss: {}, Validation loss: {}".format(train_loss, val_loss))

            loss_history["train_action_loss"].append(train_loss)
            loss_history["val_action_loss"].append(val_loss)

    plot(env_name, loss_history, traj_lim, plot_dir)

    os.makedirs(ckpt_dir, exist_ok=True)
    if ckpt_dir is None:
        savedir_fname = tempfile.TemporaryDirectory().name
    else:
        ckpt_fname = "ckpt.bc.{}.{}".format(traj_lim, seed)
        savedir_fname = osp.join(ckpt_dir, ckpt_fname)
    U.save_state(savedir_fname, var_list=pi.get_variables())
    return savedir_fname


def learn(network,
          dataset,
          env_name,
          n_action,
          prefix,
          traj_lim,
          seed,
          optim_batch_size=32,
          max_iters=1e4,
          adam_epsilon=1e-4,
          optim_stepsize=3e-4,
          ckpt_dir=None,
          plot_dir=None,
          task_name=None,
          verbose=False):
    """
    learn with regularization
    """
    seed = 0
    alpha = 0.7
    beta = 1.0

    pi = network.pi
    T = network.T
    val_per_iter = int(max_iters / 20)

    ob = U.get_placeholder_cached(name="ob")
    T_ac = U.get_placeholder_cached(name="T_ac")
    pi_stochastic = U.get_placeholder_cached(name="pi_stochastic")
    T_stochastic = U.get_placeholder_cached(name="T_stochastic")
    ac = network.pdtype.sample_placeholder([None])
    ob_next = network.ob_next_pdtype.sample_placeholder([None])

    onehot_ac = tf.one_hot(ac, depth=n_action)
    ce_loss = tf.losses.softmax_cross_entropy(logits=pi.logits, onehot_labels=onehot_ac)
    ce_loss = tf.reduce_mean(ce_loss)

    reg_loss = tf.reduce_mean(tf.square(tf.to_float(ob_next - network.ob_next)))

    losses = [ce_loss, reg_loss]
    total_loss = alpha * ce_loss + beta * reg_loss

    var_list = network.get_trainable_variables()
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function([ob, ac, T_ac, ob_next, pi_stochastic, T_stochastic],
                            losses + [U.flatgrad(total_loss, var_list)])

    U.initialize()
    adam.sync()
    logger.log("Training a policy with Behavior Cloning")
    logger.log("with {} trajs, {} steps".format(dataset.num_traj, dataset.num_transition))

    loss_history = {}
    loss_history["train_action_loss"] = []
    loss_history["train_transition_loss"] = []
    loss_history["val_action_loss"] = []
    loss_history["val_transition_loss"] = []

    for iter_so_far in tqdm(range(int(max_iters))):
        # ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size, 'train')
        ob_expert, ac_expert, ob_next_expert, info = dataset.get_next_batch(optim_batch_size, 'train')

        train_loss_ce, train_loss_reg, g = lossandgrad(ob_expert, ac_expert, ac_expert,
                                                       ob_next_expert, True, True)
        adam.update(g, optim_stepsize)

        if verbose and iter_so_far % val_per_iter == 0:
            # ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')
            ob_expert, ac_expert, ob_next_expert, info = dataset.get_next_batch(-1, 'val')
            val_loss_ce, val_loss_reg, _ = lossandgrad(ob_expert, ac_expert, ac_expert,
                                                       ob_next_expert, True, True)
            items = [train_loss_ce, train_loss_reg, val_loss_ce, val_loss_reg]
            logger.log("Training Action loss: {}\n" \
                       "Training Transition loss: {}\n" \
                       "Validation Action loss: {}\n" \
                       "Validation Transition Loss:{}\n".format(*items))

            loss_history["train_action_loss"].append(train_loss_ce)
            loss_history["train_transition_loss"].append(train_loss_reg)
            loss_history["val_action_loss"].append(val_loss_ce)
            loss_history["val_transition_loss"].append(val_loss_reg)

            # if len(loss_history["val_action_loss"]) > 1:
            #     val_loss_ce_delta = loss_history["val_action_loss"][-1] - val_loss_ce
            #     if np.abs(val_loss_ce_delta) < val_stop_threshold:
            #         logger.log("validation error seems to have converged.")
            #         break

    plot(env_name, loss_history, traj_lim, plot_dir)

    os.makedirs(ckpt_dir, exist_ok=True)
    if ckpt_dir is None:
        savedir_fname = tempfile.TemporaryDirectory().name
    else:
        ckpt_fname = "ckpt.bc.{}.{}".format(traj_lim, seed)
        savedir_fname = osp.join(ckpt_dir, ckpt_fname)
    U.save_state(savedir_fname, var_list=network.get_variables())
    return savedir_fname


def plot(env_name, loss, traj_lim, save_path):
    """Plot the pretraining loss curves collected during behavior cloning."""
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-whitegrid')

    num_data = len(loss["train_action_loss"])
    # plt.ylim([0, 0.1])
    plt.ylabel('loss')
    plt.title('pretraining loss for {}'.format(env_name))
    plt.plot(np.arange(num_data), loss["train_action_loss"], c="r", linestyle="--")
    plt.plot(np.arange(num_data), loss["val_action_loss"], c="r")
    if "train_transition_loss" in loss:
        plt.plot(np.arange(num_data), loss["train_transition_loss"], c="b", linestyle="--")
        plt.plot(np.arange(num_data), loss["val_transition_loss"], c="b")
        plt.legend(['train_action', 'train_transition', 'val_action', 'val_transition'], loc='best')
    plt.legend(['train_action', 'val_action'], loc='best')
    plt.savefig(os.path.join(save_path, "loss.{}.{}.png".format(env_name, traj_lim)), format="png")
    plt.close()


def train_bc(task, params, ob_space, ac_space, args, env):
    task_path = os.path.join(root_path, "task", args.task)
    plot_path = os.path.join(task_path, "result")

    dataset = GymDataset(expert_path=args.expert_path, traj_limitation=args.traj_limitation)

    U.make_session(num_cpu=1).__enter__()
    set_global_seeds(args.seed)

    def policy_fn(name, ob_space, ac_space, reuse=False):
        return mlp_policy.MlpPolicy(name=name,
                                    ob_space=ob_space,
                                    ac_space=ac_space,
                                    reuse=reuse,
                                    hid_size_phi=args.policy_hidden_size,
                                    num_hid_layers_phi=2,
                                    dim_phi=args.dim_phi)

    env_name = task["env_id"]
    name = "pi.{}.{}".format(env_name.lower().split("-")[0], args.traj_limitation)
    pi = policy_fn(name, ob_space, ac_space)
    n_action = env.action_space.n

    fname = "ckpt.bc.{}.{}".format(args.traj_limitation, args.seed)
    savedir_fname = osp.join(args.checkpoint_dir, fname, fname)
    if not os.path.exists(savedir_fname + ".index"):
        savedir_fname = learn(pi,
                              dataset,
                              env_name,
                              n_action,
                              prefix="bc",
                              seed=args.seed,
                              traj_lim=args.traj_limitation,
                              max_iters=args.BC_max_iter,
                              ckpt_dir=osp.join(args.checkpoint_dir, fname),
                              plot_dir=plot_path,
                              task_name=task["env_id"],
                              verbose=True)
    logger.log(savedir_fname + "saved")

    # avg_len, avg_ret = run_gym(env,
    #                            policy_fn,
    #                            savedir_fname,
    #                            timesteps_per_batch=args.horizon,
    #                            number_trajs=10,
    #                            stochastic_policy=args.stochastic_policy,
    #                            save=args.save_sample,
    #                            reuse=True)
    #
    # return savedir_fname


def train_bc_sepsis(task, params, ob_space, ac_space, args):
    task_path = os.path.join(root_path, "task", args.task)
    plot_path = os.path.join(task_path, "result")

    dataset = SepsisDataset(expert_path=args.expert_path, traj_limitation=args.traj_limitation)

    U.make_session(num_cpu=1).__enter__()
    set_global_seeds(args.seed)

    # just im
    # def policy_fn(name, ob_space, ac_space, reuse=False):
    #     return mlp_policy.MlpPolicyOriginal(name=name,
    #                                          ob_space=ob_space,
    #                                          ac_space=ac_space,
    #                                          reuse=reuse,
    #                                          hid_size=args.policy_hidden_size,
    #                                          num_hid_layers=2)

    # im + reg
    def policy_fn(name, ob_space, ac_space, reuse=False):
        return mlp_policy.MlpPolicy(name=name,
                                    ob_space=ob_space,
                                    ac_space=ac_space,
                                    reuse=reuse,
                                    hid_size_phi=args.policy_hidden_size,
                                    num_hid_layers_phi=2,
                                    dim_phi=args.dim_phi)

    env_name = task["env_id"]
    name = "pi.{}.{}".format(env_name.lower().split("-")[0], args.traj_limitation)
    pi = policy_fn(name, ob_space, ac_space)
    n_action = ac_space.n

    fname = "ckpt.bc.{}.{}".format(args.traj_limitation, args.seed)
    savedir_fname = osp.join(args.checkpoint_dir, fname, fname)
    if not os.path.exists(savedir_fname + ".index"):
        # savedir_fname = learn_original(pi,
        #                                dataset,
        #                                env_name,
        #                                n_action,
        #                                prefix="bc",
        #                                seed=args.seed,
        #                                traj_lim=args.traj_limitation,
        #                                max_iters=args.BC_max_iter,
        #                                ckpt_dir=osp.join(args.checkpoint_dir, fname),
        #                                plot_dir=plot_path,
        #                                task_name=task["env_id"],
        #                                verbose=True)
        savedir_fname = learn(pi,
                              dataset,
                              env_name,
                              n_action,
                              prefix="bc",
                              seed=args.seed,
                              traj_lim=args.traj_limitation,
                              max_iters=args.BC_max_iter,
                              ckpt_dir=osp.join(args.checkpoint_dir, fname),
                              plot_dir=plot_path,
                              task_name=task["env_id"],
                              verbose=True)
    logger.log(savedir_fname + "saved")

    # avg_len, avg_ret = run_gym(env,
    #                            policy_fn,
    #                            savedir_fname,
    #                            timesteps_per_batch=args.horizon,
    #                            number_trajs=10,
    #                            stochastic_policy=args.stochastic_policy,
    #                            save=args.save_sample,
    #                            reuse=True)

    return savedir_fname
[ "sys.path.insert", "matplotlib.pyplot.ylabel", "tensorflow.reduce_mean", "numpy.arange", "contrib.baselines.gail.mlp_policy.MlpPolicy", "os.path.exists", "contrib.baselines.common.set_global_seeds", "matplotlib.pyplot.style.use", "matplotlib.pyplot.close", "contrib.baselines.common.tf_util.flatgrad", "tensorflow.one_hot", "contrib.baselines.common.mpi_adam.MpiAdam", "contrib.baselines.logger.log", "core.data_util.SepsisDataset", "contrib.baselines.common.tf_util.get_placeholder_cached", "matplotlib.pyplot.legend", "contrib.baselines.common.tf_util.initialize", "tempfile.TemporaryDirectory", "os.makedirs", "tensorflow.to_float", "contrib.baselines.common.tf_util.make_session", "tensorflow.losses.softmax_cross_entropy", "os.path.join", "os.path.realpath", "core.data_util.GymDataset" ]
[((476, 504), 'sys.path.insert', 'sys.path.insert', (['(0)', 'src_path'], {}), '(0, src_path)\n', (491, 504), False, 'import sys\n'), ((322, 348), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (338, 348), False, 'import os\n'), ((377, 411), 'os.path.join', 'os.path.join', (['file_path', 'os.pardir'], {}), '(file_path, os.pardir)\n', (389, 411), False, 'import os\n'), ((441, 474), 'os.path.join', 'os.path.join', (['src_path', 'os.pardir'], {}), '(src_path, os.pardir)\n', (453, 474), False, 'import os\n'), ((1323, 1358), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""ob"""'}), "(name='ob')\n", (1347, 1358), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1422, 1465), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""stochastic"""'}), "(name='stochastic')\n", (1446, 1465), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1581, 1620), 'contrib.baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['var_list'], {'epsilon': 'adam_epsilon'}), '(var_list, epsilon=adam_epsilon)\n', (1588, 1620), False, 'from contrib.baselines.common.mpi_adam import MpiAdam\n'), ((1714, 1728), 'contrib.baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (1726, 1728), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1749, 1802), 'contrib.baselines.logger.log', 'logger.log', (['"""Training a policy with Behavior Cloning"""'], {}), "('Training a policy with Behavior Cloning')\n", (1759, 1802), False, 'from contrib.baselines import logger\n'), ((2723, 2759), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {'exist_ok': '(True)'}), '(ckpt_dir, exist_ok=True)\n', (2734, 2759), False, 'import os\n'), ((3484, 3519), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""ob"""'}), "(name='ob')\n", (3508, 3519), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3531, 3568), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""T_ac"""'}), "(name='T_ac')\n", (3555, 3568), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3589, 3635), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""pi_stochastic"""'}), "(name='pi_stochastic')\n", (3613, 3635), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3655, 3700), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""T_stochastic"""'}), "(name='T_stochastic')\n", (3679, 3700), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3834, 3864), 'tensorflow.one_hot', 'tf.one_hot', (['ac'], {'depth': 'n_action'}), '(ac, depth=n_action)\n', (3844, 3864), True, 'import tensorflow as tf\n'), ((3879, 3953), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', ([], {'logits': 'pi.logits', 'onehot_labels': 'onehot_ac'}), '(logits=pi.logits, onehot_labels=onehot_ac)\n', (3910, 3953), True, 'import tensorflow as tf\n'), ((3981, 4004), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ce_loss'], {}), '(ce_loss)\n', (3995, 4004), True, 'import tensorflow as tf\n'), ((4232, 4271), 'contrib.baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['var_list'], {'epsilon': 'adam_epsilon'}), '(var_list, 
epsilon=adam_epsilon)\n', (4239, 4271), False, 'from contrib.baselines.common.mpi_adam import MpiAdam\n'), ((4416, 4430), 'contrib.baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (4428, 4430), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((4451, 4504), 'contrib.baselines.logger.log', 'logger.log', (['"""Training a policy with Behavior Cloning"""'], {}), "('Training a policy with Behavior Cloning')\n", (4461, 4504), False, 'from contrib.baselines import logger\n'), ((6496, 6532), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {'exist_ok': '(True)'}), '(ckpt_dir, exist_ok=True)\n', (6507, 6532), False, 'import os\n'), ((7045, 7079), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (7058, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7153, 7171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (7163, 7171), True, 'import matplotlib.pyplot as plt\n'), ((7699, 7753), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_action', 'val_action']"], {'loc': '"""best"""'}), "(['train_action', 'val_action'], loc='best')\n", (7709, 7753), True, 'import matplotlib.pyplot as plt\n'), ((7866, 7877), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7875, 7877), True, 'import matplotlib.pyplot as plt\n'), ((7955, 7997), 'os.path.join', 'os.path.join', (['root_path', '"""task"""', 'args.task'], {}), "(root_path, 'task', args.task)\n", (7967, 7997), False, 'import os\n'), ((8014, 8047), 'os.path.join', 'os.path.join', (['task_path', '"""result"""'], {}), "(task_path, 'result')\n", (8026, 8047), False, 'import os\n'), ((8063, 8141), 'core.data_util.GymDataset', 'GymDataset', ([], {'expert_path': 'args.expert_path', 'traj_limitation': 'args.traj_limitation'}), '(expert_path=args.expert_path, traj_limitation=args.traj_limitation)\n', (8073, 8141), False, 'from core.data_util import GymDataset, SepsisDataset\n'), ((8212, 8239), 'contrib.baselines.common.set_global_seeds', 'set_global_seeds', (['args.seed'], {}), '(args.seed)\n', (8228, 8239), False, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((8979, 9022), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname', 'fname'], {}), '(args.checkpoint_dir, fname, fname)\n', (8987, 9022), True, 'import os.path as osp\n'), ((10235, 10277), 'os.path.join', 'os.path.join', (['root_path', '"""task"""', 'args.task'], {}), "(root_path, 'task', args.task)\n", (10247, 10277), False, 'import os\n'), ((10294, 10327), 'os.path.join', 'os.path.join', (['task_path', '"""result"""'], {}), "(task_path, 'result')\n", (10306, 10327), False, 'import os\n'), ((10343, 10429), 'core.data_util.SepsisDataset', 'SepsisDataset', ([], {'expert_path': 'args.expert_path', 'traj_limitation': 'args.traj_limitation'}), '(expert_path=args.expert_path, traj_limitation=args.\n traj_limitation)\n', (10356, 10429), False, 'from core.data_util import GymDataset, SepsisDataset\n'), ((10501, 10528), 'contrib.baselines.common.set_global_seeds', 'set_global_seeds', (['args.seed'], {}), '(args.seed)\n', (10517, 10528), False, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((11698, 11741), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname', 'fname'], {}), '(args.checkpoint_dir, fname, fname)\n', (11706, 11741), True, 'import os.path as osp\n'), ((2938, 2968), 'os.path.join', 'osp.join', (['ckpt_dir', 'ckpt_fname'], {}), '(ckpt_dir, ckpt_fname)\n', (2946, 2968), True, 'import 
os.path as osp\n'), ((6711, 6741), 'os.path.join', 'osp.join', (['ckpt_dir', 'ckpt_fname'], {}), '(ckpt_dir, ckpt_fname)\n', (6719, 6741), True, 'import os.path as osp\n'), ((7243, 7262), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7252, 7262), True, 'import numpy as np\n'), ((7335, 7354), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7344, 7354), True, 'import numpy as np\n'), ((7602, 7698), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_action', 'train_transition', 'val_action', 'val_transition']"], {'loc': '"""best"""'}), "(['train_action', 'train_transition', 'val_action',\n 'val_transition'], loc='best')\n", (7612, 7698), True, 'import matplotlib.pyplot as plt\n'), ((8314, 8487), 'contrib.baselines.gail.mlp_policy.MlpPolicy', 'mlp_policy.MlpPolicy', ([], {'name': 'name', 'ob_space': 'ob_space', 'ac_space': 'ac_space', 'reuse': 'reuse', 'hid_size_phi': 'args.policy_hidden_size', 'num_hid_layers_phi': '(2)', 'dim_phi': 'args.dim_phi'}), '(name=name, ob_space=ob_space, ac_space=ac_space, reuse\n =reuse, hid_size_phi=args.policy_hidden_size, num_hid_layers_phi=2,\n dim_phi=args.dim_phi)\n', (8334, 8487), False, 'from contrib.baselines.gail import mlp_policy\n'), ((9035, 9075), 'os.path.exists', 'os.path.exists', (["(savedir_fname + '.index')"], {}), "(savedir_fname + '.index')\n", (9049, 9075), False, 'import os\n'), ((9673, 9708), 'contrib.baselines.logger.log', 'logger.log', (["(savedir_fname + 'saved')"], {}), "(savedir_fname + 'saved')\n", (9683, 9708), False, 'from contrib.baselines import logger\n'), ((11038, 11211), 'contrib.baselines.gail.mlp_policy.MlpPolicy', 'mlp_policy.MlpPolicy', ([], {'name': 'name', 'ob_space': 'ob_space', 'ac_space': 'ac_space', 'reuse': 'reuse', 'hid_size_phi': 'args.policy_hidden_size', 'num_hid_layers_phi': '(2)', 'dim_phi': 'args.dim_phi'}), '(name=name, ob_space=ob_space, ac_space=ac_space, reuse\n =reuse, hid_size_phi=args.policy_hidden_size, num_hid_layers_phi=2,\n dim_phi=args.dim_phi)\n', (11058, 11211), False, 'from contrib.baselines.gail import mlp_policy\n'), ((11754, 11794), 'os.path.exists', 'os.path.exists', (["(savedir_fname + '.index')"], {}), "(savedir_fname + '.index')\n", (11768, 11794), False, 'import os\n'), ((13003, 13038), 'contrib.baselines.logger.log', 'logger.log', (["(savedir_fname + 'saved')"], {}), "(savedir_fname + 'saved')\n", (13013, 13038), False, 'from contrib.baselines import logger\n'), ((1502, 1525), 'tensorflow.to_float', 'tf.to_float', (['(ac - pi.ac)'], {}), '(ac - pi.ac)\n', (1513, 1525), True, 'import tensorflow as tf\n'), ((2809, 2838), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2836, 2838), False, 'import tempfile\n'), ((4046, 4084), 'tensorflow.to_float', 'tf.to_float', (['(ob_next - network.ob_next)'], {}), '(ob_next - network.ob_next)\n', (4057, 4084), True, 'import tensorflow as tf\n'), ((6582, 6611), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6609, 6611), False, 'import tempfile\n'), ((7445, 7464), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7454, 7464), True, 'import numpy as np\n'), ((7537, 7556), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7546, 7556), True, 'import numpy as np\n'), ((8170, 8195), 'contrib.baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': '(1)'}), '(num_cpu=1)\n', (8184, 8195), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((10459, 10484), 
'contrib.baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': '(1)'}), '(num_cpu=1)\n', (10473, 10484), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1680, 1706), 'contrib.baselines.common.tf_util.flatgrad', 'U.flatgrad', (['loss', 'var_list'], {}), '(loss, var_list)\n', (1690, 1706), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((4376, 4408), 'contrib.baselines.common.tf_util.flatgrad', 'U.flatgrad', (['total_loss', 'var_list'], {}), '(total_loss, var_list)\n', (4386, 4408), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((9477, 9513), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname'], {}), '(args.checkpoint_dir, fname)\n', (9485, 9513), True, 'import os.path as osp\n'), ((12807, 12843), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname'], {}), '(args.checkpoint_dir, fname)\n', (12815, 12843), True, 'import os.path as osp\n')]
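A minimal sketch of the regularized behavior-cloning objective that `learn` above builds — cross-entropy on expert actions plus an MSE penalty on the predicted next observation, total = alpha * ce + beta * reg. The placeholder names and shapes here are made up for illustration; only the loss structure mirrors the record.

import tensorflow as tf

n_action, obs_dim = 5, 8
logits = tf.placeholder(tf.float32, [None, n_action])        # policy action logits
ac = tf.placeholder(tf.int32, [None])                     # expert actions
ob_next_pred = tf.placeholder(tf.float32, [None, obs_dim])  # predicted next observation
ob_next = tf.placeholder(tf.float32, [None, obs_dim])       # expert next observation

ce = tf.reduce_mean(
    tf.losses.softmax_cross_entropy(logits=logits,
                                    onehot_labels=tf.one_hot(ac, depth=n_action)))
reg = tf.reduce_mean(tf.square(ob_next_pred - ob_next))
alpha, beta = 0.7, 1.0        # same weighting as in `learn` above
total_loss = alpha * ce + beta * reg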
import glob
import numpy as np
import sys


def group_memory_footprint(memory_list, th_size):
    # group similar consecutive entries in the memory list (ts, size)
    ts = 0
    size_list = [memory_list[0][1]]
    group_footprint = []
    for i in range(1, len(memory_list)):
        if abs(memory_list[i][1] - memory_list[i-1][1]) > th_size:
            if len(group_footprint) == 0 or \
                    abs(np.mean(size_list) - group_footprint[-1][1]) > th_size:
                group_footprint.append([ts, np.mean(size_list)])
            ts = memory_list[i][0]
            size_list = []
        size_list.append(memory_list[i][1])
    return group_footprint


def pad_array(a, n):
    if len(a) >= n:
        return a
    pad_a = np.zeros((n, 2))
    if len(a) > 0:
        pad_a[:len(a), :] = a
    return pad_a


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: %s memory_file_prefix [output_file]" % (sys.argv[0]))
        exit()

    memory_log = []
    for file_name in glob.glob(sys.argv[1] + "*"):
        print(file_name)
        data = np.loadtxt(file_name, delimiter=',')
        maxlen = max(len(data), len(memory_log))
        memory_log = pad_array(memory_log, maxlen)
        data = pad_array(data, maxlen)
        memory_log = np.maximum(data, memory_log)

    # threshold of 200 MB
    memory_log = group_memory_footprint(memory_log, 204800)
    # store data in MB format
    memory_log = np.array([[i[0], i[1]/1024] for i in memory_log])

    outfile = "out_memory.log"
    if len(sys.argv) == 3:
        outfile = sys.argv[2]
    np.savetxt(outfile, memory_log, delimiter=',', fmt='%.2f')
[ "numpy.mean", "numpy.array", "numpy.zeros", "numpy.savetxt", "numpy.maximum", "numpy.loadtxt", "glob.glob" ]
[((742, 758), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (750, 758), True, 'import numpy as np\n'), ((1011, 1039), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '*')"], {}), "(sys.argv[1] + '*')\n", (1020, 1039), False, 'import glob\n'), ((1448, 1499), 'numpy.array', 'np.array', (['[[i[0], i[1] / 1024] for i in memory_log]'], {}), '([[i[0], i[1] / 1024] for i in memory_log])\n', (1456, 1499), True, 'import numpy as np\n'), ((1590, 1648), 'numpy.savetxt', 'np.savetxt', (['outfile', 'memory_log'], {'delimiter': '""","""', 'fmt': '"""%.2f"""'}), "(outfile, memory_log, delimiter=',', fmt='%.2f')\n", (1600, 1648), True, 'import numpy as np\n'), ((1079, 1115), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {'delimiter': '""","""'}), "(file_name, delimiter=',')\n", (1089, 1115), True, 'import numpy as np\n'), ((1276, 1304), 'numpy.maximum', 'np.maximum', (['data', 'memory_log'], {}), '(data, memory_log)\n', (1286, 1304), True, 'import numpy as np\n'), ((509, 527), 'numpy.mean', 'np.mean', (['size_list'], {}), '(size_list)\n', (516, 527), True, 'import numpy as np\n'), ((409, 427), 'numpy.mean', 'np.mean', (['size_list'], {}), '(size_list)\n', (416, 427), True, 'import numpy as np\n')]
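A hedged usage sketch of the two helpers above on synthetic data (the values are made up): entries are (timestamp, size-in-KB) pairs, and the 204800 threshold is the same 200 MB used by the script.

import numpy as np

memory_log = np.array([[0, 100.0], [1, 102.0],
                       [2, 300000.0], [3, 300500.0],
                       [4, 100.0]])
grouped = group_memory_footprint(memory_log, 204800)   # -> [[0, 101.0], [2, 300250.0]]
padded = pad_array(np.array(grouped), 4)              # zero-padded to 4 rows of (ts, size)
print(grouped, padded.shape)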
"""Test for hydromt.gis_utils submodule""" import pytest import numpy as np from hydromt import gis_utils as gu from hydromt.raster import full_from_transform, RasterDataArray from rasterio.transform import from_origin def test_crs(): bbox = [3, 51.5, 4, 52] # NL assert gu.utm_crs(bbox).to_epsg() == 32631 assert gu.parse_crs("utm", bbox).to_epsg() == 32631 bbox1 = [-77.5, -12.2, -77.0, -12.0] assert gu.utm_crs(bbox1).to_epsg() == 32718 _, _, xattrs, yattrs = gu.axes_attrs(gu.parse_crs(4326)) assert xattrs["units"] == "degrees_east" assert yattrs["units"] == "degrees_north" _, _, xattrs, yattrs = gu.axes_attrs(gu.utm_crs(bbox1)) assert xattrs["units"] == yattrs["units"] == "m" def test_transform(): transform = from_origin(0, 90, 1, 1) shape = (180, 360) xs, ys = gu.affine_to_coords(transform, shape) assert np.all(ys == 90 - np.arange(0.5, shape[0])) assert np.all(xs == np.arange(0.5, shape[1])) # offset for geographic crs da = full_from_transform(transform, shape, crs=4326) da1 = gu.meridian_offset(da, x_name="x") assert da1.raster.bounds[0] == -180 da2 = gu.meridian_offset(da1, x_name="x", bbox=[170, 0, 190, 10]) assert da2.raster.bounds[0] == 170 da3 = gu.meridian_offset(da1, x_name="x", bbox=[-190, 0, -170, 10]) assert da3.raster.bounds[2] == -170 def test_area_res(): # surface area of earth should be approx 510.100.000 km2 transform = from_origin(-180, 90, 1, 1) shape = (180, 360) da = full_from_transform(transform, shape, crs=4326) assert np.isclose(da.raster.area_grid().sum() / 1e6, 510064511.156224) assert gu.cellres(0) == (111319.458, 110574.2727) def test_gdf(world): country = world.iloc[[0], :].to_crs(3857) assert np.all(gu.filter_gdf(world, country) == 0) idx0 = gu.filter_gdf(world, bbox=[3, 51.5, 4, 52])[0] assert ( world.iloc[ idx0, ]["iso_a3"] == "NLD" ) with pytest.raises(ValueError, match="Unknown geometry mask type"): gu.filter_gdf(world, geom=[3, 51.5, 4, 52]) def test_nearest(world, geodf): idx, _ = gu.nearest(geodf, geodf) assert np.all(idx == geodf.index) idx, dst = gu.nearest(geodf, world) assert np.all(dst == 0) assert np.all(world.loc[idx, "name"].values == geodf["country"].values) gdf0 = geodf.copy() gdf0["iso_a3"] = "" gdf1 = gu.nearest_merge(geodf, world.drop(idx), max_dist=1e6) assert np.all(gdf1.loc[gdf1["distance_right"] > 1e6, "index_right"] == -1) assert np.all(gdf1.loc[gdf1["distance_right"] > 1e6, "iso_a3"] != "") def test_spread(): transform = from_origin(-15, 10, 1, 1) shape = (20, 30) data = np.zeros(shape) data[10, 10] = 1 # lin index 310 frc = np.ones(shape) msk = np.ones(shape, dtype=bool) da_obs = RasterDataArray.from_numpy(data, transform=transform, nodata=0, crs=4326) da_msk = RasterDataArray.from_numpy(msk, transform=transform, crs=4326) da_frc = RasterDataArray.from_numpy(frc, transform=transform, crs=4326) # only testing the wrapping of pyflwdir method, not the method itself ds_out = gu.spread2d(da_obs, da_friction=da_frc, da_mask=da_msk) assert np.all(ds_out["source_value"] == 1) assert np.all(ds_out["source_idx"] == 310) assert ds_out["source_dst"].values[10, 10] == 0 with pytest.raises(ValueError, match='"nodata" must be a finite value'): gu.spread2d(da_obs, nodata=np.nan)
[ "hydromt.raster.full_from_transform", "numpy.ones", "rasterio.transform.from_origin", "hydromt.gis_utils.spread2d", "hydromt.gis_utils.parse_crs", "hydromt.gis_utils.filter_gdf", "hydromt.gis_utils.affine_to_coords", "hydromt.raster.RasterDataArray.from_numpy", "hydromt.gis_utils.utm_crs", "numpy.zeros", "pytest.raises", "hydromt.gis_utils.cellres", "numpy.all", "hydromt.gis_utils.nearest", "hydromt.gis_utils.meridian_offset", "numpy.arange" ]
[((769, 793), 'rasterio.transform.from_origin', 'from_origin', (['(0)', '(90)', '(1)', '(1)'], {}), '(0, 90, 1, 1)\n', (780, 793), False, 'from rasterio.transform import from_origin\n'), ((830, 867), 'hydromt.gis_utils.affine_to_coords', 'gu.affine_to_coords', (['transform', 'shape'], {}), '(transform, shape)\n', (849, 867), True, 'from hydromt import gis_utils as gu\n'), ((1015, 1062), 'hydromt.raster.full_from_transform', 'full_from_transform', (['transform', 'shape'], {'crs': '(4326)'}), '(transform, shape, crs=4326)\n', (1034, 1062), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((1073, 1107), 'hydromt.gis_utils.meridian_offset', 'gu.meridian_offset', (['da'], {'x_name': '"""x"""'}), "(da, x_name='x')\n", (1091, 1107), True, 'from hydromt import gis_utils as gu\n'), ((1158, 1217), 'hydromt.gis_utils.meridian_offset', 'gu.meridian_offset', (['da1'], {'x_name': '"""x"""', 'bbox': '[170, 0, 190, 10]'}), "(da1, x_name='x', bbox=[170, 0, 190, 10])\n", (1176, 1217), True, 'from hydromt import gis_utils as gu\n'), ((1267, 1328), 'hydromt.gis_utils.meridian_offset', 'gu.meridian_offset', (['da1'], {'x_name': '"""x"""', 'bbox': '[-190, 0, -170, 10]'}), "(da1, x_name='x', bbox=[-190, 0, -170, 10])\n", (1285, 1328), True, 'from hydromt import gis_utils as gu\n'), ((1469, 1496), 'rasterio.transform.from_origin', 'from_origin', (['(-180)', '(90)', '(1)', '(1)'], {}), '(-180, 90, 1, 1)\n', (1480, 1496), False, 'from rasterio.transform import from_origin\n'), ((1529, 1576), 'hydromt.raster.full_from_transform', 'full_from_transform', (['transform', 'shape'], {'crs': '(4326)'}), '(transform, shape, crs=4326)\n', (1548, 1576), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((2152, 2176), 'hydromt.gis_utils.nearest', 'gu.nearest', (['geodf', 'geodf'], {}), '(geodf, geodf)\n', (2162, 2176), True, 'from hydromt import gis_utils as gu\n'), ((2188, 2214), 'numpy.all', 'np.all', (['(idx == geodf.index)'], {}), '(idx == geodf.index)\n', (2194, 2214), True, 'import numpy as np\n'), ((2230, 2254), 'hydromt.gis_utils.nearest', 'gu.nearest', (['geodf', 'world'], {}), '(geodf, world)\n', (2240, 2254), True, 'from hydromt import gis_utils as gu\n'), ((2266, 2282), 'numpy.all', 'np.all', (['(dst == 0)'], {}), '(dst == 0)\n', (2272, 2282), True, 'import numpy as np\n'), ((2294, 2358), 'numpy.all', 'np.all', (["(world.loc[idx, 'name'].values == geodf['country'].values)"], {}), "(world.loc[idx, 'name'].values == geodf['country'].values)\n", (2300, 2358), True, 'import numpy as np\n'), ((2484, 2557), 'numpy.all', 'np.all', (["(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'index_right'] == -1)"], {}), "(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'index_right'] == -1)\n", (2490, 2557), True, 'import numpy as np\n'), ((2563, 2631), 'numpy.all', 'np.all', (["(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'iso_a3'] != '')"], {}), "(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'iso_a3'] != '')\n", (2569, 2631), True, 'import numpy as np\n'), ((2663, 2689), 'rasterio.transform.from_origin', 'from_origin', (['(-15)', '(10)', '(1)', '(1)'], {}), '(-15, 10, 1, 1)\n', (2674, 2689), False, 'from rasterio.transform import from_origin\n'), ((2722, 2737), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2730, 2737), True, 'import numpy as np\n'), ((2786, 2800), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (2793, 2800), True, 'import numpy as np\n'), ((2811, 2837), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'bool'}), '(shape, dtype=bool)\n', (2818, 
2837), True, 'import numpy as np\n'), ((2851, 2924), 'hydromt.raster.RasterDataArray.from_numpy', 'RasterDataArray.from_numpy', (['data'], {'transform': 'transform', 'nodata': '(0)', 'crs': '(4326)'}), '(data, transform=transform, nodata=0, crs=4326)\n', (2877, 2924), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((2938, 3000), 'hydromt.raster.RasterDataArray.from_numpy', 'RasterDataArray.from_numpy', (['msk'], {'transform': 'transform', 'crs': '(4326)'}), '(msk, transform=transform, crs=4326)\n', (2964, 3000), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((3014, 3076), 'hydromt.raster.RasterDataArray.from_numpy', 'RasterDataArray.from_numpy', (['frc'], {'transform': 'transform', 'crs': '(4326)'}), '(frc, transform=transform, crs=4326)\n', (3040, 3076), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((3164, 3219), 'hydromt.gis_utils.spread2d', 'gu.spread2d', (['da_obs'], {'da_friction': 'da_frc', 'da_mask': 'da_msk'}), '(da_obs, da_friction=da_frc, da_mask=da_msk)\n', (3175, 3219), True, 'from hydromt import gis_utils as gu\n'), ((3231, 3266), 'numpy.all', 'np.all', (["(ds_out['source_value'] == 1)"], {}), "(ds_out['source_value'] == 1)\n", (3237, 3266), True, 'import numpy as np\n'), ((3278, 3313), 'numpy.all', 'np.all', (["(ds_out['source_idx'] == 310)"], {}), "(ds_out['source_idx'] == 310)\n", (3284, 3313), True, 'import numpy as np\n'), ((505, 523), 'hydromt.gis_utils.parse_crs', 'gu.parse_crs', (['(4326)'], {}), '(4326)\n', (517, 523), True, 'from hydromt import gis_utils as gu\n'), ((657, 674), 'hydromt.gis_utils.utm_crs', 'gu.utm_crs', (['bbox1'], {}), '(bbox1)\n', (667, 674), True, 'from hydromt import gis_utils as gu\n'), ((1663, 1676), 'hydromt.gis_utils.cellres', 'gu.cellres', (['(0)'], {}), '(0)\n', (1673, 1676), True, 'from hydromt import gis_utils as gu\n'), ((1840, 1883), 'hydromt.gis_utils.filter_gdf', 'gu.filter_gdf', (['world'], {'bbox': '[3, 51.5, 4, 52]'}), '(world, bbox=[3, 51.5, 4, 52])\n', (1853, 1883), True, 'from hydromt import gis_utils as gu\n'), ((1990, 2051), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Unknown geometry mask type"""'}), "(ValueError, match='Unknown geometry mask type')\n", (2003, 2051), False, 'import pytest\n'), ((2061, 2104), 'hydromt.gis_utils.filter_gdf', 'gu.filter_gdf', (['world'], {'geom': '[3, 51.5, 4, 52]'}), '(world, geom=[3, 51.5, 4, 52])\n', (2074, 2104), True, 'from hydromt import gis_utils as gu\n'), ((3375, 3441), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""""nodata" must be a finite value"""'}), '(ValueError, match=\'"nodata" must be a finite value\')\n', (3388, 3441), False, 'import pytest\n'), ((3451, 3485), 'hydromt.gis_utils.spread2d', 'gu.spread2d', (['da_obs'], {'nodata': 'np.nan'}), '(da_obs, nodata=np.nan)\n', (3462, 3485), True, 'from hydromt import gis_utils as gu\n'), ((947, 971), 'numpy.arange', 'np.arange', (['(0.5)', 'shape[1]'], {}), '(0.5, shape[1])\n', (956, 971), True, 'import numpy as np\n'), ((1793, 1822), 'hydromt.gis_utils.filter_gdf', 'gu.filter_gdf', (['world', 'country'], {}), '(world, country)\n', (1806, 1822), True, 'from hydromt import gis_utils as gu\n'), ((283, 299), 'hydromt.gis_utils.utm_crs', 'gu.utm_crs', (['bbox'], {}), '(bbox)\n', (293, 299), True, 'from hydromt import gis_utils as gu\n'), ((330, 355), 'hydromt.gis_utils.parse_crs', 'gu.parse_crs', (['"""utm"""', 'bbox'], {}), "('utm', bbox)\n", (342, 355), True, 'from hydromt import gis_utils as gu\n'), ((427, 
444), 'hydromt.gis_utils.utm_crs', 'gu.utm_crs', (['bbox1'], {}), '(bbox1)\n', (437, 444), True, 'from hydromt import gis_utils as gu\n'), ((897, 921), 'numpy.arange', 'np.arange', (['(0.5)', 'shape[0]'], {}), '(0.5, shape[0])\n', (906, 921), True, 'import numpy as np\n')]
# -*- coding: UTF-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image


def show_images(name):
    folders = {"A", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
    images = []
    for folder in folders:
        findInFolder = "output/notMNIST_large/" + folder + "/" + name
        images.append(np.asarray(Image.open(findInFolder).convert('RGB')))
    return np.asarray(images)


def show_image(path):
    image = Image.open(path)
    Image._show(image)


def gallery(array, ncols=10):
    nindex, height, width, intensity = array.shape
    nrows = nindex // ncols
    assert nindex == nrows * ncols
    # want result.shape = (height*nrows, width*ncols, intensity)
    result = (array.reshape((nrows, ncols, height, width, intensity))
              .swapaxes(1, 2)
              .reshape((height * nrows, width * ncols, intensity)))
    return result


def make_array(png_path):
    pics = recognize(png_path)
    print("image size:", len(pics))
    images = []
    for pic in pics:
        images.append(np.asarray(Image.open(pic).convert('RGB')))
    return np.asarray(images)


def recognize(png_path, max=1000):
    image_files = os.listdir(png_path)
    folders = {"A", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
    # folders = {"f"}
    images = []
    errorImages = ['RnJlaWdodERpc3BCb29rSXRhbGljLnR0Zg==.png',
                   'SG90IE11c3RhcmQgQlROIFBvc3Rlci50dGY=.png',
                   'Um9tYW5hIEJvbGQucGZi.png'
                   ]
    for image in image_files:
        name = str(image)
        try:
            aaa = errorImages.index(name)
            print(aaa, name)
            continue
        except:
            if name.endswith(".png"):
                # print(name)
                for folder in folders:
                    try:
                        findInFolder = "output/notMNIST_large/" + folder + "/" + name
                        images.append(findInFolder)
                        if len(images) == (max):
                            return images
                    except IOError as e:
                        print('Could not read:', e)
    return images


def show_filtered_dir():
    '''
    Take a folder name; use the files inside it as search keys, look up the files
    with the same names inside the a-j folders, and display them.
    :return:
    '''
    array = make_array("output/notMNIST_large/A11")
    result = gallery(array)
    plt.imshow(result)
    plt.show()

# show_filtered_dir()


def showImageInAtoJByApath():
    '''
    Take a single file name, find that file in each of the a-j folders and display it.
    :return:
    '''
    result = gallery(show_images("RGV2aWwgQm9sZC50dGY=.png"))
    plt.imshow(result)
    plt.show()

# showImageInAtoJByApath()


def delFiles(files):
    for file in files:
        delFile(file=file)


def delFile(file):
    if os.path.exists(file):
        os.remove(file)
    else:
        print('no such file:%s' % file)

# delFile("output/notMNIST_large/A12/a2Fua2FuYSBLLnR0Zg==.png")


def delFileByIndexFolder(indexFolder):
    '''
    indexFolder serves as an "index folder": every file inside it marks a file to delete.
    Each file in indexFolder has a counterpart in the a-j folders, because they all come
    from the same font. Fonts of the same family tend to share the same defect when
    rendering the letters a-j, to the point that a human cannot read them as letters.
    This task therefore removes these "wrong" fonts (some images only convey the idea of
    a letter, e.g. the digits 1-10 standing in for a-j; but the digit 1 is obviously
    just a 1, and we do not guess at meanings beyond what is visible).
    :param indexFolder:
    :return:
    '''
    pics = recognize(indexFolder, 65535)
    print("file size:(%s)" % len(pics))  # 18340
    delFiles(pics)

# delFileByIndexFolder("output/notMNIST_large/A11")
[ "matplotlib.pyplot.imshow", "os.path.exists", "os.listdir", "PIL.Image.open", "PIL.Image._show", "numpy.asarray", "os.remove", "matplotlib.pyplot.show" ]
[((397, 415), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (407, 415), True, 'import numpy as np\n'), ((451, 467), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (461, 467), False, 'from PIL import Image\n'), ((472, 490), 'PIL.Image._show', 'Image._show', (['image'], {}), '(image)\n', (483, 490), False, 'from PIL import Image\n'), ((1056, 1074), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (1066, 1074), True, 'import numpy as np\n'), ((1132, 1152), 'os.listdir', 'os.listdir', (['png_path'], {}), '(png_path)\n', (1142, 1152), False, 'import os\n'), ((2281, 2299), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (2291, 2299), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2314), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2312, 2314), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2515), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (2507, 2515), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2528, 2530), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2678), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2672, 2678), False, 'import os\n'), ((2688, 2703), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (2697, 2703), False, 'import os\n'), ((344, 368), 'PIL.Image.open', 'Image.open', (['findInFolder'], {}), '(findInFolder)\n', (354, 368), False, 'from PIL import Image\n'), ((1012, 1027), 'PIL.Image.open', 'Image.open', (['pic'], {}), '(pic)\n', (1022, 1027), False, 'from PIL import Image\n')]
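A small shape-only illustration of what gallery() above does — tiling a batch of images into a grid. The sizes here are arbitrary toy values, not notMNIST dimensions.

import numpy as np

imgs = np.random.randint(0, 255, size=(4, 2, 3, 3))   # (nindex, height, width, channels)
print(gallery(imgs, ncols=2).shape)                   # -> (4, 6, 3): a 2x2 grid of 2x3 tiles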
import matplotlib.pyplot as plt
import rebalancer
from sp500_data_loader import load_data
import numpy as np
import itertools
import os
from multiprocessing import Process


def interpret_results(assets, rebalance_inv, bah_inv, data, condition, dir):
    prices = []
    for key in data.keys():
        prices.append(data[key][data.index[-1]])

    # rebalancer.writeResults('REBALANCE:', data, prices, rebalance_inv)
    # rebalancer.writeResults('B&H:', data, prices, bah_inv)

    print('rebalance: %f' % rebalance_inv.history[-1])
    print('b&h: %f' % bah_inv.history[-1])

    if condition:
        for key in data.keys():
            plt.plot(data[key], color='black')
        plt.axis('off')
        plt.savefig(dir + assets[0] + '_' + assets[1] + '.png')
        plt.clf()


def chunkIt(seq, num):
    avg = len(seq) / float(num)
    out = []
    last = 0.0

    while last < len(seq):
        out.append(seq[int(last):int(last + avg)])
        last += avg

    return out


def process_stock_list(stock_list):
    start_date = '2010-01-01'
    end_date = '2017-12-12'
    for stock in stock_list:
        stock = list(stock)
        print('simulating: ' + str(stock))
        dir_reb = 'stock_results_50_perc_reb/'
        dir_bah = 'stock_results_50_perc_bah/'
        if not os.path.isdir(dir_reb):
            os.makedirs(dir_reb)
        if not os.path.isdir(dir_bah):
            os.makedirs(dir_bah)

        file = stock[0] + '_' + stock[1] + '.png'
        file2 = stock[1] + '_' + stock[0] + '.png'
        if os.path.isfile(dir_reb + file) or os.path.isfile(dir_reb + file2) or \
                os.path.isfile(dir_bah + file) or os.path.isfile(dir_bah + file2):
            continue

        df_open, df_close, df_high, df_low, df_adj_close = load_data(stock, start_date, end_date)

        i0, = np.shape(df_adj_close[stock[0]])
        i1, = np.shape(df_adj_close[stock[1]])
        if i0 == 0 or i1 == 0:
            continue

        rebalance_inv, bah_inv = rebalancer.simulate(df_adj_close, df_high, df_low, crypto=False)

        condition = (rebalance_inv.history[-1] - bah_inv.history[-1]) / bah_inv.history[-1] > 0.5
        if condition:
            interpret_results(stock, rebalance_inv, bah_inv, df_adj_close, condition, dir_reb)
        else:
            condition = (bah_inv.history[-1] - rebalance_inv.history[-1]) / rebalance_inv.history[-1] > 0.5
            if condition:
                interpret_results(stock, rebalance_inv, bah_inv, df_adj_close, condition, dir_bah)


def main():
    with open('s&p500.txt', 'r') as fd:
        stocks = list(fd.read().splitlines())

    stock_list = list(itertools.combinations(stocks, 2))
    stock_lists = chunkIt(stock_list, 4)

    processes = []
    for stock_list in stock_lists:
        print(stock_list)
        process = Process(target=process_stock_list, args=([stock_list]))
        process.start()
        processes.append(process)

    for process in processes:
        process.join()


if __name__ == '__main__':
    main()
[ "matplotlib.pyplot.savefig", "os.makedirs", "multiprocessing.Process", "matplotlib.pyplot.clf", "matplotlib.pyplot.plot", "itertools.combinations", "os.path.isfile", "sp500_data_loader.load_data", "os.path.isdir", "rebalancer.simulate", "matplotlib.pyplot.axis", "numpy.shape" ]
[((682, 697), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (690, 697), True, 'import matplotlib.pyplot as plt\n'), ((706, 761), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir + assets[0] + '_' + assets[1] + '.png')"], {}), "(dir + assets[0] + '_' + assets[1] + '.png')\n", (717, 761), True, 'import matplotlib.pyplot as plt\n'), ((770, 779), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (777, 779), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1809), 'sp500_data_loader.load_data', 'load_data', (['stock', 'start_date', 'end_date'], {}), '(stock, start_date, end_date)\n', (1780, 1809), False, 'from sp500_data_loader import load_data\n'), ((1824, 1856), 'numpy.shape', 'np.shape', (['df_adj_close[stock[0]]'], {}), '(df_adj_close[stock[0]])\n', (1832, 1856), True, 'import numpy as np\n'), ((1871, 1903), 'numpy.shape', 'np.shape', (['df_adj_close[stock[1]]'], {}), '(df_adj_close[stock[1]])\n', (1879, 1903), True, 'import numpy as np\n'), ((1989, 2053), 'rebalancer.simulate', 'rebalancer.simulate', (['df_adj_close', 'df_high', 'df_low'], {'crypto': '(False)'}), '(df_adj_close, df_high, df_low, crypto=False)\n', (2008, 2053), False, 'import rebalancer\n'), ((2639, 2672), 'itertools.combinations', 'itertools.combinations', (['stocks', '(2)'], {}), '(stocks, 2)\n', (2661, 2672), False, 'import itertools\n'), ((2814, 2867), 'multiprocessing.Process', 'Process', ([], {'target': 'process_stock_list', 'args': '[stock_list]'}), '(target=process_stock_list, args=[stock_list])\n', (2821, 2867), False, 'from multiprocessing import Process\n'), ((639, 673), 'matplotlib.pyplot.plot', 'plt.plot', (['data[key]'], {'color': '"""black"""'}), "(data[key], color='black')\n", (647, 673), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1309), 'os.path.isdir', 'os.path.isdir', (['dir_reb'], {}), '(dir_reb)\n', (1300, 1309), False, 'import os\n'), ((1323, 1343), 'os.makedirs', 'os.makedirs', (['dir_reb'], {}), '(dir_reb)\n', (1334, 1343), False, 'import os\n'), ((1360, 1382), 'os.path.isdir', 'os.path.isdir', (['dir_bah'], {}), '(dir_bah)\n', (1373, 1382), False, 'import os\n'), ((1396, 1416), 'os.makedirs', 'os.makedirs', (['dir_bah'], {}), '(dir_bah)\n', (1407, 1416), False, 'import os\n'), ((1530, 1560), 'os.path.isfile', 'os.path.isfile', (['(dir_reb + file)'], {}), '(dir_reb + file)\n', (1544, 1560), False, 'import os\n'), ((1564, 1595), 'os.path.isfile', 'os.path.isfile', (['(dir_reb + file2)'], {}), '(dir_reb + file2)\n', (1578, 1595), False, 'import os\n'), ((1599, 1629), 'os.path.isfile', 'os.path.isfile', (['(dir_bah + file)'], {}), '(dir_bah + file)\n', (1613, 1629), False, 'import os\n'), ((1658, 1689), 'os.path.isfile', 'os.path.isfile', (['(dir_bah + file2)'], {}), '(dir_bah + file2)\n', (1672, 1689), False, 'import os\n')]
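A quick illustration of chunkIt, which main() above uses to split the ticker-pair list across four worker processes. The tickers are arbitrary examples, not taken from s&p500.txt.

import itertools

pairs = list(itertools.combinations(['AAPL', 'MSFT', 'GOOG', 'AMZN'], 2))  # 6 pairs
for chunk in chunkIt(pairs, 4):
    print(len(chunk), chunk)
# -> chunks of sizes 1, 2, 1, 2 that cover all 6 pairs exactly once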
""" =============== GTK Spreadsheet =============== Example of embedding Matplotlib in an application and interacting with a treeview to store data. Double click on an entry to update plot data. """ import gi gi.require_version('Gtk', '3.0') gi.require_version('Gdk', '3.0') from gi.repository import Gtk, Gdk from matplotlib.backends.backend_gtk3agg import FigureCanvas # or gtk3cairo. from numpy.random import random from matplotlib.figure import Figure class DataManager(Gtk.Window): num_rows, num_cols = 20, 10 data = random((num_rows, num_cols)) def __init__(self): super().__init__() self.set_default_size(600, 600) self.connect('destroy', lambda win: Gtk.main_quit()) self.set_title('GtkListStore demo') self.set_border_width(8) vbox = Gtk.VBox(homogeneous=False, spacing=8) self.add(vbox) label = Gtk.Label(label='Double click a row to plot the data') vbox.pack_start(label, False, False, 0) sw = Gtk.ScrolledWindow() sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN) sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC) vbox.pack_start(sw, True, True, 0) model = self.create_model() self.treeview = Gtk.TreeView(model=model) # Matplotlib stuff fig = Figure(figsize=(6, 4)) self.canvas = FigureCanvas(fig) # a Gtk.DrawingArea vbox.pack_start(self.canvas, True, True, 0) ax = fig.add_subplot(111) self.line, = ax.plot(self.data[0, :], 'go') # plot the first row self.treeview.connect('row-activated', self.plot_row) sw.add(self.treeview) self.add_columns() self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.KEY_PRESS_MASK | Gdk.EventMask.KEY_RELEASE_MASK) def plot_row(self, treeview, path, view_column): ind, = path # get the index into data points = self.data[ind, :] self.line.set_ydata(points) self.canvas.draw() def add_columns(self): for i in range(self.num_cols): column = Gtk.TreeViewColumn(str(i), Gtk.CellRendererText(), text=i) self.treeview.append_column(column) def create_model(self): types = [float] * self.num_cols store = Gtk.ListStore(*types) for row in self.data: store.append(tuple(row)) return store manager = DataManager() manager.show_all() Gtk.main()
[ "gi.repository.Gtk.TreeView", "gi.repository.Gtk.main_quit", "numpy.random.random", "matplotlib.figure.Figure", "gi.repository.Gtk.ListStore", "gi.require_version", "gi.repository.Gtk.Label", "matplotlib.backends.backend_gtk3agg.FigureCanvas", "gi.repository.Gtk.CellRendererText", "gi.repository.Gtk.ScrolledWindow", "gi.repository.Gtk.main", "gi.repository.Gtk.VBox" ]
[((212, 244), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (230, 244), False, 'import gi\n'), ((245, 277), 'gi.require_version', 'gi.require_version', (['"""Gdk"""', '"""3.0"""'], {}), "('Gdk', '3.0')\n", (263, 277), False, 'import gi\n'), ((2499, 2509), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (2507, 2509), False, 'from gi.repository import Gtk, Gdk\n'), ((539, 567), 'numpy.random.random', 'random', (['(num_rows, num_cols)'], {}), '((num_rows, num_cols))\n', (545, 567), False, 'from numpy.random import random\n'), ((815, 853), 'gi.repository.Gtk.VBox', 'Gtk.VBox', ([], {'homogeneous': '(False)', 'spacing': '(8)'}), '(homogeneous=False, spacing=8)\n', (823, 853), False, 'from gi.repository import Gtk, Gdk\n'), ((894, 948), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': '"""Double click a row to plot the data"""'}), "(label='Double click a row to plot the data')\n", (903, 948), False, 'from gi.repository import Gtk, Gdk\n'), ((1012, 1032), 'gi.repository.Gtk.ScrolledWindow', 'Gtk.ScrolledWindow', ([], {}), '()\n', (1030, 1032), False, 'from gi.repository import Gtk, Gdk\n'), ((1261, 1286), 'gi.repository.Gtk.TreeView', 'Gtk.TreeView', ([], {'model': 'model'}), '(model=model)\n', (1273, 1286), False, 'from gi.repository import Gtk, Gdk\n'), ((1329, 1351), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (1335, 1351), False, 'from matplotlib.figure import Figure\n'), ((1375, 1392), 'matplotlib.backends.backend_gtk3agg.FigureCanvas', 'FigureCanvas', (['fig'], {}), '(fig)\n', (1387, 1392), False, 'from matplotlib.backends.backend_gtk3agg import FigureCanvas\n'), ((2344, 2365), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['*types'], {}), '(*types)\n', (2357, 2365), False, 'from gi.repository import Gtk, Gdk\n'), ((704, 719), 'gi.repository.Gtk.main_quit', 'Gtk.main_quit', ([], {}), '()\n', (717, 719), False, 'from gi.repository import Gtk, Gdk\n'), ((2179, 2201), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (2199, 2201), False, 'from gi.repository import Gtk, Gdk\n')]
import cv2
import numpy as np


def MaxPooling(_img):
    img = _img.copy()
    result = np.zeros_like(img)

    for i in range(img.shape[0] // 8):
        ind_11 = i * 8
        ind_12 = ind_11 + 8
        for j in range(img.shape[1] // 8):
            ind_21 = j * 8
            ind_22 = ind_21 + 8
            result[ind_11:ind_12, ind_21:ind_22, 0] = np.max(img[ind_11:ind_12, ind_21:ind_22, 0])
            result[ind_11:ind_12, ind_21:ind_22, 1] = np.max(img[ind_11:ind_12, ind_21:ind_22, 1])
            result[ind_11:ind_12, ind_21:ind_22, 2] = np.max(img[ind_11:ind_12, ind_21:ind_22, 2])

    return result


img = cv2.imread("imori.jpg")

result = MaxPooling(img)

cv2.imwrite("myans_08.jpg", result)
[ "cv2.imwrite", "numpy.zeros_like", "cv2.imread", "numpy.max" ]
[((618, 641), 'cv2.imread', 'cv2.imread', (['"""imori.jpg"""'], {}), "('imori.jpg')\n", (628, 641), False, 'import cv2\n'), ((668, 703), 'cv2.imwrite', 'cv2.imwrite', (['"""myans_08.jpg"""', 'result'], {}), "('myans_08.jpg', result)\n", (679, 703), False, 'import cv2\n'), ((88, 106), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (101, 106), True, 'import numpy as np\n'), ((349, 393), 'numpy.max', 'np.max', (['img[ind_11:ind_12, ind_21:ind_22, 0]'], {}), '(img[ind_11:ind_12, ind_21:ind_22, 0])\n', (355, 393), True, 'import numpy as np\n'), ((448, 492), 'numpy.max', 'np.max', (['img[ind_11:ind_12, ind_21:ind_22, 1]'], {}), '(img[ind_11:ind_12, ind_21:ind_22, 1])\n', (454, 492), True, 'import numpy as np\n'), ((547, 591), 'numpy.max', 'np.max', (['img[ind_11:ind_12, ind_21:ind_22, 2]'], {}), '(img[ind_11:ind_12, ind_21:ind_22, 2])\n', (553, 591), True, 'import numpy as np\n')]
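For comparison, an equivalent vectorized formulation of the same 8x8 max pooling — a sketch that assumes the image dimensions are multiples of 8, as with the 128x128 imori.jpg used above: reshape into 8x8 blocks and reduce over the block axes instead of looping.

import numpy as np

def max_pooling_vectorized(img, k=8):
    h, w, c = img.shape
    blocks = img.reshape(h // k, k, w // k, k, c)
    pooled = blocks.max(axis=(1, 3))                               # one value per kxk block
    return np.repeat(np.repeat(pooled, k, axis=0), k, axis=1)     # broadcast back to full size

# np.array_equal(max_pooling_vectorized(img), MaxPooling(img)) should hold for a 128x128 input.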
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
import tensorflow as tf

from .math import (sparse_scalar_multiply, sparse_tensor_diag_matmul,
                   _diag_matmul_py, _diag_matmul_transpose_py)
from .convert import sparse_to_tensor


class MathTest(tf.test.TestCase):
    def test_sparse_scalar_multiply(self):
        a = [[0, 2, 3], [0, 1, 0]]
        a = sp.coo_matrix(a)
        a = sparse_to_tensor(a)
        a = sparse_scalar_multiply(a, 2)
        a = tf.sparse_tensor_to_dense(a)

        expected = [[0, 4, 6], [0, 2, 0]]

        with self.test_session():
            self.assertAllEqual(a.eval(), expected)

    def test_sparse_tensor_diag_matmul(self):
        a = [[2, 3, 0], [1, 0, 2], [0, 3, 0]]
        a = sp.coo_matrix(a)
        a = sparse_to_tensor(a)

        diag = [2, 0.5, 3]
        diag = tf.constant(diag)

        b = sparse_tensor_diag_matmul(a, diag)
        b = tf.sparse_tensor_to_dense(b)

        expected = [[4, 6, 0], [0.5, 0, 1], [0, 9, 0]]

        with self.test_session():
            self.assertAllEqual(b.eval(), expected)

        b = sparse_tensor_diag_matmul(a, diag, transpose=True)
        b = tf.sparse_tensor_to_dense(b)

        expected = [[4, 1.5, 0], [2, 0, 6], [0, 1.5, 0]]

        with self.test_session():
            self.assertAllEqual(b.eval(), expected)

    def test_diag_matmul_py(self):
        indices = np.array([[0, 0], [0, 1], [1, 0], [1, 2], [2, 1]])
        values = np.array([2, 3, 1, 2, 3])
        diag = np.array([2, 0.5, 3])

        result = _diag_matmul_py(indices, values, diag)
        expected = [4, 6, 0.5, 1, 9]

        assert_equal(result, expected)

    def test_diag_matmul_transpose_py(self):
        indices = np.array([[1, 0], [0, 0], [0, 1], [1, 2], [2, 1]])
        values = np.array([1, 2, 3, 2, 3])
        diag = np.array([2, 0.5, 3])

        result = _diag_matmul_transpose_py(indices, values, diag)
        expected = [2, 4, 1.5, 6, 1.5]

        assert_equal(result, expected)
[ "numpy.testing.assert_equal", "numpy.array", "tensorflow.constant", "scipy.sparse.coo_matrix", "tensorflow.sparse_tensor_to_dense" ]
[((406, 422), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['a'], {}), '(a)\n', (419, 422), True, 'import scipy.sparse as sp\n'), ((508, 536), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (['a'], {}), '(a)\n', (533, 536), True, 'import tensorflow as tf\n'), ((771, 787), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['a'], {}), '(a)\n', (784, 787), True, 'import scipy.sparse as sp\n'), ((863, 880), 'tensorflow.constant', 'tf.constant', (['diag'], {}), '(diag)\n', (874, 880), True, 'import tensorflow as tf\n'), ((941, 969), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (['b'], {}), '(b)\n', (966, 969), True, 'import tensorflow as tf\n'), ((1188, 1216), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (['b'], {}), '(b)\n', (1213, 1216), True, 'import tensorflow as tf\n'), ((1415, 1465), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 2], [2, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 2], [2, 1]])\n', (1423, 1465), True, 'import numpy as np\n'), ((1483, 1508), 'numpy.array', 'np.array', (['[2, 3, 1, 2, 3]'], {}), '([2, 3, 1, 2, 3])\n', (1491, 1508), True, 'import numpy as np\n'), ((1524, 1545), 'numpy.array', 'np.array', (['[2, 0.5, 3]'], {}), '([2, 0.5, 3])\n', (1532, 1545), True, 'import numpy as np\n'), ((1648, 1678), 'numpy.testing.assert_equal', 'assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (1660, 1678), False, 'from numpy.testing import assert_equal\n'), ((1743, 1793), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [0, 1], [1, 2], [2, 1]]'], {}), '([[1, 0], [0, 0], [0, 1], [1, 2], [2, 1]])\n', (1751, 1793), True, 'import numpy as np\n'), ((1811, 1836), 'numpy.array', 'np.array', (['[1, 2, 3, 2, 3]'], {}), '([1, 2, 3, 2, 3])\n', (1819, 1836), True, 'import numpy as np\n'), ((1852, 1873), 'numpy.array', 'np.array', (['[2, 0.5, 3]'], {}), '([2, 0.5, 3])\n', (1860, 1873), True, 'import numpy as np\n'), ((1988, 2018), 'numpy.testing.assert_equal', 'assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (2000, 2018), False, 'from numpy.testing import assert_equal\n')]
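A plain-NumPy check of what the diagonal matmul in these tests computes: left-multiplying by diag(d) scales row i of A by d[i], while the transpose variant scales column j by d[j]. The matrix and diagonal are the same small example used in the tests above.

import numpy as np

A = np.array([[2, 3, 0], [1, 0, 2], [0, 3, 0]], dtype=float)
d = np.array([2, 0.5, 3])
print(np.diag(d) @ A)   # [[4, 6, 0], [0.5, 0, 1], [0, 9, 0]] -- matches the first expected value
print(A * d)           # column scaling: [[4, 1.5, 0], [2, 0, 6], [0, 1.5, 0]] -- matches the transpose case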
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 22 13:13:06 2021

@author: hossam
"""
from base import BaseTrain
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pyplot import savefig
from scipy.stats import multivariate_normal


class vaeTrainer(BaseTrain):
    def __init__(self, sess, model, data, config):
        super(vaeTrainer, self).__init__(sess, model, data, config)

    def train_epoch(self):
        self.cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)

        # training
        self.sess.run(self.model.iterator.initializer,
                      feed_dict={self.model.original_signal: self.data.train_set_vae['data'],
                                 self.model.seed: self.cur_epoch})
        self.n_train_iter = self.data.n_train_vae // self.config['batch_size']
        idx_check_point = (self.n_train_iter - 1)
        train_loss_cur_epoch = 0.0
        for i in range(self.n_train_iter):
            loss = self.train_step()
            self.sess.run(self.model.increment_global_step_tensor)
            self.train_loss.append(np.squeeze(loss))
            train_loss_cur_epoch = train_loss_cur_epoch + loss
            if i == idx_check_point:
                test_loss, test_recons_loss_weighted, test_kl, test_sigma_regularisor, \
                    test_code_std_norm, test_cur_sigma2, test_recons_loss_ls = self.test_step()
        self.train_loss_ave_epoch.append(train_loss_cur_epoch / self.n_train_iter)

        # validation
        self.iter_epochs_list.append(self.n_train_iter * (self.cur_epoch + 1))
        self.sess.run(self.model.iterator.initializer,
                      feed_dict={self.model.original_signal: self.data.val_set_vae['data'],
                                 self.model.seed: self.cur_epoch})
        self.n_val_iter = self.data.n_val_vae // self.config['batch_size']
        val_loss_cur_epoch = 0.0
        for i in range(self.n_val_iter):
            val_loss = self.val_step()
            val_loss_cur_epoch = val_loss_cur_epoch + val_loss
        self.val_loss_ave_epoch.append(val_loss_cur_epoch / self.n_val_iter)

        # save the model parameters at the end of this epoch
        self.model.save(self.sess)

        print(
            "{}/{}, test loss: -elbo: {:.4f}, recons_loss_weighted: {:.4f}, recons_loss_ls: {:.4f}, KL_loss: {:.4f}, sigma_regularisor: {:.4f}, code_std_dev: {}".format(
                self.cur_epoch, self.config['num_epochs_vae'] - 1,
                test_loss, test_recons_loss_weighted, np.squeeze(np.mean(test_recons_loss_ls)),
                test_kl, test_sigma_regularisor, np.squeeze(test_code_std_norm)))
        print("Loss on training and val sets:\ntrain: {:.4f}, val: {:.4f}".format(
            self.train_loss_ave_epoch[self.cur_epoch], self.val_loss_ave_epoch[self.cur_epoch]))
        print("Current sigma2: {:.7f}".format(test_cur_sigma2))

        # save the current variables
        self.save_variables_VAE()

        # reconstruction plot
        self.plot_reconstructed_signal()

        # generate samples from prior
        self.generate_samples_from_prior()

        # plot the training and validation loss over iterations/epochs
        self.plot_train_and_val_loss()

    def train_step(self):
        batch_image = self.sess.run(self.model.input_image)
        feed_dict = {self.model.original_signal: batch_image,
                     self.model.is_code_input: False,
                     self.model.code_input: np.zeros((1, self.config['code_size'])),
                     self.model.lr: self.config['learning_rate_vae'] * (0.98 ** self.cur_epoch)}
        train_loss, _ = self.sess.run([self.model.elbo_loss, self.model.train_step_gradient],
                                      feed_dict=feed_dict)
        return train_loss

    def val_step(self):
        input_image_val = self.sess.run(self.model.input_image)
        val_cost, recon_loss_val, kl_loss_val, std_dev_loss_val = self.sess.run(
            [self.model.elbo_loss,
             self.model.ls_reconstruction_error,
             self.model.KL_loss,
             self.model.std_dev_norm],
            feed_dict={self.model.original_signal: input_image_val,
                       self.model.is_code_input: False,
                       self.model.code_input: np.zeros((1, self.config['code_size']))})
        self.val_loss.append(np.squeeze(val_cost))
        self.recons_loss_val.append(np.squeeze(np.mean(recon_loss_val)))
        self.KL_loss_val.append(kl_loss_val)
        return val_cost

    def test_step(self):
        feed_dict = {self.model.original_signal: self.data.test_set_vae['data'],
                     self.model.is_code_input: False,
                     self.model.code_input: np.zeros((1, self.config['code_size']))}
        self.output_test, test_loss, test_recons_loss_weighted, test_kl, test_sigma_regularisor, test_code_std_norm, test_cur_sigma2, test_recons_loss_ls = self.sess.run(
            [self.model.decoded,
             self.model.elbo_loss,
             self.model.weighted_reconstruction_error_dataset,
             self.model.KL_loss,
             self.model.sigma_regularisor_dataset,
             self.model.std_dev_norm,
             self.model.sigma2,
             self.model.ls_reconstruction_error],
            feed_dict=feed_dict)
        self.test_sigma2.append(np.squeeze(test_cur_sigma2))
        return test_loss, test_recons_loss_weighted, test_kl, test_sigma_regularisor, test_code_std_norm, np.squeeze(
            test_cur_sigma2), test_recons_loss_ls

    def plot_reconstructed_signal(self):
        input_images = np.squeeze(self.data.test_set_vae['data'])
        decoded_images = np.squeeze(self.output_test)
        n_images = 20
        # plot the reconstructed image for a shape
        for j in range(self.config['n_channel']):
            fig, axs = plt.subplots(4, 5, figsize=(18, 10), edgecolor='k')
            fig.subplots_adjust(hspace=.4, wspace=.4)
            axs = axs.ravel()
            for i in range(n_images):
                if self.config['n_channel'] == 1:
                    axs[i].plot(input_images[i])
                    axs[i].plot(decoded_images[i])
                else:
                    axs[i].plot(input_images[i, :, j])
                    axs[i].plot(decoded_images[i, :, j])
                axs[i].grid(True)
                axs[i].set_xlim(0, self.config['l_win'])
                axs[i].set_ylim(-5, 5)
                if i == 19:
                    axs[i].legend(('original', 'reconstructed'))
            plt.suptitle('Channel {}'.format(j))
            savefig(self.config['result_dir'] + 'test_reconstructed_{}_{}.pdf'.format(self.cur_epoch, j))
            fig.clf()
            plt.close()

    def generate_samples_from_prior(self):
        rv = multivariate_normal(np.zeros(self.config['code_size']),
                                 np.diag(np.ones(self.config['code_size'])))
        # Generate a batch size of samples from the prior samples
        n_images = 20
        samples_code_prior = rv.rvs(n_images)
        sampled_images = self.sess.run(self.model.decoded,
                                       feed_dict={self.model.original_signal: np.zeros(
                                           (n_images, self.config['l_win'], self.config['n_channel'])),
                                                  self.model.is_code_input: True,
                                                  self.model.code_input: samples_code_prior})
        sampled_images = np.squeeze(sampled_images)
        for j in range(self.config['n_channel']):
            fig, axs = plt.subplots(4, 5, figsize=(18, 10), edgecolor='k')
            fig.subplots_adjust(hspace=.4, wspace=.4)
            axs = axs.ravel()
            for i in range(n_images):
                if self.config['n_channel'] == 1:
                    axs[i].plot(sampled_images[i])
                else:
                    axs[i].plot(sampled_images[i, :, j])
                axs[i].grid(True)
                axs[i].set_xlim(0, self.config['l_win'])
                axs[i].set_ylim(-5, 5)
            plt.suptitle('Channel {}'.format(j))
            savefig(self.config['result_dir'] + 'generated_samples_{}_{}.pdf'.format(self.cur_epoch, j))
            fig.clf()
            plt.close()
[ "numpy.mean", "matplotlib.pylab.subplots", "numpy.ones", "numpy.squeeze", "numpy.zeros", "matplotlib.pylab.close" ]
[((5772, 5814), 'numpy.squeeze', 'np.squeeze', (["self.data.test_set_vae['data']"], {}), "(self.data.test_set_vae['data'])\n", (5782, 5814), True, 'import numpy as np\n'), ((5836, 5864), 'numpy.squeeze', 'np.squeeze', (['self.output_test'], {}), '(self.output_test)\n', (5846, 5864), True, 'import numpy as np\n'), ((7434, 7460), 'numpy.squeeze', 'np.squeeze', (['sampled_images'], {}), '(sampled_images)\n', (7444, 7460), True, 'import numpy as np\n'), ((3266, 3305), 'numpy.zeros', 'np.zeros', (["(1, self.config['code_size'])"], {}), "((1, self.config['code_size']))\n", (3274, 3305), True, 'import numpy as np\n'), ((4633, 4653), 'numpy.squeeze', 'np.squeeze', (['val_cost'], {}), '(val_cost)\n', (4643, 4653), True, 'import numpy as np\n'), ((4976, 5015), 'numpy.zeros', 'np.zeros', (["(1, self.config['code_size'])"], {}), "((1, self.config['code_size']))\n", (4984, 5015), True, 'import numpy as np\n'), ((5526, 5553), 'numpy.squeeze', 'np.squeeze', (['test_cur_sigma2'], {}), '(test_cur_sigma2)\n', (5536, 5553), True, 'import numpy as np\n'), ((5657, 5684), 'numpy.squeeze', 'np.squeeze', (['test_cur_sigma2'], {}), '(test_cur_sigma2)\n', (5667, 5684), True, 'import numpy as np\n'), ((5993, 6044), 'matplotlib.pylab.subplots', 'plt.subplots', (['(4)', '(5)'], {'figsize': '(18, 10)', 'edgecolor': '"""k"""'}), "(4, 5, figsize=(18, 10), edgecolor='k')\n", (6005, 6044), True, 'import matplotlib.pylab as plt\n'), ((6723, 6734), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (6732, 6734), True, 'import matplotlib.pylab as plt\n'), ((6806, 6840), 'numpy.zeros', 'np.zeros', (["self.config['code_size']"], {}), "(self.config['code_size'])\n", (6814, 6840), True, 'import numpy as np\n'), ((7524, 7575), 'matplotlib.pylab.subplots', 'plt.subplots', (['(4)', '(5)'], {'figsize': '(18, 10)', 'edgecolor': '"""k"""'}), "(4, 5, figsize=(18, 10), edgecolor='k')\n", (7536, 7575), True, 'import matplotlib.pylab as plt\n'), ((8094, 8105), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (8103, 8105), True, 'import matplotlib.pylab as plt\n'), ((1035, 1051), 'numpy.squeeze', 'np.squeeze', (['loss'], {}), '(loss)\n', (1045, 1051), True, 'import numpy as np\n'), ((2463, 2493), 'numpy.squeeze', 'np.squeeze', (['test_code_std_norm'], {}), '(test_code_std_norm)\n', (2473, 2493), True, 'import numpy as np\n'), ((4698, 4721), 'numpy.mean', 'np.mean', (['recon_loss_val'], {}), '(recon_loss_val)\n', (4705, 4721), True, 'import numpy as np\n'), ((6850, 6883), 'numpy.ones', 'np.ones', (["self.config['code_size']"], {}), "(self.config['code_size'])\n", (6857, 6883), True, 'import numpy as np\n'), ((2375, 2403), 'numpy.mean', 'np.mean', (['test_recons_loss_ls'], {}), '(test_recons_loss_ls)\n', (2382, 2403), True, 'import numpy as np\n'), ((4485, 4524), 'numpy.zeros', 'np.zeros', (["(1, self.config['code_size'])"], {}), "((1, self.config['code_size']))\n", (4493, 4524), True, 'import numpy as np\n'), ((7137, 7205), 'numpy.zeros', 'np.zeros', (["(n_images, self.config['l_win'], self.config['n_channel'])"], {}), "((n_images, self.config['l_win'], self.config['n_channel']))\n", (7145, 7205), True, 'import numpy as np\n')]
import astropy.io.fits as pft
import numpy as np
from astropy.time import Time
import itertools
from astropy.stats import sigma_clipped_stats
from scipy.signal import convolve


def group_image_pairs(file_list, by_next=False, by_exptime=False):
    if by_next:
        image_pair_lists = [[file_list[2*i], file_list[2*i+1]] for i in range(int(len(file_list)/2)-1)]
    elif by_exptime:
        exp_times = np.array([float(pft.open(f)[0].header['EXPTIME']) for f in file_list])
        print(list(set(exp_times)))
        image_pair_lists = []
        for e in exp_times:
            i = np.where(exp_times == e)[0]
            imlist = file_list[i]
            combinations = list(itertools.combinations(imlist, 2))
            image_pair_lists += combinations
    else:
        image_pair_lists = list(itertools.combinations(file_list, 2))
    return image_pair_lists


def diff_image_stats(img1, img2, sigma_clip=True):
    diff_img = (img1 - img2).flatten()
    if sigma_clip:
        mean, med, stddev = sigma_clipped_stats(diff_img)
        var = stddev**2./2.
    else:
        mean = np.mean(diff_img)
        med = np.median(diff_img)
        var = np.std(diff_img)**2./2.
    return mean, med, var


def rebin_image(img, bin_row, bin_col):
    kernel = np.ones((bin_row, bin_col)).astype(int)
    c = convolve(img, kernel, mode='valid')
    return c[::bin_row, ::bin_col]
[ "numpy.mean", "scipy.signal.convolve", "numpy.median", "numpy.ones", "numpy.where", "itertools.combinations", "numpy.stddev", "astropy.io.fits.open", "astropy.stats.sigma_clipped_stats" ]
[((1312, 1347), 'scipy.signal.convolve', 'convolve', (['img', 'kernel'], {'mode': '"""valid"""'}), "(img, kernel, mode='valid')\n", (1320, 1347), False, 'from scipy.signal import convolve\n'), ((1004, 1033), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['diff_img'], {}), '(diff_img)\n', (1023, 1033), False, 'from astropy.stats import sigma_clipped_stats\n'), ((1087, 1104), 'numpy.mean', 'np.mean', (['diff_img'], {}), '(diff_img)\n', (1094, 1104), True, 'import numpy as np\n'), ((1119, 1138), 'numpy.median', 'np.median', (['diff_img'], {}), '(diff_img)\n', (1128, 1138), True, 'import numpy as np\n'), ((1261, 1288), 'numpy.ones', 'np.ones', (['(bin_row, bin_col)'], {}), '((bin_row, bin_col))\n', (1268, 1288), True, 'import numpy as np\n'), ((801, 837), 'itertools.combinations', 'itertools.combinations', (['file_list', '(2)'], {}), '(file_list, 2)\n', (823, 837), False, 'import itertools\n'), ((1153, 1172), 'numpy.stddev', 'np.stddev', (['diff_img'], {}), '(diff_img)\n', (1162, 1172), True, 'import numpy as np\n'), ((586, 610), 'numpy.where', 'np.where', (['(exp_times == e)'], {}), '(exp_times == e)\n', (594, 610), True, 'import numpy as np\n'), ((680, 713), 'itertools.combinations', 'itertools.combinations', (['imlist', '(2)'], {}), '(imlist, 2)\n', (702, 713), False, 'import itertools\n'), ((425, 436), 'astropy.io.fits.open', 'pft.open', (['f'], {}), '(f)\n', (433, 436), True, 'import astropy.io.fits as pft\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 09:36:00 2019

@author: minjie
"""
import SimpleITK as sitk
import numpy as np
from pathlib import Path
import os
import cv2
import pydicom
import h5py
from scipy.ndimage import binary_dilation

fn1 = 'resources/CT/01/data/A_1.mha'
fn2 = 'resources/CT/01/data/PV_1.mha'
dicom_fd = './resources/CT/01/data/'
name_chs = ['V', 'A', 'PV']
out_h5py = './resources/ct01m.h5'

n_class = 4
w_class = [0.1, 0.2, 0.2, 0.2]
w_class_in_roi = [1.0, 2.0, 2.0, 2.0]

itkimage1 = sitk.ReadImage(fn1)
labels1 = (sitk.GetArrayFromImage(itkimage1)[::-1, :, :] == 1).astype('int')  # dongmai (artery)
itkimage2 = sitk.ReadImage(fn2)
labels2 = (sitk.GetArrayFromImage(itkimage2)[::-1, :, :] == 1).astype('int')  # mengjinmai (portal vein)
labels3 = (sitk.GetArrayFromImage(itkimage2)[::-1, :, :] == 2).astype('int')  # mengjinmai (portal vein)
labels0 = (labels1 + labels2 + labels3 == 0).astype('int')

labels1_dilate = binary_dilation(labels1, structure=np.ones((5, 5, 5))).astype(labels1.dtype)
labels2_dilate = binary_dilation(labels2, structure=np.ones((5, 5, 5))).astype(labels2.dtype)
labels3_dilate = binary_dilation(labels3, structure=np.ones((5, 5, 5))).astype(labels3.dtype)

labels = np.stack((labels0, labels1, labels2, labels3), axis=0)
labels_dilate = ((labels1_dilate + labels2_dilate + labels3_dilate) > 0).astype('int')

#%%
flist = [str(fn) for fn in (Path(dicom_fd) / name_chs[0]).glob('*')]
n_slice = len(flist)
row, col = 512, 512

labels_pos = np.where(labels_dilate == 1)
z_min, z_max = labels_pos[0].min(), labels_pos[0].max()
y_min, y_max = labels_pos[1].min(), labels_pos[1].max()
x_min, x_max = labels_pos[2].min(), labels_pos[2].max()

z_min = max(0, z_min - 8)
z_max = min(labels.shape[1], z_max + 8)
y_min = max(0, y_min - 16)
y_max = min(labels.shape[2], y_max + 16)
x_min = max(0, x_min - 16)
x_max = min(labels.shape[3], x_max + 16)

xyz = [x_min, x_max, y_min, y_max, z_min, z_max]
labels = labels[:, z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
labels_dilate = labels_dilate[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]

#%%
weights = np.zeros_like(labels, dtype='float32')
for i in range(n_class):
    weights[i] = w_class[i]
    weights[i][labels_dilate == 1] = w_class_in_roi[i]

weights = weights.astype('float32')
labels = labels.astype('uint8')

#%%
raw_im = np.zeros((3, n_slice, row, col), dtype='float32')
for fn in flist:
    with pydicom.dcmread(fn) as dc:
        img_dicom1 = (dc.pixel_array).copy()
    fn2 = fn.replace('/' + name_chs[0] + '/', '/' + name_chs[1] + '/')
    fn3 = fn.replace('/' + name_chs[0] + '/', '/' + name_chs[2] + '/')
    with pydicom.dcmread(fn2) as dc:
        img_dicom2 = (dc.pixel_array).copy()
    with pydicom.dcmread(fn3) as dc:
        img_dicom3 = (dc.pixel_array).copy()

    fname = Path(fn).stem
    idx = int(Path(fn).stem.replace('IM', ''))
    raw_im[0, idx, :, :] = img_dicom1
    raw_im[1, idx, :, :] = img_dicom2
    raw_im[2, idx, :, :] = img_dicom3

rs = int(dc.RescaleSlope)
ri = int(dc.RescaleIntercept)
wc = 80   # int(dc.WindowCenter)
wh = 240  # int(dc.WindowWidth)

raw_im = rs * raw_im + ri
raw_im = (raw_im.astype('float') - (wc - wh/2.0))/wh
# img_dicom = (img_dicom/2500.0).astype('float32')
raw_im = np.clip(raw_im, 0.0, 1.0) - 0.5
raw_im = raw_im[:, z_min:z_max+1, y_min:y_max+1, x_min:x_max+1].astype('float32')

#%%
# write data
#with h5py.File(out_h5py, 'w') as f:
#    f.create_dataset('label', data=labels, compression='lzf')
#    f.create_dataset('raw', data=raw_im, compression='lzf')
#    f.create_dataset('weight', data=weights, compression='lzf')
#    f.create_dataset('xyz', data=xyz)

##%%
#with h5py.File(out_h5py, 'w') as f:
#    f.create_dataset('label', data=labels, compression='gzip')
#    f.create_dataset('raw', data=raw_im, compression='gzip')
#    f.create_dataset('weight', data=weights, compression='gzip')
#    f.create_dataset('xyz', data=xyz)

#%%
with h5py.File(out_h5py, 'w') as f:
    f.create_dataset('label', data=labels)
    f.create_dataset('raw', data=raw_im)
    f.create_dataset('weight', data=weights)
    f.create_dataset('xyz', data=xyz)
[ "numpy.clip", "pydicom.dcmread", "numpy.ones", "pathlib.Path", "numpy.where", "SimpleITK.GetArrayFromImage", "h5py.File", "numpy.stack", "numpy.zeros", "SimpleITK.ReadImage", "numpy.zeros_like" ]
[((541, 560), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fn1'], {}), '(fn1)\n', (555, 560), True, 'import SimpleITK as sitk\n'), ((657, 676), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fn2'], {}), '(fn2)\n', (671, 676), True, 'import SimpleITK as sitk\n'), ((1198, 1252), 'numpy.stack', 'np.stack', (['(labels0, labels1, labels2, labels3)'], {'axis': '(0)'}), '((labels0, labels1, labels2, labels3), axis=0)\n', (1206, 1252), True, 'import numpy as np\n'), ((1463, 1491), 'numpy.where', 'np.where', (['(labels_dilate == 1)'], {}), '(labels_dilate == 1)\n', (1471, 1491), True, 'import numpy as np\n'), ((2050, 2088), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {'dtype': '"""float32"""'}), "(labels, dtype='float32')\n", (2063, 2088), True, 'import numpy as np\n'), ((2279, 2328), 'numpy.zeros', 'np.zeros', (['(3, n_slice, row, col)'], {'dtype': '"""float32"""'}), "((3, n_slice, row, col), dtype='float32')\n", (2287, 2328), True, 'import numpy as np\n'), ((3218, 3243), 'numpy.clip', 'np.clip', (['raw_im', '(0.0)', '(1.0)'], {}), '(raw_im, 0.0, 1.0)\n', (3225, 3243), True, 'import numpy as np\n'), ((3914, 3938), 'h5py.File', 'h5py.File', (['out_h5py', '"""w"""'], {}), "(out_h5py, 'w')\n", (3923, 3938), False, 'import h5py\n'), ((2358, 2377), 'pydicom.dcmread', 'pydicom.dcmread', (['fn'], {}), '(fn)\n', (2373, 2377), False, 'import pydicom\n'), ((2591, 2611), 'pydicom.dcmread', 'pydicom.dcmread', (['fn2'], {}), '(fn2)\n', (2606, 2611), False, 'import pydicom\n'), ((2673, 2693), 'pydicom.dcmread', 'pydicom.dcmread', (['fn3'], {}), '(fn3)\n', (2688, 2693), False, 'import pydicom\n'), ((2776, 2784), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (2780, 2784), False, 'from pathlib import Path\n'), ((572, 605), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['itkimage1'], {}), '(itkimage1)\n', (594, 605), True, 'import SimpleITK as sitk\n'), ((688, 721), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['itkimage2'], {}), '(itkimage2)\n', (710, 721), True, 'import SimpleITK as sitk\n'), ((774, 807), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['itkimage2'], {}), '(itkimage2)\n', (796, 807), True, 'import SimpleITK as sitk\n'), ((960, 978), 'numpy.ones', 'np.ones', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (967, 978), True, 'import numpy as np\n'), ((1053, 1071), 'numpy.ones', 'np.ones', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1060, 1071), True, 'import numpy as np\n'), ((1146, 1164), 'numpy.ones', 'np.ones', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1153, 1164), True, 'import numpy as np\n'), ((1369, 1383), 'pathlib.Path', 'Path', (['dicom_fd'], {}), '(dicom_fd)\n', (1373, 1383), False, 'from pathlib import Path\n'), ((2804, 2812), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (2808, 2812), False, 'from pathlib import Path\n')]
import sys
sys.path.append("../modules/")

import numpy as np
import matplotlib.pyplot as plt
from skimage import feature

from utils import conf

if __name__ == "__main__":
    filename = conf.dir_data_temp + "slice.npy"
    img = np.load(filename)

    filename = conf.dir_data_mask + "endocardial_mask.npy"
    endocardial_mask = np.load(filename)

    filename = conf.dir_data_mask + "epicardial_mask.npy"
    epicardial_mask = np.load(filename)

    # get contour of masks
    endocardial_border = feature.canny(endocardial_mask, sigma=3)
    #endocardial_border = np.zeros((img.shape[0], img.shape[1]))
    #endocardial_border = _endocardial_border[
    epicardial_border = feature.canny(epicardial_mask, sigma=3)

    fig1 = plt.figure()

    ax11 = fig1.add_subplot(1, 3, 1)
    ax11.imshow(img, cmap=plt.cm.gray)
    ax11.set_xlim([0., img.shape[1]])
    ax11.set_ylim([img.shape[0], 0.])

    ax12 = fig1.add_subplot(1, 3, 2)
    ax12.imshow(epicardial_mask, cmap=plt.cm.gray)
    ax12.set_xlim([0., endocardial_mask.shape[1]])
    ax12.set_ylim([endocardial_mask.shape[0], 0.])

    ax13 = fig1.add_subplot(1, 3, 3)
    ax13.imshow(img, cmap=plt.cm.gray)
    ax13.set_xlim([0., endocardial_mask.shape[1]])
    ax13.set_ylim([endocardial_mask.shape[0], 0.])
    ax13.contour(epicardial_border, colors='r')

    plt.show()
[ "matplotlib.pyplot.figure", "skimage.feature.canny", "numpy.load", "sys.path.append", "matplotlib.pyplot.show" ]
[((11, 41), 'sys.path.append', 'sys.path.append', (['"""../modules/"""'], {}), "('../modules/')\n", (26, 41), False, 'import sys\n'), ((233, 250), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (240, 250), True, 'import numpy as np\n'), ((333, 350), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (340, 350), True, 'import numpy as np\n'), ((431, 448), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (438, 448), True, 'import numpy as np\n'), ((503, 543), 'skimage.feature.canny', 'feature.canny', (['endocardial_mask'], {'sigma': '(3)'}), '(endocardial_mask, sigma=3)\n', (516, 543), False, 'from skimage import feature\n'), ((681, 720), 'skimage.feature.canny', 'feature.canny', (['epicardial_mask'], {'sigma': '(3)'}), '(epicardial_mask, sigma=3)\n', (694, 720), False, 'from skimage import feature\n'), ((733, 745), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (743, 745), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1333), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1331, 1333), True, 'import matplotlib.pyplot as plt\n')]
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch


class Generator(nn.Module):
    def __init__(self, configs, shape):
        super(Generator, self).__init__()
        self.label_emb = nn.Embedding(configs.n_classes, configs.n_classes)
        self.shape = shape

        def block(in_feat, out_feat, normalize=True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(configs.latent_dim + configs.n_classes, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
            nn.Linear(1024, int(np.prod(shape))),
            nn.Tanh()
        )

    def forward(self, noise, labels):
        # Concatenate label embedding and data to produce input
        gen_input = torch.cat((self.label_emb(labels), noise), -1)
        input = self.model(gen_input)
        input = input.view(input.size(0), -1)  # resize
        return input


class Discriminator(nn.Module):
    def __init__(self, configs, shape):
        super(Discriminator, self).__init__()
        self.label_embedding = nn.Embedding(configs.n_classes, configs.n_classes)
        self.model = nn.Sequential(
            nn.Linear(configs.n_classes + int(np.prod(shape)), 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1),
        )

    def forward(self, input, labels):
        # Concatenate label embedding and data to produce input
        d_in = torch.cat((input.view(input.size(0), -1), self.label_embedding(labels)), -1)
        validity = self.model(d_in)
        return validity
[ "numpy.prod", "torch.nn.Dropout", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.nn.BatchNorm1d", "torch.nn.Linear", "torch.nn.Embedding" ]
[((223, 273), 'torch.nn.Embedding', 'nn.Embedding', (['configs.n_classes', 'configs.n_classes'], {}), '(configs.n_classes, configs.n_classes)\n', (235, 273), True, 'import torch.nn as nn\n'), ((1308, 1358), 'torch.nn.Embedding', 'nn.Embedding', (['configs.n_classes', 'configs.n_classes'], {}), '(configs.n_classes, configs.n_classes)\n', (1320, 1358), True, 'import torch.nn as nn\n'), ((852, 861), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (859, 861), True, 'import torch.nn as nn\n'), ((1477, 1508), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1489, 1508), True, 'import torch.nn as nn\n'), ((1522, 1541), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (1531, 1541), True, 'import torch.nn as nn\n'), ((1555, 1570), 'torch.nn.Dropout', 'nn.Dropout', (['(0.4)'], {}), '(0.4)\n', (1565, 1570), True, 'import torch.nn as nn\n'), ((1584, 1615), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1596, 1615), True, 'import torch.nn as nn\n'), ((1629, 1648), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (1638, 1648), True, 'import torch.nn as nn\n'), ((1662, 1677), 'torch.nn.Dropout', 'nn.Dropout', (['(0.4)'], {}), '(0.4)\n', (1672, 1677), True, 'import torch.nn as nn\n'), ((1691, 1722), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1703, 1722), True, 'import torch.nn as nn\n'), ((1736, 1753), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (1745, 1753), True, 'import torch.nn as nn\n'), ((378, 406), 'torch.nn.Linear', 'nn.Linear', (['in_feat', 'out_feat'], {}), '(in_feat, out_feat)\n', (387, 406), True, 'import torch.nn as nn\n'), ((521, 552), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (533, 552), True, 'import torch.nn as nn\n'), ((464, 493), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_feat', '(0.8)'], {}), '(out_feat, 0.8)\n', (478, 493), True, 'import torch.nn as nn\n'), ((822, 836), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (829, 836), True, 'import numpy as np\n'), ((1442, 1456), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1449, 1456), True, 'import numpy as np\n')]
import BotDecidesPos
import numpy as np


class Collision_check:
    def __init__(self):
        self.m = 0.0
        self.n = 0.0

    def load_data(self, bot):
        tgx, tgy = bot.getTarget()
        mpx, mpy = bot.getPos()
        spd = bot.getSpeed()
        return spd, mpx, mpy, tgx, tgy

    def checkCollision(self, bot1, bot2):
        # Engine is assumed to be provided elsewhere in the project; it is not defined or imported in this snippet.
        eg = Engine()
        sp1, x, y, x1, y1 = self.load_data(bot1)
        sp2, a, b, a1, b1 = self.load_data(bot2)
        p = eg.findDist(x, y, x1, y1)
        q = eg.findDist(a, b, a1, b1)
        #v1 = [sp1*(x-x1)/p, sp1*(y-y1)/p]
        #v2 = [sp2*(a-a1)/q, sp2*(b-b1)/q]
        # Ax=C, which is the matrix form of the equation of the path of the vehicle
        s = [[x - x1, y - y1], [a - a1, b - b1]]
        t = [y*x1 - x*y1, b*a1 - a*b1]
        self.m, self.n = eg.eq_StraightLine(s, t)
        p1 = eg.findDist(x, y, self.m, self.n)
        q1 = eg.findDist(a, b, self.m, self.n)
        eta1 = p1 / sp1
        eta2 = q1 / sp2
        if np.absolute(eta1 - eta2) < 1:
            return True
        else:
            return False

    def getCollisionIndex(self):
        return self.m, self.n

    def setCollisionIndex(self, a, b):
        self.m = a
        self.n = b
[ "numpy.absolute" ]
[((958, 982), 'numpy.absolute', 'np.absolute', (['(eta1 - eta2)'], {}), '(eta1 - eta2)\n', (969, 982), True, 'import numpy as np\n')]
import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x, w, b):
    return 1/(1 + np.exp(-x*w+b))

x = np.arange(-5.0, 5.0, 0.1)
y1 = sigmoid(x, 0.5, 0)
y2 = sigmoid(x, 1, 0)
y3 = sigmoid(x, 2, 0)

plt.plot(x, y1, "r", linestyle='--')
plt.plot(x, y2, 'g')
plt.plot(x, y3, 'b', linestyle='--')
plt.plot([0, 0], [1.0, 0.0], ":")
plt.title("sigmoid function")
plt.show()
[ "matplotlib.pyplot.plot", "numpy.exp", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.show" ]
[((114, 139), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (123, 139), True, 'import numpy as np\n'), ((209, 245), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1', '"""r"""'], {'linestyle': '"""--"""'}), "(x, y1, 'r', linestyle='--')\n", (217, 245), True, 'import matplotlib.pyplot as plt\n'), ((246, 266), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2', '"""g"""'], {}), "(x, y2, 'g')\n", (254, 266), True, 'import matplotlib.pyplot as plt\n'), ((267, 303), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y3', '"""b"""'], {'linestyle': '"""--"""'}), "(x, y3, 'b', linestyle='--')\n", (275, 303), True, 'import matplotlib.pyplot as plt\n'), ((304, 337), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[1.0, 0.0]', '""":"""'], {}), "([0, 0], [1.0, 0.0], ':')\n", (312, 337), True, 'import matplotlib.pyplot as plt\n'), ((338, 367), 'matplotlib.pyplot.title', 'plt.title', (['"""sigmoid function"""'], {}), "('sigmoid function')\n", (347, 367), True, 'import matplotlib.pyplot as plt\n'), ((368, 378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (376, 378), True, 'import matplotlib.pyplot as plt\n'), ((92, 110), 'numpy.exp', 'np.exp', (['(-x * w + b)'], {}), '(-x * w + b)\n', (98, 110), True, 'import numpy as np\n')]
import numpy as np from scipy.optimize import fsolve from scipy.linalg import expm import matplotlib.pyplot as plt # Some utilities # map a vector to a skew symmetric matrix def skew(x): return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]]) # map a twist to its adjoint form def adjoint(x): return np.concatenate( [np.concatenate([skew(x[:3]), np.zeros((3, 3))], 1), np.concatenate([skew(x[3:]), skew(x[:3])], 1)]) # flatten a homogeneous transformation matrix to a vector def flatten(g): return np.concatenate([np.reshape(g[:3, :3], (9,)), g[:3, 3]]) # unflatten a homogeneous transformation def unflatten(g): return np.row_stack((np.column_stack((np.reshape(g[:9], (3, 3)), g[9:])), np.array([0, 0, 0, 1]))) # the matrix representation of a twist vector def se(x): return np.row_stack((np.column_stack((skew(x[:3]), x[3:])), np.array([0, 0, 0, 0]))) # Initialization def initRod(N): L = 10e-2 # length of the rod g = np.zeros((N, 12)) xi = np.repeat(np.array([[0, np.pi/4/L, 0, 0, 0, 1]]), N, 0) eta = np.zeros((N, 6)) #explicit Euler RKMK G = np.eye(4) ds = L / (N - 1) g[0, :] = flatten(G) for i in range(1, N): G = G @ expm(se(ds * xi[i - 1, :])) g[i, :] = flatten(G) return g, xi, eta #Integration def step(g, xi, eta): # determine xi0 by solving tip condition xi0 = fsolve(lambda x: condition(g, xi, eta, x), xi[0, :]) # integrate the system with the solved xi0 return integrate(g, xi, eta, xi0) def condition(g, xi, eta, xi0): g_next, xi_next, eta_next = integrate(g, xi, eta, xi0) return xi_next[-1, :] - np.array([0, 0, 0, 0, 0, 1]) def integrate(g, xi, eta, xi0): # initialize empty matrices for storage g_next = np.zeros_like(g) xi_next = np.zeros_like(xi) eta_next = np.zeros_like(eta) # determine number of spatial points, just believe everything is the right size (N, _) = xi.shape # set the guessed value xi_next[0, :] = xi0 # material and geometric properties xi_ref = np.array([0, 0, 0, 0, 0, 1]) L = 10e-2 D = 1e-2 E = 1e6 rho = 1e3 ds = L / (N - 1) dt = 0.01 A = np.pi / 4 * D ** 2 I = np.pi / 64 * D ** 4 J = 2 * I G = E / 3 K = np.diag([E * I, E * I, G * J, G * A, G * A, E * A]) M = rho * np.diag([I, I, J, A, A, A]) # integration over the body (don't need the initial point as the initial values are determined already) for i in range(N - 1): # averaging over steps to get half step values xi_half = (xi_next[i, :] + xi[i, :]) / 2 eta_half = (eta_next[i, :] + eta[i, :]) / 2 # implicit midpoint approximation xi_dot = (xi_next[i, :] - xi[i, :]) / dt eta_dot = (eta_next[i, :] - eta[i, :]) / dt # spatial derivatives xi_der = np.linalg.inv(K) @ ( (M @ eta_dot) - (adjoint(eta_half).T @ M @ eta_half) + (adjoint(xi_half).T @ K @ (xi_half - xi_ref))) eta_der = xi_dot - (adjoint(xi_half) @ eta_half) # explicit Euler step xi_half_next = xi_half + ds * xi_der eta_half_next = eta_half + ds * eta_der # determine next step from half step value xi_next[i + 1, :] = 2 * xi_half_next - xi[i+1, :] eta_next[i + 1, :] = 2 * eta_half_next - eta[i+1, :] # midpoint RKMK to step the g values for i in range(N): g_next[i, :] = flatten(unflatten(g[i,:]) @ expm(se(dt * (eta_next[i,:] + eta[i,:])/2))) return g_next, xi_next, eta_next # Testing functions def plotDynamics(N, steps): # start figure fig, ax = plt.subplots() g, xi, eta = initRod(N) ax.plot(g[:,9], g[:,11]) ax.set_aspect('equal') plt.pause(0.01) # make the plots show up as they're updated for i in range(steps): g, xi, eta = step(g, xi, eta) ax.plot(g[:,9], g[:,11]) plt.pause(0.01) # make the plots show up as they're updated #make sure it stays open for 
looking at and saving plt.show() def energy(xi,eta): # similar to the setup for the integrator (N, _) = xi.shape xi_ref = np.array([0, 0, 0, 0, 0, 1]) L = 10e-2 D = 1e-2 E = 1e6 rho = 1e3 ds = L / (N - 1) dt = 0.01 A = np.pi / 4 * D ** 2 I = np.pi / 64 * D ** 4 J = 2 * I G = E / 3 K = np.diag([E * I, E * I, G * J, G * A, G * A, E * A]) M = rho * np.diag([I, I, J, A, A, A]) H = 0 # total energy # integrate over the rod for i in range(N): T = eta[i,:].T @ M @ eta[i,:] U = (xi[i,:]-xi_ref).T @ K @ (xi[i,:]-xi_ref) H += 1/2*(T + U) return ds*H #multiply by discrete step size to scale def plotEnergy(N, steps): fig, ax = plt.subplots() g, xi, eta = initRod(N) E = [] for i in range(steps): g, xi, eta = step(g, xi, eta) E.append(energy(xi,eta)) ax.plot(E) plt.show() # Call the script as python conservative.py if __name__ == "__main__": # plotDynamics(100, 20) plotEnergy(100,100)
[ "numpy.eye", "numpy.reshape", "numpy.diag", "numpy.array", "numpy.zeros", "numpy.linalg.inv", "matplotlib.pyplot.pause", "numpy.zeros_like", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ]
[((199, 263), 'numpy.array', 'np.array', (['[[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]]'], {}), '([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])\n', (207, 263), True, 'import numpy as np\n'), ((981, 998), 'numpy.zeros', 'np.zeros', (['(N, 12)'], {}), '((N, 12))\n', (989, 998), True, 'import numpy as np\n'), ((1074, 1090), 'numpy.zeros', 'np.zeros', (['(N, 6)'], {}), '((N, 6))\n', (1082, 1090), True, 'import numpy as np\n'), ((1125, 1134), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1131, 1134), True, 'import numpy as np\n'), ((1771, 1787), 'numpy.zeros_like', 'np.zeros_like', (['g'], {}), '(g)\n', (1784, 1787), True, 'import numpy as np\n'), ((1802, 1819), 'numpy.zeros_like', 'np.zeros_like', (['xi'], {}), '(xi)\n', (1815, 1819), True, 'import numpy as np\n'), ((1835, 1853), 'numpy.zeros_like', 'np.zeros_like', (['eta'], {}), '(eta)\n', (1848, 1853), True, 'import numpy as np\n'), ((2068, 2096), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 1])\n', (2076, 2096), True, 'import numpy as np\n'), ((2276, 2327), 'numpy.diag', 'np.diag', (['[E * I, E * I, G * J, G * A, G * A, E * A]'], {}), '([E * I, E * I, G * J, G * A, G * A, E * A])\n', (2283, 2327), True, 'import numpy as np\n'), ((3627, 3641), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3639, 3641), True, 'import matplotlib.pyplot as plt\n'), ((3730, 3745), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (3739, 3745), True, 'import matplotlib.pyplot as plt\n'), ((4017, 4027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4025, 4027), True, 'import matplotlib.pyplot as plt\n'), ((4130, 4158), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 1])\n', (4138, 4158), True, 'import numpy as np\n'), ((4338, 4389), 'numpy.diag', 'np.diag', (['[E * I, E * I, G * J, G * A, G * A, E * A]'], {}), '([E * I, E * I, G * J, G * A, G * A, E * A])\n', (4345, 4389), True, 'import numpy as np\n'), ((4726, 4740), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4738, 4740), True, 'import matplotlib.pyplot as plt\n'), ((4899, 4909), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4907, 4909), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1060), 'numpy.array', 'np.array', (['[[0, np.pi / 4 / L, 0, 0, 0, 1]]'], {}), '([[0, np.pi / 4 / L, 0, 0, 0, 1]])\n', (1026, 1060), True, 'import numpy as np\n'), ((1652, 1680), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 1])\n', (1660, 1680), True, 'import numpy as np\n'), ((2342, 2369), 'numpy.diag', 'np.diag', (['[I, I, J, A, A, A]'], {}), '([I, I, J, A, A, A])\n', (2349, 2369), True, 'import numpy as np\n'), ((3897, 3912), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (3906, 3912), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4431), 'numpy.diag', 'np.diag', (['[I, I, J, A, A, A]'], {}), '([I, I, J, A, A, A])\n', (4411, 4431), True, 'import numpy as np\n'), ((553, 580), 'numpy.reshape', 'np.reshape', (['g[:3, :3]', '(9,)'], {}), '(g[:3, :3], (9,))\n', (563, 580), True, 'import numpy as np\n'), ((731, 753), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (739, 753), True, 'import numpy as np\n'), ((878, 900), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (886, 900), True, 'import numpy as np\n'), ((2854, 2870), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (2867, 2870), True, 'import numpy as np\n'), ((380, 396), 'numpy.zeros', 'np.zeros', (['(3, 
3)'], {}), '((3, 3))\n', (388, 396), True, 'import numpy as np\n'), ((695, 720), 'numpy.reshape', 'np.reshape', (['g[:9]', '(3, 3)'], {}), '(g[:9], (3, 3))\n', (705, 720), True, 'import numpy as np\n')]
import numpy as np
from gtsam import SfmTrack

from gtsfm.common.image import Image
import gtsfm.utils.images as image_utils


def test_get_average_point_color():
    """Ensure 3d point color is computed as mean of RGB per 2d measurement."""
    # random point; 2d measurements below are dummy locations (not actual projection)
    triangulated_pt = np.array([1, 2, 1])
    track_3d = SfmTrack(triangulated_pt)

    # in camera 0
    track_3d.add_measurement(idx=0, m=np.array([130, 80]))
    # in camera 1
    track_3d.add_measurement(idx=1, m=np.array([10, 60]))

    img0 = np.zeros((100, 200, 3), dtype=np.uint8)
    img0[80, 130] = np.array([40, 50, 60])

    img1 = np.zeros((100, 200, 3), dtype=np.uint8)
    img1[60, 10] = np.array([60, 70, 80])

    images = {0: Image(img0), 1: Image(img1)}

    r, g, b = image_utils.get_average_point_color(track_3d, images)
    assert r == 50
    assert g == 60
    assert b == 70


def test_get_downsampling_factor_per_axis_leaveintact() -> None:
    """Ensure that image is left intact, when shorter side is smaller than max_resolution."""
    img_h = 700
    img_w = 1500
    img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))

    max_resolution = 800
    scale_u, scale_v, new_h, new_w = image_utils.get_downsampling_factor_per_axis(img_h, img_w, max_resolution)

    assert scale_u == 1.0
    assert scale_v == 1.0
    assert new_h == 700
    assert new_w == 1500


def test_get_rescaling_factor_per_axis_upsample() -> None:
    """Ensure that max resolution constraint is met, when upsampling image.

    Resize a 700x1500 image, so that the shorter image side is EXACTLY 800 px.
    """
    img_h = 700
    img_w = 1500
    img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))

    max_resolution = 800
    scale_u, scale_v, new_h, new_w = image_utils.get_rescaling_factor_per_axis(img_h, img_w, max_resolution)

    # 8/7 will not give a clean integer division
    assert np.isclose(scale_u, 1.1427, atol=4)
    assert np.isclose(scale_v, 1.1429, atol=4)
    assert new_h == 800
    assert new_w == 1714


def test_get_downsampling_factor_per_axis() -> None:
    """Ensure that max resolution constraint is met, when downsampling image.

    Resize a 700x1500 image, so that the shorter image side is AT MOST 600 px.
    Image is in landscape mode.
    """
    img_h = 700
    img_w = 1500
    img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))

    max_resolution = 600
    scale_u, scale_v, new_h, new_w = image_utils.get_downsampling_factor_per_axis(img_h, img_w, max_resolution)

    # Note that 600 / 700 = 0.85714
    # 1500 * 0.85714 = 1285.7, which we round up to 1286.
    assert np.isclose(scale_u, 0.8573, atol=4)
    assert np.isclose(scale_v, 0.8571, atol=4)
    assert new_h == 600
    assert new_w == 1286


def test_get_rescaling_factor_per_axis_downsample() -> None:
    """Ensure that max resolution constraint is met, when downsampling image.

    Resize a 700x1500 image, so that the shorter image side is EXACTLY 600 px.
    Image is in landscape mode.
    """
    img_h = 700
    img_w = 1500
    img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))

    max_resolution = 600
    scale_u, scale_v, new_h, new_w = image_utils.get_rescaling_factor_per_axis(img_h, img_w, max_resolution)

    # Note that 600 / 700 = 0.85714
    # 1500 * 0.85714 = 1285.7, which we round up to 1286.
    assert np.isclose(scale_u, 0.8573, atol=4)
    assert np.isclose(scale_v, 0.8571, atol=4)
    assert new_h == 600
    assert new_w == 1286


def test_get_downsampling_factor_per_axis_portrait() -> None:
    """Ensure that max resolution constraint is met, when downsampling image.

    Resize a 700x1500 image, so that the shorter image side is AT MOST 600 px.
    Image is in portrait mode.
    """
    img_h = 1500
    img_w = 700
    img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))

    max_resolution = 600
    scale_u, scale_v, new_h, new_w = image_utils.get_downsampling_factor_per_axis(img_h, img_w, max_resolution)

    # Note that 600 / 700 = 0.85714
    # 1500 * 0.85714 = 1285.7, which we round up to 1286.
    assert np.isclose(scale_u, 0.8571, atol=4)
    assert np.isclose(scale_v, 0.8573, atol=4)
    assert new_h == 1286
    assert new_w == 600


def test_get_rescaling_factor_per_axis_downsample_portrait() -> None:
    """Ensure that max resolution constraint is met, when downsampling image.

    Resize a 700x1500 image, so that the shorter image side is EXACTLY 600 px.
    Image is in portrait mode.
    """
    img_h = 1500
    img_w = 700
    img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))

    max_resolution = 600
    scale_u, scale_v, new_h, new_w = image_utils.get_rescaling_factor_per_axis(img_h, img_w, max_resolution)

    # Note that 600 / 700 = 0.85714
    # 1500 * 0.85714 = 1285.7, which we round up to 1286.
    assert np.isclose(scale_v, 0.8571, atol=4)
    assert np.isclose(scale_u, 0.8573, atol=4)
    assert new_h == 1286
    assert new_w == 600
[ "gtsfm.utils.images.get_rescaling_factor_per_axis", "gtsfm.utils.images.get_downsampling_factor_per_axis", "numpy.isclose", "numpy.array", "numpy.zeros", "gtsfm.utils.images.get_average_point_color", "gtsfm.common.image.Image", "gtsam.SfmTrack" ]
[((351, 370), 'numpy.array', 'np.array', (['[1, 2, 1]'], {}), '([1, 2, 1])\n', (359, 370), True, 'import numpy as np\n'), ((386, 411), 'gtsam.SfmTrack', 'SfmTrack', (['triangulated_pt'], {}), '(triangulated_pt)\n', (394, 411), False, 'from gtsam import SfmTrack\n'), ((578, 617), 'numpy.zeros', 'np.zeros', (['(100, 200, 3)'], {'dtype': 'np.uint8'}), '((100, 200, 3), dtype=np.uint8)\n', (586, 617), True, 'import numpy as np\n'), ((638, 660), 'numpy.array', 'np.array', (['[40, 50, 60]'], {}), '([40, 50, 60])\n', (646, 660), True, 'import numpy as np\n'), ((673, 712), 'numpy.zeros', 'np.zeros', (['(100, 200, 3)'], {'dtype': 'np.uint8'}), '((100, 200, 3), dtype=np.uint8)\n', (681, 712), True, 'import numpy as np\n'), ((732, 754), 'numpy.array', 'np.array', (['[60, 70, 80]'], {}), '([60, 70, 80])\n', (740, 754), True, 'import numpy as np\n'), ((817, 870), 'gtsfm.utils.images.get_average_point_color', 'image_utils.get_average_point_color', (['track_3d', 'images'], {}), '(track_3d, images)\n', (852, 870), True, 'import gtsfm.utils.images as image_utils\n'), ((1245, 1319), 'gtsfm.utils.images.get_downsampling_factor_per_axis', 'image_utils.get_downsampling_factor_per_axis', (['img_h', 'img_w', 'max_resolution'], {}), '(img_h, img_w, max_resolution)\n', (1289, 1319), True, 'import gtsfm.utils.images as image_utils\n'), ((1803, 1874), 'gtsfm.utils.images.get_rescaling_factor_per_axis', 'image_utils.get_rescaling_factor_per_axis', (['img_h', 'img_w', 'max_resolution'], {}), '(img_h, img_w, max_resolution)\n', (1844, 1874), True, 'import gtsfm.utils.images as image_utils\n'), ((1940, 1975), 'numpy.isclose', 'np.isclose', (['scale_u', '(1.1427)'], {'atol': '(4)'}), '(scale_u, 1.1427, atol=4)\n', (1950, 1975), True, 'import numpy as np\n'), ((1987, 2022), 'numpy.isclose', 'np.isclose', (['scale_v', '(1.1429)'], {'atol': '(4)'}), '(scale_v, 1.1429, atol=4)\n', (1997, 2022), True, 'import numpy as np\n'), ((2481, 2555), 'gtsfm.utils.images.get_downsampling_factor_per_axis', 'image_utils.get_downsampling_factor_per_axis', (['img_h', 'img_w', 'max_resolution'], {}), '(img_h, img_w, max_resolution)\n', (2525, 2555), True, 'import gtsfm.utils.images as image_utils\n'), ((2662, 2697), 'numpy.isclose', 'np.isclose', (['scale_u', '(0.8573)'], {'atol': '(4)'}), '(scale_u, 0.8573, atol=4)\n', (2672, 2697), True, 'import numpy as np\n'), ((2709, 2744), 'numpy.isclose', 'np.isclose', (['scale_v', '(0.8571)'], {'atol': '(4)'}), '(scale_v, 0.8571, atol=4)\n', (2719, 2744), True, 'import numpy as np\n'), ((3211, 3282), 'gtsfm.utils.images.get_rescaling_factor_per_axis', 'image_utils.get_rescaling_factor_per_axis', (['img_h', 'img_w', 'max_resolution'], {}), '(img_h, img_w, max_resolution)\n', (3252, 3282), True, 'import gtsfm.utils.images as image_utils\n'), ((3389, 3424), 'numpy.isclose', 'np.isclose', (['scale_u', '(0.8573)'], {'atol': '(4)'}), '(scale_u, 0.8573, atol=4)\n', (3399, 3424), True, 'import numpy as np\n'), ((3436, 3471), 'numpy.isclose', 'np.isclose', (['scale_v', '(0.8571)'], {'atol': '(4)'}), '(scale_v, 0.8571, atol=4)\n', (3446, 3471), True, 'import numpy as np\n'), ((3939, 4013), 'gtsfm.utils.images.get_downsampling_factor_per_axis', 'image_utils.get_downsampling_factor_per_axis', (['img_h', 'img_w', 'max_resolution'], {}), '(img_h, img_w, max_resolution)\n', (3983, 4013), True, 'import gtsfm.utils.images as image_utils\n'), ((4120, 4155), 'numpy.isclose', 'np.isclose', (['scale_u', '(0.8571)'], {'atol': '(4)'}), '(scale_u, 0.8571, atol=4)\n', (4130, 4155), True, 'import numpy as np\n'), ((4167, 
4202), 'numpy.isclose', 'np.isclose', (['scale_v', '(0.8573)'], {'atol': '(4)'}), '(scale_v, 0.8573, atol=4)\n', (4177, 4202), True, 'import numpy as np\n'), ((4677, 4748), 'gtsfm.utils.images.get_rescaling_factor_per_axis', 'image_utils.get_rescaling_factor_per_axis', (['img_h', 'img_w', 'max_resolution'], {}), '(img_h, img_w, max_resolution)\n', (4718, 4748), True, 'import gtsfm.utils.images as image_utils\n'), ((4855, 4890), 'numpy.isclose', 'np.isclose', (['scale_v', '(0.8571)'], {'atol': '(4)'}), '(scale_v, 0.8571, atol=4)\n', (4865, 4890), True, 'import numpy as np\n'), ((4902, 4937), 'numpy.isclose', 'np.isclose', (['scale_u', '(0.8573)'], {'atol': '(4)'}), '(scale_u, 0.8573, atol=4)\n', (4912, 4937), True, 'import numpy as np\n'), ((773, 784), 'gtsfm.common.image.Image', 'Image', (['img0'], {}), '(img0)\n', (778, 784), False, 'from gtsfm.common.image import Image\n'), ((789, 800), 'gtsfm.common.image.Image', 'Image', (['img1'], {}), '(img1)\n', (794, 800), False, 'from gtsfm.common.image import Image\n'), ((1138, 1181), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 3)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 3), dtype=np.uint8)\n', (1146, 1181), True, 'import numpy as np\n'), ((1696, 1739), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 3)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 3), dtype=np.uint8)\n', (1704, 1739), True, 'import numpy as np\n'), ((2374, 2417), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 3)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 3), dtype=np.uint8)\n', (2382, 2417), True, 'import numpy as np\n'), ((3104, 3147), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 3)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 3), dtype=np.uint8)\n', (3112, 3147), True, 'import numpy as np\n'), ((3832, 3875), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 3)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 3), dtype=np.uint8)\n', (3840, 3875), True, 'import numpy as np\n'), ((4570, 4613), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 3)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 3), dtype=np.uint8)\n', (4578, 4613), True, 'import numpy as np\n'), ((469, 488), 'numpy.array', 'np.array', (['[130, 80]'], {}), '([130, 80])\n', (477, 488), True, 'import numpy as np\n'), ((546, 564), 'numpy.array', 'np.array', (['[10, 60]'], {}), '([10, 60])\n', (554, 564), True, 'import numpy as np\n')]
r""" Difference between magnetic dipole and loop sources =================================================== In this example we look at the differences between an electric loop loop, which results in a magnetic source, and a magnetic dipole source. The derivation of the electromagnetic field in Hunziker et al. (2015) is for electric and magnetic point-dipole sources and receivers. The magnetic field due to a magnetic source (:math:`mm`) is obtain from the electric field due to an electric source (:math:`ee`) using the duality principle, given in their Equation (11), .. math:: \hat{G}^{mm}_{pq}(\mathbf{x}, \mathbf{x'}, s, \eta_{kr}, \zeta_{ij}) = -\hat{G}^{ee}_{pq}(\mathbf{x}, \mathbf{x'}, s, -\zeta_{kr}, -\eta_{ij}) \, . \qquad (1) Without going into the details of the different parameters, we can focus on the difference between the :math:`mm` and :math:`ee` fields for a homogeneous, isotropic fullspace by simplifying this further to .. math:: \mathbf{G}^{mm}_\text{dip-dip} = \frac{\eta}{\zeta}\mathbf{G}^{ee} \quad \xrightarrow{\text{diff. approx}} \quad \frac{\sigma}{\mathrm{i}\omega \mu}\mathbf{G}^{ee}_\text{dip-dip} \, . \qquad (2) Here, :math:`\sigma` is conductivity (S/m), :math:`\omega=2\pi f` is angular frequency (Hz), and :math:`\mu` is the magnetic permeability (H/m). So from Equation (2) we see that the :math:`mm` field differs from the :math:`ee` field by a factor :math:`\sigma/(\mathrm{i}\omega\mu)`. A magnetic dipole source has a moment of :math:`I^mds`; however, a magnetic dipole source is basically never used in geophysics. Instead a loop of an electric wire is used, which generates a magnetic field. The moment generated by this loop is given by :math:`I^m = \mathrm{i}\omega\mu N A I^e`, where :math:`A` is the area of the loop (m:math:`^2`), and :math:`N` the number of turns of the loop. So the difference between a unit magnetic dipole and a unit loop (:math:`A=1, N=1`) is the factor :math:`\mathrm{i}\omega\mu`, hence Equation (2) becomes .. math:: \mathbf{G}^{mm}_\text{loop-dip} = \mathrm{i}\omega\mu\mathbf{G}^{mm}_\text{dip-dip} = \sigma\,\mathbf{G}^{ee}_\text{dip-dip} \, . \qquad (3) This notebook shows this relation in the frequency domain, as well as for impulse, step-on, and step-off responses in the time domain. We can actually model an **electric loop** instead of adjusting the magnetic dipole solution to correspond to a loop source. This is shown in the second part of the notebook. **References** - <NAME>., <NAME>, and <NAME>, 2015, The electromagnetic response in a layered vertical transverse isotropic medium: A new look at an old problem: Geophysics, 80(1), F1–F18; DOI: `10.1190/geo2013-0411.1 <https://doi.org/10.1190/geo2013-0411.1>`_. """ import empymod import numpy as np import matplotlib.pyplot as plt plt.style.use('ggplot') # sphinx_gallery_thumbnail_number = 3 ############################################################################### # 1. Using the magnetic dipole solution # ------------------------------------- # # Survey parameters # ~~~~~~~~~~~~~~~~~ # # - Homogenous fullspace of :math:`\sigma` = 0.01 S/m. # - Source at the origin, x-directed. # - Inline receiver with offset of 100 m, x-directed. freq = np.logspace(-1, 5, 301) # Frequencies (Hz) time = np.logspace(-6, 0, 301) # Times (s) src = [0, 0, 0, 0, 0] # x-dir. source at the origin [x, y, z, azimuth, dip] rec = [100, 0, 0, 0, 0] # x-dir. 
receiver 100m away from source, inline cond = 0.01 # Conductivity (S/m) ############################################################################### # Computation using ``empymod`` # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Collect common parameters inp = {'src': src, 'rec': rec, 'depth': [], 'res': 1/cond, 'verb': 1} # Frequency domain inp['freqtime'] = freq fee_dip_dip = empymod.bipole(**inp) fmm_dip_dip = empymod.bipole(msrc=True, mrec=True, **inp) f_loo_dip = empymod.loop(**inp) # Time domain inp['freqtime'] = time # ee ee_dip_dip_of = empymod.bipole(signal=-1, **inp) ee_dip_dip_im = empymod.bipole(signal=0, **inp) ee_dip_dip_on = empymod.bipole(signal=1, **inp) # mm dip-dip dip_dip_of = empymod.bipole(signal=-1, msrc=True, mrec=True, **inp) dip_dip_im = empymod.bipole(signal=0, msrc=True, mrec=True, **inp) dip_dip_on = empymod.bipole(signal=1, msrc=True, mrec=True, **inp) # mm loop-dip loo_dip_of = empymod.loop(signal=-1, **inp) loo_dip_im = empymod.loop(signal=0, **inp) loo_dip_on = empymod.loop(signal=1, **inp) ############################################################################### # Plot the result # ~~~~~~~~~~~~~~~ fs = 16 # Fontsize # Figure fig = plt.figure(figsize=(12, 8)) # Frequency Domain plt.subplot(231) plt.title(r'$G^{ee}_{\rm{dip-dip}}$', fontsize=fs) plt.plot(freq, fee_dip_dip.real, 'C0-', label='Real') plt.plot(freq, -fee_dip_dip.real, 'C0--') plt.plot(freq, fee_dip_dip.imag, 'C1-', label='Imag') plt.plot(freq, -fee_dip_dip.imag, 'C1--') plt.xscale('log') plt.yscale('log') plt.ylim([5e-8, 2e-5]) ax1 = plt.subplot(232) plt.title(r'$G^{mm}_{\rm{dip-dip}}$', fontsize=fs) plt.plot(freq, fmm_dip_dip.real, 'C0-', label='Real') plt.plot(freq, -fmm_dip_dip.real, 'C0--') plt.plot(freq, fmm_dip_dip.imag, 'C1-', label='Imag') plt.plot(freq, -fmm_dip_dip.imag, 'C1--') plt.xscale('log') plt.yscale('log') plt.xlabel('Frequency (Hz)', fontsize=fs-2) plt.legend() plt.subplot(233) plt.title(r'$G^{mm}_{\rm{loop-dip}}$', fontsize=fs) plt.plot(freq, f_loo_dip.real, 'C0-', label='Real') plt.plot(freq, -f_loo_dip.real, 'C0--') plt.plot(freq, f_loo_dip.imag, 'C1-', label='Imag') plt.plot(freq, -f_loo_dip.imag, 'C1--') plt.xscale('log') plt.yscale('log') plt.ylim([5e-10, 2e-7]) plt.text(1.05, 0.5, "Frequency Domain", {'fontsize': fs}, horizontalalignment='left', verticalalignment='center', rotation=-90, clip_on=False, transform=plt.gca().transAxes) # Time Domain plt.subplot(234) plt.plot(time, ee_dip_dip_of, 'C0-', label='Step-Off') plt.plot(time, -ee_dip_dip_of, 'C0--') plt.plot(time, ee_dip_dip_im, 'C1-', label='Impulse') plt.plot(time, -ee_dip_dip_im, 'C1--') plt.plot(time, ee_dip_dip_on, 'C2-', label='Step-On') plt.plot(time, -ee_dip_dip_on, 'C2--') plt.xscale('log') plt.yscale('log') plt.subplot(235) plt.plot(time, dip_dip_of, 'C0-', label='Step-Off') plt.plot(time, -dip_dip_of, 'C0--') plt.plot(time, dip_dip_im, 'C1-', label='Impulse') plt.plot(time, -dip_dip_im, 'C1--') plt.plot(time, dip_dip_on, 'C2-', label='Step-On') plt.plot(time, -dip_dip_on, 'C2--') plt.xscale('log') plt.yscale('log') plt.xlabel('Time (s)', fontsize=fs-2) plt.legend() plt.subplot(236) plt.plot(time, loo_dip_of, 'C0-', label='Step-Off') plt.plot(time, -loo_dip_of, 'C0--') plt.plot(time, loo_dip_im, 'C1-', label='Impulse') plt.plot(time, -loo_dip_im, 'C1--') plt.plot(time, loo_dip_on, 'C2-', label='Step-On') plt.plot(time, -loo_dip_on, 'C2--') plt.xscale('log') plt.yscale('log') plt.text(1.05, 0.5, "Time Domain", {'fontsize': fs}, horizontalalignment='left', verticalalignment='center', rotation=-90, 
clip_on=False, transform=plt.gca().transAxes) fig.text(-0.01, 0.5, 'Amplitude; e-rec (V/m); m-rec (A/m)', va='center', rotation='vertical', fontsize=fs, color='.4') plt.tight_layout() plt.show() ############################################################################### # The figure shows the main points of Equations (2) and (3): # # - The magnetic dipole-dipole response differs by a factor # :math:`\sigma/(\mathrm{i}\omega\mu)` from the electric dipole-dipole # response. That means for the time-domain that the magnetic response looks # more like the time derivative of the electric response (e.g., the magnetic # impulse responses resembles the electric step-on response). # - The magnetic loop-dipole response differs only by :math:`\sigma` from the # electric dipole-dipole response, hence a factor of 0.01. # # The units of the response only depend on the receiver, what the receiver # actually measures. So if we change the source from a dipole to a loop it does # not change the units of the received responses. # # 2. Using an electric loop # ------------------------- # # We can use ``empymod`` to model arbitrary shaped sources by simply adding # point dipole sources together. This is what ``empymod`` does internally to # model a finite length dipole (``empymod.bipole``), where it uses a Gaussian # quadrature with a few points. # # Here, we are going to compare the result from ``loop``, as presented above, # with two different simulations of an electric loop source, assuming a square # loop which sides are 1 m long, so the area correspond to one square meter. # # Plotting routines # ~~~~~~~~~~~~~~~~~ def plot_result(data1, data2, x, title, vmin=-15., vmax=-7., rx=0): """Plot result.""" fig = plt.figure(figsize=(18, 10)) def setplot(name): """Plot settings""" plt.title(name) plt.xlim(rx.min(), rx.max()) plt.ylim(rx.min(), rx.max()) plt.axis("equal") # Plot Re(data) ax1 = plt.subplot(231) setplot(r"(a) |Re(magn.dip*iwu)|") cf0 = plt.pcolormesh(rx, rx, np.log10(np.abs(data1.real)), linewidth=0, rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax, shading='nearest') ax2 = plt.subplot(232) setplot(r"(b) |Re(el. square)|") plt.pcolormesh(rx, rx, np.log10(np.abs(data2.real)), linewidth=0, rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax, shading='nearest') ax3 = plt.subplot(233) setplot(r"(c) Error real part") error_r = np.abs((data1.real-data2.real)/data1.real)*100 cf2 = plt.pcolormesh(rx, rx, np.log10(error_r), vmin=-2, vmax=2, linewidth=0, rasterized=True, cmap=plt.cm.get_cmap("RdBu_r", 8), shading='nearest') # Plot Im(data) ax4 = plt.subplot(234) setplot(r"(d) |Im(magn.dip*iwu)|") plt.pcolormesh(rx, rx, np.log10(np.abs(data1.imag)), linewidth=0, rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax, shading='nearest') ax5 = plt.subplot(235) setplot(r"(e) |Im(el. 
square)|") plt.pcolormesh(rx, rx, np.log10(np.abs(data2.imag)), linewidth=0, rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax, shading='nearest') ax6 = plt.subplot(236) setplot(r"(f) Error imag part") error_i = np.abs((data1.imag-data2.imag)/data1.imag)*100 plt.pcolormesh(rx, rx, np.log10(error_i), vmin=-2, vmax=2, linewidth=0, rasterized=True, cmap=plt.cm.get_cmap("RdBu_r", 8), shading='nearest') # Colorbars fig.colorbar(cf0, ax=[ax1, ax2, ax3], label=r"$\log_{10}$ Amplitude (A/m)") cbar = fig.colorbar(cf2, ax=[ax4, ax5, ax6], label=r"Relative Error") cbar.set_ticks([-2, -1, 0, 1, 2]) cbar.ax.set_yticklabels([r"$0.01\,\%$", r"$0.1\,\%$", r"$1\,\%$", r"$10\,\%$", r"$100\,\%$"]) # Axis label fig.text(0.4, 0.05, "Inline Offset (m)", fontsize=14) fig.text(0.08, 0.5, 'Crossline Offset (m)', rotation=90, fontsize=14) # Title fig.suptitle(title, y=.95, fontsize=20) plt.show() ############################################################################### # Model parameters # ~~~~~~~~~~~~~~~~ # # - Resistivity: :math:`1 \Omega` m fullspace # # Survey # ~~~~~~ # # - Source at [0, 0, 0] # - Receivers at [x, y, 10] # - frequencies: 100 Hz. # - Offsets: -250 m - 250 m # Survey parameters x = ((np.arange(502))-250.5) rx = np.repeat([x, ], np.size(x), axis=0) ry = rx.transpose() rxx = rx.ravel() ryy = ry.ravel() # Model model = { 'depth': [], # Fullspace 'res': 1., # 1 Ohm.m 'freqtime': 100, # 100 Hz 'htarg': {'pts_per_dec': -1}, 'verb': 1, } ############################################################################### # Compute ``empymod.loop`` result # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ epm_loop = empymod.loop(src=[0, 0, 0, 0, 90], rec=[rxx, ryy, 10, 0, 0], **model).reshape(np.shape(rx)) ############################################################################### # 2.1 Point dipoles at (x, y) using ``empymod.dipole`` # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # - (0.5, 0), ab=42 # - (0, 0.5), ab=41 # - (-0.5, 0), ab=-42 # - (0, -0.5), ab=-41 # rec_dip = [rxx, ryy, 10] square_pts = +empymod.dipole(src=[+0.5, +0.0, 0], rec=rec_dip, ab=42, **model).reshape(np.shape(rx)) square_pts += empymod.dipole(src=[+0.0, +0.5, 0], rec=rec_dip, ab=41, **model).reshape(np.shape(rx)) square_pts -= empymod.dipole(src=[-0.5, +0.0, 0], rec=rec_dip, ab=42, **model).reshape(np.shape(rx)) square_pts -= empymod.dipole(src=[+0.0, -0.5, 0], rec=rec_dip, ab=41, **model).reshape(np.shape(rx)) plot_result(epm_loop, square_pts, x, 'Loop made of four points', vmin=-13, vmax=-5, rx=x) ############################################################################### # 2.2 Finite length dipoles using ``empymod.bipole`` # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Each simulated with a 5pt Gaussian quadrature. The dipoles are: # # - (-0.5, -0.5) to (+0.5, -0.5) # - (+0.5, -0.5) to (+0.5, +0.5) # - (+0.5, +0.5) to (-0.5, +0.5) # - (-0.5, +0.5) to (-0.5, -0.5) inp_dip = { 'rec': [rxx, ryy, 10, 0, 0], 'mrec': True, 'srcpts': 5 # Gaussian quadr. with 5 pts to simulate a finite length dip. 
} square_dip = +empymod.bipole(src=[+0.5, +0.5, -0.5, +0.5, 0, 0], **inp_dip, **model) square_dip += empymod.bipole(src=[+0.5, -0.5, +0.5, +0.5, 0, 0], **inp_dip, **model) square_dip += empymod.bipole(src=[-0.5, -0.5, +0.5, -0.5, 0, 0], **inp_dip, **model) square_dip += empymod.bipole(src=[-0.5, +0.5, -0.5, -0.5, 0, 0], **inp_dip, **model) square_dip = square_dip.reshape(np.shape(rx)) plot_result(epm_loop, square_dip, x, 'Loop made of four dipoles', vmin=-13, vmax=-5, rx=x) ############################################################################### # Close to the source the results between # # - (1) a magnetic dipole, # - (2) an electric loop conisting of four point sources, and # - (3) an electric loop consisting of four finite length dipoles, # # differ, as expected. However, for the vast majority they are identical. Skin # depth for our example with :math:`\rho=1\Omega` m and :math:`f=100` Hz is # roughly 50 m, so the results are basically identical for 4-5 skin depths, # after which the signal is very low. empymod.Report()
[ "numpy.log10", "empymod.loop", "numpy.arange", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylim", "matplotlib.pyplot.axis", "matplotlib.pyplot.yscale", "numpy.logspace", "numpy.abs", "matplotlib.pyplot.gca", "numpy.size", "empymod.Report", "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.title", "numpy.shape", "empymod.bipole", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "empymod.dipole", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xscale" ]
[((2839, 2862), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2852, 2862), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3284), 'numpy.logspace', 'np.logspace', (['(-1)', '(5)', '(301)'], {}), '(-1, 5, 301)\n', (3272, 3284), True, 'import numpy as np\n'), ((3312, 3335), 'numpy.logspace', 'np.logspace', (['(-6)', '(0)', '(301)'], {}), '(-6, 0, 301)\n', (3323, 3335), True, 'import numpy as np\n'), ((3848, 3869), 'empymod.bipole', 'empymod.bipole', ([], {}), '(**inp)\n', (3862, 3869), False, 'import empymod\n'), ((3884, 3927), 'empymod.bipole', 'empymod.bipole', ([], {'msrc': '(True)', 'mrec': '(True)'}), '(msrc=True, mrec=True, **inp)\n', (3898, 3927), False, 'import empymod\n'), ((3940, 3959), 'empymod.loop', 'empymod.loop', ([], {}), '(**inp)\n', (3952, 3959), False, 'import empymod\n'), ((4020, 4052), 'empymod.bipole', 'empymod.bipole', ([], {'signal': '(-1)'}), '(signal=-1, **inp)\n', (4034, 4052), False, 'import empymod\n'), ((4069, 4100), 'empymod.bipole', 'empymod.bipole', ([], {'signal': '(0)'}), '(signal=0, **inp)\n', (4083, 4100), False, 'import empymod\n'), ((4117, 4148), 'empymod.bipole', 'empymod.bipole', ([], {'signal': '(1)'}), '(signal=1, **inp)\n', (4131, 4148), False, 'import empymod\n'), ((4176, 4230), 'empymod.bipole', 'empymod.bipole', ([], {'signal': '(-1)', 'msrc': '(True)', 'mrec': '(True)'}), '(signal=-1, msrc=True, mrec=True, **inp)\n', (4190, 4230), False, 'import empymod\n'), ((4244, 4297), 'empymod.bipole', 'empymod.bipole', ([], {'signal': '(0)', 'msrc': '(True)', 'mrec': '(True)'}), '(signal=0, msrc=True, mrec=True, **inp)\n', (4258, 4297), False, 'import empymod\n'), ((4311, 4364), 'empymod.bipole', 'empymod.bipole', ([], {'signal': '(1)', 'msrc': '(True)', 'mrec': '(True)'}), '(signal=1, msrc=True, mrec=True, **inp)\n', (4325, 4364), False, 'import empymod\n'), ((4393, 4423), 'empymod.loop', 'empymod.loop', ([], {'signal': '(-1)'}), '(signal=-1, **inp)\n', (4405, 4423), False, 'import empymod\n'), ((4437, 4466), 'empymod.loop', 'empymod.loop', ([], {'signal': '(0)'}), '(signal=0, **inp)\n', (4449, 4466), False, 'import empymod\n'), ((4480, 4509), 'empymod.loop', 'empymod.loop', ([], {'signal': '(1)'}), '(signal=1, **inp)\n', (4492, 4509), False, 'import empymod\n'), ((4664, 4691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (4674, 4691), True, 'import matplotlib.pyplot as plt\n'), ((4712, 4728), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (4723, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4729, 4779), 'matplotlib.pyplot.title', 'plt.title', (['"""$G^{ee}_{\\\\rm{dip-dip}}$"""'], {'fontsize': 'fs'}), "('$G^{ee}_{\\\\rm{dip-dip}}$', fontsize=fs)\n", (4738, 4779), True, 'import matplotlib.pyplot as plt\n'), ((4780, 4833), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 'fee_dip_dip.real', '"""C0-"""'], {'label': '"""Real"""'}), "(freq, fee_dip_dip.real, 'C0-', label='Real')\n", (4788, 4833), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4875), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', '(-fee_dip_dip.real)', '"""C0--"""'], {}), "(freq, -fee_dip_dip.real, 'C0--')\n", (4842, 4875), True, 'import matplotlib.pyplot as plt\n'), ((4876, 4929), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 'fee_dip_dip.imag', '"""C1-"""'], {'label': '"""Imag"""'}), "(freq, fee_dip_dip.imag, 'C1-', label='Imag')\n", (4884, 4929), True, 'import matplotlib.pyplot as plt\n'), ((4930, 4971), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 
'(-fee_dip_dip.imag)', '"""C1--"""'], {}), "(freq, -fee_dip_dip.imag, 'C1--')\n", (4938, 4971), True, 'import matplotlib.pyplot as plt\n'), ((4972, 4989), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (4982, 4989), True, 'import matplotlib.pyplot as plt\n'), ((4990, 5007), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5000, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5032), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[5e-08, 2e-05]'], {}), '([5e-08, 2e-05])\n', (5016, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5054), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (5049, 5054), True, 'import matplotlib.pyplot as plt\n'), ((5055, 5105), 'matplotlib.pyplot.title', 'plt.title', (['"""$G^{mm}_{\\\\rm{dip-dip}}$"""'], {'fontsize': 'fs'}), "('$G^{mm}_{\\\\rm{dip-dip}}$', fontsize=fs)\n", (5064, 5105), True, 'import matplotlib.pyplot as plt\n'), ((5106, 5159), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 'fmm_dip_dip.real', '"""C0-"""'], {'label': '"""Real"""'}), "(freq, fmm_dip_dip.real, 'C0-', label='Real')\n", (5114, 5159), True, 'import matplotlib.pyplot as plt\n'), ((5160, 5201), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', '(-fmm_dip_dip.real)', '"""C0--"""'], {}), "(freq, -fmm_dip_dip.real, 'C0--')\n", (5168, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5202, 5255), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 'fmm_dip_dip.imag', '"""C1-"""'], {'label': '"""Imag"""'}), "(freq, fmm_dip_dip.imag, 'C1-', label='Imag')\n", (5210, 5255), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5297), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', '(-fmm_dip_dip.imag)', '"""C1--"""'], {}), "(freq, -fmm_dip_dip.imag, 'C1--')\n", (5264, 5297), True, 'import matplotlib.pyplot as plt\n'), ((5298, 5315), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (5308, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5316, 5333), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5326, 5333), True, 'import matplotlib.pyplot as plt\n'), ((5334, 5379), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {'fontsize': '(fs - 2)'}), "('Frequency (Hz)', fontsize=fs - 2)\n", (5344, 5379), True, 'import matplotlib.pyplot as plt\n'), ((5378, 5390), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5388, 5390), True, 'import matplotlib.pyplot as plt\n'), ((5392, 5408), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (5403, 5408), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5460), 'matplotlib.pyplot.title', 'plt.title', (['"""$G^{mm}_{\\\\rm{loop-dip}}$"""'], {'fontsize': 'fs'}), "('$G^{mm}_{\\\\rm{loop-dip}}$', fontsize=fs)\n", (5418, 5460), True, 'import matplotlib.pyplot as plt\n'), ((5461, 5512), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 'f_loo_dip.real', '"""C0-"""'], {'label': '"""Real"""'}), "(freq, f_loo_dip.real, 'C0-', label='Real')\n", (5469, 5512), True, 'import matplotlib.pyplot as plt\n'), ((5513, 5552), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', '(-f_loo_dip.real)', '"""C0--"""'], {}), "(freq, -f_loo_dip.real, 'C0--')\n", (5521, 5552), True, 'import matplotlib.pyplot as plt\n'), ((5553, 5604), 'matplotlib.pyplot.plot', 'plt.plot', (['freq', 'f_loo_dip.imag', '"""C1-"""'], {'label': '"""Imag"""'}), "(freq, f_loo_dip.imag, 'C1-', label='Imag')\n", (5561, 5604), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5644), 'matplotlib.pyplot.plot', 'plt.plot', 
(['freq', '(-f_loo_dip.imag)', '"""C1--"""'], {}), "(freq, -f_loo_dip.imag, 'C1--')\n", (5613, 5644), True, 'import matplotlib.pyplot as plt\n'), ((5645, 5662), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (5655, 5662), True, 'import matplotlib.pyplot as plt\n'), ((5663, 5680), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5673, 5680), True, 'import matplotlib.pyplot as plt\n'), ((5681, 5705), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[5e-10, 2e-07]'], {}), '([5e-10, 2e-07])\n', (5689, 5705), True, 'import matplotlib.pyplot as plt\n'), ((5913, 5929), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (5924, 5929), True, 'import matplotlib.pyplot as plt\n'), ((5930, 5984), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'ee_dip_dip_of', '"""C0-"""'], {'label': '"""Step-Off"""'}), "(time, ee_dip_dip_of, 'C0-', label='Step-Off')\n", (5938, 5984), True, 'import matplotlib.pyplot as plt\n'), ((5985, 6023), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-ee_dip_dip_of)', '"""C0--"""'], {}), "(time, -ee_dip_dip_of, 'C0--')\n", (5993, 6023), True, 'import matplotlib.pyplot as plt\n'), ((6024, 6077), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'ee_dip_dip_im', '"""C1-"""'], {'label': '"""Impulse"""'}), "(time, ee_dip_dip_im, 'C1-', label='Impulse')\n", (6032, 6077), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6116), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-ee_dip_dip_im)', '"""C1--"""'], {}), "(time, -ee_dip_dip_im, 'C1--')\n", (6086, 6116), True, 'import matplotlib.pyplot as plt\n'), ((6117, 6170), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'ee_dip_dip_on', '"""C2-"""'], {'label': '"""Step-On"""'}), "(time, ee_dip_dip_on, 'C2-', label='Step-On')\n", (6125, 6170), True, 'import matplotlib.pyplot as plt\n'), ((6171, 6209), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-ee_dip_dip_on)', '"""C2--"""'], {}), "(time, -ee_dip_dip_on, 'C2--')\n", (6179, 6209), True, 'import matplotlib.pyplot as plt\n'), ((6210, 6227), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (6220, 6227), True, 'import matplotlib.pyplot as plt\n'), ((6228, 6245), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6238, 6245), True, 'import matplotlib.pyplot as plt\n'), ((6247, 6263), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (6258, 6263), True, 'import matplotlib.pyplot as plt\n'), ((6264, 6315), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dip_dip_of', '"""C0-"""'], {'label': '"""Step-Off"""'}), "(time, dip_dip_of, 'C0-', label='Step-Off')\n", (6272, 6315), True, 'import matplotlib.pyplot as plt\n'), ((6316, 6351), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-dip_dip_of)', '"""C0--"""'], {}), "(time, -dip_dip_of, 'C0--')\n", (6324, 6351), True, 'import matplotlib.pyplot as plt\n'), ((6352, 6402), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dip_dip_im', '"""C1-"""'], {'label': '"""Impulse"""'}), "(time, dip_dip_im, 'C1-', label='Impulse')\n", (6360, 6402), True, 'import matplotlib.pyplot as plt\n'), ((6403, 6438), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-dip_dip_im)', '"""C1--"""'], {}), "(time, -dip_dip_im, 'C1--')\n", (6411, 6438), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6489), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dip_dip_on', '"""C2-"""'], {'label': '"""Step-On"""'}), "(time, dip_dip_on, 'C2-', label='Step-On')\n", (6447, 6489), True, 'import matplotlib.pyplot as plt\n'), ((6490, 
6525), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-dip_dip_on)', '"""C2--"""'], {}), "(time, -dip_dip_on, 'C2--')\n", (6498, 6525), True, 'import matplotlib.pyplot as plt\n'), ((6526, 6543), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (6536, 6543), True, 'import matplotlib.pyplot as plt\n'), ((6544, 6561), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6554, 6561), True, 'import matplotlib.pyplot as plt\n'), ((6562, 6601), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {'fontsize': '(fs - 2)'}), "('Time (s)', fontsize=fs - 2)\n", (6572, 6601), True, 'import matplotlib.pyplot as plt\n'), ((6600, 6612), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6610, 6612), True, 'import matplotlib.pyplot as plt\n'), ((6614, 6630), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (6625, 6630), True, 'import matplotlib.pyplot as plt\n'), ((6631, 6682), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'loo_dip_of', '"""C0-"""'], {'label': '"""Step-Off"""'}), "(time, loo_dip_of, 'C0-', label='Step-Off')\n", (6639, 6682), True, 'import matplotlib.pyplot as plt\n'), ((6683, 6718), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-loo_dip_of)', '"""C0--"""'], {}), "(time, -loo_dip_of, 'C0--')\n", (6691, 6718), True, 'import matplotlib.pyplot as plt\n'), ((6719, 6769), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'loo_dip_im', '"""C1-"""'], {'label': '"""Impulse"""'}), "(time, loo_dip_im, 'C1-', label='Impulse')\n", (6727, 6769), True, 'import matplotlib.pyplot as plt\n'), ((6770, 6805), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-loo_dip_im)', '"""C1--"""'], {}), "(time, -loo_dip_im, 'C1--')\n", (6778, 6805), True, 'import matplotlib.pyplot as plt\n'), ((6806, 6856), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'loo_dip_on', '"""C2-"""'], {'label': '"""Step-On"""'}), "(time, loo_dip_on, 'C2-', label='Step-On')\n", (6814, 6856), True, 'import matplotlib.pyplot as plt\n'), ((6857, 6892), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '(-loo_dip_on)', '"""C2--"""'], {}), "(time, -loo_dip_on, 'C2--')\n", (6865, 6892), True, 'import matplotlib.pyplot as plt\n'), ((6893, 6910), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (6903, 6910), True, 'import matplotlib.pyplot as plt\n'), ((6911, 6928), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6921, 6928), True, 'import matplotlib.pyplot as plt\n'), ((7247, 7265), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7263, 7265), True, 'import matplotlib.pyplot as plt\n'), ((7266, 7276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7274, 7276), True, 'import matplotlib.pyplot as plt\n'), ((13819, 13889), 'empymod.bipole', 'empymod.bipole', ([], {'src': '[+0.5, -0.5, +0.5, +0.5, 0, 0]'}), '(src=[+0.5, -0.5, +0.5, +0.5, 0, 0], **inp_dip, **model)\n', (13833, 13889), False, 'import empymod\n'), ((13933, 14003), 'empymod.bipole', 'empymod.bipole', ([], {'src': '[-0.5, -0.5, +0.5, -0.5, 0, 0]'}), '(src=[-0.5, -0.5, +0.5, -0.5, 0, 0], **inp_dip, **model)\n', (13947, 14003), False, 'import empymod\n'), ((14047, 14117), 'empymod.bipole', 'empymod.bipole', ([], {'src': '[-0.5, +0.5, -0.5, -0.5, 0, 0]'}), '(src=[-0.5, +0.5, -0.5, -0.5, 0, 0], **inp_dip, **model)\n', (14061, 14117), False, 'import empymod\n'), ((14851, 14867), 'empymod.Report', 'empymod.Report', ([], {}), '()\n', (14865, 14867), False, 'import empymod\n'), ((8825, 8853), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (8835, 8853), True, 'import matplotlib.pyplot as plt\n'), ((9061, 9077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (9072, 9077), True, 'import matplotlib.pyplot as plt\n'), ((9328, 9344), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (9339, 9344), True, 'import matplotlib.pyplot as plt\n'), ((9575, 9591), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (9586, 9591), True, 'import matplotlib.pyplot as plt\n'), ((9948, 9964), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (9959, 9964), True, 'import matplotlib.pyplot as plt\n'), ((10197, 10213), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (10208, 10213), True, 'import matplotlib.pyplot as plt\n'), ((10444, 10460), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (10455, 10460), True, 'import matplotlib.pyplot as plt\n'), ((11309, 11319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11317, 11319), True, 'import matplotlib.pyplot as plt\n'), ((11642, 11656), 'numpy.arange', 'np.arange', (['(502)'], {}), '(502)\n', (11651, 11656), True, 'import numpy as np\n'), ((11687, 11697), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (11694, 11697), True, 'import numpy as np\n'), ((12197, 12209), 'numpy.shape', 'np.shape', (['rx'], {}), '(rx)\n', (12205, 12209), True, 'import numpy as np\n'), ((12771, 12783), 'numpy.shape', 'np.shape', (['rx'], {}), '(rx)\n', (12779, 12783), True, 'import numpy as np\n'), ((12901, 12913), 'numpy.shape', 'np.shape', (['rx'], {}), '(rx)\n', (12909, 12913), True, 'import numpy as np\n'), ((13031, 13043), 'numpy.shape', 'np.shape', (['rx'], {}), '(rx)\n', (13039, 13043), True, 'import numpy as np\n'), ((13705, 13775), 'empymod.bipole', 'empymod.bipole', ([], {'src': '[+0.5, +0.5, -0.5, +0.5, 0, 0]'}), '(src=[+0.5, +0.5, -0.5, +0.5, 0, 0], **inp_dip, **model)\n', (13719, 13775), False, 'import empymod\n'), ((14179, 14191), 'numpy.shape', 'np.shape', (['rx'], {}), '(rx)\n', (14187, 14191), True, 'import numpy as np\n'), ((8914, 8929), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (8923, 8929), True, 'import matplotlib.pyplot as plt\n'), ((9012, 9029), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (9020, 9029), True, 'import matplotlib.pyplot as plt\n'), ((9642, 9688), 'numpy.abs', 'np.abs', (['((data1.real - data2.real) / data1.real)'], {}), '((data1.real - data2.real) / data1.real)\n', (9648, 9688), True, 'import numpy as np\n'), ((9722, 9739), 'numpy.log10', 'np.log10', (['error_r'], {}), '(error_r)\n', (9730, 9739), True, 'import numpy as np\n'), ((10511, 10557), 'numpy.abs', 'np.abs', (['((data1.imag - data2.imag) / data1.imag)'], {}), '((data1.imag - data2.imag) / data1.imag)\n', (10517, 10557), True, 'import numpy as np\n'), ((10585, 10602), 'numpy.log10', 'np.log10', (['error_i'], {}), '(error_i)\n', (10593, 10602), True, 'import numpy as np\n'), ((12095, 12164), 'empymod.loop', 'empymod.loop', ([], {'src': '[0, 0, 0, 0, 90]', 'rec': '[rxx, ryy, 10, 0, 0]'}), '(src=[0, 0, 0, 0, 90], rec=[rxx, ryy, 10, 0, 0], **model)\n', (12107, 12164), False, 'import empymod\n'), ((12641, 12653), 'numpy.shape', 'np.shape', (['rx'], {}), '(rx)\n', (12649, 12653), True, 'import numpy as np\n'), ((12669, 12733), 'empymod.dipole', 'empymod.dipole', ([], {'src': '[+0.0, +0.5, 0]', 'rec': 'rec_dip', 'ab': '(41)'}), '(src=[+0.0, 
+0.5, 0], rec=rec_dip, ab=41, **model)\n', (12683, 12733), False, 'import empymod\n'), ((12799, 12863), 'empymod.dipole', 'empymod.dipole', ([], {'src': '[-0.5, +0.0, 0]', 'rec': 'rec_dip', 'ab': '(42)'}), '(src=[-0.5, +0.0, 0], rec=rec_dip, ab=42, **model)\n', (12813, 12863), False, 'import empymod\n'), ((12929, 12993), 'empymod.dipole', 'empymod.dipole', ([], {'src': '[+0.0, -0.5, 0]', 'rec': 'rec_dip', 'ab': '(41)'}), '(src=[+0.0, -0.5, 0], rec=rec_dip, ab=41, **model)\n', (12943, 12993), False, 'import empymod\n'), ((5877, 5886), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5884, 5886), True, 'import matplotlib.pyplot as plt\n'), ((7096, 7105), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7103, 7105), True, 'import matplotlib.pyplot as plt\n'), ((9159, 9177), 'numpy.abs', 'np.abs', (['data1.real'], {}), '(data1.real)\n', (9165, 9177), True, 'import numpy as np\n'), ((9418, 9436), 'numpy.abs', 'np.abs', (['data2.real'], {}), '(data2.real)\n', (9424, 9436), True, 'import numpy as np\n'), ((9843, 9871), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdBu_r"""', '(8)'], {}), "('RdBu_r', 8)\n", (9858, 9871), True, 'import matplotlib.pyplot as plt\n'), ((10040, 10058), 'numpy.abs', 'np.abs', (['data1.imag'], {}), '(data1.imag)\n', (10046, 10058), True, 'import numpy as np\n'), ((10287, 10305), 'numpy.abs', 'np.abs', (['data2.imag'], {}), '(data2.imag)\n', (10293, 10305), True, 'import numpy as np\n'), ((10694, 10722), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdBu_r"""', '(8)'], {}), "('RdBu_r', 8)\n", (10709, 10722), True, 'import matplotlib.pyplot as plt\n'), ((12539, 12603), 'empymod.dipole', 'empymod.dipole', ([], {'src': '[+0.5, +0.0, 0]', 'rec': 'rec_dip', 'ab': '(42)'}), '(src=[+0.5, +0.0, 0], rec=rec_dip, ab=42, **model)\n', (12553, 12603), False, 'import empymod\n')]
import numpy as np import pandas as pd from sklearn import preprocessing import math def load_datasets_feature(filename): features_df = pd.read_csv(filename, delimiter='\\s*,\\s*', header=0) return features_df def load_join_data3(features_df, result_file, histograms_path, num_rows, num_columns): cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration'] # Result DF contains dataset names, result cardinality, # of MBR tests, and duration in seconds result_df = pd.read_csv(result_file, delimiter='\\s*,\\s*', header=None, names=cols) # result_df = result_df.sample(frac=1) # Add dataset information of the first (left) dataset result_df = pd.merge(result_df, features_df, left_on='dataset1', right_on='dataset_name') # Add dataset information for the second (right) dataset result_df = pd.merge(result_df, features_df, left_on='dataset2', right_on='dataset_name') # Load histograms ds1_histograms, ds2_histograms, ds1_original_histograms, ds2_original_histograms, ds_all_histogram, ds_bops_histogram = load_histograms( result_df, histograms_path, num_rows, num_columns) #print(ds1_histograms.shape) #print(result_df.shape) #exit(0) # Compute BOPS # First, do an element-wise multiplication of the two histograms bops = np.multiply(ds1_original_histograms, ds2_original_histograms) # Reshape into a two dimensional array. First dimension represents the dataset number, e.g., first entry # represents the first dataset of each. Second dimension represents the values in the multiplied histograms bops = bops.reshape((bops.shape[0], num_rows * num_columns)) # Sum the values in each row to compute the final BOPS value bops_values = np.sum(bops, axis=1) # The final reshape puts each BOPS value in an array with a single value. Thus it produces a 2D array. bops_values = bops_values.reshape((bops_values.shape[0], 1)) result_df['bops'] = bops_values cardinality_x = result_df['cardinality_x'] cardinality_y = result_df['cardinality_y'] result_size = result_df['result_size'] mbr_tests = result_df['mbr_tests'] # Compute the join selectivity as result_cardinality/(cardinality x * cardinality y) result_df['join_selectivity'] = result_size / (cardinality_x * cardinality_y) # Compute the MBR selectivity in the same way result_df['mbr_tests_selectivity'] = mbr_tests / (cardinality_x * cardinality_y) # Apply MinMaxScaler to normalize numeric columns used in either training or testing to the range [0, 1] # The following transformation tries to adjust relevant columns to be scaled together column_groups = [ ['duration'], ['AVG area_x', 'AVG area_y'], ['AVG x_x', 'AVG y_x', 'AVG x_y', 'AVG y_y'], ['E0_x', 'E2_x', 'E0_y', 'E2_y'], ['join_selectivity'], ['mbr_tests_selectivity'], ['cardinality_x', 'cardinality_y', 'result_size'], ['bops', 'mbr_tests'] ] for column_group in column_groups: input_data = result_df[column_group].to_numpy() original_shape = input_data.shape reshaped = input_data.reshape(input_data.size, 1) reshaped = preprocessing.minmax_scale(reshaped) result_df[column_group] = reshaped.reshape(original_shape) #result_df[column_group] = scaler.fit_transform(result_df[column_group]) return result_df, ds1_histograms, ds2_histograms, ds_all_histogram, ds_bops_histogram def load_join_data(features_df, result_file, histograms_path, num_rows, num_columns): cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration'] # Result DF contains dataset names, result cardinality, # of MBR tests, and duration in seconds result_df = pd.read_csv(result_file, delimiter=',', header=None, names=cols) # result_df = 
result_df.sample(frac=1)

    # Add dataset information of the first (left) dataset
    result_df = pd.merge(result_df, features_df, left_on='dataset1', right_on='dataset_name')

    # Add dataset information for the second (right) dataset
    result_df = pd.merge(result_df, features_df, left_on='dataset2', right_on='dataset_name')

    # Load histograms
    ds1_histograms, ds2_histograms, ds1_original_histograms, ds2_original_histograms, ds_all_histogram, ds_bops_histogram = load_histograms(
        result_df, histograms_path, num_rows, num_columns)

    # Compute BOPS
    # First, do an element-wise multiplication of the two histograms
    bops = np.multiply(ds1_original_histograms, ds2_original_histograms)

    # Reshape into a two dimensional array. First dimension represents the dataset number, e.g., first entry
    # represents the first dataset of each. Second dimension represents the values in the multiplied histograms
    bops = bops.reshape((bops.shape[0], num_rows * num_columns))

    # Sum the values in each row to compute the final BOPS value
    bops_values = np.sum(bops, axis=1)

    # The final reshape puts each BOPS value in an array with a single value. Thus it produces a 2D array.
    bops_values = bops_values.reshape((bops_values.shape[0], 1))

    # result_df['bops'] = bops_values
    cardinality_x = result_df[' cardinality_x']
    cardinality_y = result_df[' cardinality_y']
    result_size = result_df['result_size']
    mbr_tests = result_df['mbr_tests']

    # Compute the join selectivity as result_cardinality/(cardinality x * cardinality y), scaled by 1E5 to keep the values in a trainable range
    join_selectivity = result_size / (cardinality_x * cardinality_y)
    join_selectivity = join_selectivity * 1E5

    # Compute the MBR selectivity in the same way
    mbr_tests_selectivity = mbr_tests / (cardinality_x * cardinality_y)
    mbr_tests_selectivity = mbr_tests_selectivity * 1E5

    duration = result_df['duration']
    dataset1 = result_df['dataset1']
    dataset2 = result_df['dataset2']
    # result_df = result_df.drop(columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', ' cardinality_x', ' cardinality_y'])
    # result_df = result_df.drop(
    #     columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y'])
    result_df = result_df.drop(
        columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', ' cardinality_x',
                 ' cardinality_y', 'mbr_tests', 'duration'])

    # Normalize all the values using MinMax scaler
    # These values are [AVG area_x, AVG x_x, AVG y_x, E0_x, E2_x, AVG area_y, AVG x_y, AVG y_y, E0_y, E2_y]
    x = result_df.values
    min_max_scaler = preprocessing.MinMaxScaler()
    x_scaled = min_max_scaler.fit_transform(x)
    result_df = pd.DataFrame(x_scaled, columns=result_df.columns)
    result_df['cardinality_x'] = cardinality_x
    result_df['cardinality_y'] = cardinality_y
    result_df['bops'] = bops_values
    result_df['dataset1'] = dataset1
    result_df['dataset2'] = dataset2
    result_df.insert(len(result_df.columns), 'result_size', result_size, True)
    result_df.insert(len(result_df.columns), 'join_selectivity', join_selectivity, True)
    result_df.insert(len(result_df.columns), 'mbr_tests', mbr_tests, True)
    result_df.insert(len(result_df.columns), 'mbr_tests_selectivity', mbr_tests_selectivity, True)
    result_df.insert(len(result_df.columns), 'duration', duration, True)

    return result_df, ds1_histograms, ds2_histograms, ds_all_histogram, ds_bops_histogram


def load_join_data2(features_df, result_file, histograms_path, num_rows, num_columns):
    cols = ['count', 'dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration']
    result_df = pd.read_csv(result_file, delimiter=',', header=None, names=cols)
    # result_df = 
result_df.sample(frac=1) result_df = pd.merge(result_df, features_df, left_on='dataset1', right_on='dataset_name') result_df = pd.merge(result_df, features_df, left_on='dataset2', right_on='dataset_name') # Load histograms ds1_histograms, ds2_histograms, ds1_original_histograms, ds2_original_histograms, ds_all_histogram, ds_bops_histogram = load_histograms2( result_df, histograms_path, num_rows, num_columns) # Compute BOPS bops = np.multiply(ds1_original_histograms, ds2_original_histograms) # print (bops) bops = bops.reshape((bops.shape[0], num_rows * num_columns)) bops_values = np.sum(bops, axis=1) bops_values = bops_values.reshape((bops_values.shape[0], 1)) # result_df['bops'] = bops_values cardinality_x = result_df[' cardinality_x'] cardinality_y = result_df[' cardinality_y'] result_size = result_df['result_size'] mbr_tests = result_df['mbr_tests'] join_selectivity = result_size / (cardinality_x * cardinality_y) join_selectivity = join_selectivity * math.pow(10, 9) dataset1 = result_df['dataset1'] dataset2 = result_df['dataset2'] # result_df = result_df.drop(columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', ' cardinality_x', ' cardinality_y']) # result_df = result_df.drop( # columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y']) result_df = result_df.drop( columns=['count', 'result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', ' cardinality_x', ' cardinality_y', 'mbr_tests', 'duration']) x = result_df.values min_max_scaler = preprocessing.MinMaxScaler() x_scaled = min_max_scaler.fit_transform(x) result_df = pd.DataFrame(x_scaled) result_df['cardinality_x'] = cardinality_x result_df['cardinality_y'] = cardinality_y result_df['bops'] = bops_values result_df['dataset1'] = dataset1 result_df['dataset2'] = dataset2 result_df.insert(len(result_df.columns), 'result_size', result_size, True) result_df.insert(len(result_df.columns), 'join_selectivity', join_selectivity, True) result_df.insert(len(result_df.columns), 'mbr_tests', join_selectivity, True) # print (len(result_df)) # result_df.to_csv('result_df.csv') return result_df, ds1_histograms, ds2_histograms, ds_all_histogram, ds_bops_histogram def load_histogram(histograms_path, num_rows, num_columns, dataset): hist = np.genfromtxt('{}/{}x{}/{}'.format(histograms_path, num_rows, num_columns, dataset), delimiter=',') normalized_hist = hist / hist.max() normalized_hist = normalized_hist.reshape((hist.shape[0], hist.shape[1], 1)) hist = hist.reshape((hist.shape[0], hist.shape[1], 1)) return normalized_hist, hist def load_histogram2(histograms_path, num_rows, num_columns, count, dataset): hist = np.genfromtxt('{}/{}x{}/{}/{}'.format(histograms_path, num_rows, num_columns, count, dataset), delimiter=',') normalized_hist = hist / hist.max() normalized_hist = normalized_hist.reshape((hist.shape[0], hist.shape[1], 1)) hist = hist.reshape((hist.shape[0], hist.shape[1], 1)) return normalized_hist, hist def load_histograms(result_df, histograms_path, num_rows, num_columns): ds1_histograms = [] ds2_histograms = [] ds1_original_histograms = [] ds2_original_histograms = [] ds_all_histogram = [] ds_bops_histogram = [] for dataset in result_df['dataset1']: normalized_hist, hist = load_histogram(histograms_path, num_rows, num_columns, dataset) ds1_histograms.append(normalized_hist) ds1_original_histograms.append(hist) for dataset in result_df['dataset2']: normalized_hist, hist = load_histogram(histograms_path, num_rows, num_columns, dataset) 
ds2_histograms.append(normalized_hist) ds2_original_histograms.append(hist) for i in range(len(ds1_histograms)): hist1 = ds1_original_histograms[i] hist2 = ds2_original_histograms[i] combined_hist = np.dstack((hist1, hist2)) combined_hist = combined_hist / combined_hist.max() ds_all_histogram.append(combined_hist) for i in range(len(ds1_histograms)): hist1 = ds1_original_histograms[i] hist2 = ds2_original_histograms[i] bops_hist = np.multiply(hist1, hist2) if bops_hist.max() > 0: bops_hist = bops_hist / bops_hist.max() ds_bops_histogram.append(bops_hist) return np.array(ds1_histograms), np.array(ds2_histograms), np.array(ds1_original_histograms), np.array( ds2_original_histograms), np.array(ds_all_histogram), np.array(ds_bops_histogram) def load_histograms2(result_df, histograms_path, num_rows, num_columns): ds1_histograms = [] ds2_histograms = [] ds1_original_histograms = [] ds2_original_histograms = [] ds_all_histogram = [] ds_bops_histogram = [] for index, row in result_df.iterrows(): count = row['count'] dataset1 = row['dataset1'] dataset2 = row['dataset2'] normalized_hist, hist = load_histogram2(histograms_path, num_rows, num_columns, count, dataset1) ds1_histograms.append(normalized_hist) ds1_original_histograms.append(hist) normalized_hist, hist = load_histogram2(histograms_path, num_rows, num_columns, count, dataset2) ds2_histograms.append(normalized_hist) ds2_original_histograms.append(hist) # count = 0 # for dataset in result_df['dataset1']: # count += 1 # normalized_hist, hist = load_histogram2(histograms_path, num_rows, num_columns, count, dataset) # ds1_histograms.append(normalized_hist) # ds1_original_histograms.append(hist) # # count = 0 # for dataset in result_df['dataset2']: # count += 1 # normalized_hist, hist = load_histogram2(histograms_path, num_rows, num_columns, count, dataset) # ds2_histograms.append(normalized_hist) # ds2_original_histograms.append(hist) for i in range(len(ds1_histograms)): hist1 = ds1_original_histograms[i] hist2 = ds2_original_histograms[i] combined_hist = np.dstack((hist1, hist2)) combined_hist = combined_hist / combined_hist.max() ds_all_histogram.append(combined_hist) for i in range(len(ds1_histograms)): hist1 = ds1_original_histograms[i] hist2 = ds2_original_histograms[i] bops_hist = np.multiply(hist1, hist2) if bops_hist.max() > 0: bops_hist = bops_hist / bops_hist.max() ds_bops_histogram.append(bops_hist) return np.array(ds1_histograms), np.array(ds2_histograms), np.array(ds1_original_histograms), np.array( ds2_original_histograms), np.array(ds_all_histogram), np.array(ds_bops_histogram) def main(): print('Dataset utils') # features_df = load_datasets_feature('data/uniform_datasets_features.csv') # load_join_data(features_df, 'data/uniform_result_size.csv', 'data/histogram_uniform_values', 16, 16) features_df = load_datasets_feature('data/data_aligned/aligned_small_datasets_features.csv') join_data, ds1_histograms, ds2_histograms, ds_all_histogram = load_join_data(features_df, 'data/data_aligned/join_results_small_datasets.csv', 'data/data_aligned/histograms/small_datasets', 32, 32) print (join_data) if __name__ == '__main__': main()
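# Illustrative sketch (added; not part of the original file): how the BOPS estimate used
# above is derived for a single dataset pair, using tiny synthetic 2x2 histograms instead
# of the histograms loaded from disk. The values are made up for demonstration only.
if __name__ == '__main__':
    hist_a = np.array([[3, 0], [1, 2]])  # grid-cell point counts of the first dataset
    hist_b = np.array([[1, 4], [0, 2]])  # grid-cell point counts of the second dataset
    # The element-wise product counts candidate pairs per grid cell; their sum is the BOPS value.
    bops_example = np.sum(np.multiply(hist_a, hist_b))
    print('BOPS =', bops_example)  # 3*1 + 0*4 + 1*0 + 2*2 = 7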
[ "numpy.dstack", "numpy.multiply", "pandas.read_csv", "math.pow", "pandas.merge", "numpy.sum", "numpy.array", "sklearn.preprocessing.minmax_scale", "pandas.DataFrame", "sklearn.preprocessing.MinMaxScaler" ]
[((142, 196), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delimiter': '"""\\\\s*,\\\\s*"""', 'header': '(0)'}), "(filename, delimiter='\\\\s*,\\\\s*', header=0)\n", (153, 196), True, 'import pandas as pd\n'), ((501, 573), 'pandas.read_csv', 'pd.read_csv', (['result_file'], {'delimiter': '"""\\\\s*,\\\\s*"""', 'header': 'None', 'names': 'cols'}), "(result_file, delimiter='\\\\s*,\\\\s*', header=None, names=cols)\n", (512, 573), True, 'import pandas as pd\n'), ((691, 768), 'pandas.merge', 'pd.merge', (['result_df', 'features_df'], {'left_on': '"""dataset1"""', 'right_on': '"""dataset_name"""'}), "(result_df, features_df, left_on='dataset1', right_on='dataset_name')\n", (699, 768), True, 'import pandas as pd\n'), ((846, 923), 'pandas.merge', 'pd.merge', (['result_df', 'features_df'], {'left_on': '"""dataset2"""', 'right_on': '"""dataset_name"""'}), "(result_df, features_df, left_on='dataset2', right_on='dataset_name')\n", (854, 923), True, 'import pandas as pd\n'), ((1322, 1383), 'numpy.multiply', 'np.multiply', (['ds1_original_histograms', 'ds2_original_histograms'], {}), '(ds1_original_histograms, ds2_original_histograms)\n', (1333, 1383), True, 'import numpy as np\n'), ((1753, 1773), 'numpy.sum', 'np.sum', (['bops'], {'axis': '(1)'}), '(bops, axis=1)\n', (1759, 1773), True, 'import numpy as np\n'), ((3774, 3838), 'pandas.read_csv', 'pd.read_csv', (['result_file'], {'delimiter': '""","""', 'header': 'None', 'names': 'cols'}), "(result_file, delimiter=',', header=None, names=cols)\n", (3785, 3838), True, 'import pandas as pd\n'), ((3956, 4033), 'pandas.merge', 'pd.merge', (['result_df', 'features_df'], {'left_on': '"""dataset1"""', 'right_on': '"""dataset_name"""'}), "(result_df, features_df, left_on='dataset1', right_on='dataset_name')\n", (3964, 4033), True, 'import pandas as pd\n'), ((4111, 4188), 'pandas.merge', 'pd.merge', (['result_df', 'features_df'], {'left_on': '"""dataset2"""', 'right_on': '"""dataset_name"""'}), "(result_df, features_df, left_on='dataset2', right_on='dataset_name')\n", (4119, 4188), True, 'import pandas as pd\n'), ((4512, 4573), 'numpy.multiply', 'np.multiply', (['ds1_original_histograms', 'ds2_original_histograms'], {}), '(ds1_original_histograms, ds2_original_histograms)\n', (4523, 4573), True, 'import numpy as np\n'), ((4943, 4963), 'numpy.sum', 'np.sum', (['bops'], {'axis': '(1)'}), '(bops, axis=1)\n', (4949, 4963), True, 'import numpy as np\n'), ((6551, 6579), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (6577, 6579), False, 'from sklearn import preprocessing\n'), ((6643, 6692), 'pandas.DataFrame', 'pd.DataFrame', (['x_scaled'], {'columns': 'result_df.columns'}), '(x_scaled, columns=result_df.columns)\n', (6655, 6692), True, 'import pandas as pd\n'), ((7594, 7658), 'pandas.read_csv', 'pd.read_csv', (['result_file'], {'delimiter': '""","""', 'header': 'None', 'names': 'cols'}), "(result_file, delimiter=',', header=None, names=cols)\n", (7605, 7658), True, 'import pandas as pd\n'), ((7719, 7796), 'pandas.merge', 'pd.merge', (['result_df', 'features_df'], {'left_on': '"""dataset1"""', 'right_on': '"""dataset_name"""'}), "(result_df, features_df, left_on='dataset1', right_on='dataset_name')\n", (7727, 7796), True, 'import pandas as pd\n'), ((7813, 7890), 'pandas.merge', 'pd.merge', (['result_df', 'features_df'], {'left_on': '"""dataset2"""', 'right_on': '"""dataset_name"""'}), "(result_df, features_df, left_on='dataset2', right_on='dataset_name')\n", (7821, 7890), True, 'import pandas as pd\n'), ((8146, 8207), 
'numpy.multiply', 'np.multiply', (['ds1_original_histograms', 'ds2_original_histograms'], {}), '(ds1_original_histograms, ds2_original_histograms)\n', (8157, 8207), True, 'import numpy as np\n'), ((8310, 8330), 'numpy.sum', 'np.sum', (['bops'], {'axis': '(1)'}), '(bops, axis=1)\n', (8316, 8330), True, 'import numpy as np\n'), ((9358, 9386), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (9384, 9386), False, 'from sklearn import preprocessing\n'), ((9450, 9472), 'pandas.DataFrame', 'pd.DataFrame', (['x_scaled'], {}), '(x_scaled)\n', (9462, 9472), True, 'import pandas as pd\n'), ((3219, 3255), 'sklearn.preprocessing.minmax_scale', 'preprocessing.minmax_scale', (['reshaped'], {}), '(reshaped)\n', (3245, 3255), False, 'from sklearn import preprocessing\n'), ((8723, 8738), 'math.pow', 'math.pow', (['(10)', '(9)'], {}), '(10, 9)\n', (8731, 8738), False, 'import math\n'), ((11752, 11777), 'numpy.dstack', 'np.dstack', (['(hist1, hist2)'], {}), '((hist1, hist2))\n', (11761, 11777), True, 'import numpy as np\n'), ((12033, 12058), 'numpy.multiply', 'np.multiply', (['hist1', 'hist2'], {}), '(hist1, hist2)\n', (12044, 12058), True, 'import numpy as np\n'), ((12199, 12223), 'numpy.array', 'np.array', (['ds1_histograms'], {}), '(ds1_histograms)\n', (12207, 12223), True, 'import numpy as np\n'), ((12225, 12249), 'numpy.array', 'np.array', (['ds2_histograms'], {}), '(ds2_histograms)\n', (12233, 12249), True, 'import numpy as np\n'), ((12251, 12284), 'numpy.array', 'np.array', (['ds1_original_histograms'], {}), '(ds1_original_histograms)\n', (12259, 12284), True, 'import numpy as np\n'), ((12286, 12319), 'numpy.array', 'np.array', (['ds2_original_histograms'], {}), '(ds2_original_histograms)\n', (12294, 12319), True, 'import numpy as np\n'), ((12330, 12356), 'numpy.array', 'np.array', (['ds_all_histogram'], {}), '(ds_all_histogram)\n', (12338, 12356), True, 'import numpy as np\n'), ((12358, 12385), 'numpy.array', 'np.array', (['ds_bops_histogram'], {}), '(ds_bops_histogram)\n', (12366, 12385), True, 'import numpy as np\n'), ((13893, 13918), 'numpy.dstack', 'np.dstack', (['(hist1, hist2)'], {}), '((hist1, hist2))\n', (13902, 13918), True, 'import numpy as np\n'), ((14174, 14199), 'numpy.multiply', 'np.multiply', (['hist1', 'hist2'], {}), '(hist1, hist2)\n', (14185, 14199), True, 'import numpy as np\n'), ((14340, 14364), 'numpy.array', 'np.array', (['ds1_histograms'], {}), '(ds1_histograms)\n', (14348, 14364), True, 'import numpy as np\n'), ((14366, 14390), 'numpy.array', 'np.array', (['ds2_histograms'], {}), '(ds2_histograms)\n', (14374, 14390), True, 'import numpy as np\n'), ((14392, 14425), 'numpy.array', 'np.array', (['ds1_original_histograms'], {}), '(ds1_original_histograms)\n', (14400, 14425), True, 'import numpy as np\n'), ((14427, 14460), 'numpy.array', 'np.array', (['ds2_original_histograms'], {}), '(ds2_original_histograms)\n', (14435, 14460), True, 'import numpy as np\n'), ((14471, 14497), 'numpy.array', 'np.array', (['ds_all_histogram'], {}), '(ds_all_histogram)\n', (14479, 14497), True, 'import numpy as np\n'), ((14499, 14526), 'numpy.array', 'np.array', (['ds_bops_histogram'], {}), '(ds_bops_histogram)\n', (14507, 14526), True, 'import numpy as np\n')]
import os import numpy as np import random from math import isclose import torch import matplotlib.pyplot as plt from modelZoo.DyanOF import OFModel, fista from torch.autograd import Variable import torch.nn def gridRing(N): # epsilon_low = 0.25 # epsilon_high = 0.15 # rmin = (1 - epsilon_low) # rmax = (1 + epsilon_high) epsilon_low = 0.25 epsilon_high = 0.15 rmin = (1 - epsilon_low) rmax = (1 + epsilon_high) thetaMin = 0.001 thetaMax = np.pi / 2 - 0.001 delta = 0.001 # Npole = int(N / 4) Npole = int(N/2) Pool = generateGridPoles(delta, rmin, rmax, thetaMin, thetaMax) M = len(Pool) idx = random.sample(range(0, M), Npole) P = Pool[idx] Pall = np.concatenate((P, -P, np.conjugate(P), np.conjugate(-P)), axis=0) return P, Pall ## Generate the grid on poles def generateGridPoles(delta, rmin, rmax, thetaMin, thetaMax): rmin2 = pow(rmin, 2) rmax2 = pow(rmax, 2) xv = np.arange(-rmax, rmax, delta) x, y = np.meshgrid(xv, xv, sparse=False) mask = np.logical_and(np.logical_and(x ** 2 + y ** 2 >= rmin2, x ** 2 + y ** 2 <= rmax2), np.logical_and(np.angle(x + 1j * y) >= thetaMin, np.angle(x + 1j * y) <= thetaMax)) px = x[mask] py = y[mask] P = px + 1j * py return P def getRowSparsity(inputDict): rowNum = inputDict.shape[0] L = inputDict.shape[1] count = 0 for i in range(0, rowNum): dictRow = inputDict[i,:].unsqueeze(0) if len(dictRow.nonzero()) <= round(0.6*L): count+=1 else: continue rowSparsity = count return rowSparsity def get_recover_fista(D, y, key_set, param, gpu_id): if type(D) is np.ndarray: D = torch.Tensor(D) D_r = D[key_set] if len(y.shape)==3: y_r = y[:,key_set] else: y_r = y[key_set] if D.is_cuda: c_r = fista(D_r, y_r, param, 100, gpu_id) y_hat = torch.matmul(D, c_r) else: c_r = fista(D_r.cuda(gpu_id), y_r, param, 100, gpu_id) y_hat = torch.matmul(D.cuda(gpu_id), c_r) return y_hat
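# Illustrative usage sketch (added; not part of the original file). N, T and the dictionary
# construction below are assumptions for demonstration only; a DYAN-style dictionary is
# typically built from powers of the sampled poles, and only the real part is kept here to
# keep the example short.
if __name__ == '__main__':
    N = 160                                   # hypothetical number of poles to sample
    P, Pall = gridRing(N)                     # Pall stacks P, -P and their conjugates (2*N poles)
    T = 36                                    # hypothetical temporal length of the sequences
    D = np.real(Pall[None, :] ** np.arange(T)[:, None])  # D[t, k] = Re(pole_k ** t)
    print(D.shape)                            # (36, 320)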
[ "numpy.logical_and", "modelZoo.DyanOF.fista", "numpy.conjugate", "torch.Tensor", "numpy.angle", "torch.matmul", "numpy.meshgrid", "numpy.arange" ]
[((967, 996), 'numpy.arange', 'np.arange', (['(-rmax)', 'rmax', 'delta'], {}), '(-rmax, rmax, delta)\n', (976, 996), True, 'import numpy as np\n'), ((1008, 1041), 'numpy.meshgrid', 'np.meshgrid', (['xv', 'xv'], {'sparse': '(False)'}), '(xv, xv, sparse=False)\n', (1019, 1041), True, 'import numpy as np\n'), ((1068, 1134), 'numpy.logical_and', 'np.logical_and', (['(x ** 2 + y ** 2 >= rmin2)', '(x ** 2 + y ** 2 <= rmax2)'], {}), '(x ** 2 + y ** 2 >= rmin2, x ** 2 + y ** 2 <= rmax2)\n', (1082, 1134), True, 'import numpy as np\n'), ((1748, 1763), 'torch.Tensor', 'torch.Tensor', (['D'], {}), '(D)\n', (1760, 1763), False, 'import torch\n'), ((1906, 1941), 'modelZoo.DyanOF.fista', 'fista', (['D_r', 'y_r', 'param', '(100)', 'gpu_id'], {}), '(D_r, y_r, param, 100, gpu_id)\n', (1911, 1941), False, 'from modelZoo.DyanOF import OFModel, fista\n'), ((1958, 1978), 'torch.matmul', 'torch.matmul', (['D', 'c_r'], {}), '(D, c_r)\n', (1970, 1978), False, 'import torch\n'), ((750, 765), 'numpy.conjugate', 'np.conjugate', (['P'], {}), '(P)\n', (762, 765), True, 'import numpy as np\n'), ((767, 783), 'numpy.conjugate', 'np.conjugate', (['(-P)'], {}), '(-P)\n', (779, 783), True, 'import numpy as np\n'), ((1177, 1199), 'numpy.angle', 'np.angle', (['(x + 1.0j * y)'], {}), '(x + 1.0j * y)\n', (1185, 1199), True, 'import numpy as np\n'), ((1211, 1233), 'numpy.angle', 'np.angle', (['(x + 1.0j * y)'], {}), '(x + 1.0j * y)\n', (1219, 1233), True, 'import numpy as np\n')]
import tqdm import mediapipe import requests import cv2 import numpy as np import matplotlib class FullBodyPoseEmbedder(object): """Converts 3D pose landmarks into 3D embedding.""" def __init__(self, torso_size_multiplier=2.5): # Multiplier to apply to the torso to get minimal body size. self._torso_size_multiplier = torso_size_multiplier # Names of the landmarks as they appear in the prediction. self._landmark_names = [ 'nose', 'left_eye_inner', 'left_eye', 'left_eye_outer', 'right_eye_inner', 'right_eye', 'right_eye_outer', 'left_ear', 'right_ear', 'mouth_left', 'mouth_right', 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist', 'left_pinky_1', 'right_pinky_1', 'left_index_1', 'right_index_1', 'left_thumb_2', 'right_thumb_2', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle', 'left_heel', 'right_heel', 'left_foot_index', 'right_foot_index', ] def __call__(self, landmarks): """Normalizes pose landmarks and converts to embedding Args: landmarks - NumPy array with 3D landmarks of shape (N, 3). Result: Numpy array with pose embedding of shape (M, 3) where `M` is the number of pairwise distances defined in `_get_pose_distance_embedding`. """ assert landmarks.shape[0] == len(self._landmark_names), 'Unexpected number of landmarks: {}'.format( landmarks.shape[0]) # Get pose landmarks. landmarks = np.copy(landmarks) # Normalize landmarks. landmarks = self._normalize_pose_landmarks(landmarks) # Get embedding. embedding = self._get_pose_distance_embedding(landmarks) return embedding def _normalize_pose_landmarks(self, landmarks): """Normalizes landmarks translation and scale.""" landmarks = np.copy(landmarks) # Normalize translation. pose_center = self._get_pose_center(landmarks) landmarks -= pose_center # Normalize scale. pose_size = self._get_pose_size(landmarks, self._torso_size_multiplier) landmarks /= pose_size # Multiplication by 100 is not required, but makes it eaasier to debug. landmarks *= 100 return landmarks def _get_pose_center(self, landmarks): """Calculates pose center as point between hips.""" left_hip = landmarks[self._landmark_names.index('left_hip')] right_hip = landmarks[self._landmark_names.index('right_hip')] center = (left_hip + right_hip) * 0.5 return center def _get_pose_size(self, landmarks, torso_size_multiplier): """Calculates pose size. It is the maximum of two values: * Torso size multiplied by `torso_size_multiplier` * Maximum distance from pose center to any pose landmark """ # This approach uses only 2D landmarks to compute pose size. landmarks = landmarks[:, :2] # Hips center. left_hip = landmarks[self._landmark_names.index('left_hip')] right_hip = landmarks[self._landmark_names.index('right_hip')] hips = (left_hip + right_hip) * 0.5 # Shoulders center. left_shoulder = landmarks[self._landmark_names.index('left_shoulder')] right_shoulder = landmarks[self._landmark_names.index('right_shoulder')] shoulders = (left_shoulder + right_shoulder) * 0.5 # Torso size as the minimum body size. torso_size = np.linalg.norm(shoulders - hips) # Max dist to pose center. pose_center = self._get_pose_center(landmarks) max_dist = np.max(np.linalg.norm(landmarks - pose_center, axis=1)) return max(torso_size * torso_size_multiplier, max_dist) def _get_pose_distance_embedding(self, landmarks): """Converts pose landmarks into 3D embedding. We use several pairwise 3D distances to form pose embedding. All distances include X and Y components with sign. We differnt types of pairs to cover different pose classes. 
Feel free to remove some or add new. Args: landmarks - NumPy array with 3D landmarks of shape (N, 3). Result: Numpy array with pose embedding of shape (M, 3) where `M` is the number of pairwise distances. """ embedding = np.array([ # One joint. self._get_distance( self._get_average_by_names(landmarks, 'left_hip', 'right_hip'), self._get_average_by_names(landmarks, 'left_shoulder', 'right_shoulder')), self._get_distance_by_names(landmarks, 'left_shoulder', 'left_elbow'), self._get_distance_by_names(landmarks, 'right_shoulder', 'right_elbow'), self._get_distance_by_names(landmarks, 'left_elbow', 'left_wrist'), self._get_distance_by_names(landmarks, 'right_elbow', 'right_wrist'), self._get_distance_by_names(landmarks, 'left_hip', 'left_knee'), self._get_distance_by_names(landmarks, 'right_hip', 'right_knee'), self._get_distance_by_names(landmarks, 'left_knee', 'left_ankle'), self._get_distance_by_names(landmarks, 'right_knee', 'right_ankle'), # Two joints. self._get_distance_by_names(landmarks, 'left_shoulder', 'left_wrist'), self._get_distance_by_names(landmarks, 'right_shoulder', 'right_wrist'), self._get_distance_by_names(landmarks, 'left_hip', 'left_ankle'), self._get_distance_by_names(landmarks, 'right_hip', 'right_ankle'), # Four joints. self._get_distance_by_names(landmarks, 'left_hip', 'left_wrist'), self._get_distance_by_names(landmarks, 'right_hip', 'right_wrist'), # Five joints. self._get_distance_by_names(landmarks, 'left_shoulder', 'left_ankle'), self._get_distance_by_names(landmarks, 'right_shoulder', 'right_ankle'), self._get_distance_by_names(landmarks, 'left_hip', 'left_wrist'), self._get_distance_by_names(landmarks, 'right_hip', 'right_wrist'), # Cross body. self._get_distance_by_names(landmarks, 'left_elbow', 'right_elbow'), self._get_distance_by_names(landmarks, 'left_knee', 'right_knee'), self._get_distance_by_names(landmarks, 'left_wrist', 'right_wrist'), self._get_distance_by_names(landmarks, 'left_ankle', 'right_ankle'), # Body bent direction. # self._get_distance( # self._get_average_by_names(landmarks, 'left_wrist', 'left_ankle'), # landmarks[self._landmark_names.index('left_hip')]), # self._get_distance( # self._get_average_by_names(landmarks, 'right_wrist', 'right_ankle'), # landmarks[self._landmark_names.index('right_hip')]), ]) return embedding def _get_average_by_names(self, landmarks, name_from, name_to): lmk_from = landmarks[self._landmark_names.index(name_from)] lmk_to = landmarks[self._landmark_names.index(name_to)] return (lmk_from + lmk_to) * 0.5 def _get_distance_by_names(self, landmarks, name_from, name_to): lmk_from = landmarks[self._landmark_names.index(name_from)] lmk_to = landmarks[self._landmark_names.index(name_to)] return self._get_distance(lmk_from, lmk_to) def _get_distance(self, lmk_from, lmk_to): return lmk_to - lmk_from
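# Illustrative usage sketch (added; not part of the original file). The landmark array is
# random and purely hypothetical; in practice it would come from a MediaPipe pose prediction
# with 33 landmarks of (x, y, z) each.
if __name__ == '__main__':
    embedder = FullBodyPoseEmbedder()
    fake_landmarks = np.random.rand(33, 3)      # 33 landmarks, matching the names listed above
    embedding = embedder(fake_landmarks)
    print(embedding.shape)                     # (23, 3): one row per pairwise distance defined above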
[ "numpy.copy", "numpy.linalg.norm" ]
[((1734, 1752), 'numpy.copy', 'np.copy', (['landmarks'], {}), '(landmarks)\n', (1741, 1752), True, 'import numpy as np\n'), ((2095, 2113), 'numpy.copy', 'np.copy', (['landmarks'], {}), '(landmarks)\n', (2102, 2113), True, 'import numpy as np\n'), ((3729, 3761), 'numpy.linalg.norm', 'np.linalg.norm', (['(shoulders - hips)'], {}), '(shoulders - hips)\n', (3743, 3761), True, 'import numpy as np\n'), ((3879, 3926), 'numpy.linalg.norm', 'np.linalg.norm', (['(landmarks - pose_center)'], {'axis': '(1)'}), '(landmarks - pose_center, axis=1)\n', (3893, 3926), True, 'import numpy as np\n')]
import numpy as np import math def GMM(alpha, x, u, conv,dim): covdet = np.linalg.det(conv + np.eye(dim) * 0.001) covinv = np.linalg.inv(conv + np.eye(dim) * 0.001) T1 = 1 / ( (2 * math.pi)**(dim/2) * np.sqrt(covdet)) T2 = np.exp((-0.5) * ((np.transpose(x - u)).dot(covinv).dot(x - u))) prob = T1 * T2 return alpha * prob[0] def EM_GMM(weights,mean,cov,data,M,dim): initial_value = 0 for i in data: i = np.expand_dims(i, 1) all_value = 0 for k in range(M): value = GMM(weights[k], i, mean[k], cov[k],dim) all_value = all_value + value intial_value_temp = math.log(all_value + 0.00001) initial_value = initial_value + intial_value_temp flag = 10000 num = 0 while (flag > 0.00001): print("flag",flag) num = num + 1 P = [] for m in range(M): l = [] * (m + 1) P.append(l) # E step for i in data: i = np.reshape(i, (dim, 1)) value = [GMM(weights[k], i, mean[k], cov[k],dim) for k in range(M)] value = np.array(value) sum_value = np.sum(value) for m in range(M): p = GMM(weights[m], i, mean[m], cov[m],dim) / sum_value P[m].append(p) for m in range(M): P[m] = np.array(P[m]) # 1000*1 # M step # update alpha for m in range(M): weights[m] = np.sum(P[m]) / len(data) # update u for m in range(M): result_list = [] for i in range(len(data)): W = np.expand_dims(data[i], 1) result = P[m][i] * W result_list.append(result) result_list = np.array(result_list) mean_sum = np.sum(result_list, 0) mean[m] = mean_sum / np.sum(P[m]) # update cov for m in range(M): result_list = [] for i in range(len(data)): W = np.expand_dims(data[i], 1) # 2 * 1 T = W - mean[m] Q = np.transpose(T) temp = (T.dot(Q)) * P[m][i] result_list.append(temp) result_list = np.array(result_list) cov_sum = np.sum(result_list, 0) cov[m] = cov_sum / np.sum(P[m]) update_value = 0 for i in data: i = np.expand_dims(i, 1) all_value = 0 for k in range(M): value = GMM(weights[k], i, mean[k], cov[k],dim) all_value = all_value + value update_value_temp = math.log(all_value) update_value = update_value + update_value_temp flag = abs(update_value - initial_value) initial_value = update_value return weights,mean,cov,num
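# Illustrative usage sketch (added; not part of the original file). The synthetic data,
# component count and initial parameters below are assumptions chosen only to show the
# argument shapes that EM_GMM expects (means and covariances as column vectors / matrices).
if __name__ == '__main__':
    dim, M = 2, 2
    data = np.vstack([np.random.randn(50, dim) + 3, np.random.randn(50, dim) - 3])
    weights = [0.5, 0.5]
    mean = [np.array([[2.0], [2.0]]), np.array([[-2.0], [-2.0]])]  # one (dim, 1) column per component
    cov = [np.eye(dim), np.eye(dim)]
    weights, mean, cov, num_iter = EM_GMM(weights, mean, cov, data, M, dim)
    print('converged after', num_iter, 'iterations, weights =', weights)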
[ "numpy.eye", "numpy.reshape", "numpy.sqrt", "math.log", "numpy.array", "numpy.sum", "numpy.expand_dims", "numpy.transpose" ]
[((446, 466), 'numpy.expand_dims', 'np.expand_dims', (['i', '(1)'], {}), '(i, 1)\n', (460, 466), True, 'import numpy as np\n'), ((647, 674), 'math.log', 'math.log', (['(all_value + 1e-05)'], {}), '(all_value + 1e-05)\n', (655, 674), False, 'import math\n'), ((215, 230), 'numpy.sqrt', 'np.sqrt', (['covdet'], {}), '(covdet)\n', (222, 230), True, 'import numpy as np\n'), ((994, 1017), 'numpy.reshape', 'np.reshape', (['i', '(dim, 1)'], {}), '(i, (dim, 1))\n', (1004, 1017), True, 'import numpy as np\n'), ((1119, 1134), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (1127, 1134), True, 'import numpy as np\n'), ((1159, 1172), 'numpy.sum', 'np.sum', (['value'], {}), '(value)\n', (1165, 1172), True, 'import numpy as np\n'), ((1355, 1369), 'numpy.array', 'np.array', (['P[m]'], {}), '(P[m])\n', (1363, 1369), True, 'import numpy as np\n'), ((1766, 1787), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (1774, 1787), True, 'import numpy as np\n'), ((1811, 1833), 'numpy.sum', 'np.sum', (['result_list', '(0)'], {}), '(result_list, 0)\n', (1817, 1833), True, 'import numpy as np\n'), ((2232, 2253), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (2240, 2253), True, 'import numpy as np\n'), ((2276, 2298), 'numpy.sum', 'np.sum', (['result_list', '(0)'], {}), '(result_list, 0)\n', (2282, 2298), True, 'import numpy as np\n'), ((2408, 2428), 'numpy.expand_dims', 'np.expand_dims', (['i', '(1)'], {}), '(i, 1)\n', (2422, 2428), True, 'import numpy as np\n'), ((2629, 2648), 'math.log', 'math.log', (['all_value'], {}), '(all_value)\n', (2637, 2648), False, 'import math\n'), ((99, 110), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (105, 110), True, 'import numpy as np\n'), ((154, 165), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (160, 165), True, 'import numpy as np\n'), ((1473, 1485), 'numpy.sum', 'np.sum', (['P[m]'], {}), '(P[m])\n', (1479, 1485), True, 'import numpy as np\n'), ((1633, 1659), 'numpy.expand_dims', 'np.expand_dims', (['data[i]', '(1)'], {}), '(data[i], 1)\n', (1647, 1659), True, 'import numpy as np\n'), ((1867, 1879), 'numpy.sum', 'np.sum', (['P[m]'], {}), '(P[m])\n', (1873, 1879), True, 'import numpy as np\n'), ((2017, 2043), 'numpy.expand_dims', 'np.expand_dims', (['data[i]', '(1)'], {}), '(data[i], 1)\n', (2031, 2043), True, 'import numpy as np\n'), ((2105, 2120), 'numpy.transpose', 'np.transpose', (['T'], {}), '(T)\n', (2117, 2120), True, 'import numpy as np\n'), ((2330, 2342), 'numpy.sum', 'np.sum', (['P[m]'], {}), '(P[m])\n', (2336, 2342), True, 'import numpy as np\n'), ((259, 278), 'numpy.transpose', 'np.transpose', (['(x - u)'], {}), '(x - u)\n', (271, 278), True, 'import numpy as np\n')]
# -*- encoding: utf-8 -*-    
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0604_conv1D.py
@Version : v0.1
@Time : 2019-11-26 11:08
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python", Francois Chollet, Sec0604, P188
@Desc : Deep learning for text and sequences: processing sequences with a 1D convnet
"""
import os
import sys

import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import winsound
from keras.activations import relu
from keras.datasets import imdb
from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D
from keras.layers import Dense
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences

# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
from tools import plot_classes_results

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the printed numeric precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# ----------------------------------------------------------------------
max_features = 10000
max_len = 500
embedding_size = 128
epochs = 15
batch_size = 128
verbose = 2
validation_split = 0.2

print("Listing 6.45: Preparing the IMDB dataset...")
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words = max_features)
print('\t', len(x_train), 'train sequences')
print('\t', len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = pad_sequences(x_train, maxlen = max_len)
x_test = pad_sequences(x_test, maxlen = max_len)
print('\t x_train shape:', x_train.shape)
print('\t x_test shape:', x_test.shape)


# ----------------------------------------------------------------------
def simple_conv1d():
    print("Listing 6.46: Train and evaluate a simple 1D convnet on the IMDB data")
    model = Sequential(name = "简单的一维卷积神经网络")
    model.add(Embedding(max_features, embedding_size, input_length = max_len))
    model.add(Conv1D(32, 7, activation = relu))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(32, 7, activation = relu))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(1))
    model.summary()
    model.compile(optimizer = rmsprop(lr = 1e-4), loss = binary_crossentropy, metrics = [binary_accuracy])
    history = model.fit(x_train, y_train, epochs = epochs, batch_size = batch_size,
                        validation_split = validation_split, verbose = verbose, use_multiprocessing = True)
    title = "A simple 1D convnet applied to the IMDB dataset"
    plot_classes_results(history, title, epochs)
    pass


# ----------------------------------------------------------------------
simple_conv1d()

# 6.4.4 Combining CNNs and RNNs to process long sequences
# Implemented in ch0603_predict_temperature.py, because it uses the temperature dataset; keeping it there makes the comparison easier
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
pass
[ "keras.layers.MaxPooling1D", "keras.optimizers.rmsprop", "keras.datasets.imdb.load_data", "tools.plot_classes_results", "matplotlib.pyplot.show", "matplotlib.pyplot.get_fignums", "keras.layers.GlobalMaxPooling1D", "keras.models.Sequential", "numpy.random.seed", "winsound.Beep", "keras.layers.Dense", "keras.preprocessing.sequence.pad_sequences", "keras.layers.Embedding", "keras.layers.Conv1D", "numpy.set_printoptions" ]
[((1192, 1277), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=3, suppress=True, threshold=np.inf, linewidth=200\n )\n', (1211, 1277), True, 'import numpy as np\n'), ((1343, 1363), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1357, 1363), True, 'import numpy as np\n'), ((1767, 1805), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (1781, 1805), False, 'from keras.datasets import imdb\n'), ((1958, 1996), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train'], {'maxlen': 'max_len'}), '(x_train, maxlen=max_len)\n', (1971, 1996), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2008, 2045), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test'], {'maxlen': 'max_len'}), '(x_test, maxlen=max_len)\n', (2021, 2045), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3195, 3218), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (3208, 3218), False, 'import winsound\n'), ((2294, 2324), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""简单的一维卷积神经网络"""'}), "(name='简单的一维卷积神经网络')\n", (2304, 2324), False, 'from keras.models import Sequential\n'), ((2954, 2998), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (2974, 2998), False, 'from tools import plot_classes_results\n'), ((3255, 3265), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3263, 3265), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2402), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_size'], {'input_length': 'max_len'}), '(max_features, embedding_size, input_length=max_len)\n', (2350, 2402), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2420, 2450), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(7)'], {'activation': 'relu'}), '(32, 7, activation=relu)\n', (2426, 2450), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2468, 2483), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(5)'], {}), '(5)\n', (2480, 2483), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2499, 2529), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(7)'], {'activation': 'relu'}), '(32, 7, activation=relu)\n', (2505, 2529), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2547, 2567), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (2565, 2567), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2583, 2591), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2588, 2591), False, 'from keras.layers import Dense\n'), ((3226, 3243), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (3241, 3243), True, 'import matplotlib.pyplot as plt\n'), ((2643, 2661), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (2650, 2661), False, 'from keras.optimizers import rmsprop\n')]
#! /usr/bin/env python3
"""
Stein PPO: Sample-efficient Policy Optimization with Stein Control Variate

Motivated by the Stein’s identity, Stein PPO extends the previous
control variate methods used in REINFORCE and advantage actor-critic
by introducing more general action-dependent baseline functions.
Details see the following papers:

Stein PPO:
https://arxiv.org/pdf/1710.11198.pdf

Distributed PPO:
https://arxiv.org/abs/1707.02286

Proximal Policy Optimization Algorithms
https://arxiv.org/pdf/1707.06347.pdf

Generalized Advantage Estimation:
https://arxiv.org/pdf/1506.02438.pdf

Code modified from this Github repo: https://github.com/pat-coady/trpo

This GitHub repo is also helpful.
https://github.com/joschu/modular_rl

This implementation learns policies for continuous environments
in the OpenAI Gym (https://gym.openai.com/). Testing was focused on
the MuJoCo control tasks.
"""
import os
import gym
import random
import numpy as np
import tb_logger as logger

import scipy.signal
from gym import wrappers
from utils import Scaler
from policy import Policy
from datetime import datetime
from value_function import NNValueFunction


def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i)


def init_gym(env_name):
    """
    Initialize gym environment, return dimension of observation
    and action spaces.

    Args:
        env_name: str environment name (e.g. "Humanoid-v1")

    Returns: 3-tuple
        gym environment (object)
        number of observation dimensions (int)
        number of action dimensions (int)
    """
    env = gym.make(env_name)
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]

    return env, obs_dim, act_dim


def run_episode(env, policy, scaler, max_timesteps, animate=False):
    """ Run single episode with option to animate

    Args:
        env: ai gym environment
        policy: policy object with sample() method
        scaler: scaler object, used to scale/offset each observation dimension
            to a similar range
        animate: boolean, True uses env.render() method to animate episode

    Returns: 4-tuple of NumPy arrays
        observes: shape = (episode len, obs_dim)
        actions: shape = (episode len, act_dim)
        rewards: shape = (episode len,)
        unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)
    """
    obs = env.reset()
    observes, actions, rewards, unscaled_obs = [], [], [], []
    done = False
    step = 0.0
    scale, offset = scaler.get()
    scale[-1] = 1.0  # don't scale time step feature
    offset[-1] = 0.0  # don't offset time step feature

    for _ in range(max_timesteps):
        if animate:
            env.render()
        obs = obs.astype(np.float32).reshape((1, -1))
        obs = np.append(obs, [[step]], axis=1)  # add time step feature
        unscaled_obs.append(obs)
        obs = (obs - offset) * scale  # center and scale observations
        observes.append(obs)

        action = policy.sample(obs).reshape((1, -1)).astype(np.float32)
        actions.append(action)
        obs, reward, done, _ = env.step(np.squeeze(action, axis=0))
        if not isinstance(reward, float):
            reward = np.asscalar(reward)
        rewards.append(reward)
        step += 1e-3  # increment time step feature
        if done:
            break

    return (np.concatenate(observes), np.concatenate(actions),
            np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))


def run_policy(env, policy, scaler, batch_size, max_timesteps):
    """ Run policy and collect data for a minimum of min_steps and min_episodes

    Args:
        env: ai gym environment
        policy: policy object with sample() method
        scaler: scaler object, used to scale/offset each observation dimension
            to a similar range
        episodes: total episodes to run
        max_timesteps: max timesteps per episode to run

    Returns: list of trajectory dictionaries, list length = number of episodes
        'observes' : NumPy array of states from episode
        'actions' : NumPy array of actions from episode
        'rewards' : NumPy array of (un-discounted) rewards from episode
        'unscaled_obs' : NumPy array of (un-discounted) rewards from episode
    """
    total_steps = 0
    trajectories = []
    while total_steps < batch_size:
        observes, actions, rewards, unscaled_obs = run_episode(env, \
                policy, scaler, max_timesteps=max_timesteps)
        total_steps += observes.shape[0]
        trajectory = {'observes': observes,
                      'actions': actions,
                      'rewards': rewards,
                      'unscaled_obs': unscaled_obs}
        trajectories.append(trajectory)

    unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
    scaler.update(unscaled)  # update running statistics for scaling observations

    logger.record_dicts({
        "_MeanReward": np.mean([t['rewards'].sum() for t in trajectories]),
        'Steps': total_steps,})

    return trajectories


def discount(x, gamma):
    """ Calculate discounted forward sum of a sequence at each point """
    return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]


def add_disc_sum_rew(trajectories, gamma):
    """ Adds discounted sum of rewards to all time steps of all trajectories

    Args:
        trajectories: as returned by run_policy()
        gamma: discount

    Returns:
        None (mutates trajectories dictionary to add 'disc_sum_rew')
    """
    for trajectory in trajectories:
        if gamma < 0.999:  # don't scale for gamma ~= 1
            rewards = trajectory['rewards'] * (1 - gamma)
        else:
            rewards = trajectory['rewards']
        disc_sum_rew = discount(rewards, gamma)
        trajectory['disc_sum_rew'] = disc_sum_rew


def add_value(trajectories, val_func):
    """ Adds estimated value to all time steps of all trajectories

    Args:
        trajectories: as returned by run_policy()
        val_func: object with predict() method, takes observations
            and returns predicted state value

    Returns:
        None (mutates trajectories dictionary to add 'values')
    """
    for trajectory in trajectories:
        observes = trajectory['observes']
        values = val_func.predict(observes)
        trajectory['values'] = values


def add_gae(trajectories, gamma, lam):
    """ Add generalized advantage estimator.
    https://arxiv.org/pdf/1506.02438.pdf

    Args:
        trajectories: as returned by run_policy(), must include 'values'
            key from add_value().
        gamma: reward discount
        lam: lambda (see paper).
            lam=0 : use TD residuals
            lam=1 : A = Sum Discounted Rewards - V_hat(s)

    Returns:
        None (mutates trajectories dictionary to add 'advantages')
    """
    for trajectory in trajectories:
        if gamma < 0.999:  # don't scale for gamma ~= 1
            rewards = trajectory['rewards'] * (1 - gamma)
        else:
            rewards = trajectory['rewards']
        values = trajectory['values']
        # temporal differences
        tds = rewards - values + np.append(values[1:] * gamma, 0)
        advantages = discount(tds, gamma * lam)
        trajectory['advantages'] = advantages


def build_train_set(trajectories):
    """
    Args:
        trajectories: trajectories after processing by add_disc_sum_rew(),
            add_value(), and add_gae()

    Returns: 4-tuple of NumPy arrays
        observes: shape = (N, obs_dim)
        actions: shape = (N, act_dim)
        advantages: shape = (N,)
        disc_sum_rew: shape = (N,)
    """
    observes = np.concatenate([t['observes'] for t in trajectories])
    actions = np.concatenate([t['actions'] for t in trajectories])
    disc_sum_rew = np.concatenate([t['disc_sum_rew'] for t in trajectories])
    advantages = np.concatenate([t['advantages'] for t in trajectories])
    # normalize advantages
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)

    return observes, actions, advantages, disc_sum_rew


def log_batch_stats(observes, actions, advantages, disc_sum_rew):
    """ Log batch statistics """
    logger.record_dicts({
        '_mean_obs': np.mean(observes),
        '_min_obs': np.min(observes),
        '_max_obs': np.max(observes),
        '_mean_act': np.mean(actions),
        '_max_act': np.max(actions),
        '_std_act': np.mean(np.var(actions, axis=0)),
        '_mean_adv': np.mean(advantages),
        '_min_adv': np.min(advantages),
        '_max_adv': np.max(advantages),
        '_std_adv': np.var(advantages),
        '_mean_discrew': np.mean(disc_sum_rew),
        '_min_discrew': np.min(disc_sum_rew),
        '_max_discrew': np.max(disc_sum_rew),
        '_std_discrew': np.var(disc_sum_rew)})

    logger.dump_tabular()


def main(env_name, num_iterations, gamma, lam, kl_targ, batch_size,hid1_mult, policy_logvar, coef, use_lr_adjust, ada_kl_penalty, seed, epochs, phi_epochs, max_timesteps, reg_scale, phi_lr, phi_hs, policy_size, phi_obj):
    """ Main training loop

    Args:
        env_name: OpenAI Gym environment name, e.g. 'Hopper-v1'
        num_iterations: maximum number of iterations to run
        gamma: reward discount factor (float)
        lam: lambda from Generalized Advantage Estimate
        kl_targ: D_KL target for policy update [D_KL(pi_old || pi_new)
        batch_size: number of episodes per policy training batch
        hid1_mult: hid1 size for policy and value_f (mutliplier of obs dimension)
        policy_logvar: natural log of initial policy variance
        coef: coefficient of Stein control variate
        use_lr_adjust: whether adjust lr based on kl
        ada_kl_penalty: whether adjust kl penalty
        max_timesteps: maximum time steps per trajectory
        reg_scale: regularization coefficient
        policy_size: policy network size
        phi_obj: FitQ or MinVar
    """
    env, obs_dim, act_dim = init_gym(env_name)
    set_global_seeds(seed)
    env.seed(seed)
    env._max_episode_steps = max_timesteps
    obs_dim += 1  # add 1 to obs dimension for time step feature (see run_episode())
    now = datetime.utcnow().strftime("%b-%d_%H:%M:%S")
    aigym_path = os.path.join('log-files/', env_name, now)
    env = wrappers.Monitor(env, aigym_path, force=True, video_callable=False)
    scaler = Scaler(obs_dim)
    val_func = NNValueFunction(obs_dim, hid1_mult)
    policy = Policy(obs_dim, act_dim, kl_targ, hid1_mult, policy_logvar,
                    epochs, phi_epochs,
                    policy_size=policy_size,
                    phi_hidden_sizes=phi_hs,
                    c_ph=coef,
                    reg_scale=reg_scale,
                    lr_phi=phi_lr,
                    phi_obj=phi_obj)

    # run a few episodes of untrained policy to initialize scaler:
    run_policy(env, policy, scaler, batch_size=1000, max_timesteps=max_timesteps)

    for _ in range(num_iterations):
        logger.log("\n#Training Iter %d"%(_))
        logger.log("Draw Samples..")

        trajectories = run_policy(env, policy, scaler,
                batch_size=batch_size,
                max_timesteps=max_timesteps)

        add_value(trajectories, val_func)  # add estimated values to episodes
        add_disc_sum_rew(trajectories, gamma)  # calculated discounted sum of Rs
        add_gae(trajectories, gamma, lam)  # calculate advantage

        # concatenate all episodes into single NumPy arrays
        observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)

        # add various stats to training log:
        log_batch_stats(observes, actions, advantages, disc_sum_rew)

        logger.log("Starting Training...")
        policy.update(observes, actions, advantages, \
                use_lr_adjust, ada_kl_penalty)  # update policy
        val_func.fit(observes, disc_sum_rew)  # update value function

        logger.log('--------------------------------\n')

    policy.close_sess()
    val_func.close_sess()
[ "utils.Scaler", "numpy.array", "gym.wrappers.Monitor", "tensorflow.set_random_seed", "gym.make", "numpy.mean", "numpy.max", "numpy.random.seed", "numpy.concatenate", "numpy.min", "policy.Policy", "tb_logger.log", "value_function.NNValueFunction", "numpy.squeeze", "datetime.datetime.utcnow", "os.path.join", "random.seed", "numpy.asscalar", "numpy.append", "tb_logger.dump_tabular", "numpy.var" ]
[((1299, 1316), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (1313, 1316), True, 'import numpy as np\n'), ((1321, 1335), 'random.seed', 'random.seed', (['i'], {}), '(i)\n', (1332, 1335), False, 'import random\n'), ((1689, 1707), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1697, 1707), False, 'import gym\n'), ((4897, 4954), 'numpy.concatenate', 'np.concatenate', (["[t['unscaled_obs'] for t in trajectories]"], {}), "([t['unscaled_obs'] for t in trajectories])\n", (4911, 4954), True, 'import numpy as np\n'), ((7806, 7859), 'numpy.concatenate', 'np.concatenate', (["[t['observes'] for t in trajectories]"], {}), "([t['observes'] for t in trajectories])\n", (7820, 7859), True, 'import numpy as np\n'), ((7874, 7926), 'numpy.concatenate', 'np.concatenate', (["[t['actions'] for t in trajectories]"], {}), "([t['actions'] for t in trajectories])\n", (7888, 7926), True, 'import numpy as np\n'), ((7946, 8003), 'numpy.concatenate', 'np.concatenate', (["[t['disc_sum_rew'] for t in trajectories]"], {}), "([t['disc_sum_rew'] for t in trajectories])\n", (7960, 8003), True, 'import numpy as np\n'), ((8021, 8076), 'numpy.concatenate', 'np.concatenate', (["[t['advantages'] for t in trajectories]"], {}), "([t['advantages'] for t in trajectories])\n", (8035, 8076), True, 'import numpy as np\n'), ((8970, 8991), 'tb_logger.dump_tabular', 'logger.dump_tabular', ([], {}), '()\n', (8989, 8991), True, 'import tb_logger as logger\n'), ((10446, 10487), 'os.path.join', 'os.path.join', (['"""log-files/"""', 'env_name', 'now'], {}), "('log-files/', env_name, now)\n", (10458, 10487), False, 'import os\n'), ((10498, 10565), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', 'aigym_path'], {'force': '(True)', 'video_callable': '(False)'}), '(env, aigym_path, force=True, video_callable=False)\n', (10514, 10565), False, 'from gym import wrappers\n'), ((10584, 10599), 'utils.Scaler', 'Scaler', (['obs_dim'], {}), '(obs_dim)\n', (10590, 10599), False, 'from utils import Scaler\n'), ((10615, 10650), 'value_function.NNValueFunction', 'NNValueFunction', (['obs_dim', 'hid1_mult'], {}), '(obs_dim, hid1_mult)\n', (10630, 10650), False, 'from value_function import NNValueFunction\n'), ((10669, 10870), 'policy.Policy', 'Policy', (['obs_dim', 'act_dim', 'kl_targ', 'hid1_mult', 'policy_logvar', 'epochs', 'phi_epochs'], {'policy_size': 'policy_size', 'phi_hidden_sizes': 'phi_hs', 'c_ph': 'coef', 'reg_scale': 'reg_scale', 'lr_phi': 'phi_lr', 'phi_obj': 'phi_obj'}), '(obs_dim, act_dim, kl_targ, hid1_mult, policy_logvar, epochs,\n phi_epochs, policy_size=policy_size, phi_hidden_sizes=phi_hs, c_ph=coef,\n reg_scale=reg_scale, lr_phi=phi_lr, phi_obj=phi_obj)\n', (10675, 10870), False, 'from policy import Policy\n'), ((1273, 1294), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['i'], {}), '(i)\n', (1291, 1294), True, 'import tensorflow as tf\n'), ((2905, 2937), 'numpy.append', 'np.append', (['obs', '[[step]]'], {'axis': '(1)'}), '(obs, [[step]], axis=1)\n', (2914, 2937), True, 'import numpy as np\n'), ((3480, 3504), 'numpy.concatenate', 'np.concatenate', (['observes'], {}), '(observes)\n', (3494, 3504), True, 'import numpy as np\n'), ((3506, 3529), 'numpy.concatenate', 'np.concatenate', (['actions'], {}), '(actions)\n', (3520, 3529), True, 'import numpy as np\n'), ((3543, 3578), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float64'}), '(rewards, dtype=np.float64)\n', (3551, 3578), True, 'import numpy as np\n'), ((3580, 3608), 'numpy.concatenate', 'np.concatenate', (['unscaled_obs'], {}), 
'(unscaled_obs)\n', (3594, 3608), True, 'import numpy as np\n'), ((11161, 11201), 'tb_logger.log', 'logger.log', (['("""\n#Training Iter %d""" % _)'], {}), '("""\n#Training Iter %d""" % _)\n', (11171, 11201), True, 'import tb_logger as logger\n'), ((11207, 11235), 'tb_logger.log', 'logger.log', (['"""Draw Samples.."""'], {}), "('Draw Samples..')\n", (11217, 11235), True, 'import tb_logger as logger\n'), ((11884, 11918), 'tb_logger.log', 'logger.log', (['"""Starting Training..."""'], {}), "('Starting Training...')\n", (11894, 11918), True, 'import tb_logger as logger\n'), ((12119, 12167), 'tb_logger.log', 'logger.log', (['"""--------------------------------\n"""'], {}), "('--------------------------------\\n')\n", (12129, 12167), True, 'import tb_logger as logger\n'), ((3238, 3264), 'numpy.squeeze', 'np.squeeze', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (3248, 3264), True, 'import numpy as np\n'), ((3329, 3348), 'numpy.asscalar', 'np.asscalar', (['reward'], {}), '(reward)\n', (3340, 3348), True, 'import numpy as np\n'), ((7303, 7335), 'numpy.append', 'np.append', (['(values[1:] * gamma)', '(0)'], {}), '(values[1:] * gamma, 0)\n', (7312, 7335), True, 'import numpy as np\n'), ((8387, 8404), 'numpy.mean', 'np.mean', (['observes'], {}), '(observes)\n', (8394, 8404), True, 'import numpy as np\n'), ((8426, 8442), 'numpy.min', 'np.min', (['observes'], {}), '(observes)\n', (8432, 8442), True, 'import numpy as np\n'), ((8464, 8480), 'numpy.max', 'np.max', (['observes'], {}), '(observes)\n', (8470, 8480), True, 'import numpy as np\n'), ((8503, 8519), 'numpy.mean', 'np.mean', (['actions'], {}), '(actions)\n', (8510, 8519), True, 'import numpy as np\n'), ((8541, 8556), 'numpy.max', 'np.max', (['actions'], {}), '(actions)\n', (8547, 8556), True, 'import numpy as np\n'), ((8633, 8652), 'numpy.mean', 'np.mean', (['advantages'], {}), '(advantages)\n', (8640, 8652), True, 'import numpy as np\n'), ((8674, 8692), 'numpy.min', 'np.min', (['advantages'], {}), '(advantages)\n', (8680, 8692), True, 'import numpy as np\n'), ((8714, 8732), 'numpy.max', 'np.max', (['advantages'], {}), '(advantages)\n', (8720, 8732), True, 'import numpy as np\n'), ((8754, 8772), 'numpy.var', 'np.var', (['advantages'], {}), '(advantages)\n', (8760, 8772), True, 'import numpy as np\n'), ((8799, 8820), 'numpy.mean', 'np.mean', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8806, 8820), True, 'import numpy as np\n'), ((8846, 8866), 'numpy.min', 'np.min', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8852, 8866), True, 'import numpy as np\n'), ((8892, 8912), 'numpy.max', 'np.max', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8898, 8912), True, 'import numpy as np\n'), ((8938, 8958), 'numpy.var', 'np.var', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8944, 8958), True, 'import numpy as np\n'), ((10384, 10401), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (10399, 10401), False, 'from datetime import datetime\n'), ((8586, 8609), 'numpy.var', 'np.var', (['actions'], {'axis': '(0)'}), '(actions, axis=0)\n', (8592, 8609), True, 'import numpy as np\n')]
""" Functions to load and process Ausgrid dataset. The dataset contains 300 users with their location, PV production and electrical consumption. The timeline for this dataset is 3 years separated in 3 files. """ import os import pickle import numpy as np import pandas as pd from pandas.tseries.offsets import Day from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from ...common import enable_cuda DATA_PATH_ROOT = os.path.expanduser('~/Documents/Deep_Learning_Resources/datasets/ausgrid') FILE_PATH_DICT = { '2010-2011': '2010-2011 Solar home electricity data.csv', '2011-2012': '2011-2012 Solar home electricity data.csv', '2012-2013': '2012-2013 Solar home electricity data.csv' } DATA_FRAME_PATH_DICT = { '2011-2012': '2011-2012 Solar home electricity data.pkl' } def process_reshape_data_frame(year='2011-2012'): assert year in FILE_PATH_DICT fname = os.path.join(DATA_PATH_ROOT, FILE_PATH_DICT[year]) d_raw = pd.read_csv(fname, skiprows=1, parse_dates=['date'], dayfirst=True, na_filter=False, dtype={'Row Quality': str}) d0, d1 = d_raw.date.min(), d_raw.date.max() index = pd.date_range(d0, d1 + Day(1), freq='30T', closed='left') customers = sorted(d_raw.Customer.unique()) channels = ['GC', 'GG', 'CL'] empty_cols = pd.MultiIndex(levels=[customers, channels], labels=[[], []], names=['Customer', 'Channel']) df = pd.DataFrame(index=index, columns=empty_cols) missing_records = [] for c in customers: d_c = d_raw[d_raw.Customer == c] for ch in channels: d_c_ch = d_c[d_c['Consumption Category'] == ch] ts = d_c_ch.iloc[:, 5:-1].values.ravel() if len(ts) != len(index): missing_records.append((c, ch, len(ts))) else: df[c, ch] = ts d_customer_cap = d_raw[['Customer', 'Generator Capacity']] gen_cap = d_customer_cap.groupby('Customer')['Generator Capacity'].mean() d_customer_post = d_raw[['Customer', 'Postcode']] postcode = d_customer_post.groupby('Customer')['Postcode'].mean() return df, missing_records, gen_cap, postcode def save_data_frame(year='2011-2012'): path = os.path.join(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year]) df, missing_records, gen_cap, postcode = process_reshape_data_frame(year) data_dict = { 'df': df, 'miss_records': missing_records, 'gen_cap': gen_cap, 'postcode': postcode } with open(path, 'wb') as f: pickle.dump(data_dict, f) def load_data_frame(year='2011-2012'): path = os.path.join(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year]) with open(path, 'rb') as f: data_dict = pickle.load(f) df, missing_records, gen_cap, postcode = data_dict['df'], data_dict['miss_records'], data_dict['gen_cap'], \ data_dict['postcode'] return df, missing_records, gen_cap, postcode def create_training_data(year='2011-2012'): """ Create numpy format of training data. 
We treat generation capacity and postcode as user profiles Args: year: aus year to extract Returns: data with shape (300, 366, 48, 2) -> (user, day, half hour, consumption/PV) generation capacity (300,) postcode (300,) user_id (300,) """ df, _, gen_cap, postcode = load_data_frame(year) data = np.zeros(shape=(300, 366, 48, 2)) for i in range(300): data[i] = df[i + 1].values.reshape(366, 48, -1)[:, :, :2] user_id = np.arange(0, 300) return data, gen_cap.values, postcode.values, user_id class AusgridDataSet(Dataset): def __init__(self, year='2011-2012', train=True, transform=None, target_transform=None): data, gen_cap, postcode, user_id = create_training_data(year) if train: self.data = data[:, :300, :, :] else: self.data = data[:, 300:, :, :] self.num_days = self.data.shape[1] self.data = self.data.reshape((-1, 48, 2)).transpose((0, 2, 1)) self.gen_cap = gen_cap self.postcode = postcode self.user_id = user_id self.transform = transform self.target_transform = target_transform def __getitem__(self, index): data = self.data[index] gen_cap = self.gen_cap[index // self.num_days] postcode = self.postcode[index // self.num_days] user_id = self.user_id[index // self.num_days] if self.transform is not None: data = self.transform(data) if self.target_transform is not None: gen_cap, postcode, user_id = self.target_transform(gen_cap, postcode, user_id) return data, (gen_cap, postcode, user_id) def __len__(self): return len(self.data) def get_ausgrid_default_transform(): return None def get_ausgrid_dataset(train, transform=None): if transform is None: transform = get_ausgrid_default_transform() return AusgridDataSet(train=train, transform=transform) def get_ausgrid_dataloader(train, batch_size=128, transform=None): kwargs = {'num_workers': 1, 'pin_memory': True} if enable_cuda else {} dataset = get_ausgrid_dataset(train=train, transform=transform) data_loader = DataLoader(dataset, batch_size, shuffle=True, **kwargs) return data_loader
[ "pickle.dump", "pandas.MultiIndex", "pandas.read_csv", "numpy.arange", "torch.utils.data.dataloader.DataLoader", "os.path.join", "pickle.load", "numpy.zeros", "pandas.DataFrame", "pandas.tseries.offsets.Day", "os.path.expanduser" ]
[((465, 539), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents/Deep_Learning_Resources/datasets/ausgrid"""'], {}), "('~/Documents/Deep_Learning_Resources/datasets/ausgrid')\n", (483, 539), False, 'import os\n'), ((934, 984), 'os.path.join', 'os.path.join', (['DATA_PATH_ROOT', 'FILE_PATH_DICT[year]'], {}), '(DATA_PATH_ROOT, FILE_PATH_DICT[year])\n', (946, 984), False, 'import os\n'), ((997, 1113), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'skiprows': '(1)', 'parse_dates': "['date']", 'dayfirst': '(True)', 'na_filter': '(False)', 'dtype': "{'Row Quality': str}"}), "(fname, skiprows=1, parse_dates=['date'], dayfirst=True,\n na_filter=False, dtype={'Row Quality': str})\n", (1008, 1113), True, 'import pandas as pd\n'), ((1351, 1447), 'pandas.MultiIndex', 'pd.MultiIndex', ([], {'levels': '[customers, channels]', 'labels': '[[], []]', 'names': "['Customer', 'Channel']"}), "(levels=[customers, channels], labels=[[], []], names=[\n 'Customer', 'Channel'])\n", (1364, 1447), True, 'import pandas as pd\n'), ((1452, 1497), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index', 'columns': 'empty_cols'}), '(index=index, columns=empty_cols)\n', (1464, 1497), True, 'import pandas as pd\n'), ((2243, 2299), 'os.path.join', 'os.path.join', (['DATA_PATH_ROOT', 'DATA_FRAME_PATH_DICT[year]'], {}), '(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year])\n', (2255, 2299), False, 'import os\n'), ((2636, 2692), 'os.path.join', 'os.path.join', (['DATA_PATH_ROOT', 'DATA_FRAME_PATH_DICT[year]'], {}), '(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year])\n', (2648, 2692), False, 'import os\n'), ((3454, 3487), 'numpy.zeros', 'np.zeros', ([], {'shape': '(300, 366, 48, 2)'}), '(shape=(300, 366, 48, 2))\n', (3462, 3487), True, 'import numpy as np\n'), ((3593, 3610), 'numpy.arange', 'np.arange', (['(0)', '(300)'], {}), '(0, 300)\n', (3602, 3610), True, 'import numpy as np\n'), ((5306, 5361), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)'}), '(dataset, batch_size, shuffle=True, **kwargs)\n', (5316, 5361), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2558, 2583), 'pickle.dump', 'pickle.dump', (['data_dict', 'f'], {}), '(data_dict, f)\n', (2569, 2583), False, 'import pickle\n'), ((2745, 2759), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2756, 2759), False, 'import pickle\n'), ((1217, 1223), 'pandas.tseries.offsets.Day', 'Day', (['(1)'], {}), '(1)\n', (1220, 1223), False, 'from pandas.tseries.offsets import Day\n')]
#!/usr/bin/env python
"""
Estimates the static background in a STORM movie. The estimate
is performed by averaging this might not be the best choice
for movies with a high density of real localizations.

This may be a good choice if you have a largish fixed background
and a relatively low density of real localizations.

Hazen 8/16
"""

import numpy


class StaticBackgroundException(Exception):
    def __init__(self, message):
        Exception.__init__(self, message)


class StaticBGEstimator(object):
    """
    Estimates the background using a simple boxcar average. In
    the case of movies with activation frames, these frames will
    be ignored in the background estimate.

    Note: This expects to be asked for estimates in a sequential
    fashion as would occur during normal STORM movie analysis.
    """
    def __init__(self, frame_reader = None, start_frame = 0, sample_size = 100, descriptor = "1", **kwds):
        self.cur_frame = start_frame - 1
        self.descriptor = descriptor
        self.descriptor_len = len(descriptor)
        self.frame_reader = frame_reader
        self.number_averaged = 0
        self.sample_size = sample_size

        [movie_w, movie_h, self.movie_l] = frame_reader.filmSize()

        # Figure out where to start and end the average.
        end_frame = start_frame + int(self.sample_size/2)
        start_frame = start_frame - int(self.sample_size/2)

        if (start_frame < 0):
            start_frame = 0
            end_frame = start_frame + self.sample_size
            if (end_frame > self.movie_l):
                end_frame = self.movie_l
                self.sample_size = self.movie_l

        if (end_frame > self.movie_l):
            end_frame = self.movie_l
            start_frame = end_frame - self.sample_size
            if (start_frame < 0):
                start_frame = 0
                self.sample_size = self.movie_l

        self.running_sum = numpy.zeros((movie_h, movie_w))
        for i in range(start_frame, end_frame):
            if not self.shouldIgnore(i):
                self.number_averaged += 1
                self.running_sum += self.frame_reader.loadAFrame(i)

    def estimateBG(self, frame_number):
        if (frame_number != (self.cur_frame + 1)):
            raise StaticBackgroundException("Received request for an estimate of a non-sequential frame " + str(self.cur_frame) + " " + str(frame_number))
        else:
            self.cur_frame = frame_number

        # Move average forward by 1 frame if possible.
        start_frame = frame_number - int(self.sample_size/2)
        end_frame = frame_number + int(self.sample_size/2)
        if (start_frame > 0) and (end_frame < self.movie_l):

            # Remove old frame.
            if not self.shouldIgnore(start_frame - 1):
                self.number_averaged -= 1
                self.running_sum -= self.frame_reader.loadAFrame(start_frame - 1)

            # Add new frame.
            if not self.shouldIgnore(end_frame):
                self.number_averaged += 1
                self.running_sum += self.frame_reader.loadAFrame(end_frame)

        # Return the current average.
        return self.running_sum/self.number_averaged

    def shouldIgnore(self, frame_number):
        desc = self.descriptor[frame_number % self.descriptor_len]
        if (desc == "0"):
            #print("Ignoring frame", frame_number)
            return True
        else:
            return False


if (__name__ == "__main__"):

    import argparse

    import storm_analysis.sa_library.datareader as datareader
    import storm_analysis.sa_library.datawriter as datawriter
    import storm_analysis.sa_library.parameters as params

    # Process command line arguments.
    parser = argparse.ArgumentParser(description = 'Running average background subtraction')

    parser.add_argument('--in_movie', dest='in_movie', type=str, required=True,
                        help = "The name of the movie to analyze, can be .dax, .tiff or .spe format.")
    parser.add_argument('--out_movie', dest='out_movie', type=str, required=True,
                        help = "The name of the output movie (with background subtracted). This will be in .dax format.")
    parser.add_argument('--xml', dest='settings', type=str, required=True,
                        help = "The name of the settings xml file.")

    args = parser.parse_args()

    # Load movies and parameters.
    input_movie = datareader.inferReader(args.in_movie)
    [w, h, l] = input_movie.filmSize()
    output_movie = datawriter.DaxWriter(args.out_movie)

    parameters = params.ParametersCommon().initFromFile(args.settings)

    n_frames = parameters.getAttr("max_frame")
    if (n_frames > l) or (n_frames == -1):
        n_frames = l

    # Default to a sample size if the settings file does not specify this.
    sample_size = 100
    if (parameters.getAttr("static_background_estimate", 0) > 0):
        sample_size = parameters.getAttr("static_background_estimate")
    else:
        print("Did not find parameter 'static_background_estimate' in parameters file, defaulting to", sample_size)

    sbge = StaticBGEstimator(input_movie,
                              sample_size = sample_size,
                              descriptor = parameters.getAttr("descriptor"))
    for i in range(n_frames):
        diff = input_movie.loadAFrame(i) - sbge.estimateBG(i) + 100
        output_movie.addFrame(diff)

    output_movie.close()

#
# The MIT License
#
# Copyright (c) 2016 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
[ "storm_analysis.sa_library.datareader.inferReader", "storm_analysis.sa_library.datawriter.DaxWriter", "argparse.ArgumentParser", "numpy.zeros", "storm_analysis.sa_library.parameters.ParametersCommon" ]
[((3756, 3833), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Running average background subtraction"""'}), "(description='Running average background subtraction')\n", (3779, 3833), False, 'import argparse\n'), ((4453, 4490), 'storm_analysis.sa_library.datareader.inferReader', 'datareader.inferReader', (['args.in_movie'], {}), '(args.in_movie)\n', (4475, 4490), True, 'import storm_analysis.sa_library.datareader as datareader\n'), ((4554, 4590), 'storm_analysis.sa_library.datawriter.DaxWriter', 'datawriter.DaxWriter', (['args.out_movie'], {}), '(args.out_movie)\n', (4574, 4590), True, 'import storm_analysis.sa_library.datawriter as datawriter\n'), ((1947, 1978), 'numpy.zeros', 'numpy.zeros', (['(movie_h, movie_w)'], {}), '((movie_h, movie_w))\n', (1958, 1978), False, 'import numpy\n'), ((4608, 4633), 'storm_analysis.sa_library.parameters.ParametersCommon', 'params.ParametersCommon', ([], {}), '()\n', (4631, 4633), True, 'import storm_analysis.sa_library.parameters as params\n')]
import argparse
import ast
import os
import numpy as np
import cv2

parser = argparse.ArgumentParser()
parser.add_argument('OutputDirectory', help='The directory where the generated images will be saved')
parser.add_argument('--numberOfImages', help='The number of generated images. Default: 100', type=int, default=100)
parser.add_argument('--imageSize', help="The size of images. Default: '(320, 240)'", default='(320, 240)')
parser.add_argument('--circleCenter', help="The circle center. Default: '(160, 120)'", default='(160, 120)')
parser.add_argument('--circleDiameter', help='The circle diameter. Default: 180', type=int, default=180)
parser.add_argument('--squareCenter', help="The square center. Default: '(210, 150)'", default='(210, 150)')
parser.add_argument('--squareSize', help='The square side length. Default: 120', type=int, default=120)
args = parser.parse_args()

imageSize = ast.literal_eval(args.imageSize)
circleCenter = ast.literal_eval(args.circleCenter)
squareCenter = ast.literal_eval(args.squareCenter)

def main():
    print ("generateToyImages.py main()")

    for imageNdx in range(args.numberOfImages):
        imageFilepath = os.path.join(args.OutputDirectory, 'image' + str(imageNdx) + '.png')
        image = np.ones((imageSize[1], imageSize[0]), dtype=np.uint8) * np.random.randint(256)
        cv2.circle(image, circleCenter, args.circleDiameter//2, np.random.randint(256), thickness=cv2.FILLED)
        cv2.rectangle(image, (squareCenter[0] - args.squareSize//2, squareCenter[1] - args.squareSize//2),
                      (squareCenter[0] + args.squareSize // 2, squareCenter[1] + args.squareSize // 2),
                      np.random.randint(256), thickness=cv2.FILLED)
        cv2.imwrite(imageFilepath, image)

if __name__ == '__main__':
    main()
[ "cv2.imwrite", "numpy.ones", "argparse.ArgumentParser", "ast.literal_eval", "numpy.random.randint" ]
[((77, 102), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (100, 102), False, 'import argparse\n'), ((895, 927), 'ast.literal_eval', 'ast.literal_eval', (['args.imageSize'], {}), '(args.imageSize)\n', (911, 927), False, 'import ast\n'), ((943, 978), 'ast.literal_eval', 'ast.literal_eval', (['args.circleCenter'], {}), '(args.circleCenter)\n', (959, 978), False, 'import ast\n'), ((994, 1029), 'ast.literal_eval', 'ast.literal_eval', (['args.squareCenter'], {}), '(args.squareCenter)\n', (1010, 1029), False, 'import ast\n'), ((1720, 1753), 'cv2.imwrite', 'cv2.imwrite', (['imageFilepath', 'image'], {}), '(imageFilepath, image)\n', (1731, 1753), False, 'import cv2\n'), ((1245, 1298), 'numpy.ones', 'np.ones', (['(imageSize[1], imageSize[0])'], {'dtype': 'np.uint8'}), '((imageSize[1], imageSize[0]), dtype=np.uint8)\n', (1252, 1298), True, 'import numpy as np\n'), ((1301, 1323), 'numpy.random.randint', 'np.random.randint', (['(256)'], {}), '(256)\n', (1318, 1323), True, 'import numpy as np\n'), ((1388, 1410), 'numpy.random.randint', 'np.random.randint', (['(256)'], {}), '(256)\n', (1405, 1410), True, 'import numpy as np\n'), ((1666, 1688), 'numpy.random.randint', 'np.random.randint', (['(256)'], {}), '(256)\n', (1683, 1688), True, 'import numpy as np\n')]
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mahalanobis metric."""

import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. <NAME> in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={<NAME>, <NAME> <NAME> <NAME>{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
[ "numpy.mean", "numpy.linalg.pinv", "datasets.utils.file_utils.add_start_docstrings", "numpy.array", "numpy.dot", "numpy.linalg.inv", "datasets.Value", "numpy.cov" ]
[((1945, 2030), 'datasets.utils.file_utils.add_start_docstrings', 'datasets.utils.file_utils.add_start_docstrings', (['_DESCRIPTION', '_KWARGS_DESCRIPTION'], {}), '(_DESCRIPTION,\n _KWARGS_DESCRIPTION)\n', (1991, 2030), False, 'import datasets\n'), ((2534, 2545), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2542, 2545), True, 'import numpy as np\n'), ((2579, 2611), 'numpy.array', 'np.array', (['reference_distribution'], {}), '(reference_distribution)\n', (2587, 2611), True, 'import numpy as np\n'), ((3216, 3248), 'numpy.cov', 'np.cov', (['reference_distribution.T'], {}), '(reference_distribution.T)\n', (3222, 3248), True, 'import numpy as np\n'), ((3409, 3439), 'numpy.dot', 'np.dot', (['X_minus_mu', 'inv_covmat'], {}), '(X_minus_mu, inv_covmat)\n', (3415, 3439), True, 'import numpy as np\n'), ((3170, 3201), 'numpy.mean', 'np.mean', (['reference_distribution'], {}), '(reference_distribution)\n', (3177, 3201), True, 'import numpy as np\n'), ((3287, 3305), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (3300, 3305), True, 'import numpy as np\n'), ((3369, 3388), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cov'], {}), '(cov)\n', (3383, 3388), True, 'import numpy as np\n'), ((3461, 3492), 'numpy.dot', 'np.dot', (['left_term', 'X_minus_mu.T'], {}), '(left_term, X_minus_mu.T)\n', (3467, 3492), True, 'import numpy as np\n'), ((2343, 2381), 'datasets.Value', 'datasets.Value', (['"""float"""'], {'id': '"""sequence"""'}), "('float', id='sequence')\n", (2357, 2381), False, 'import datasets\n')]
import numpy as np
import matplotlib.pyplot as plt

from .integrate import Integrate


class Riemann(Integrate):
    """
    Compute the Riemann sum of f(x) over the interval [a,b].

    Parameters
    ----------
    f : function
        A single variable function f(x), ex: lambda x:np.exp(x**2)
    """

    def __init__(self, f):
        Integrate.__init__(self, f)
        self.N = 25

    def compute_integral(self, a, b, N = 25, method='midpoint'):
        """
        Approximate the value of the integral of f(x) dx from a to b
        with N sub-intervals using left, right or midpoint method

        Parameters
        ----------
        a , b : any numbers
            Endpoints of the interval [a,b]
        N : integer
            Number of subintervals of equal length in the partition of [a,b]
        method : string
            Determines the kind of Riemann sum:\n
            right : Riemann sum using right endpoints\n
            left : Riemann sum using left endpoints\n
            midpoint (default) : Riemann sum using midpoints

        Returns
        -------
        float
            Approximation of the integral given by the Riemann sum.

        Examples
        --------
        >>> compute_integral(0,np.pi/2,1000), f = lambda x:1 / (1 + x**2)
        approx = 1.3731040812301096
        actual = 1.373400766945016
        """
        self.a = a
        self.b = b
        self.N = N
        dx = (self.b - self.a) / self.N
        x = np.linspace(self.a, self.b, self.N+1)

        if method == 'left':
            x_left = x[:-1]  # from 0 to N-1
            return np.sum(self.f(x_left)*dx)
        elif method == 'right':
            x_right = x[1:]  # from 1 to N
            return np.sum(self.f(x_right)*dx)
        elif method == 'midpoint':
            x_mid = (x[:-1] + x[1:])/2  # all N but averaged
            return np.sum(self.f(x_mid)*dx)
        else:
            raise ValueError("Method must be 'left', 'right' or 'midpoint'.")

    def plot_function(self):
        x = np.linspace(self.a, self.b, self.N+1)
        y = self.f(x)

        X = np.linspace(self.a, self.b, 5*self.N+1)
        Y = self.f(X)

        plt.figure(figsize=(15,5))

        plt.subplot(1,3,1)
        plt.plot(X,Y,'b')
        x_left = x[:-1]  # Left endpoints
        y_left = y[:-1]
        plt.plot(x_left,y_left,'b.',markersize=10)
        plt.bar(x_left, y_left,width=(self.b - self.a) / self.N, alpha=0.2, align='edge', edgecolor='b')
        plt.title('Left Riemann Sum, N = {}'.format(self.N))

        plt.subplot(1,3,2)
        plt.plot(X,Y,'b')
        x_mid = (x[:-1] + x[1:])/2  # Midpoints
        y_mid = self.f(x_mid)
        plt.plot(x_mid, y_mid, 'b.', markersize=10)
        plt.bar(x_mid, y_mid,width=(self.b - self.a) / self.N, alpha=0.2, edgecolor='b')
        plt.title('Midpoint Riemann Sum, N = {}'.format(self.N))

        plt.subplot(1, 3, 3)
        plt.plot(X, Y, 'b')
        x_right = x[1:]  # Right endpoints
        y_right = y[1:]
        plt.plot(x_right, y_right,'b.', markersize=10)
        plt.bar(x_right, y_right,width=-(self.b - self.a) / self.N, alpha=0.2, align='edge', edgecolor='b')
        plt.title('Right Riemann Sum, N = {}'.format(self.N))

        plt.show()
[ "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.bar", "matplotlib.pyplot.figure", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ]
[((1479, 1518), 'numpy.linspace', 'np.linspace', (['self.a', 'self.b', '(self.N + 1)'], {}), '(self.a, self.b, self.N + 1)\n', (1490, 1518), True, 'import numpy as np\n'), ((2043, 2082), 'numpy.linspace', 'np.linspace', (['self.a', 'self.b', '(self.N + 1)'], {}), '(self.a, self.b, self.N + 1)\n', (2054, 2082), True, 'import numpy as np\n'), ((2116, 2159), 'numpy.linspace', 'np.linspace', (['self.a', 'self.b', '(5 * self.N + 1)'], {}), '(self.a, self.b, 5 * self.N + 1)\n', (2127, 2159), True, 'import numpy as np\n'), ((2187, 2214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (2197, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2223, 2243), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2234, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2269), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""b"""'], {}), "(X, Y, 'b')\n", (2258, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2386), 'matplotlib.pyplot.plot', 'plt.plot', (['x_left', 'y_left', '"""b."""'], {'markersize': '(10)'}), "(x_left, y_left, 'b.', markersize=10)\n", (2349, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2392, 2494), 'matplotlib.pyplot.bar', 'plt.bar', (['x_left', 'y_left'], {'width': '((self.b - self.a) / self.N)', 'alpha': '(0.2)', 'align': '"""edge"""', 'edgecolor': '"""b"""'}), "(x_left, y_left, width=(self.b - self.a) / self.N, alpha=0.2, align=\n 'edge', edgecolor='b')\n", (2399, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2579), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (2570, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2586, 2605), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""b"""'], {}), "(X, Y, 'b')\n", (2594, 2605), True, 'import matplotlib.pyplot as plt\n'), ((2689, 2732), 'matplotlib.pyplot.plot', 'plt.plot', (['x_mid', 'y_mid', '"""b."""'], {'markersize': '(10)'}), "(x_mid, y_mid, 'b.', markersize=10)\n", (2697, 2732), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2826), 'matplotlib.pyplot.bar', 'plt.bar', (['x_mid', 'y_mid'], {'width': '((self.b - self.a) / self.N)', 'alpha': '(0.2)', 'edgecolor': '"""b"""'}), "(x_mid, y_mid, width=(self.b - self.a) / self.N, alpha=0.2,\n edgecolor='b')\n", (2748, 2826), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2907, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2944), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""b"""'], {}), "(X, Y, 'b')\n", (2933, 2944), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3066), 'matplotlib.pyplot.plot', 'plt.plot', (['x_right', 'y_right', '"""b."""'], {'markersize': '(10)'}), "(x_right, y_right, 'b.', markersize=10)\n", (3027, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3178), 'matplotlib.pyplot.bar', 'plt.bar', (['x_right', 'y_right'], {'width': '(-(self.b - self.a) / self.N)', 'alpha': '(0.2)', 'align': '"""edge"""', 'edgecolor': '"""b"""'}), "(x_right, y_right, width=-(self.b - self.a) / self.N, alpha=0.2,\n align='edge', edgecolor='b')\n", (3081, 3178), True, 'import matplotlib.pyplot as plt\n'), ((3245, 3255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3253, 3255), True, 'import matplotlib.pyplot as plt\n')]
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import utils
from torch.utils.data import Dataset, DataLoader
from net import model
import math
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import StandardScaler
from torch.optim import lr_scheduler
from sklearn.metrics import mean_absolute_percentage_error, mean_absolute_error

#OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# RuntimeError: CUDA error: unspecified launch failure
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"

if torch.cuda.is_available():
    device = torch.device('cuda')
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(0)
    torch.cuda.empty_cache()
else:
    device = torch.device('cpu')

n = 10  # 取前n天的資料作為特徵

#載入資料集
train_x = pd.read_csv(r'D:\dataset\lilium_price\train_data.csv', encoding='utf-8', index_col = 0)
train_y = pd.read_csv(r'D:\dataset\lilium_price\train_label.csv', encoding='utf-8', index_col = 0)
val_x = pd.read_csv(r'D:\dataset\lilium_price\val_data.csv', encoding='utf-8', index_col = 0)
val_y = pd.read_csv(r'D:\dataset\lilium_price\val_label.csv', encoding='utf-8', index_col = 0)

#正規化
x_scaler = StandardScaler().fit(train_x)
train_x = x_scaler.transform(train_x)
val_x = x_scaler.transform(val_x)

# to tensor
train_x = torch.Tensor(train_x)
train_y = torch.Tensor(np.array(train_y))
val_x = torch.Tensor(val_x)
val_y = torch.Tensor(np.array(val_y))

# Setloader
trainset = utils.Setloader(train_x, train_y)
valset = utils.Setloader(val_x, val_y)

# train
batch_size = train_x.shape[0]
val_batch_size = val_x.shape[0]
LR = 0.1
num_epochs = 3000
model = model.RNN_modelv1(input_dim=train_x.shape[1], output_dim=train_y.shape[1]).to(device)
# 選擇優化器與損失函數
optimizer = torch.optim.AdamW(model.parameters(), lr=LR)
criterion = nn.MSELoss().to(device)
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.9)
# scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
#                         T_max=10,
#                         eta_min=1e-6,
#                         last_epoch=-1)

trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
valloader = DataLoader(valset, batch_size=val_batch_size, shuffle=True)
train_epoch_size = math.ceil(len(trainloader.dataset)/batch_size)
val_epoch_size = math.ceil(len(valloader.dataset)/val_batch_size)

loss_list = []
val_loss_list = []
mae_list = []
lr_list = []
for epoch in range(num_epochs):
    epoch += 1
    print('running epoch: {} / {}'.format(epoch, num_epochs))
    #訓練模式
    model.train()
    total_loss = 0
    with tqdm(total=train_epoch_size) as pbar:
        for inputs, target in trainloader:
            inputs, target = inputs.to(device), target.to(device)
            output = model(torch.unsqueeze(inputs, dim=0))
            loss = criterion(torch.squeeze(output), target)
            running_loss = loss.item()
            total_loss += running_loss*inputs.shape[0]
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()  # back propagation, compute gradients
            optimizer.step()
            #更新進度條
            pbar.set_description('train')
            pbar.set_postfix(
                **{
                    'running_loss': running_loss,
                })
            pbar.update(1)

    loss = total_loss/len(trainloader.dataset)
    loss_list.append(loss)

    #評估模式
    model.eval()
    total_val_loss = 0
    total_mae = 0
    with tqdm(total=val_epoch_size) as pbar:
        with torch.no_grad():
            for inputs, target in valloader:
                inputs, target = inputs.to(device), target.to(device)
                output = model(torch.unsqueeze(inputs, dim=0))
                running_val_loss = criterion(torch.squeeze(output), target).item()
                running_mae = mean_absolute_error(target.cpu(), torch.squeeze(output).cpu())
                total_val_loss += running_val_loss*inputs.shape[0]
                total_mae += running_mae*inputs.shape[0]
                #更新進度條
                pbar.set_description('validation')
                pbar.set_postfix(
                    **{
                        'running_val_loss': running_val_loss,
                        'mae': running_mae
                    })
                pbar.update(1)

    lr_list.append(scheduler.get_last_lr())
    scheduler.step()
    val_loss = total_val_loss/len(valloader.dataset)
    mae = total_mae/len(valloader.dataset)
    val_loss_list.append(val_loss)
    mae_list.append(mae)
    print('train_loss: {:.4f}, valid_loss: {:.4f}, MAE:{:.2f}, lr:{:.1e}'.format(loss, val_loss, mae, scheduler.get_last_lr()[0]) )
    #每10個epochs及最後一個epoch儲存模型
    if (not epoch % 10) or (epoch == num_epochs) :
        torch.save(model.state_dict(), './logs/epoch%d-loss%.4f-val_loss%.4f-mae%.2f.pth' %(epoch, loss, val_loss, mae))

#繪製圖
plt.figure()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(loss_list, label='Loss')
plt.plot(val_loss_list, label='Val Loss')
plt.legend(loc='best')
plt.savefig('./images/loss.jpg')
plt.show()

plt.figure()
plt.xlabel('Epochs')
plt.ylabel('mae')
plt.plot(mae_list)
plt.savefig('./images/mae.jpg')
plt.show()
[ "pandas.read_csv", "matplotlib.pyplot.ylabel", "net.model.RNN_modelv1", "numpy.array", "torch.nn.MSELoss", "utils.Setloader", "torch.cuda.is_available", "torch.squeeze", "net.model.train", "net.model.eval", "torch.unsqueeze", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "net.model.state_dict", "matplotlib.pyplot.savefig", "torch.Tensor", "torch.cuda.set_device", "torch.cuda.empty_cache", "matplotlib.pyplot.legend", "torch.device", "matplotlib.pyplot.show", "tqdm.tqdm", "torch.optim.lr_scheduler.StepLR", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.figure", "torch.utils.data.DataLoader", "torch.no_grad", "net.model.parameters" ]
[((635, 660), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (658, 660), False, 'import torch\n'), ((876, 967), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\train_data.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\train_data.csv', encoding='utf-8',\n index_col=0)\n", (887, 967), True, 'import pandas as pd\n'), ((974, 1066), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\train_label.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\train_label.csv', encoding='utf-8',\n index_col=0)\n", (985, 1066), True, 'import pandas as pd\n'), ((1071, 1160), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\val_data.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\val_data.csv', encoding='utf-8',\n index_col=0)\n", (1082, 1160), True, 'import pandas as pd\n'), ((1165, 1255), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\val_label.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\val_label.csv', encoding='utf-8',\n index_col=0)\n", (1176, 1255), True, 'import pandas as pd\n'), ((1394, 1415), 'torch.Tensor', 'torch.Tensor', (['train_x'], {}), '(train_x)\n', (1406, 1415), False, 'import torch\n'), ((1466, 1485), 'torch.Tensor', 'torch.Tensor', (['val_x'], {}), '(val_x)\n', (1478, 1485), False, 'import torch\n'), ((1547, 1580), 'utils.Setloader', 'utils.Setloader', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (1562, 1580), False, 'import utils\n'), ((1590, 1619), 'utils.Setloader', 'utils.Setloader', (['val_x', 'val_y'], {}), '(val_x, val_y)\n', (1605, 1619), False, 'import utils\n'), ((1932, 1987), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(10)', 'gamma': '(0.9)'}), '(optimizer, step_size=10, gamma=0.9)\n', (1951, 1987), False, 'from torch.optim import lr_scheduler\n'), ((2245, 2302), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (2255, 2302), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2315, 2374), 'torch.utils.data.DataLoader', 'DataLoader', (['valset'], {'batch_size': 'val_batch_size', 'shuffle': '(True)'}), '(valset, batch_size=val_batch_size, shuffle=True)\n', (2325, 2374), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5076, 5088), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5086, 5088), True, 'import matplotlib.pyplot as plt\n'), ((5089, 5109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5099, 5109), True, 'import matplotlib.pyplot as plt\n'), ((5110, 5128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (5120, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5162), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_list'], {'label': '"""Loss"""'}), "(loss_list, label='Loss')\n", (5137, 5162), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5204), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss_list'], {'label': '"""Val Loss"""'}), "(val_loss_list, label='Val Loss')\n", (5171, 5204), True, 'import matplotlib.pyplot as plt\n'), ((5205, 5227), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5215, 5227), True, 'import matplotlib.pyplot as 
plt\n'), ((5228, 5260), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/loss.jpg"""'], {}), "('./images/loss.jpg')\n", (5239, 5260), True, 'import matplotlib.pyplot as plt\n'), ((5261, 5271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5269, 5271), True, 'import matplotlib.pyplot as plt\n'), ((5273, 5285), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5283, 5285), True, 'import matplotlib.pyplot as plt\n'), ((5286, 5306), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5296, 5306), True, 'import matplotlib.pyplot as plt\n'), ((5307, 5324), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mae"""'], {}), "('mae')\n", (5317, 5324), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5343), 'matplotlib.pyplot.plot', 'plt.plot', (['mae_list'], {}), '(mae_list)\n', (5333, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5344, 5375), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/mae.jpg"""'], {}), "('./images/mae.jpg')\n", (5355, 5375), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5386), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5384, 5386), True, 'import matplotlib.pyplot as plt\n'), ((675, 695), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (687, 695), False, 'import torch\n'), ((742, 766), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (763, 766), False, 'import torch\n'), ((771, 795), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (793, 795), False, 'import torch\n'), ((815, 834), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (827, 834), False, 'import torch\n'), ((1439, 1456), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (1447, 1456), True, 'import numpy as np\n'), ((1507, 1522), 'numpy.array', 'np.array', (['val_y'], {}), '(val_y)\n', (1515, 1522), True, 'import numpy as np\n'), ((1856, 1874), 'net.model.parameters', 'model.parameters', ([], {}), '()\n', (1872, 1874), False, 'from net import model\n'), ((2692, 2705), 'net.model.train', 'model.train', ([], {}), '()\n', (2703, 2705), False, 'from net import model\n'), ((3599, 3611), 'net.model.eval', 'model.eval', ([], {}), '()\n', (3609, 3611), False, 'from net import model\n'), ((1269, 1285), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1283, 1285), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1727, 1801), 'net.model.RNN_modelv1', 'model.RNN_modelv1', ([], {'input_dim': 'train_x.shape[1]', 'output_dim': 'train_y.shape[1]'}), '(input_dim=train_x.shape[1], output_dim=train_y.shape[1])\n', (1744, 1801), False, 'from net import model\n'), ((1896, 1908), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1906, 1908), True, 'import torch.nn as nn\n'), ((2739, 2767), 'tqdm.tqdm', 'tqdm', ([], {'total': 'train_epoch_size'}), '(total=train_epoch_size)\n', (2743, 2767), False, 'from tqdm import tqdm\n'), ((3662, 3688), 'tqdm.tqdm', 'tqdm', ([], {'total': 'val_epoch_size'}), '(total=val_epoch_size)\n', (3666, 3688), False, 'from tqdm import tqdm\n'), ((3711, 3726), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3724, 3726), False, 'import torch\n'), ((4967, 4985), 'net.model.state_dict', 'model.state_dict', ([], {}), '()\n', (4983, 4985), False, 'from net import model\n'), ((2913, 2943), 'torch.unsqueeze', 'torch.unsqueeze', (['inputs'], {'dim': '(0)'}), '(inputs, dim=0)\n', (2928, 2943), False, 'import torch\n'), ((2974, 2995), 'torch.squeeze', 'torch.squeeze', (['output'], 
{}), '(output)\n', (2987, 2995), False, 'import torch\n'), ((3874, 3904), 'torch.unsqueeze', 'torch.unsqueeze', (['inputs'], {'dim': '(0)'}), '(inputs, dim=0)\n', (3889, 3904), False, 'import torch\n'), ((3951, 3972), 'torch.squeeze', 'torch.squeeze', (['output'], {}), '(output)\n', (3964, 3972), False, 'import torch\n'), ((4053, 4074), 'torch.squeeze', 'torch.squeeze', (['output'], {}), '(output)\n', (4066, 4074), False, 'import torch\n')]
import argparse

import numpy as np

import utils.loader as l


def get_arguments():
    """Gets arguments from the command line.

    Returns:
        A parser with the input arguments.

    """

    # Creates the ArgumentParser
    parser = argparse.ArgumentParser(
        usage='Digitizes a numpy array into intervals in order to create targets.')

    parser.add_argument(
        'input', help='Path to the .npy file', type=str)

    parser.add_argument(
        '-n_bins', help='Number of intervals to digitize', type=int, default=5)

    return parser.parse_args()


if __name__ == "__main__":
    # Gathers the input arguments
    args = get_arguments()

    # Gathering variables from arguments
    input_array = args.input
    n_bins = args.n_bins

    # Loads the array
    features = l.load_npy(input_array)

    # Gathering minimum and maximum feature values
    min_features = features.min(axis=0)
    max_features = features.max(axis=0)

    # Pre-allocating targets array
    y = np.zeros((features.shape[0], features.shape[1]), dtype=np.int)

    print('Creating targets ...')

    # For every possible feature
    for i, (min_f, max_f) in enumerate(zip(min_features, max_features)):
        # Creating equally-spaced intervals
        bins = np.linspace(min_f, max_f, n_bins+1)

        # If iteration corresponds to FID or MSE metric
        if i == 0 or i == 1:
            # Digitizing the features array with flipped intervals
            y[:, i] = np.digitize(features[:, i], np.flip(bins), right=True)

        # If not
        else:
            # Digitizing the features array
            y[:, i] = np.digitize(features[:, i], bins)

    # Gathering most voted `y` along the features
    targets = np.asarray([(np.argmax(np.bincount(y[i, :]))) for i in range(features.shape[0])])

    print(f'Labels, Counts: {np.unique(targets, return_counts=True)}')

    # Saving targets array as a .npy file
    l.save_npy(targets, f'targets.npy')
[ "numpy.flip", "utils.loader.load_npy", "numpy.unique", "argparse.ArgumentParser", "numpy.digitize", "numpy.linspace", "utils.loader.save_npy", "numpy.zeros", "numpy.bincount" ]
[((243, 347), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""Digitizes a numpy array into intervals in order to create targets."""'}), "(usage=\n 'Digitizes a numpy array into intervals in order to create targets.')\n", (266, 347), False, 'import argparse\n'), ((796, 819), 'utils.loader.load_npy', 'l.load_npy', (['input_array'], {}), '(input_array)\n', (806, 819), True, 'import utils.loader as l\n'), ((1000, 1062), 'numpy.zeros', 'np.zeros', (['(features.shape[0], features.shape[1])'], {'dtype': 'np.int'}), '((features.shape[0], features.shape[1]), dtype=np.int)\n', (1008, 1062), True, 'import numpy as np\n'), ((1928, 1963), 'utils.loader.save_npy', 'l.save_npy', (['targets', 'f"""targets.npy"""'], {}), "(targets, f'targets.npy')\n", (1938, 1963), True, 'import utils.loader as l\n'), ((1264, 1301), 'numpy.linspace', 'np.linspace', (['min_f', 'max_f', '(n_bins + 1)'], {}), '(min_f, max_f, n_bins + 1)\n', (1275, 1301), True, 'import numpy as np\n'), ((1628, 1661), 'numpy.digitize', 'np.digitize', (['features[:, i]', 'bins'], {}), '(features[:, i], bins)\n', (1639, 1661), True, 'import numpy as np\n'), ((1503, 1516), 'numpy.flip', 'np.flip', (['bins'], {}), '(bins)\n', (1510, 1516), True, 'import numpy as np\n'), ((1750, 1770), 'numpy.bincount', 'np.bincount', (['y[i, :]'], {}), '(y[i, :])\n', (1761, 1770), True, 'import numpy as np\n'), ((1839, 1877), 'numpy.unique', 'np.unique', (['targets'], {'return_counts': '(True)'}), '(targets, return_counts=True)\n', (1848, 1877), True, 'import numpy as np\n')]
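The script above turns each continuous feature column into an integer class label by binning it with np.digitize over equally spaced edges from np.linspace, flipping the edges for the first two (FID/MSE) columns. Below is a minimal, self-contained sketch of that binning step on made-up values; it does not depend on the utils.loader helpers.

import numpy as np

# Made-up values standing in for one feature column of the loaded array.
values = np.array([0.05, 0.40, 0.55, 0.90])
n_bins = 5
bins = np.linspace(values.min(), values.max(), n_bins + 1)  # 5 intervals -> 6 edges

ascending = np.digitize(values, bins)                            # low value -> low index
descending = np.digitize(values, np.flip(bins), right=True)  # flipped edges invert the ordering

print(ascending, descending)

With right=True and descending edges, larger values map to smaller indices, which is presumably why the script flips the bins for metrics where lower is better.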
import numpy as np
import pandas as pd
import tensorflow as tf

tfd = tf.contrib.distributions


def create_german_datasets(batch=64):
    def gather_labels(df):
        labels = []
        for j in range(df.shape[1]):
            if type(df[0, j]) is str:
                labels.append(np.unique(df[:, j]).tolist())
            else:
                labels.append(np.median(df[:, j]))
        return labels

    def transform_to_binary(df, labels):
        d = np.zeros([df.shape[0], 58])
        u = np.zeros([df.shape[0], 1])
        y = np.zeros([df.shape[0], 1])
        idx = 0
        for j in range(len(labels)):
            if type(labels[j]) is list:
                if len(labels[j]) > 2:
                    for i in range(df.shape[0]):
                        d[i, idx + int(labels[j].index(df[i, j]))] = 1
                    idx += len(labels[j])
                else:
                    for i in range(df.shape[0]):
                        d[i, idx] = int(labels[j].index(df[i, j]))
                    idx += 1
            else:
                if j != 12 and j != len(labels) - 1:
                    for i in range(df.shape[0]):
                        d[i, idx] = float(df[i, j] > labels[j])
                    idx += 1
                elif j == len(labels) - 1:
                    for i in range(df.shape[0]):
                        y[i] = float(df[i, j] > labels[j])
                else:
                    for i in range(df.shape[0]):
                        u[i] = float(df[i, j] > labels[j])
        return d.astype(np.bool), u.astype(np.bool), y.astype(np.bool)  # observation, protected, label

    d = pd.read_csv('german.data.txt', header=None, sep=' ').as_matrix()
    t = pd.read_csv('german.data.txt', header=None, sep=' ').as_matrix()
    labels = gather_labels(d)
    ds = transform_to_binary(d, labels)
    ts = transform_to_binary(t, labels)

    idx = np.arange(d.shape[0])
    np.random.seed(4)
    np.random.shuffle(idx)
    cf = int(d.shape[0] * 0.9)

    german = tuple([a[idx[:cf]].astype(np.float32) for a in ds])
    german_test = tuple([a[idx[:cf]].astype(np.float32) for a in ts])

    train = tf.data.Dataset.from_tensor_slices(german).shuffle(800).batch(batch)
    test = tf.data.Dataset.from_tensor_slices(german_test).batch(batch)

    pu = tfd.Bernoulli(probs=np.mean(german[1]))

    return train, test, pu
[ "numpy.mean", "numpy.median", "numpy.unique", "pandas.read_csv", "tensorflow.data.Dataset.from_tensor_slices", "numpy.zeros", "numpy.random.seed", "numpy.arange", "numpy.random.shuffle" ]
[((1895, 1916), 'numpy.arange', 'np.arange', (['d.shape[0]'], {}), '(d.shape[0])\n', (1904, 1916), True, 'import numpy as np\n'), ((1921, 1938), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (1935, 1938), True, 'import numpy as np\n'), ((1943, 1965), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1960, 1965), True, 'import numpy as np\n'), ((462, 489), 'numpy.zeros', 'np.zeros', (['[df.shape[0], 58]'], {}), '([df.shape[0], 58])\n', (470, 489), True, 'import numpy as np\n'), ((502, 528), 'numpy.zeros', 'np.zeros', (['[df.shape[0], 1]'], {}), '([df.shape[0], 1])\n', (510, 528), True, 'import numpy as np\n'), ((541, 567), 'numpy.zeros', 'np.zeros', (['[df.shape[0], 1]'], {}), '([df.shape[0], 1])\n', (549, 567), True, 'import numpy as np\n'), ((1636, 1688), 'pandas.read_csv', 'pd.read_csv', (['"""german.data.txt"""'], {'header': 'None', 'sep': '""" """'}), "('german.data.txt', header=None, sep=' ')\n", (1647, 1688), True, 'import pandas as pd\n'), ((1709, 1761), 'pandas.read_csv', 'pd.read_csv', (['"""german.data.txt"""'], {'header': 'None', 'sep': '""" """'}), "('german.data.txt', header=None, sep=' ')\n", (1720, 1761), True, 'import pandas as pd\n'), ((2225, 2272), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['german_test'], {}), '(german_test)\n', (2259, 2272), True, 'import tensorflow as tf\n'), ((2315, 2333), 'numpy.mean', 'np.mean', (['german[1]'], {}), '(german[1])\n', (2322, 2333), True, 'import numpy as np\n'), ((365, 384), 'numpy.median', 'np.median', (['df[:, j]'], {}), '(df[:, j])\n', (374, 384), True, 'import numpy as np\n'), ((2145, 2187), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['german'], {}), '(german)\n', (2179, 2187), True, 'import tensorflow as tf\n'), ((287, 306), 'numpy.unique', 'np.unique', (['df[:, j]'], {}), '(df[:, j])\n', (296, 306), True, 'import numpy as np\n')]
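create_german_datasets above one-hot encodes the categorical columns of the German credit table and binarizes the numeric ones against their median before building tf.data pipelines. Below is a minimal numpy-only sketch of that preprocessing idea on made-up values; it stands in for gather_labels/transform_to_binary rather than reproducing them.

import numpy as np

# Numeric column: binarize against the median, as transform_to_binary does.
numeric = np.array([12.0, 7.5, 30.0, 9.0])
binary_numeric = (numeric > np.median(numeric)).astype(np.float32)

# Categorical column: one-hot encode over its observed levels.
categories = np.array(['A11', 'A12', 'A11', 'A14'])
levels = np.unique(categories).tolist()
one_hot = np.zeros((categories.shape[0], len(levels)), dtype=np.float32)
for row, value in enumerate(categories):
    one_hot[row, levels.index(value)] = 1.0

print(binary_numeric)
print(one_hot)

In the original function, column 12 is routed to the protected attribute u and the last column to the label y, which is why those two are excluded from the observation matrix d.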
import bisect import math import operator from datetime import timedelta import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.cm as cm import matplotlib.font_manager as font_manager import matplotlib.patheffects as patheffects import numpy as np from scipy.ndimage.filters import gaussian_filter from matplotlib.offsetbox import AnchoredText from historical_hrrr import historical_hrrr_snow from nohrsc_plotting import nohrsc_snow from plot_cities import get_cities for font in font_manager.findSystemFonts(["."]): font_manager.fontManager.addfont(font) # Set font family globally matplotlib.rcParams['font.family'] = 'Inter' DIFF = 0.2 OPP_DIFF = (0.2, 0.2) ZOOM_LEVEL = 1 LONLAT = (-89.3866, 43.07295) GO_OUT_LONLAT = (3, 1.75) if LONLAT: extent = ( LONLAT[0] - GO_OUT_LONLAT[0], LONLAT[0] + GO_OUT_LONLAT[0], LONLAT[1] - GO_OUT_LONLAT[1], LONLAT[1] + GO_OUT_LONLAT[1] ) else: extent = (-109.291992, -101.887207, 36.862043, 41.393294) extent_lim = (extent[0] - DIFF, extent[1] + DIFF, extent[2] - DIFF, extent[3] + DIFF) extent_opp = (extent[0] + OPP_DIFF[0], extent[1] - OPP_DIFF[0], extent[2] + OPP_DIFF[1], extent[3] - OPP_DIFF[1]) lons_extent = extent[:2] lats_extent = extent[2:] fig: plt.Figure = plt.figure(figsize=(12, 6)) ax: plt.Axes = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree()) ax.set_extent(extent) ax.add_feature(cfeature.LAND.with_scale("50m")) ax.add_feature(cfeature.OCEAN.with_scale("50m"), zorder=100) ax.add_feature(cfeature.STATES.with_scale("50m"), lw=1.25, zorder=200) all_cities = get_cities(extent_opp, spacing_lat=0.5, spacing_long=0.5, min_pop=10000) lons_n, lats_n, snow_n, date, accum_time = nohrsc_snow(extent_lim) coords = historical_hrrr_snow(date, extent_lim, accum_time, goes_out=24, occ=2) all_keys = [*coords.keys()] def distance(tup, lon_, lat_): return (abs(tup[0] - lon_) ** 2 + abs(tup[1] - lat_) ** 2) ** 0.5 def regrid_hrrr(use_closest=False, target=0.25): snow_h = [] for lat in lats_n: snow_h.append([]) lat = round(lat, 2) for lon in lons_n: lon = round(lon, 2) try: snow_h[-1].append(coords[(lon, lat)]) except KeyError: if use_closest: idx = bisect.bisect_left(all_keys, (lon, lat)) dists = ((distance(tup, lon, lat), tup) for tup in all_keys[idx:]) for dist in dists: if dist[0] <= target: closest = dist[1] break else: closest = all_keys[bisect.bisect_left(all_keys, (lon, lat))] snow_h[-1].append(coords[closest]) snow_h = np.array(snow_h) return snow_h snow_h = regrid_hrrr(use_closest=True, target=0.1) diff_snow = snow_n - snow_h diff_snow[np.isnan(diff_snow)] = 0 diff_snow = gaussian_filter(diff_snow, ZOOM_LEVEL) diff_snow[np.where(diff_snow >= 4.75)] = 4.75 diff_snow[np.where(diff_snow < -5)] = -5 if diff_snow.max() < 4.75 and diff_snow.min() > -5: abs_min, abs_max = abs(diff_snow.min()), abs(diff_snow.max()) if abs_min > abs_max: levels = np.arange(math.floor(diff_snow.min()), -math.floor(diff_snow.min()), 0.25) else: levels = np.arange(-math.ceil(diff_snow.max()), math.ceil(diff_snow.max()), 0.25) else: levels = np.arange(-5, 5, 0.25) levels_c = np.arange(-5, 5.01, 1) cmap = cm.get_cmap("coolwarm_r") cmap_c = cm.get_cmap("viridis") norm = colors.BoundaryNorm(levels, cmap.N) norm_c = colors.BoundaryNorm(levels_c, cmap_c.N) # These colormaps are used for debugging to see individual snow levels_s = [0.1, 1, 2, 3, 4, 6, 8, 12, 16, 20, 24, 30, 36, 48, 60, 72] cmap_s = colors.ListedColormap( [ '#bdd7e7', '#6baed6', '#3182bd', '#08519c', '#082694', '#ffff96', 
'#ffc400', '#ff8700', '#db1400', '#9e0000', '#690000', '#ccccff', '#9f8cd8', '#7c52a5', '#561c72', '#40dfff' ] ) norm_s = colors.BoundaryNorm(levels_s, cmap_s.N) # C = ax.contourf( # gaussian_filter(lons_n, ZOOM_LEVEL), gaussian_filter(lats_n, ZOOM_LEVEL), diff_snow, levels, # cmap=cmap, norm=norm, transform=ccrs.PlateCarree(), antialiased=True, alpha=0.75 # ) C = ax.contourf( lons_n, lats_n, snow_n, levels_s, cmap=cmap_s, norm=norm_s, alpha=0.5, transform=ccrs.PlateCarree(), antialiased=True ) # CS = ax.contour( # gaussian_filter(lons_n, ZOOM_LEVEL), gaussian_filter(lats_n, ZOOM_LEVEL), diff_snow, # levels=levels_c, cmap=cmap_c, norm=norm_c, transform=ccrs.PlateCarree() # ) # ax.clabel( # CS, levels_c, # fmt=lambda amt: f"{'-' if amt < 0 else ('+' if amt > 0 else '')}{amt:.0f}\"", inline=True, fontsize=10 # ) # Add all cities to map for city, (lon, lat) in all_cities: txt = ax.text( lon, lat, city, fontdict={"size": 10, "color": "black"}, horizontalalignment="center", verticalalignment="center", transform=ccrs.PlateCarree(), zorder=350 ) txt.set_path_effects([patheffects.withStroke(linewidth=2, foreground="white")]) fig.colorbar( C, label="Difference Between Total Snow and Forecasted Snow (in.)", extend="max" ) ax.set_title( f"Bust or Boom?: from {(date - timedelta(hours=accum_time)).strftime('%B %d, %Y')} to {date.strftime('%B %d, %Y')}", fontweight="bold" ) ax.add_artist( AnchoredText( "Made by @AtlanticWx", loc="lower right", prop={"size": 10}, frameon=True, zorder=300 ) ) plt.show()
[ "scipy.ndimage.filters.gaussian_filter", "numpy.array", "datetime.timedelta", "matplotlib.patheffects.withStroke", "historical_hrrr.historical_hrrr_snow", "numpy.arange", "numpy.where", "nohrsc_plotting.nohrsc_snow", "plot_cities.get_cities", "matplotlib.colors.ListedColormap", "matplotlib.offsetbox.AnchoredText", "matplotlib.cm.get_cmap", "cartopy.feature.STATES.with_scale", "matplotlib.font_manager.findSystemFonts", "cartopy.crs.PlateCarree", "cartopy.feature.LAND.with_scale", "numpy.isnan", "matplotlib.font_manager.fontManager.addfont", "bisect.bisect_left", "matplotlib.pyplot.show", "matplotlib.pyplot.figure", "cartopy.feature.OCEAN.with_scale", "matplotlib.colors.BoundaryNorm" ]
[((593, 628), 'matplotlib.font_manager.findSystemFonts', 'font_manager.findSystemFonts', (["['.']"], {}), "(['.'])\n", (621, 628), True, 'import matplotlib.font_manager as font_manager\n'), ((1358, 1385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1368, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1747), 'plot_cities.get_cities', 'get_cities', (['extent_opp'], {'spacing_lat': '(0.5)', 'spacing_long': '(0.5)', 'min_pop': '(10000)'}), '(extent_opp, spacing_lat=0.5, spacing_long=0.5, min_pop=10000)\n', (1685, 1747), False, 'from plot_cities import get_cities\n'), ((1792, 1815), 'nohrsc_plotting.nohrsc_snow', 'nohrsc_snow', (['extent_lim'], {}), '(extent_lim)\n', (1803, 1815), False, 'from nohrsc_plotting import nohrsc_snow\n'), ((1825, 1895), 'historical_hrrr.historical_hrrr_snow', 'historical_hrrr_snow', (['date', 'extent_lim', 'accum_time'], {'goes_out': '(24)', 'occ': '(2)'}), '(date, extent_lim, accum_time, goes_out=24, occ=2)\n', (1845, 1895), False, 'from historical_hrrr import historical_hrrr_snow\n'), ((3016, 3054), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['diff_snow', 'ZOOM_LEVEL'], {}), '(diff_snow, ZOOM_LEVEL)\n', (3031, 3054), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((3534, 3556), 'numpy.arange', 'np.arange', (['(-5)', '(5.01)', '(1)'], {}), '(-5, 5.01, 1)\n', (3543, 3556), True, 'import numpy as np\n'), ((3565, 3590), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""coolwarm_r"""'], {}), "('coolwarm_r')\n", (3576, 3590), True, 'import matplotlib.cm as cm\n'), ((3600, 3622), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (3611, 3622), True, 'import matplotlib.cm as cm\n'), ((3631, 3666), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['levels', 'cmap.N'], {}), '(levels, cmap.N)\n', (3650, 3666), True, 'import matplotlib.colors as colors\n'), ((3676, 3715), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['levels_c', 'cmap_c.N'], {}), '(levels_c, cmap_c.N)\n', (3695, 3715), True, 'import matplotlib.colors as colors\n'), ((3861, 4068), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['#bdd7e7', '#6baed6', '#3182bd', '#08519c', '#082694', '#ffff96',\n '#ffc400', '#ff8700', '#db1400', '#9e0000', '#690000', '#ccccff',\n '#9f8cd8', '#7c52a5', '#561c72', '#40dfff']"], {}), "(['#bdd7e7', '#6baed6', '#3182bd', '#08519c',\n '#082694', '#ffff96', '#ffc400', '#ff8700', '#db1400', '#9e0000',\n '#690000', '#ccccff', '#9f8cd8', '#7c52a5', '#561c72', '#40dfff'])\n", (3882, 4068), True, 'import matplotlib.colors as colors\n'), ((4106, 4145), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['levels_s', 'cmap_s.N'], {}), '(levels_s, cmap_s.N)\n', (4125, 4145), True, 'import matplotlib.colors as colors\n'), ((5625, 5635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5633, 5635), True, 'import matplotlib.pyplot as plt\n'), ((634, 672), 'matplotlib.font_manager.fontManager.addfont', 'font_manager.fontManager.addfont', (['font'], {}), '(font)\n', (666, 672), True, 'import matplotlib.font_manager as font_manager\n'), ((1496, 1527), 'cartopy.feature.LAND.with_scale', 'cfeature.LAND.with_scale', (['"""50m"""'], {}), "('50m')\n", (1520, 1527), True, 'import cartopy.feature as cfeature\n'), ((1544, 1576), 'cartopy.feature.OCEAN.with_scale', 'cfeature.OCEAN.with_scale', (['"""50m"""'], {}), "('50m')\n", (1569, 1576), True, 'import cartopy.feature as cfeature\n'), ((1605, 1638), 
'cartopy.feature.STATES.with_scale', 'cfeature.STATES.with_scale', (['"""50m"""'], {}), "('50m')\n", (1631, 1638), True, 'import cartopy.feature as cfeature\n'), ((2853, 2869), 'numpy.array', 'np.array', (['snow_h'], {}), '(snow_h)\n', (2861, 2869), True, 'import numpy as np\n'), ((2979, 2998), 'numpy.isnan', 'np.isnan', (['diff_snow'], {}), '(diff_snow)\n', (2987, 2998), True, 'import numpy as np\n'), ((3066, 3093), 'numpy.where', 'np.where', (['(diff_snow >= 4.75)'], {}), '(diff_snow >= 4.75)\n', (3074, 3093), True, 'import numpy as np\n'), ((3112, 3136), 'numpy.where', 'np.where', (['(diff_snow < -5)'], {}), '(diff_snow < -5)\n', (3120, 3136), True, 'import numpy as np\n'), ((3499, 3521), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.25)'], {}), '(-5, 5, 0.25)\n', (3508, 3521), True, 'import numpy as np\n'), ((5477, 5580), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (['"""Made by @AtlanticWx"""'], {'loc': '"""lower right"""', 'prop': "{'size': 10}", 'frameon': '(True)', 'zorder': '(300)'}), "('Made by @AtlanticWx', loc='lower right', prop={'size': 10},\n frameon=True, zorder=300)\n", (5489, 5580), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((1437, 1455), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1453, 1455), True, 'import cartopy.crs as ccrs\n'), ((4462, 4480), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4478, 4480), True, 'import cartopy.crs as ccrs\n'), ((5068, 5086), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5084, 5086), True, 'import cartopy.crs as ccrs\n'), ((5131, 5186), 'matplotlib.patheffects.withStroke', 'patheffects.withStroke', ([], {'linewidth': '(2)', 'foreground': '"""white"""'}), "(linewidth=2, foreground='white')\n", (5153, 5186), True, 'import matplotlib.patheffects as patheffects\n'), ((2390, 2430), 'bisect.bisect_left', 'bisect.bisect_left', (['all_keys', '(lon, lat)'], {}), '(all_keys, (lon, lat))\n', (2408, 2430), False, 'import bisect\n'), ((5348, 5375), 'datetime.timedelta', 'timedelta', ([], {'hours': 'accum_time'}), '(hours=accum_time)\n', (5357, 5375), False, 'from datetime import timedelta\n'), ((2745, 2785), 'bisect.bisect_left', 'bisect.bisect_left', (['all_keys', '(lon, lat)'], {}), '(all_keys, (lon, lat))\n', (2763, 2785), False, 'import bisect\n')]
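regrid_hrrr above fills NOHRSC grid points that are missing from the HRRR dictionary by looking up the nearest stored (lon, lat) key, using bisect on the sorted key list. The snippet below is a simplified, self-contained variant of that nearest-key fallback with invented coordinates; the real function instead scans forward from the insertion point and accepts the first key within a target distance.

import bisect

# Values keyed by (lon, lat); missing grid points fall back to the closest stored key.
coords = {(-89.40, 43.05): 2.0, (-89.35, 43.10): 3.5, (-89.30, 43.00): 1.0}
keys = sorted(coords)

def lookup(lon, lat):
    try:
        return coords[(lon, lat)]
    except KeyError:
        # Candidate keys around the insertion point; pick the closest by squared distance.
        idx = bisect.bisect_left(keys, (lon, lat))
        candidates = keys[max(idx - 1, 0):idx + 1] or keys
        best = min(candidates, key=lambda k: (k[0] - lon) ** 2 + (k[1] - lat) ** 2)
        return coords[best]

print(lookup(-89.37, 43.08))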
import tensorflow as tf import numpy as np import os import pickle from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell from utils import functions, regularization, helpers, pretty_print import argparse def main(results_dir='results/sho/test', trials=1, learning_rate=1e-2, reg_weight=2e-4, timesteps=25, batch_size=129, n_epochs1=2001, n_epochs2=5001, n_epochs3=5001): # Hyperparameters summary_step = 500 timesteps0 = 1 primitive_funcs = [ *[functions.Constant()] * 2, *[functions.Identity()] * 4, *[functions.Square()] * 4, *[functions.Sin()] * 2, *[functions.Exp()] * 2, *[functions.Sigmoid()] * 2, *[functions.Product(norm=0.1)] * 2, ] # Import parabola data data = np.load('dataset/sho.npz') x_d = np.asarray(data["x_d"]) x_v = np.asarray(data["x_v"]) y_d = np.asarray(data["y_d"]) y_v = np.asarray(data["y_v"]) omega2_data = data["omega2"] N = data["N"] # Prepare data x = np.stack((x_d, x_v), axis=2) # Shape (N, NT, 2) y0 = np.stack((y_d[:, 0], y_v[:, 0]), axis=1) # Initial conditions for prediction y, fed into propagator y_data = np.stack((y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1]), axis=2) # shape(NG, LENGTH, 2) # Tensorflow placeholders for x, y0, y x_input = tf.placeholder(shape=(None, x.shape[1], x.shape[2]), dtype=tf.float32, name="enc_input") y0_input = tf.placeholder(shape=(None, 2), dtype=tf.float32, name="prop_input") # input is d, v y_input = tf.placeholder(shape=(None, timesteps, 2), dtype=tf.float32, name="label_input") length_input = tf.placeholder(dtype=tf.int32, shape=()) # Dynamics encoder encoder = helpers.Encoder() training = tf.placeholder_with_default(False, []) z = encoder(x_input, training=training) z_data = omega2_data[:, np.newaxis] # Propagating decoders prop_d = SymbolicNet(2, funcs=primitive_funcs) prop_v = SymbolicNet(2, funcs=primitive_funcs) prop_d.build(4) prop_v.build(4) # Building recurrent structure rnn = tf.keras.layers.RNN(SymbolicCell(prop_d, prop_v), return_sequences=True) y0_rnn = tf.concat([tf.expand_dims(y0_input, axis=1), tf.zeros((tf.shape(y0_input)[0], length_input - 1, 2))], axis=1) prop_input = tf.concat([y0_rnn, tf.keras.backend.repeat(z, length_input), tf.ones((tf.shape(y0_input)[0], length_input, 1))], axis=2) prop_output = rnn(prop_input) epoch = tf.placeholder(tf.float32) reg_freq = np.pi / (n_epochs1 + n_epochs2) / 1.1 reg_loss = tf.sin(reg_freq * epoch) ** 2 * regularization.l12_smooth(prop_d.get_weights()) + \ tf.sin(reg_freq * epoch) ** 2 * regularization.l12_smooth(prop_v.get_weights()) # reg_loss = regularization.l12_smooth(prop_d.get_weights()) + regularization.l12_smooth(prop_v.get_weights()) # Training learning_rate_ph = tf.placeholder(tf.float32) opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate_ph) reg_weight_ph = tf.placeholder(tf.float32) error = tf.losses.mean_squared_error(labels=y_input[:, :length_input, :], predictions=prop_output) loss = error + reg_weight_ph * reg_loss train = tf.group([opt.minimize(loss), encoder.bn.updates]) batch = helpers.batch_generator([x, y_data, y0, z_data], N=N, batch_size=batch_size) # Training session with tf.Session() as sess: for _ in range(trials): loss_i = np.nan while np.isnan(loss_i): loss_list = [] error_list = [] reg_list = [] sess.run(tf.global_variables_initializer()) for i in range(n_epochs1 + n_epochs2): if i < n_epochs1: reg_weight_i = reg_weight / 5 learning_rate_i = learning_rate length_i = min(i // 500 * 2 + timesteps0, timesteps) else: reg_weight_i = reg_weight learning_rate_i = learning_rate / 5 length_i = timesteps x_batch, y_batch, 
y0_batch, z_batch = next(batch) feed_dict = {x_input: x_batch, y0_input: y0_batch, y_input: y_batch, epoch: i, learning_rate_ph: learning_rate_i, training: True, reg_weight_ph: reg_weight_i, length_input: length_i} _ = sess.run(train, feed_dict=feed_dict) if i % summary_step == 0 or i == n_epochs1 - 1: feed_dict[training] = False loss_i, error_i, reg_i = sess.run((loss, error, reg_loss), feed_dict=feed_dict) z_arr = sess.run(z, feed_dict=feed_dict) r = np.corrcoef(z_batch[:, 0], z_arr[:, 0])[1, 0] loss_list.append(loss_i) error_list.append(error_i) reg_list.append(reg_i) print("Epoch %d\tTotal loss: %f\tError: %f\tReg loss: %f\tCorrelation: %f" % (i, loss_i, error_i, reg_i, r)) if np.isnan(loss_i): break # Setting small weights to 0 and freezing them prop_d_masked = MaskedSymbolicNet(sess, prop_d, threshold=0.01) prop_v_masked = MaskedSymbolicNet(sess, prop_v, threshold=0.01) # Keep track of currently existing variables. When we rebuild the rnn, it makes new variables that we need # to initialize. Later, we will use this to figure out what the uninitialized variables are. temp = set(tf.global_variables()) # Rebuilding the decoding propagator. Remove regularization rnn = tf.keras.layers.RNN(SymbolicCell(prop_d_masked, prop_v_masked), return_sequences=True) prop_output = rnn(prop_input) loss = tf.losses.mean_squared_error(labels=y_input[:, :length_input, :], predictions=prop_output) train = tf.group([opt.minimize(loss), encoder.bn.updates]) weights_d = sess.run(prop_d_masked.get_weights()) expr_d = pretty_print.network(weights_d, primitive_funcs, ["d", "v", "z", 1]) print(expr_d) weights_v = sess.run(prop_v_masked.get_weights()) expr_v = pretty_print.network(weights_v, primitive_funcs, ["d", "v", "z", 1]) print(expr_v) print("Frozen weights. Next stage of training.") # Initialize only the uninitialized variables. 
sess.run(tf.variables_initializer(set(tf.global_variables()) - temp)) for i in range(n_epochs3): x_batch, y_batch, y0_batch, z_batch = next(batch) feed_dict = {x_input: x_batch, y0_input: y0_batch, y_input: y_batch, epoch: 0, learning_rate_ph: learning_rate / 10, training: True, reg_weight_ph: 0, length_input: length_i} _ = sess.run(train, feed_dict=feed_dict) if i % summary_step == 0: feed_dict[training] = False loss_i, error_i, reg_i = sess.run((loss, error, reg_loss), feed_dict=feed_dict) z_arr = sess.run(z, feed_dict=feed_dict) r = np.corrcoef(z_batch[:, 0], z_arr[:, 0])[1, 0] loss_list.append(loss_i) error_list.append(error_i) reg_list.append(reg_i) print("Epoch %d\tError: %g\tCorrelation: %f" % (i, error_i, r)) weights_d = sess.run(prop_d_masked.get_weights()) expr_d = pretty_print.network(weights_d, primitive_funcs, ["d", "v", "z", 1]) print(expr_d) weights_v = sess.run(prop_v_masked.get_weights()) expr_v = pretty_print.network(weights_v, primitive_funcs, ["d", "v", "z", 1]) print(expr_v) # Save results results = { "summary_step": summary_step, "learning_rate": learning_rate, "n_epochs1": n_epochs1, "n_epochs2": n_epochs2, "reg_weight": reg_weight, "timesteps": timesteps, "timesteps0": timesteps0, "weights_d": weights_d, "weights_v": weights_v, "loss_plot": loss_list, "error_plot": error_list, "reg_plot": reg_list, "expr_d": expr_d, "expr_v": expr_v } trial_dir = helpers.get_trial_path(results_dir) # Get directory in which to save trial results tf.saved_model.simple_save(sess, trial_dir, inputs={"x": x_input, "y0": y0_input, "training": training}, outputs={"z": z, "y": prop_output}) # Save a summary of the parameters and results with open(os.path.join(trial_dir, 'summary.pickle'), "wb+") as f: pickle.dump(results, f) with open(os.path.join(results_dir, 'eq_summary.txt'), 'a') as f: f.write(str(expr_d) + "\n") f.write(str(expr_v) + "\n") f.write("Error: %f\n\n" % error_list[-1]) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Train the EQL network on simple harmonic oscillator (SHO) task.") parser.add_argument("--results-dir", type=str, default='results/sho/test') parser.add_argument("--reg-weight", type=float, default=2e-4, help='Regularization weight, lambda') parser.add_argument('--learning-rate', type=float, default=1e-2, help='Base learning rate for training') parser.add_argument('--batch-size', type=int, default=128) parser.add_argument("--n-epochs1", type=int, default=2001, help="Number of epochs to train in 1st stage") parser.add_argument("--n-epochs2", type=int, default=5001, help="Number of epochs to train in 2nd stage") parser.add_argument("--n-epochs3", type=int, default=5001, help="Number of epochs to train in 3rd stage") parser.add_argument("--timesteps", type=int, default=25, help="Number of time steps to predict") parser.add_argument('--trials', type=int, default=1, help="Number of trials to train.") args = parser.parse_args() kwargs = vars(args) print(kwargs) if not os.path.exists(kwargs['results_dir']): os.makedirs(kwargs['results_dir']) meta = open(os.path.join(kwargs['results_dir'], 'args.txt'), 'a') import json meta.write(json.dumps(kwargs)) meta.close() main(**kwargs)
[ "utils.symbolic_network.SymbolicCell", "tensorflow.shape", "utils.functions.Square", "tensorflow.sin", "utils.functions.Exp", "os.path.exists", "argparse.ArgumentParser", "tensorflow.placeholder", "tensorflow.Session", "numpy.asarray", "json.dumps", "numpy.stack", "utils.helpers.batch_generator", "tensorflow.keras.backend.repeat", "utils.symbolic_network.SymbolicNet", "tensorflow.saved_model.simple_save", "utils.functions.Product", "numpy.corrcoef", "utils.functions.Sigmoid", "tensorflow.global_variables", "utils.functions.Identity", "utils.symbolic_network.MaskedSymbolicNet", "numpy.isnan", "tensorflow.expand_dims", "pickle.dump", "tensorflow.train.RMSPropOptimizer", "os.makedirs", "utils.pretty_print.network", "os.path.join", "utils.helpers.get_trial_path", "utils.functions.Sin", "tensorflow.placeholder_with_default", "tensorflow.global_variables_initializer", "utils.functions.Constant", "numpy.load", "tensorflow.losses.mean_squared_error", "utils.helpers.Encoder" ]
[((795, 821), 'numpy.load', 'np.load', (['"""dataset/sho.npz"""'], {}), "('dataset/sho.npz')\n", (802, 821), True, 'import numpy as np\n'), ((832, 855), 'numpy.asarray', 'np.asarray', (["data['x_d']"], {}), "(data['x_d'])\n", (842, 855), True, 'import numpy as np\n'), ((866, 889), 'numpy.asarray', 'np.asarray', (["data['x_v']"], {}), "(data['x_v'])\n", (876, 889), True, 'import numpy as np\n'), ((900, 923), 'numpy.asarray', 'np.asarray', (["data['y_d']"], {}), "(data['y_d'])\n", (910, 923), True, 'import numpy as np\n'), ((934, 957), 'numpy.asarray', 'np.asarray', (["data['y_v']"], {}), "(data['y_v'])\n", (944, 957), True, 'import numpy as np\n'), ((1037, 1065), 'numpy.stack', 'np.stack', (['(x_d, x_v)'], {'axis': '(2)'}), '((x_d, x_v), axis=2)\n', (1045, 1065), True, 'import numpy as np\n'), ((1097, 1137), 'numpy.stack', 'np.stack', (['(y_d[:, 0], y_v[:, 0])'], {'axis': '(1)'}), '((y_d[:, 0], y_v[:, 0]), axis=1)\n', (1105, 1137), True, 'import numpy as np\n'), ((1212, 1280), 'numpy.stack', 'np.stack', (['(y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1])'], {'axis': '(2)'}), '((y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1]), axis=2)\n', (1220, 1280), True, 'import numpy as np\n'), ((1366, 1459), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, x.shape[1], x.shape[2])', 'dtype': 'tf.float32', 'name': '"""enc_input"""'}), "(shape=(None, x.shape[1], x.shape[2]), dtype=tf.float32, name\n ='enc_input')\n", (1380, 1459), True, 'import tensorflow as tf\n'), ((1470, 1538), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, 2)', 'dtype': 'tf.float32', 'name': '"""prop_input"""'}), "(shape=(None, 2), dtype=tf.float32, name='prop_input')\n", (1484, 1538), True, 'import tensorflow as tf\n'), ((1570, 1655), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, timesteps, 2)', 'dtype': 'tf.float32', 'name': '"""label_input"""'}), "(shape=(None, timesteps, 2), dtype=tf.float32, name='label_input'\n )\n", (1584, 1655), True, 'import tensorflow as tf\n'), ((1670, 1710), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '()'}), '(dtype=tf.int32, shape=())\n', (1684, 1710), True, 'import tensorflow as tf\n'), ((1749, 1766), 'utils.helpers.Encoder', 'helpers.Encoder', ([], {}), '()\n', (1764, 1766), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((1782, 1820), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)', '[]'], {}), '(False, [])\n', (1809, 1820), True, 'import tensorflow as tf\n'), ((1946, 1983), 'utils.symbolic_network.SymbolicNet', 'SymbolicNet', (['(2)'], {'funcs': 'primitive_funcs'}), '(2, funcs=primitive_funcs)\n', (1957, 1983), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((1997, 2034), 'utils.symbolic_network.SymbolicNet', 'SymbolicNet', (['(2)'], {'funcs': 'primitive_funcs'}), '(2, funcs=primitive_funcs)\n', (2008, 2034), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((2552, 2578), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2566, 2578), True, 'import tensorflow as tf\n'), ((2980, 3006), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2994, 3006), True, 'import tensorflow as tf\n'), ((3017, 3074), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate_ph'}), '(learning_rate=learning_rate_ph)\n', (3042, 3074), True, 'import tensorflow 
as tf\n'), ((3095, 3121), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (3109, 3121), True, 'import tensorflow as tf\n'), ((3134, 3228), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'y_input[:, :length_input, :]', 'predictions': 'prop_output'}), '(labels=y_input[:, :length_input, :],\n predictions=prop_output)\n', (3162, 3228), True, 'import tensorflow as tf\n'), ((3345, 3421), 'utils.helpers.batch_generator', 'helpers.batch_generator', (['[x, y_data, y0, z_data]'], {'N': 'N', 'batch_size': 'batch_size'}), '([x, y_data, y0, z_data], N=N, batch_size=batch_size)\n', (3368, 3421), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((9513, 9620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the EQL network on simple harmonic oscillator (SHO) task."""'}), "(description=\n 'Train the EQL network on simple harmonic oscillator (SHO) task.')\n", (9536, 9620), False, 'import argparse\n'), ((2140, 2168), 'utils.symbolic_network.SymbolicCell', 'SymbolicCell', (['prop_d', 'prop_v'], {}), '(prop_d, prop_v)\n', (2152, 2168), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((3455, 3467), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3465, 3467), True, 'import tensorflow as tf\n'), ((10580, 10617), 'os.path.exists', 'os.path.exists', (["kwargs['results_dir']"], {}), "(kwargs['results_dir'])\n", (10594, 10617), False, 'import os\n'), ((10627, 10661), 'os.makedirs', 'os.makedirs', (["kwargs['results_dir']"], {}), "(kwargs['results_dir'])\n", (10638, 10661), False, 'import os\n'), ((10678, 10725), 'os.path.join', 'os.path.join', (["kwargs['results_dir']", '"""args.txt"""'], {}), "(kwargs['results_dir'], 'args.txt')\n", (10690, 10725), False, 'import os\n'), ((10764, 10782), 'json.dumps', 'json.dumps', (['kwargs'], {}), '(kwargs)\n', (10774, 10782), False, 'import json\n'), ((2217, 2249), 'tensorflow.expand_dims', 'tf.expand_dims', (['y0_input'], {'axis': '(1)'}), '(y0_input, axis=1)\n', (2231, 2249), True, 'import tensorflow as tf\n'), ((2375, 2415), 'tensorflow.keras.backend.repeat', 'tf.keras.backend.repeat', (['z', 'length_input'], {}), '(z, length_input)\n', (2398, 2415), True, 'import tensorflow as tf\n'), ((3556, 3572), 'numpy.isnan', 'np.isnan', (['loss_i'], {}), '(loss_i)\n', (3564, 3572), True, 'import numpy as np\n'), ((5432, 5479), 'utils.symbolic_network.MaskedSymbolicNet', 'MaskedSymbolicNet', (['sess', 'prop_d'], {'threshold': '(0.01)'}), '(sess, prop_d, threshold=0.01)\n', (5449, 5479), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((5508, 5555), 'utils.symbolic_network.MaskedSymbolicNet', 'MaskedSymbolicNet', (['sess', 'prop_v'], {'threshold': '(0.01)'}), '(sess, prop_v, threshold=0.01)\n', (5525, 5555), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((6064, 6158), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'y_input[:, :length_input, :]', 'predictions': 'prop_output'}), '(labels=y_input[:, :length_input, :],\n predictions=prop_output)\n', (6092, 6158), True, 'import tensorflow as tf\n'), ((6310, 6378), 'utils.pretty_print.network', 'pretty_print.network', (['weights_d', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_d, primitive_funcs, ['d', 'v', 'z', 1])\n", (6330, 6378), False, 'from utils import functions, regularization, helpers, 
pretty_print\n'), ((6488, 6556), 'utils.pretty_print.network', 'pretty_print.network', (['weights_v', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_v, primitive_funcs, ['d', 'v', 'z', 1])\n", (6508, 6556), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((7823, 7891), 'utils.pretty_print.network', 'pretty_print.network', (['weights_d', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_d, primitive_funcs, ['d', 'v', 'z', 1])\n", (7843, 7891), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((8001, 8069), 'utils.pretty_print.network', 'pretty_print.network', (['weights_v', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_v, primitive_funcs, ['d', 'v', 'z', 1])\n", (8021, 8069), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((8752, 8787), 'utils.helpers.get_trial_path', 'helpers.get_trial_path', (['results_dir'], {}), '(results_dir)\n', (8774, 8787), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((8849, 8993), 'tensorflow.saved_model.simple_save', 'tf.saved_model.simple_save', (['sess', 'trial_dir'], {'inputs': "{'x': x_input, 'y0': y0_input, 'training': training}", 'outputs': "{'z': z, 'y': prop_output}"}), "(sess, trial_dir, inputs={'x': x_input, 'y0':\n y0_input, 'training': training}, outputs={'z': z, 'y': prop_output})\n", (8875, 8993), True, 'import tensorflow as tf\n'), ((2647, 2671), 'tensorflow.sin', 'tf.sin', (['(reg_freq * epoch)'], {}), '(reg_freq * epoch)\n', (2653, 2671), True, 'import tensorflow as tf\n'), ((2746, 2770), 'tensorflow.sin', 'tf.sin', (['(reg_freq * epoch)'], {}), '(reg_freq * epoch)\n', (2752, 2770), True, 'import tensorflow as tf\n'), ((5803, 5824), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5822, 5824), True, 'import tensorflow as tf\n'), ((5936, 5978), 'utils.symbolic_network.SymbolicCell', 'SymbolicCell', (['prop_d_masked', 'prop_v_masked'], {}), '(prop_d_masked, prop_v_masked)\n', (5948, 5978), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((9222, 9245), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (9233, 9245), False, 'import pickle\n'), ((507, 527), 'utils.functions.Constant', 'functions.Constant', ([], {}), '()\n', (525, 527), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((544, 564), 'utils.functions.Identity', 'functions.Identity', ([], {}), '()\n', (562, 564), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((581, 599), 'utils.functions.Square', 'functions.Square', ([], {}), '()\n', (597, 599), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((616, 631), 'utils.functions.Sin', 'functions.Sin', ([], {}), '()\n', (629, 631), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((648, 663), 'utils.functions.Exp', 'functions.Exp', ([], {}), '()\n', (661, 663), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((680, 699), 'utils.functions.Sigmoid', 'functions.Sigmoid', ([], {}), '()\n', (697, 699), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((716, 743), 'utils.functions.Product', 'functions.Product', ([], {'norm': '(0.1)'}), '(norm=0.1)\n', (733, 743), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((3693, 3726), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (3724, 3726), True, 'import tensorflow as tf\n'), ((9150, 9191), 'os.path.join', 'os.path.join', (['trial_dir', '"""summary.pickle"""'], {}), "(trial_dir, 'summary.pickle')\n", (9162, 9191), False, 'import os\n'), ((9269, 9312), 'os.path.join', 'os.path.join', (['results_dir', '"""eq_summary.txt"""'], {}), "(results_dir, 'eq_summary.txt')\n", (9281, 9312), False, 'import os\n'), ((2261, 2279), 'tensorflow.shape', 'tf.shape', (['y0_input'], {}), '(y0_input)\n', (2269, 2279), True, 'import tensorflow as tf\n'), ((2454, 2472), 'tensorflow.shape', 'tf.shape', (['y0_input'], {}), '(y0_input)\n', (2462, 2472), True, 'import tensorflow as tf\n'), ((5292, 5308), 'numpy.isnan', 'np.isnan', (['loss_i'], {}), '(loss_i)\n', (5300, 5308), True, 'import numpy as np\n'), ((7474, 7513), 'numpy.corrcoef', 'np.corrcoef', (['z_batch[:, 0]', 'z_arr[:, 0]'], {}), '(z_batch[:, 0], z_arr[:, 0])\n', (7485, 7513), True, 'import numpy as np\n'), ((4909, 4948), 'numpy.corrcoef', 'np.corrcoef', (['z_batch[:, 0]', 'z_arr[:, 0]'], {}), '(z_batch[:, 0], z_arr[:, 0])\n', (4920, 4948), True, 'import numpy as np\n'), ((6755, 6776), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6774, 6776), True, 'import tensorflow as tf\n')]
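After the second training stage above, MaskedSymbolicNet zeroes out weights whose magnitude falls below 0.01 and keeps them frozen while the remaining weights are fine-tuned. The snippet below is a numpy-only sketch of that mask-and-freeze idea, not the project's implementation; the array sizes and the update step are invented.

import numpy as np

rng = np.random.default_rng(0)
weights = rng.normal(scale=0.1, size=(4, 3))
threshold = 0.01

# Build a binary mask from the magnitude threshold and prune the small weights.
mask = (np.abs(weights) >= threshold).astype(weights.dtype)
weights *= mask

# Reapplying the mask after each update keeps the pruned entries exactly zero.
gradient_step = rng.normal(scale=0.01, size=weights.shape)
weights = (weights - gradient_step) * mask

print(mask)
print(weights)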
import torch import torch.utils.data from torch import nn from torch.nn import functional as F from rlkit.pythonplusplus import identity from rlkit.torch import pytorch_util as ptu import numpy as np from rlkit.torch.conv_networks import CNN, DCNN from rlkit.torch.vae.vae_base import GaussianLatentVAE imsize48_default_architecture = dict( conv_args=dict( # conv layers kernel_sizes=[5, 3, 3], n_channels=[16, 32, 64], strides=[3, 2, 2], output_size=6, ), conv_kwargs=dict( hidden_sizes=[], # linear layers after conv batch_norm_conv=False, batch_norm_fc=False, ), LSTM_args=dict( input_size=6, hidden_size=128, ), LSTM_kwargs=dict( num_layers=2, ), deconv_args=dict( hidden_sizes=[], deconv_input_width=3, deconv_input_height=3, deconv_input_channels=64, deconv_output_kernel_size=6, deconv_output_strides=3, deconv_output_channels=3, kernel_sizes=[3, 3], n_channels=[32, 16], strides=[2, 2], ), deconv_kwargs=dict( batch_norm_deconv=False, batch_norm_fc=False, ) ) class ConvLSTM2(nn.Module): def __init__( self, representation_size, architecture, encoder_class=CNN, decoder_class=DCNN, decoder_output_activation=identity, decoder_distribution='gaussian_identity_variance', input_channels=3, imsize=48, init_w=1e-3, min_variance=1e-3, hidden_init=ptu.fanin_init, detach_vae_output=True, ): super(ConvLSTM2, self).__init__() self.representation_size = representation_size # record the empirical statistics of latents, when not sample from true prior, sample from them. self.dist_mu = np.zeros(self.representation_size) self.dist_std = np.ones(self.representation_size) if min_variance is None: self.log_min_variance = None else: self.log_min_variance = float(np.log(min_variance)) self.input_channels = input_channels self.imsize = imsize self.imlength = self.imsize * self.imsize * self.input_channels self.detach_vae_output = detach_vae_output conv_args, conv_kwargs, deconv_args, deconv_kwargs = \ architecture['conv_args'], architecture['conv_kwargs'], \ architecture['deconv_args'], architecture['deconv_kwargs'] self.encoder = encoder_class( **conv_args, paddings=np.zeros(len(conv_args['kernel_sizes']), dtype=np.int64), input_height=self.imsize, input_width=self.imsize, input_channels=self.input_channels, init_w=init_w, hidden_init=hidden_init, **conv_kwargs) self.lstm_args, self.lstm_kwargs = architecture['LSTM_args'], architecture['LSTM_kwargs'] self.lstm = nn.LSTM(**self.lstm_args, **self.lstm_kwargs) self.lstm_num_layers = self.lstm_kwargs['num_layers'] self.lstm_hidden_size = self.lstm_args['hidden_size'] assert representation_size == self.lstm_args['input_size'], "lstm input is vae latent, \ so lstm input size should be equal to representation_size!" 
self.vae_fc1 = nn.Linear(conv_args['output_size'], representation_size) self.vae_fc2 = nn.Linear(conv_args['output_size'], representation_size) self.vae_fc1.weight.data.uniform_(-init_w, init_w) self.vae_fc1.bias.data.uniform_(-init_w, init_w) self.vae_fc2.weight.data.uniform_(-init_w, init_w) self.vae_fc2.bias.data.uniform_(-init_w, init_w) self.lstm_fc = nn.Linear(self.lstm_hidden_size, representation_size) self.lstm_fc.weight.data.uniform_(-init_w, init_w) self.lstm_fc.bias.data.uniform_(-init_w, init_w) self.decoder = decoder_class( **deconv_args, fc_input_size=representation_size, init_w=init_w, output_activation=decoder_output_activation, paddings=np.zeros(len(deconv_args['kernel_sizes']), dtype=np.int64), hidden_init=hidden_init, **deconv_kwargs) self.decoder_distribution = decoder_distribution def from_vae_latents_to_lstm_latents(self, latents, lstm_hidden=None): batch_size, feature_size = latents.shape # print(latents.shape) lstm_input = latents lstm_input = lstm_input.view((1, batch_size, -1)) if lstm_hidden is None: lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \ ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size)) h, hidden = self.lstm(lstm_input, lstm_hidden) # [seq_len, batch_size, lstm_hidden_size] lstm_latent = self.lstm_fc(h) lstm_latent = lstm_latent.view((batch_size, -1)) return lstm_latent def encode(self, input, lstm_hidden=None, return_hidden=False, return_vae_latent=False): ''' input: [seq_len x batch x flatten_img_dim] of flattened images lstm_hidden: [lstm_layers x batch x lstm_hidden_size] mark: change depends on how latent distribution parameters are used ''' seq_len, batch_size, feature_size = input.shape # print("in lstm encode: ", seq_len, batch_size, feature_size) input = input.reshape((-1, feature_size)) feature = self.encoder(input) # [seq_len x batch x conv_output_size] vae_mu = self.vae_fc1(feature) if self.log_min_variance is None: vae_logvar = self.vae_fc2(feature) else: vae_logvar = self.log_min_variance + torch.abs(self.vae_fc2(feature)) # lstm_input = self.rsample((vae_mu, vae_logvar)) # if self.detach_vae_output: # lstm_input = lstm_input.detach() if self.detach_vae_output: lstm_input = vae_mu.detach().clone() else: lstm_input = vae_mu lstm_input = lstm_input.view((seq_len, batch_size, -1)) # if self.detach_vae_output: # lstm_input = lstm_input.detach() if lstm_hidden is None: lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \ ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size)) h, hidden = self.lstm(lstm_input, lstm_hidden) # [seq_len, batch_size, lstm_hidden_size] lstm_latent = self.lstm_fc(h) ret = (lstm_latent, ptu.ones_like(lstm_latent)) if return_vae_latent: ret += (vae_mu, vae_logvar) if return_hidden: return ret, hidden return ret #, lstm_input # [seq_len, batch_size, representation_size] def forward(self, input, lstm_hidden=None, return_hidden=False): """ :param input: :return: reconstructed input, obs_distribution_params, latent_distribution_params mark: change to return the feature latents and the lstm latents """ if return_hidden: latent_distribution_params, hidden = self.encode(input, lstm_hidden, return_hidden=True, return_vae_latent=True) # seq_len, batch_size, representation_size else: latent_distribution_params = self.encode(input, lstm_hidden, return_hidden=False, return_vae_latent=True) vae_latent_distribution_params = latent_distribution_params[2:] lstm_latent_encodings = latent_distribution_params[0] vae_latents = 
self.reparameterize(vae_latent_distribution_params) reconstructions, obs_distribution_params = self.decode(vae_latents) # [seq_len * batch_size, representation_size] if return_hidden: return reconstructions, obs_distribution_params, vae_latent_distribution_params, lstm_latent_encodings, hidden return reconstructions, obs_distribution_params, vae_latent_distribution_params, lstm_latent_encodings def reparameterize(self, latent_distribution_params): if self.training: return self.rsample(latent_distribution_params) else: return latent_distribution_params[0] def kl_divergence(self, latent_distribution_params): mu, logvar = latent_distribution_params mu = mu.view((-1, self.representation_size)) # fold the possible seq_len dim logvar = logvar.view((-1, self.representation_size)) return - 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean() def get_encoding_from_latent_distribution_params(self, latent_distribution_params): return latent_distribution_params[0].cpu() def rsample(self, latent_distribution_params): mu, logvar = latent_distribution_params stds = (0.5 * logvar).exp() epsilon = ptu.randn(*mu.size()) latents = epsilon * stds + mu return latents def decode(self, latents): decoded = self.decoder(latents).view(-1, self.imsize * self.imsize * self.input_channels) if self.decoder_distribution == 'bernoulli': return decoded, [decoded] elif self.decoder_distribution == 'gaussian_identity_variance': return torch.clamp(decoded, 0, 1), [torch.clamp(decoded, 0, 1), torch.ones_like(decoded)] else: raise NotImplementedError('Distribution {} not supported'.format( self.decoder_distribution)) def logprob(self, inputs, obs_distribution_params): seq_len, batch_size, feature_size = inputs.shape inputs = inputs.view((-1, feature_size)) if self.decoder_distribution == 'bernoulli': inputs = inputs.narrow(start=0, length=self.imlength, dim=1).contiguous().view(-1, self.imlength) # obs_distribution_params[0] = obs_distribution_params[0].view((-1, feature_size)) log_prob = - F.binary_cross_entropy( obs_distribution_params[0], inputs, reduction='elementwise_mean' ) * self.imlength return log_prob if self.decoder_distribution == 'gaussian_identity_variance': # obs_distribution_params[0] = obs_distribution_params[0].view((-1, feature_size)) inputs = inputs.narrow(start=0, length=self.imlength, dim=1).contiguous().view(-1, self.imlength) log_prob = -1 * F.mse_loss(inputs, obs_distribution_params[0], reduction='elementwise_mean') return log_prob else: raise NotImplementedError('Distribution {} not supported'.format( self.decoder_distribution)) def init_hidden(self, batch_size=1): lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \ ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size)) return lstm_hidden
[ "torch.ones_like", "torch.nn.functional.mse_loss", "numpy.ones", "torch.nn.LSTM", "numpy.log", "torch.nn.functional.binary_cross_entropy", "numpy.zeros", "torch.nn.Linear", "rlkit.torch.pytorch_util.ones_like", "torch.clamp", "rlkit.torch.pytorch_util.zeros" ]
[((1868, 1902), 'numpy.zeros', 'np.zeros', (['self.representation_size'], {}), '(self.representation_size)\n', (1876, 1902), True, 'import numpy as np\n'), ((1928, 1961), 'numpy.ones', 'np.ones', (['self.representation_size'], {}), '(self.representation_size)\n', (1935, 1961), True, 'import numpy as np\n'), ((2993, 3038), 'torch.nn.LSTM', 'nn.LSTM', ([], {}), '(**self.lstm_args, **self.lstm_kwargs)\n', (3000, 3038), False, 'from torch import nn\n'), ((3357, 3413), 'torch.nn.Linear', 'nn.Linear', (["conv_args['output_size']", 'representation_size'], {}), "(conv_args['output_size'], representation_size)\n", (3366, 3413), False, 'from torch import nn\n'), ((3437, 3493), 'torch.nn.Linear', 'nn.Linear', (["conv_args['output_size']", 'representation_size'], {}), "(conv_args['output_size'], representation_size)\n", (3446, 3493), False, 'from torch import nn\n'), ((3752, 3805), 'torch.nn.Linear', 'nn.Linear', (['self.lstm_hidden_size', 'representation_size'], {}), '(self.lstm_hidden_size, representation_size)\n', (3761, 3805), False, 'from torch import nn\n'), ((6637, 6663), 'rlkit.torch.pytorch_util.ones_like', 'ptu.ones_like', (['lstm_latent'], {}), '(lstm_latent)\n', (6650, 6663), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((10989, 11055), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (10998, 11055), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((11080, 11146), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (11089, 11146), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((2093, 2113), 'numpy.log', 'np.log', (['min_variance'], {}), '(min_variance)\n', (2099, 2113), True, 'import numpy as np\n'), ((4626, 4692), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (4635, 4692), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((4717, 4783), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (4726, 4783), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((6304, 6370), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (6313, 6370), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((6395, 6461), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (6404, 6461), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((10644, 10720), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['inputs', 'obs_distribution_params[0]'], {'reduction': '"""elementwise_mean"""'}), "(inputs, obs_distribution_params[0], reduction='elementwise_mean')\n", (10654, 10720), True, 'from torch.nn import functional as F\n'), ((9363, 9389), 'torch.clamp', 'torch.clamp', (['decoded', '(0)', '(1)'], {}), '(decoded, 0, 1)\n', (9374, 9389), False, 'import torch\n'), ((10111, 10204), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['obs_distribution_params[0]', 'inputs'], {'reduction': 
'"""elementwise_mean"""'}), "(obs_distribution_params[0], inputs, reduction=\n 'elementwise_mean')\n", (10133, 10204), True, 'from torch.nn import functional as F\n'), ((9392, 9418), 'torch.clamp', 'torch.clamp', (['decoded', '(0)', '(1)'], {}), '(decoded, 0, 1)\n', (9403, 9418), False, 'import torch\n'), ((9468, 9492), 'torch.ones_like', 'torch.ones_like', (['decoded'], {}), '(decoded)\n', (9483, 9492), False, 'import torch\n')]
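The rsample and kl_divergence methods above implement the standard reparameterization trick and the Gaussian KL term. Below is a minimal PyTorch sketch of those two computations with placeholder tensors, matching the formulas used in the class.

import torch

# Placeholder latent distribution parameters (batch of 2, latent size 4).
mu = torch.zeros(2, 4)
logvar = torch.full((2, 4), -2.0)

# Reparameterized sample: mu + std * eps with std = exp(0.5 * logvar).
std = (0.5 * logvar).exp()
eps = torch.randn_like(std)
latents = mu + std * eps

# KL divergence against a unit Gaussian prior, as in kl_divergence() above.
kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean()
print(latents.shape, kl.item())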
""" game.py Game class which contains the player, target, and all the walls. """ from math import cos, sin import matplotlib.collections as mc import pylab as plt from numpy import asarray, pi from config import Config from environment.robot import Robot from utils.dictionary import * from utils.myutils import load_pickle, store_pickle from utils.vec2d import Vec2d class Game: """ A game environment is built up from the following segments: * robot: The player manoeuvring in the environment * target: Robot that must be reached by the robot """ __slots__ = { 'bot_config', 'done', 'game_config', 'id', 'init_distance', 'noise', 'player', 'player_angle_noise', 'save_path', 'score', 'silent', 'spawn_function', 'steps_taken', 'stop_if_reached', 'target', 'wall_bound', 'x_axis', 'y_axis' } def __init__(self, game_id: int, config: Config, player_noise: float = 0, noise: bool = True, overwrite: bool = False, save_path: str = '', silent: bool = True, spawn_func=None, stop_if_reached: bool = True, wall_bound: bool = True, ): """ Define a new game. :param game_id: Game id :param config: Configuration file (only needed to pass during creation) :param player_noise: The maximum noise added to the player's initial location :param noise: Add noise when progressing the game :param overwrite: Overwrite pre-existing games :param save_path: Save and load the game from different directories :param silent: Do not print anything :param spawn_func: Function that determines which target-position should spawn :param stop_if_reached: Stop the simulation when agent reaches target :param wall_bound: Bound the position of the agent to be within the walls of the game """ assert type(game_id) == int # Set the game's configuration self.bot_config = config.bot self.game_config = config.game # Environment specific parameters self.noise: bool = noise # Add noise to the game-environment self.silent: bool = silent # True: Do not print out statistics self.save_path: str = save_path if save_path else 'environment/games_db/' self.wall_bound: bool = wall_bound # Permit robot to go outside of the boundaries self.stop_if_reached: bool = stop_if_reached # Terminate the simulation ones the target is found self.player_angle_noise: float = player_noise # The noise added to the player's initial orientation # Placeholders for parameters self.done: bool = False # Game has finished self.id: int = game_id # Game's ID-number self.init_distance: float = 0 # Denotes the initial distance from target self.player: Robot = None # Candidate-robot self.score: int = 0 # Denotes the number of targets found self.spawn_function = None # Function determining which targets to spawn self.steps_taken: int = 0 # Number of steps taken by the agent self.target: Vec2d = None # Target-robot self.x_axis: int = 0 # Width of the game self.y_axis: int = 0 # Height of the game # Check if game already exists, if not create new game if overwrite or not self.load(): assert spawn_func is not None self.create_empty_game(spawn_func) def __str__(self): return f"game_{self.id:05d}" # ------------------------------------------------> MAIN METHODS <------------------------------------------------ # def close(self): """Final state of the agent's statistics.""" return { D_DIST_TO_TARGET: self.get_distance_to_target(), D_DONE: self.done, D_GAME_ID: self.id, D_POS: self.player.pos, D_SCORE: self.score, D_TIME_TAKEN: self.steps_taken / self.game_config.fps, D_INIT_DIST: self.init_distance, } def get_observation(self): """Get the current observation of the game in the form of a 
dictionary.""" return { D_DONE: self.done, D_SENSOR_LIST: self.player.get_sensor_readings(), } def randomize(self): """Randomize the maze.""" self.player.randomize(max_noise=self.player_angle_noise) self.spawn_function.randomize() self.sample_target() def reset(self): """Reset the game and return initial observations.""" self.done = False self.score = 0 self.steps_taken = 0 self.spawn_function.reset() self.sample_target() self.player.reset(noise=self.noise) obs = self.get_observation() self.init_distance = self.get_distance_to_target() # The sensor-values must be read in first! return obs def step(self, l: float, r: float): """ Progress one step in the game. :param l: Left wheel speed [-1..1] :param r: Right wheel speed [-1..1] :return: Observation (Dictionary), target_reached (Boolean) """ dt = 1.0 / self.game_config.fps return self.step_dt(dt=dt, l=l, r=r) def step_dt(self, dt: float, l: float, r: float): """ Progress one step in the game based on a predefined delta-time. This method should only be used for debugging or visualization purposes. :param dt: Delta time :param l: Left wheel speed [-1..1] :param r: Right wheel speed [-1..1] :return: Observation (Dictionary), target_reached (Boolean) """ self.steps_taken += 1 self.player.drive(dt, lw=l, rw=r) # Check if player is not outside of playing-field if the game is wall-bound if self.wall_bound and \ (not (self.player.radius <= self.player.pos[0] <= self.x_axis - self.player.radius) or not (self.player.radius <= self.player.pos[1] <= self.y_axis - self.player.radius)): self.player.set_back() # Check if target reached if self.get_distance_to_target() <= self.game_config.target_reached: self.score += 1 if self.stop_if_reached: self.done = True else: self.sample_target() # Return the current observations return self.get_observation() # -----------------------------------------------> HELPER METHODS <----------------------------------------------- # def create_empty_game(self, spawn_func): """Create an empty game.""" self.x_axis = self.game_config.x_axis self.y_axis = self.game_config.y_axis self.spawn_function = spawn_func self.player = Robot(game=self) self.set_player_init_angle(a=pi / 2) self.set_player_init_pos(p=Vec2d(self.game_config.x_axis / 2, self.game_config.y_axis / 2)) # Save the new game self.save() if not self.silent: print(f"New game created under id: {self.id}") def get_distance_to_target(self): """Get the distance between robot and target.""" return (self.target - self.player.pos).get_length() def sample_target(self): """Sample a target from the target_list.""" self.target = Vec2d().load_tuple(self.spawn_function()) def set_player_init_angle(self, a: float): """Set a new initial angle for the player.""" self.player.set_init_angle(a=a) def set_player_init_pos(self, p: Vec2d): """Set a new initial position for the player.""" self.player.set_init_pos(p=p) # ---------------------------------------------> FUNCTIONAL METHODS <--------------------------------------------- # def save(self): """Save the current state's state.""" persist_dict = dict() persist_dict.update({D_X_AXIS: self.x_axis}) persist_dict.update({D_Y_AXIS: self.y_axis}) persist_dict.update({D_WALL_BOUND: self.wall_bound}) persist_dict.update({D_TARGET_REACHED: self.stop_if_reached}) persist_dict.update({D_ANGLE: self.player.init_angle}) # Initial angle of player persist_dict.update({D_ANGLE_NOISE: self.player_angle_noise}) # Noise added to the initial angle of the player persist_dict.update({D_POS: tuple(self.player.init_pos)}) # Initial position of player 
persist_dict.update({D_SPAWN_F: self.spawn_function}) # Function deciding on which target to use store_pickle(persist_dict, f'{self.save_path}{self}') def load(self): """Load in a game, specified by its current id and return True if successful.""" try: game = load_pickle(f'{self.save_path}{self}') self.x_axis = game.get(D_X_AXIS) self.y_axis = game.get(D_Y_AXIS) self.wall_bound = game.get(D_WALL_BOUND) self.stop_if_reached = game.get(D_TARGET_REACHED) self.player = Robot(game=self) # Create a dummy-player to set values on self.set_player_init_angle(game.get(D_ANGLE)) self.player_angle_noise = game.get(D_ANGLE_NOISE) self.set_player_init_pos(Vec2d().load_tuple(game.get(D_POS))) self.spawn_function = game.get(D_SPAWN_F) self.spawn_function.reset() self.sample_target() if not self.silent: print(f"Existing game loaded with id: {self.id}") return True except FileNotFoundError: return False def get_blueprint(self, ax=None, show_player: bool = False, annotate: bool = True): """The blueprint map of the board (matplotlib Figure).""" if not ax: fig, ax = plt.subplots() # Draw the (implicit) boundary walls if self.wall_bound: walls = [] corners = asarray([(0, 0), (0, self.y_axis), (self.x_axis, self.y_axis), (self.x_axis, 0)]) for c in range(4): walls.append([corners[c], corners[(c + 1) % 4]]) lc = mc.LineCollection(walls, linewidths=5, colors='k') ax.add_collection(lc) # Add all possible targets to map if "locations" in self.spawn_function.__slots__: for i, t in enumerate(self.spawn_function.locations): plt.plot(t[0], t[1], 'go') if annotate and type(self.spawn_function.locations) == list: plt.annotate(str(i + 1), xy=(t[0] + 0.1, t[1] + 0.1)) # Add player to map if requested if show_player: x = self.player.init_pos[0] y = self.player.init_pos[1] dx = cos(self.player.noisy_init_angle) dy = sin(self.player.noisy_init_angle) plt.arrow(x, y, dx, dy, head_width=0.1, length_includes_head=True) # Adjust the boundaries plt.xlim(0, self.x_axis) plt.ylim(0, self.y_axis) # Return the figure in its current state return ax def get_game(i: int, cfg: Config = None, noise: bool = True): """ Create a game-object. :param i: Game-ID :param cfg: Config object :param noise: Add noise to the game :return: Game or GameCy object """ config = cfg if cfg else Config() return Game( game_id=i, config=config, noise=noise, silent=True, )
[ "pylab.ylim", "environment.robot.Robot", "pylab.arrow", "utils.myutils.load_pickle", "config.Config", "utils.vec2d.Vec2d", "numpy.asarray", "pylab.plot", "matplotlib.collections.LineCollection", "math.cos", "pylab.xlim", "pylab.subplots", "math.sin", "utils.myutils.store_pickle" ]
[((7103, 7119), 'environment.robot.Robot', 'Robot', ([], {'game': 'self'}), '(game=self)\n', (7108, 7119), False, 'from environment.robot import Robot\n'), ((8882, 8935), 'utils.myutils.store_pickle', 'store_pickle', (['persist_dict', 'f"""{self.save_path}{self}"""'], {}), "(persist_dict, f'{self.save_path}{self}')\n", (8894, 8935), False, 'from utils.myutils import load_pickle, store_pickle\n'), ((11259, 11283), 'pylab.xlim', 'plt.xlim', (['(0)', 'self.x_axis'], {}), '(0, self.x_axis)\n', (11267, 11283), True, 'import pylab as plt\n'), ((11292, 11316), 'pylab.ylim', 'plt.ylim', (['(0)', 'self.y_axis'], {}), '(0, self.y_axis)\n', (11300, 11316), True, 'import pylab as plt\n'), ((11660, 11668), 'config.Config', 'Config', ([], {}), '()\n', (11666, 11668), False, 'from config import Config\n'), ((9082, 9120), 'utils.myutils.load_pickle', 'load_pickle', (['f"""{self.save_path}{self}"""'], {}), "(f'{self.save_path}{self}')\n", (9093, 9120), False, 'from utils.myutils import load_pickle, store_pickle\n'), ((9352, 9368), 'environment.robot.Robot', 'Robot', ([], {'game': 'self'}), '(game=self)\n', (9357, 9368), False, 'from environment.robot import Robot\n'), ((10085, 10099), 'pylab.subplots', 'plt.subplots', ([], {}), '()\n', (10097, 10099), True, 'import pylab as plt\n'), ((10227, 10312), 'numpy.asarray', 'asarray', (['[(0, 0), (0, self.y_axis), (self.x_axis, self.y_axis), (self.x_axis, 0)]'], {}), '([(0, 0), (0, self.y_axis), (self.x_axis, self.y_axis), (self.x_axis,\n 0)])\n', (10234, 10312), False, 'from numpy import asarray, pi\n'), ((10422, 10472), 'matplotlib.collections.LineCollection', 'mc.LineCollection', (['walls'], {'linewidths': '(5)', 'colors': '"""k"""'}), "(walls, linewidths=5, colors='k')\n", (10439, 10472), True, 'import matplotlib.collections as mc\n'), ((11046, 11079), 'math.cos', 'cos', (['self.player.noisy_init_angle'], {}), '(self.player.noisy_init_angle)\n', (11049, 11079), False, 'from math import cos, sin\n'), ((11097, 11130), 'math.sin', 'sin', (['self.player.noisy_init_angle'], {}), '(self.player.noisy_init_angle)\n', (11100, 11130), False, 'from math import cos, sin\n'), ((11143, 11209), 'pylab.arrow', 'plt.arrow', (['x', 'y', 'dx', 'dy'], {'head_width': '(0.1)', 'length_includes_head': '(True)'}), '(x, y, dx, dy, head_width=0.1, length_includes_head=True)\n', (11152, 11209), True, 'import pylab as plt\n'), ((7200, 7263), 'utils.vec2d.Vec2d', 'Vec2d', (['(self.game_config.x_axis / 2)', '(self.game_config.y_axis / 2)'], {}), '(self.game_config.x_axis / 2, self.game_config.y_axis / 2)\n', (7205, 7263), False, 'from utils.vec2d import Vec2d\n'), ((7665, 7672), 'utils.vec2d.Vec2d', 'Vec2d', ([], {}), '()\n', (7670, 7672), False, 'from utils.vec2d import Vec2d\n'), ((10697, 10723), 'pylab.plot', 'plt.plot', (['t[0]', 't[1]', '"""go"""'], {}), "(t[0], t[1], 'go')\n", (10705, 10723), True, 'import pylab as plt\n'), ((9568, 9575), 'utils.vec2d.Vec2d', 'Vec2d', ([], {}), '()\n', (9573, 9575), False, 'from utils.vec2d import Vec2d\n')]
import numpy as np
import torch as th
import torch.nn as nn

from rls.nn.mlps import MLP
from rls.nn.represent_nets import RepresentationNetwork


class QattenMixer(nn.Module):
    def __init__(self,
                 n_agents: int,
                 state_spec,
                 rep_net_params,
                 agent_own_state_size: bool,
                 query_hidden_units: int,
                 query_embed_dim: int,
                 key_embed_dim: int,
                 head_hidden_units: int,
                 n_attention_head: int,
                 constrant_hidden_units: int,
                 is_weighted: bool = True):
        super().__init__()
        self.n_agents = n_agents
        self.rep_net = RepresentationNetwork(obs_spec=state_spec,
                                             rep_net_params=rep_net_params)
        self.u_dim = agent_own_state_size  # TODO: implement this

        self.query_embed_dim = query_embed_dim
        self.key_embed_dim = key_embed_dim
        self.n_attention_head = n_attention_head
        self.is_weighted = is_weighted

        self.query_embedding_layers = nn.ModuleList()
        self.key_embedding_layers = nn.ModuleList()
        for i in range(self.n_attention_head):
            self.query_embedding_layers.append(MLP(input_dim=self.rep_net.h_dim,
                                                    hidden_units=query_hidden_units,
                                                    layer='linear',
                                                    act_fn='relu',
                                                    output_shape=query_embed_dim))
            self.key_embedding_layers.append(
                nn.Linear(self.u_dim, self.key_embed_dim))

        self.scaled_product_value = np.sqrt(self.query_embed_dim)

        self.head_embedding_layer = MLP(input_dim=self.rep_net.h_dim,
                                        hidden_units=head_hidden_units,
                                        layer='linear',
                                        act_fn='relu',
                                        output_shape=n_attention_head)

        self.constrant_value_layer = MLP(input_dim=self.rep_net.h_dim,
                                        hidden_units=constrant_hidden_units,
                                        layer='linear',
                                        act_fn='relu',
                                        output_shape=1)

    def forward(self, q_values, state, **kwargs):
        """
        params:
            q_values: [T, B, 1, N]
            state: [T, B, *]
        """
        time_step = q_values.shape[0]   # T
        batch_size = q_values.shape[1]  # B

        # state: [T, B, *]
        state_feat, _ = self.rep_net(state, **kwargs)  # [T, B, *]
        us = self._get_us(state_feat)  # [T, B, N, *]

        q_lambda_list = []
        for i in range(self.n_attention_head):
            state_embedding = self.query_embedding_layers[i](
                state_feat)  # [T, B, *]
            u_embedding = self.key_embedding_layers[i](us)  # [T, B, N, *]

            state_embedding = state_embedding.unsqueeze(-2)  # [T, B, 1, *]
            u_embedding = u_embedding.swapaxes(-1, -2)  # [T, B, *, N]

            raw_lambda = (state_embedding @ u_embedding) / \
                self.scaled_product_value  # [T, B, 1, N]
            q_lambda = raw_lambda.softmax(dim=-1)  # [T, B, 1, N]
            q_lambda_list.append(q_lambda)  # H * [T, B, 1, N]

        q_lambda_list = th.cat(q_lambda_list, dim=-2)  # [T, B, H, N]
        q_lambda_list = q_lambda_list.swapaxes(-1, -2)  # [T, B, N, H]

        q_h = q_values @ q_lambda_list  # [T, B, 1, H]

        if self.is_weighted:
            # shape: [-1, n_attention_head, 1]
            w_h = th.abs(self.head_embedding_layer(state_feat))  # [T, B, H]
            w_h = w_h.unsqueeze(-1)  # [T, B, H, 1]

            sum_q_h = q_h @ w_h  # [T, B, 1, 1]
            sum_q_h = sum_q_h.view(time_step, batch_size, 1)  # [T, B, 1]
        else:
            sum_q_h = q_h.sum(-1)  # [T, B, 1]

        c = self.constrant_value_layer(state_feat)  # [T, B, 1]
        q_tot = sum_q_h + c  # [T, B, 1]
        return q_tot

    def _get_us(self, state_feat):
        time_step = state_feat.shape[0]   # T
        batch_size = state_feat.shape[1]  # B
        agent_own_state_size = self.u_dim
        with th.no_grad():
            us = state_feat[:, :, :agent_own_state_size * self.n_agents].view(
                time_step, batch_size, self.n_agents, agent_own_state_size)  # [T, B, N, *]
        return us
[ "numpy.sqrt", "torch.nn.ModuleList", "torch.nn.Linear", "rls.nn.mlps.MLP", "torch.no_grad", "torch.cat", "rls.nn.represent_nets.RepresentationNetwork" ]
[((714, 787), 'rls.nn.represent_nets.RepresentationNetwork', 'RepresentationNetwork', ([], {'obs_spec': 'state_spec', 'rep_net_params': 'rep_net_params'}), '(obs_spec=state_spec, rep_net_params=rep_net_params)\n', (735, 787), False, 'from rls.nn.represent_nets import RepresentationNetwork\n'), ((1117, 1132), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1130, 1132), True, 'import torch.nn as nn\n'), ((1169, 1184), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1182, 1184), True, 'import torch.nn as nn\n'), ((1601, 1630), 'numpy.sqrt', 'np.sqrt', (['self.query_embed_dim'], {}), '(self.query_embed_dim)\n', (1608, 1630), True, 'import numpy as np\n'), ((1668, 1800), 'rls.nn.mlps.MLP', 'MLP', ([], {'input_dim': 'self.rep_net.h_dim', 'hidden_units': 'head_hidden_units', 'layer': '"""linear"""', 'act_fn': '"""relu"""', 'output_shape': 'n_attention_head'}), "(input_dim=self.rep_net.h_dim, hidden_units=head_hidden_units, layer=\n 'linear', act_fn='relu', output_shape=n_attention_head)\n", (1671, 1800), False, 'from rls.nn.mlps import MLP\n'), ((1874, 1995), 'rls.nn.mlps.MLP', 'MLP', ([], {'input_dim': 'self.rep_net.h_dim', 'hidden_units': 'constrant_hidden_units', 'layer': '"""linear"""', 'act_fn': '"""relu"""', 'output_shape': '(1)'}), "(input_dim=self.rep_net.h_dim, hidden_units=constrant_hidden_units,\n layer='linear', act_fn='relu', output_shape=1)\n", (1877, 1995), False, 'from rls.nn.mlps import MLP\n'), ((3110, 3139), 'torch.cat', 'th.cat', (['q_lambda_list'], {'dim': '(-2)'}), '(q_lambda_list, dim=-2)\n', (3116, 3139), True, 'import torch as th\n'), ((3983, 3995), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (3993, 3995), True, 'import torch as th\n'), ((1279, 1411), 'rls.nn.mlps.MLP', 'MLP', ([], {'input_dim': 'self.rep_net.h_dim', 'hidden_units': 'query_hidden_units', 'layer': '"""linear"""', 'act_fn': '"""relu"""', 'output_shape': 'query_embed_dim'}), "(input_dim=self.rep_net.h_dim, hidden_units=query_hidden_units, layer=\n 'linear', act_fn='relu', output_shape=query_embed_dim)\n", (1282, 1411), False, 'from rls.nn.mlps import MLP\n'), ((1521, 1562), 'torch.nn.Linear', 'nn.Linear', (['self.u_dim', 'self.key_embed_dim'], {}), '(self.u_dim, self.key_embed_dim)\n', (1530, 1562), True, 'import torch.nn as nn\n')]
import numpy as np import os import nibabel as nib from skimage.transform import resize from tqdm import tqdm import matplotlib.pyplot as plt import SimpleITK as sitk spacing = { 0: [1.5, 0.8, 0.8], 1: [1.5, 0.8, 0.8], 2: [1.5, 0.8, 0.8], 3: [1.5, 0.8, 0.8], 4: [1.5, 0.8, 0.8], 5: [1.5, 0.8, 0.8], 6: [1.5, 0.8, 0.8], } ori_path = './0123456' new_path = './0123456_spacing_same' count = -1 for root1, dirs1, _ in os.walk(ori_path): for i_dirs1 in tqdm(sorted(dirs1)): # 0Liver # if i_dirs1 != '0Liver': # continue ########################################################################### if i_dirs1 == '1Kidney': for root2, dirs2, files2 in os.walk(os.path.join(root1, i_dirs1)): for root3, dirs3, files3 in os.walk(os.path.join(root2, 'origin')): for i_dirs3 in sorted(dirs3): # case_00000 # if int(i_dirs3[-2:])!=4: # continue for root4, dirs4, files4 in os.walk(os.path.join(root3, i_dirs3)): for i_files4 in sorted(files4): # read img print("Processing %s" % (i_files4)) img_path = os.path.join(root4, i_files4) imageITK = sitk.ReadImage(img_path) image = sitk.GetArrayFromImage(imageITK) ori_size = np.array(imageITK.GetSize())[[2, 1, 0]] ori_spacing = np.array(imageITK.GetSpacing())[[2, 1, 0]] ori_origin = imageITK.GetOrigin() ori_direction = imageITK.GetDirection() task_id = int(i_dirs1[0]) target_spacing = np.array(spacing[task_id]) if ori_spacing[0] < 0 or ori_spacing[1] < 0 or ori_spacing[2] < 0: print("error") spc_ratio = ori_spacing / target_spacing data_type = image.dtype if i_files4 != 'segmentation.nii.gz': data_type = np.int32 if i_files4 == 'segmentation.nii.gz': order = 0 mode_ = 'edge' else: order = 3 mode_ = 'constant' image = image.astype(np.float) image_resize = resize(image, ( int(ori_size[0] * spc_ratio[0]), int(ori_size[1] * spc_ratio[1]), int(ori_size[2] * spc_ratio[2])), order=order, cval=0, clip=True, preserve_range=True) image_resize = np.round(image_resize).astype(data_type) # save save_path = os.path.join(new_path, i_dirs1, 'origin', i_dirs3) if not os.path.exists(save_path): os.makedirs(save_path) saveITK = sitk.GetImageFromArray(image_resize) saveITK.SetSpacing(target_spacing[[2, 1, 0]]) saveITK.SetOrigin(ori_origin) saveITK.SetDirection(ori_direction) sitk.WriteImage(saveITK, os.path.join(save_path, i_files4)) ############################################################################# for root2, dirs2, files2 in os.walk(os.path.join(root1, i_dirs1)): for i_dirs2 in sorted(dirs2): # imagesTr for root3, dirs3, files3 in os.walk(os.path.join(root2, i_dirs2)): for i_files3 in sorted(files3): if i_files3[0] == '.': continue # read img print("Processing %s" % (i_files3)) img_path = os.path.join(root3, i_files3) imageITK = sitk.ReadImage(img_path) image = sitk.GetArrayFromImage(imageITK) ori_size = np.array(imageITK.GetSize())[[2, 1, 0]] ori_spacing = np.array(imageITK.GetSpacing())[[2, 1, 0]] ori_origin = imageITK.GetOrigin() ori_direction = imageITK.GetDirection() task_id = int(i_dirs1[0]) target_spacing = np.array(spacing[task_id]) spc_ratio = ori_spacing / target_spacing data_type = image.dtype if i_dirs2 != 'labelsTr': data_type = np.int32 if i_dirs2 == 'labelsTr': order = 0 mode_ = 'edge' else: order = 3 mode_ = 'constant' image = image.astype(np.float) image_resize = resize(image, (int(ori_size[0] * spc_ratio[0]), int(ori_size[1] * spc_ratio[1]), int(ori_size[2] * spc_ratio[2])), order=order, mode=mode_, cval=0, clip=True, preserve_range=True) image_resize = np.round(image_resize).astype(data_type) # save save_path = os.path.join(new_path, i_dirs1, i_dirs2) if not 
os.path.exists(save_path): os.makedirs(save_path) saveITK = sitk.GetImageFromArray(image_resize) saveITK.SetSpacing(target_spacing[[2, 1, 0]]) saveITK.SetOrigin(ori_origin) saveITK.SetDirection(ori_direction) sitk.WriteImage(saveITK, os.path.join(save_path, i_files3))
[ "os.path.exists", "SimpleITK.GetImageFromArray", "os.makedirs", "numpy.round", "os.path.join", "SimpleITK.GetArrayFromImage", "numpy.array", "SimpleITK.ReadImage", "os.walk" ]
[((445, 462), 'os.walk', 'os.walk', (['ori_path'], {}), '(ori_path)\n', (452, 462), False, 'import os\n'), ((3910, 3938), 'os.path.join', 'os.path.join', (['root1', 'i_dirs1'], {}), '(root1, i_dirs1)\n', (3922, 3938), False, 'import os\n'), ((736, 764), 'os.path.join', 'os.path.join', (['root1', 'i_dirs1'], {}), '(root1, i_dirs1)\n', (748, 764), False, 'import os\n'), ((820, 849), 'os.path.join', 'os.path.join', (['root2', '"""origin"""'], {}), "(root2, 'origin')\n", (832, 849), False, 'import os\n'), ((4048, 4076), 'os.path.join', 'os.path.join', (['root2', 'i_dirs2'], {}), '(root2, i_dirs2)\n', (4060, 4076), False, 'import os\n'), ((4345, 4374), 'os.path.join', 'os.path.join', (['root3', 'i_files3'], {}), '(root3, i_files3)\n', (4357, 4374), False, 'import os\n'), ((4410, 4434), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['img_path'], {}), '(img_path)\n', (4424, 4434), True, 'import SimpleITK as sitk\n'), ((4467, 4499), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['imageITK'], {}), '(imageITK)\n', (4489, 4499), True, 'import SimpleITK as sitk\n'), ((4870, 4896), 'numpy.array', 'np.array', (['spacing[task_id]'], {}), '(spacing[task_id])\n', (4878, 4896), True, 'import numpy as np\n'), ((5881, 5921), 'os.path.join', 'os.path.join', (['new_path', 'i_dirs1', 'i_dirs2'], {}), '(new_path, i_dirs1, i_dirs2)\n', (5893, 5921), False, 'import os\n'), ((6065, 6101), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image_resize'], {}), '(image_resize)\n', (6087, 6101), True, 'import SimpleITK as sitk\n'), ((1068, 1096), 'os.path.join', 'os.path.join', (['root3', 'i_dirs3'], {}), '(root3, i_dirs3)\n', (1080, 1096), False, 'import os\n'), ((5953, 5978), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (5967, 5978), False, 'import os\n'), ((6008, 6030), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (6019, 6030), False, 'import os\n'), ((6335, 6368), 'os.path.join', 'os.path.join', (['save_path', 'i_files3'], {}), '(save_path, i_files3)\n', (6347, 6368), False, 'import os\n'), ((1313, 1342), 'os.path.join', 'os.path.join', (['root4', 'i_files4'], {}), '(root4, i_files4)\n', (1325, 1342), False, 'import os\n'), ((1386, 1410), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['img_path'], {}), '(img_path)\n', (1400, 1410), True, 'import SimpleITK as sitk\n'), ((1451, 1483), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['imageITK'], {}), '(imageITK)\n', (1473, 1483), True, 'import SimpleITK as sitk\n'), ((1902, 1928), 'numpy.array', 'np.array', (['spacing[task_id]'], {}), '(spacing[task_id])\n', (1910, 1928), True, 'import numpy as np\n'), ((3224, 3274), 'os.path.join', 'os.path.join', (['new_path', 'i_dirs1', '"""origin"""', 'i_dirs3'], {}), "(new_path, i_dirs1, 'origin', i_dirs3)\n", (3236, 3274), False, 'import os\n'), ((3442, 3478), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image_resize'], {}), '(image_resize)\n', (3464, 3478), True, 'import SimpleITK as sitk\n'), ((5772, 5794), 'numpy.round', 'np.round', (['image_resize'], {}), '(image_resize)\n', (5780, 5794), True, 'import numpy as np\n'), ((3314, 3339), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (3328, 3339), False, 'import os\n'), ((3377, 3399), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (3388, 3399), False, 'import os\n'), ((3744, 3777), 'os.path.join', 'os.path.join', (['save_path', 'i_files4'], {}), '(save_path, i_files4)\n', (3756, 3777), False, 'import os\n'), ((3099, 3121), 'numpy.round', 
'np.round', (['image_resize'], {}), '(image_resize)\n', (3107, 3121), True, 'import numpy as np\n')]
# Simple single neuron network to model a regression task from __future__ import print_function import numpy as np #np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import SGD, Adam, RMSprop from keras.utils import np_utils #Generate Dataset X_train = np.random.rand(600,2) * 100.0 - 50.0 Y_train = X_train[:,0] + X_train[:,1] X_test = np.random.rand(100,2) * 100.0 - 50.0 Y_test = X_test[:,0] + X_test[:,1] X_train = X_train.astype('float32') X_test = X_test.astype('float32') print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples') model = Sequential() model.add(Dense(1,input_shape=(2,),init='uniform', activation='linear')) model.compile(loss='mean_absolute_error', optimizer='rmsprop') # Using mse loss results in faster convergence def GetWeights(chromo): [w,b] = model.layers[0].get_weights() #get weights and biases w.shape =(401408,) b.shape = (512,) chromo.genes[0,0:401408] = w chromo.genes[0,401408:401408+512] = b [w,b] = model.layers[3].get_weights() w.shape = (262144,) b.shape = (512,) chromo.genes[0,401408+512:401408+512+262144] = w chromo.genes[0,401408+512+262144:401408+512+262144+512] = b [w,b] = model.layers[6].get_weights() w.shape = (5120,) b.shape = (10,) chromo.genes[0,401408+512+262144+512:401408+512+262144+512+5120] = w chromo.genes[0,401408+512+262144+512+5120:401408+512+262144+512+5120+10] = b def SetWeights(chromo): #There are 8 layers we move in one layer at a time and set the weights w = chromo.genes[0,0:2]; w.shape = (2,1); b = chromo.genes[0,2:3]; b.shape = (1,) model.layers[0].set_weights([w,b])#set weights and biases def EvalModel(j): # nb_batch = 16 # p = len(X_train)/nb_batch - 1 # i = int(math.floor(np.random.rand() * p + 0.5) * nb_batch) # tr_batch = X_train[i:i+nb_batch,:] # label_batch = Y_train[i:i+nb_batch] # score = model.evaluate(tr_batch, label_batch, batch_size=nb_batch,verbose=0) score = model.evaluate(X_train, Y_train,verbose=0) #print('[',j,'] Eval score:',score) return score ###################### #Evolutionary Algo for optimizing Neural Netowrks import numpy as np import sys import datetime import math #EA Parameters gene_count = 3 population_size = 100 p_mutation = 0.15 #1.0/gene_count p_crossover = 0.5 #0.0001 loss_delta = 1 avg_loss_prev = 0; total_gene_set_time = datetime.datetime.utcnow() - datetime.datetime.utcnow() class Chromosome: """docstring for Chromosome""" fitness = 0.0 is_fitness_invalid = True # used for computing fitness def __init__(self, gene_count): self.gene_count = gene_count self.genes = np.random.rand(1,gene_count) * 2.0 - 1.0 #GetWeights(self) self.is_fitness_invalid = True def ComputeFitness(pop,min_loss): """ Computes fitness each chromosome, returns avgloss, min_loss and min_loss_index """ total_fitness = 0.0 min_loss_index = -1 global total_gene_set_time for i in range(0,pop.size): if pop[0,i].is_fitness_invalid: # 1. set the gene to the NN topology # 2. evaluate against the whole *Training* dataset # 3. 
resulting 'TestScore' will be the fitness t2 = datetime.datetime.utcnow() SetWeights(pop[0,i]) total_gene_set_time += datetime.datetime.utcnow() - t2 pop[0,i].fitness = EvalModel(i) #Mock fitness computation #pop[0,i].fitness = pop[0,i].genes.mean(axis=1) #print(i,' computed fitness') pop[0,i].is_fitness_invalid = False if min_loss >= pop[0,i].fitness: min_loss = pop[0,i].fitness min_loss_index = i total_fitness = total_fitness + pop[0,i].fitness return (total_fitness / pop.size, min_loss, min_loss_index) def MutatePart(winner,loser,p_mutation,p_crossover,begin,end): count = end - begin if np.random.rand() < p_crossover: #generate crossover site cs = math.floor(np.random.rand() * (count-1)) loser.genes[0,begin:end] = winner.genes[0,begin:end] #mutation factor is amount by which the original chromosome gets #changed by after applying the mutate decison mask vector mutation_factor = 2.0 #Weights are mutated by a value in the range of +/- mutation_factor/2 #mutate prep m1 = np.random.rand(1,count) #mutation decision probability vector mask = m1 < p_mutation; #decision as a boolean mask #vector of mutations m2 = np.random.rand(1,count) * mutation_factor - (mutation_factor/2) mutation = mask * m2 # vector of mutation to be added loser.genes[0,begin:end] = loser.genes[0,begin:end] + mutation def Mutate(winner,loser,p_mutation,p_crossover): #apply mutation and cross over layer by layer #layer 0 begin = 0; end = 2; #for c in enumerate(loser.genes): # print('c = ',c) MutatePart(winner,loser,p_mutation,p_crossover,begin,end) #for c in enumerate(loser.genes): # print('c = ',c) #print('++++++++++++++++++++++++++') #print('-----') #for k, l in enumerate(model.layers): # weights = l.get_weights() # print('len weights =',len(weights)) # for n, param in enumerate(weights): # for p in enumerate(param): # print('param = ', p) begin = 2; end = 3 MutatePart(winner,loser,p_mutation,p_crossover,begin,end) loser.is_fitness_invalid = True return loser #------------------------------------------------------------------------------------------------- #initialize population vChromosome = np.vectorize(Chromosome)#vectorize Chromosome constructor arg_array = np.full((1,population_size),gene_count,dtype=int)#init array with gene_count as value population = vChromosome(arg_array)#create a population of Chromosomes #aim is to minimize the loss t1 = datetime.datetime.utcnow() # for timing min_loss = sys.maxint best_so_far = None generation_count = 0; while generation_count < 1000: #loss_delta > 0.001: (avg_loss, min_loss, mi) = ComputeFitness(population,min_loss) if mi >= 0: best_so_far = population[0,mi] loss_delta = math.fabs(avg_loss - avg_loss_prev) avg_loss_prev = avg_loss #print('[{}] [{}] best-so-far = {} mi = {} min-loss = {} loss_delta = {}'.format(\ # str(datetime.datetime.utcnow()), \ # generation_count, \ # best_so_far.fitness,\ # mi,\ # min_loss, \ # loss_delta)) #prep for crossover and mutation idx = np.random.permutation(population_size) for kk in range(0,population_size/2): I1 = idx[2*kk] I2 = idx[2*kk+1] P1 = population[0,I1] P2 = population[0,I2] #print('I1 =',I1,'I2 =',I2,'P2 fitness =',P1.fitness,'P2 fitness =',P2.fitness) #minimization, so <= if P1.fitness <= P2.fitness: #P1 is better, so we replace P2 population[0,I2] = Mutate(P1,P2,p_mutation,p_crossover) else: #P2 is better, so we replace P1 population[0,I1] = Mutate(P2,P1,p_mutation,p_crossover) generation_count += 1 print('==========================================') #evaluate the 'best so far' on test set SetWeights(best_so_far) score_ = 
model.evaluate(X_test,Y_test, verbose=1) print('Test score:',score_) print('time taken =',(datetime.datetime.utcnow() - t1)) print('time taken to set gene =',total_gene_set_time) for k, l in enumerate(model.layers): weights = l.get_weights() print('len weights =',len(weights)) for n, param in enumerate(weights): for p in enumerate(param): print('param = ', p)
[ "numpy.random.rand", "datetime.datetime.utcnow", "numpy.random.permutation", "keras.models.Sequential", "math.fabs", "numpy.full", "numpy.vectorize", "keras.layers.core.Dense" ]
[((714, 726), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (724, 726), False, 'from keras.models import Sequential\n'), ((5385, 5409), 'numpy.vectorize', 'np.vectorize', (['Chromosome'], {}), '(Chromosome)\n', (5397, 5409), True, 'import numpy as np\n'), ((5455, 5507), 'numpy.full', 'np.full', (['(1, population_size)', 'gene_count'], {'dtype': 'int'}), '((1, population_size), gene_count, dtype=int)\n', (5462, 5507), True, 'import numpy as np\n'), ((5647, 5673), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5671, 5673), False, 'import datetime\n'), ((737, 800), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'input_shape': '(2,)', 'init': '"""uniform"""', 'activation': '"""linear"""'}), "(1, input_shape=(2,), init='uniform', activation='linear')\n", (742, 800), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((2455, 2481), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2479, 2481), False, 'import datetime\n'), ((2484, 2510), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2508, 2510), False, 'import datetime\n'), ((4219, 4243), 'numpy.random.rand', 'np.random.rand', (['(1)', 'count'], {}), '(1, count)\n', (4233, 4243), True, 'import numpy as np\n'), ((5929, 5964), 'math.fabs', 'math.fabs', (['(avg_loss - avg_loss_prev)'], {}), '(avg_loss - avg_loss_prev)\n', (5938, 5964), False, 'import math\n'), ((6434, 6472), 'numpy.random.permutation', 'np.random.permutation', (['population_size'], {}), '(population_size)\n', (6455, 6472), True, 'import numpy as np\n'), ((397, 419), 'numpy.random.rand', 'np.random.rand', (['(600)', '(2)'], {}), '(600, 2)\n', (411, 419), True, 'import numpy as np\n'), ((482, 504), 'numpy.random.rand', 'np.random.rand', (['(100)', '(2)'], {}), '(100, 2)\n', (496, 504), True, 'import numpy as np\n'), ((3818, 3834), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3832, 3834), True, 'import numpy as np\n'), ((7220, 7246), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7244, 7246), False, 'import datetime\n'), ((3229, 3255), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3253, 3255), False, 'import datetime\n'), ((4362, 4386), 'numpy.random.rand', 'np.random.rand', (['(1)', 'count'], {}), '(1, count)\n', (4376, 4386), True, 'import numpy as np\n'), ((2718, 2747), 'numpy.random.rand', 'np.random.rand', (['(1)', 'gene_count'], {}), '(1, gene_count)\n', (2732, 2747), True, 'import numpy as np\n'), ((3306, 3332), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3330, 3332), False, 'import datetime\n'), ((3895, 3911), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3909, 3911), True, 'import numpy as np\n')]
from __future__ import print_function, division from PIL import Image from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize import numpy as np import random import tarfile import io import os import pandas as pd import torch from torch.utils.data import Dataset # %% custom dataset class PlacesDataset(Dataset): def __init__(self, txt_path='filelist.txt', img_dir='data', transform=None, test=False): """ Initialize data set as a list of IDs corresponding to each item of data set :param img_dir: path to image files as a uncompressed tar archive :param txt_path: a text file containing names of all of images line by line :param transform: apply some transforms like cropping, rotating, etc on input image :param test: is inference time or not :return: a 3-value dict containing input image (y_descreen) as ground truth, input image X as halftone image and edge-map (y_edge) of ground truth image to feed into the network. """ df = pd.read_csv(txt_path, sep=' ', index_col=0) self.img_names = df.index.values self.txt_path = txt_path self.img_dir = img_dir self.transform = transform self.to_tensor = ToTensor() self.to_pil = ToPILImage() self.get_image_selector = True if img_dir.__contains__('tar') else False self.tf = tarfile.open(self.img_dir) if self.get_image_selector else None self.transform_gt = transform if test else Compose(self.transform.transforms[:-1]) # omit noise of ground truth def get_image_from_tar(self, name): """ Gets a image by a name gathered from file list csv file :param name: name of targeted image :return: a PIL image """ # tarinfo = self.tf.getmember(name) image = self.tf.extractfile(name) image = image.read() image = Image.open(io.BytesIO(image)) return image def get_image_from_folder(self, name): """ gets a image by a name gathered from file list text file :param name: name of targeted image :return: a PIL image """ image = Image.open(os.path.join(self.img_dir, name)) return image def __len__(self): """ Return the length of data set using list of IDs :return: number of samples in data set """ return len(self.img_names) def __getitem__(self, index): """ Generate one item of data set. Here we apply our preprocessing things like halftone styles and subtractive color process using CMYK color model, generating edge-maps, etc. :param index: index of item in IDs list :return: a sample of data as a dict """ if index == (self.__len__() - 1) and self.get_image_selector: # Close tarfile opened in __init__ self.tf.close() if self.get_image_selector: # note: we prefer to extract then process! y_descreen = self.get_image_from_tar(self.img_names[index]) else: y_descreen = self.get_image_from_folder(self.img_names[index]) seed = np.random.randint(2147483647) random.seed(seed) if self.transform is not None: y_noise = self.noisy_image(y_descreen) y_descreen = self.transform(y_descreen) random.seed(seed) y_noise = self.transform_gt(y_noise) sample = {'y_descreen': y_descreen, 'y_noise': y_noise} return sample def noisy_image(self, image): """ Add Salt and Pepper noise to image and return image as same type as input. :param image: PIL image :return: PIL image """ if type(image) == torch.Tensor: image = self.to_pil(image) image = np.array(image) s_vs_p = 0.5 amount = 0.015 out = np.copy(image) num_salt = np.ceil(amount * image.size * s_vs_p) coords = tuple([np.random.randint(0, i - 1, int(num_salt)) for i in image.shape]) out[coords] = 1 num_pepper = np.ceil(amount * image.size * (1. 
- s_vs_p)) coords = tuple([np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]) out[coords] = 0 out = ToPILImage()(out) return out class RandomNoise(object): def __init__(self, p, mean=0, std=0.1): self.p = p self.mean = mean self.std = std def __call__(self, img): if random.random() <= self.p: noise = torch.empty(*img.size(), dtype=torch.float, requires_grad=False) return img+noise.normal_(self.mean, self.std) return img class Blend(object): """ Blend two input tensors(tensors) with respect to the alpha value as a weight if random number is lower than p for each example """ def __init__(self, p=0.5): self.p = p def __call__(self, halftone, ground_truth, alpha=0.5): """ :param halftone: First tensor to be blended (batch_size, channel_size, height, width) :param ground_truth: Second tensor to be blended with size (batch_size, channel_size, height, width) :param alpha: weight of linear addition of two tensors :return: A tensor with size of (batch_size, channel_size, height, width) """ p = torch.zeros(halftone.size()[0]).new_full((halftone.size()[0], ), self.p) rand = torch.zeros(p.size()[0]).uniform_() blend = torch.zeros((halftone.size())) mask = rand < p blend[mask] = halftone[mask] * (1.0 - alpha) + ground_truth[mask] * alpha mask = rand > p blend[mask] = halftone[mask] return blend class UnNormalizeNative(object): """ Unnormalize an input tensor given the mean and std """ def __init__(self, mean, std): self.mean = torch.tensor(mean) self.std = torch.tensor(std) def __call__(self, tensor): """ Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. Returns: Tensor: Normalized image. """ return Normalize((-mean / std).tolist(), (1.0 / std).tolist())
[ "numpy.copy", "numpy.ceil", "tarfile.open", "torchvision.transforms.ToPILImage", "pandas.read_csv", "io.BytesIO", "os.path.join", "random.seed", "numpy.array", "numpy.random.randint", "torch.tensor", "random.random", "torchvision.transforms.ToTensor", "torchvision.transforms.Compose" ]
[((1046, 1089), 'pandas.read_csv', 'pd.read_csv', (['txt_path'], {'sep': '""" """', 'index_col': '(0)'}), "(txt_path, sep=' ', index_col=0)\n", (1057, 1089), True, 'import pandas as pd\n'), ((1255, 1265), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1263, 1265), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((1288, 1300), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (1298, 1300), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((3181, 3210), 'numpy.random.randint', 'np.random.randint', (['(2147483647)'], {}), '(2147483647)\n', (3198, 3210), True, 'import numpy as np\n'), ((3219, 3236), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3230, 3236), False, 'import random\n'), ((3863, 3878), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3871, 3878), True, 'import numpy as np\n'), ((3937, 3951), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (3944, 3951), True, 'import numpy as np\n'), ((3971, 4008), 'numpy.ceil', 'np.ceil', (['(amount * image.size * s_vs_p)'], {}), '(amount * image.size * s_vs_p)\n', (3978, 4008), True, 'import numpy as np\n'), ((4144, 4189), 'numpy.ceil', 'np.ceil', (['(amount * image.size * (1.0 - s_vs_p))'], {}), '(amount * image.size * (1.0 - s_vs_p))\n', (4151, 4189), True, 'import numpy as np\n'), ((5919, 5937), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (5931, 5937), False, 'import torch\n'), ((5957, 5974), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (5969, 5974), False, 'import torch\n'), ((1400, 1426), 'tarfile.open', 'tarfile.open', (['self.img_dir'], {}), '(self.img_dir)\n', (1412, 1426), False, 'import tarfile\n'), ((1515, 1554), 'torchvision.transforms.Compose', 'Compose', (['self.transform.transforms[:-1]'], {}), '(self.transform.transforms[:-1])\n', (1522, 1554), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((1930, 1947), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (1940, 1947), False, 'import io\n'), ((2205, 2237), 'os.path.join', 'os.path.join', (['self.img_dir', 'name'], {}), '(self.img_dir, name)\n', (2217, 2237), False, 'import os\n'), ((3392, 3409), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3403, 3409), False, 'import random\n'), ((4319, 4331), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (4329, 4331), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((4538, 4553), 'random.random', 'random.random', ([], {}), '()\n', (4551, 4553), False, 'import random\n')]
from dsynth.view_datasets.tless import TlessMultiviewDataset
from dsynth import MultiviewWarper
import numpy as np


def test_tless_dataset():
    dataset = TlessMultiviewDataset(obj_id=2, unit_test=True)
    ibr = MultiviewWarper(dataset)

    R = np.reshape(dataset[1].cam_R, (3,3)).astype(np.float32)
    t = np.float32(dataset[1].cam_t)
    K = np.reshape(dataset[1].cam_K, (3,3)).astype(np.float32)

    W, view_id = ibr.match_and_warp( (R,t), K, (400,400))
    assert view_id == '0400'
[ "dsynth.MultiviewWarper", "numpy.reshape", "numpy.float32", "dsynth.view_datasets.tless.TlessMultiviewDataset" ]
[((156, 203), 'dsynth.view_datasets.tless.TlessMultiviewDataset', 'TlessMultiviewDataset', ([], {'obj_id': '(2)', 'unit_test': '(True)'}), '(obj_id=2, unit_test=True)\n', (177, 203), False, 'from dsynth.view_datasets.tless import TlessMultiviewDataset\n'), ((214, 238), 'dsynth.MultiviewWarper', 'MultiviewWarper', (['dataset'], {}), '(dataset)\n', (229, 238), False, 'from dsynth import MultiviewWarper\n'), ((312, 340), 'numpy.float32', 'np.float32', (['dataset[1].cam_t'], {}), '(dataset[1].cam_t)\n', (322, 340), True, 'import numpy as np\n'), ((248, 284), 'numpy.reshape', 'np.reshape', (['dataset[1].cam_R', '(3, 3)'], {}), '(dataset[1].cam_R, (3, 3))\n', (258, 284), True, 'import numpy as np\n'), ((349, 385), 'numpy.reshape', 'np.reshape', (['dataset[1].cam_K', '(3, 3)'], {}), '(dataset[1].cam_K, (3, 3))\n', (359, 385), True, 'import numpy as np\n')]
import types import sqlite3 from collections import namedtuple from functools import reduce import numpy from glue.lal import LIGOTimeGPS from glue.ligolw import ligolw, lsctables, table, ilwd from glue.ligolw.utils import process def assign_id(row, i): row.simulation_id = ilwd.ilwdchar("sim_inspiral_table:sim_inspiral:%d" % i) CMAP = { "right_ascension": "longitude", "longitude":"longitude", "latitude":"latitude", "declination": "latitude", "inclination": "inclination", "polarization": "polarization", "t_ref": lambda r, t: r.set_time_geocent(LIGOTimeGPS(float(t))), "coa_phase": "coa_phase", "distance": "distance", "mass1": "mass1", "mass2": "mass2", # SHOEHORN ALERT "sample_n": assign_id, "alpha1":"alpha1", "alpha2":"alpha2", "alpha3":"alpha3", "loglikelihood": "alpha1", "joint_prior": "alpha2", "joint_s_prior": "alpha3", "eccentricity":"alpha4", "spin1x":"spin1x", "spin1y":"spin1y", "spin1z":"spin1z", "spin2x":"spin2x", "spin2y":"spin2y", "spin2z":"spin2z" } # FIXME: Find way to intersect given cols with valid cols when making table. # Otherwise, we'll have to add them manually and ensure they all exist sim_valid_cols = ["process_id", "simulation_id", "inclination", "longitude", "latitude", "polarization", "geocent_end_time", "geocent_end_time_ns", "coa_phase", "distance", "mass1", "mass2", "alpha1", "alpha2", "alpha3","spin1x", "spin1y", "spin1z", "spin2x", "spin2y", "spin2z"] sngl_valid_cols = ["process_id", "event_id", "snr", "tau0", "tau3"] multi_valid_cols = ["process_id", "event_id", "snr"] def append_samples_to_xmldoc(xmldoc, sampdict): try: si_table = table.get_table(xmldoc, lsctables.SimInspiralTable.tableName) new_table = False # Warning: This will also get triggered if there is *more* than one table except ValueError: si_table = lsctables.New(lsctables.SimInspiralTable, sim_valid_cols) new_table = True keys = list(sampdict.keys()) # Just in case the key/value pairs don't come out synchronized values = numpy.array([sampdict[k] for k in keys], object) # Flatten the keys import collections keys = reduce(list.__add__, [list(i) if isinstance(i, tuple) else [i] for i in keys]) # Get the process # FIXME: Assumed that only we have appended information procid = table.get_table(xmldoc, lsctables.ProcessTable.tableName)[-1].process_id # map the samples to sim inspiral rows # NOTE :The list comprehension is to preserve the grouping of multiple # parameters across the transpose operation. 
It's probably not necessary, # so if speed dictates, it can be reworked by flattening before arriving # here for vrow in numpy.array(list(zip(*[vrow_sub.T for vrow_sub in values])), dtype=numpy.object): #si_table.append(samples_to_siminsp_row(si_table, **dict(zip(keys, vrow.flatten())))) vrow = reduce(list.__add__, [list(i) if isinstance(i, collections.Iterable) else [i] for i in vrow]) si_table.append(samples_to_siminsp_row(si_table, **dict(list(zip(keys, vrow))))) si_table[-1].process_id = procid if new_table: xmldoc.childNodes[0].appendChild(si_table) return xmldoc def append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, neff=0, converged=False,**cols): try: si_table = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName) new_table = False # NOTE: MultiInspiralTable has no spin columns #si_table = table.get_table(xmldoc, lsctables.MultiInspiralTable.tableName) # Warning: This will also get triggered if there is *more* than one table except ValueError: si_table = lsctables.New(lsctables.SnglInspiralTable, sngl_valid_cols + list(cols.keys())) new_table = True # NOTE: MultiInspiralTable has no spin columns #si_table = lsctables.New(lsctables.MultiInspiralTable, multi_valid_cols + cols.keys()) # Get the process # FIXME: Assumed that only we have appended information procid = table.get_table(xmldoc, lsctables.ProcessTable.tableName)[-1].process_id # map the samples to sim inspiral rows si_table.append(likelihood_to_snglinsp_row(si_table, loglikelihood, neff, converged,**cols)) si_table[-1].process_id = procid if new_table: xmldoc.childNodes[0].appendChild(si_table) return xmldoc def samples_to_siminsp_row(table, colmap={}, **sampdict): row = table.RowType() row.simulation_id = table.get_next_id() for key, col in list(CMAP.items()): if key not in sampdict: continue if isinstance(col, types.FunctionType): col(row, sampdict[key]) else: setattr(row, col, sampdict[key]) return row def likelihood_to_snglinsp_row(table, loglikelihood, neff=0, converged=False, **cols): row = table.RowType() row.event_id = table.get_next_id() for col in cols: setattr(row, col, cols[col]) row.snr = loglikelihood row.tau0 = neff row.tau3 = int(converged) return row def db_identify_param(db_fname, process_id, param): """ Extract the event time for a given process ID. This may fail in the case that the event time was not given on the command line (rather in a pointer to a XML file) NOTE: This is definitely not the best way to do this. """ cmd_prm = "--" + param.replace("_", "-") sql = """select value from process_params where process_id = "%s" and param = "%s" """ % (str(process_id), cmd_prm) try: connection = sqlite3.connect(db_fname) result = list(connection.execute(sql))[0][0] finally: connection.close() return result def db_to_samples(db_fname, tbltype, cols): """ Pull samples from db_fname and return object that resembles a row from an XML table. """ if "geocent_end_time" in cols: cols.append("geocent_end_time_ns") # FIXME: Get columns from db #if cols is None: #colsspec = "*" #else: colsspec = ", ".join(cols) if tbltype == lsctables.SimInspiralTable: sql = """select %s from sim_inspiral""" % colsspec elif tbltype == lsctables.SnglInspiralTable: sql = """select %s from sngl_inspiral""" % colsspec else: raise ValueError("Don't know SQL for table %s" % tbltype.tableName) Sample = namedtuple("Sample", cols) samples = [] try: connection = sqlite3.connect(db_fname) connection.row_factory = sqlite3.Row for row in connection.execute(sql): # FIXME: UGH! 
res = dict(list(zip(cols, row))) if "geocent_end_time" in list(res.keys()): res["geocent_end_time"] += res["geocent_end_time_ns"]*1e-9 samples.append(Sample(**res)) finally: connection.close() return samples # TESTING import sys if __file__ == sys.argv[0]: import numpy # Not used yet del CMAP["int_var"] del CMAP["int_val"] del CMAP["sample_n"] # Reworked to resemble usage in pipeline del CMAP["mass1"] del CMAP["mass2"] CMAP[("mass1", "mass2")] = ("mass1", "mass2") ar = numpy.random.random((len(CMAP), 10)) samp_dict = dict(list(zip(CMAP, ar))) ar = samp_dict[("mass1", "mass2")] samp_dict[("mass1", "mass2")] = numpy.array([ar, ar]) del CMAP[("mass1", "mass2")] CMAP["mass1"] = "mass1" CMAP["mass2"] = "mass2" samp_dict["samp_n"] = numpy.array(list(range(0,10))) CMAP["sample_n"] = "sample_n" xmldoc = ligolw.Document() xmldoc.appendChild(ligolw.LIGO_LW()) process.register_to_xmldoc(xmldoc, sys.argv[0], {}) append_samples_to_xmldoc(xmldoc, samp_dict) def gaussian(x, mu=0, std=1): return 1/numpy.sqrt(numpy.pi*2)/std * numpy.exp(-(x-mu)**2/2/std**2) m1m, m2m = 1.4, 1.5 m1, m2 = numpy.random.random(2000).reshape(2,1000)*1.0+1.0 loglikes = [gaussian(m1i, m1m)*gaussian(m2i, m2m) for m1i, m2i in zip(m1, m2)] #loglikelihood = - 7.5**2/2 #append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, **{"mass1": 1.4, "mass2": 1.4, "ifos": "H1,L1,V1"}) for m1i, m2i, loglikelihood in zip(m1, m2, loglikes): append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, **{"mass1": m1i, "mass2": m2i}) from glue.ligolw import utils utils.write_filename(xmldoc, "iotest.xml.gz", gz=True)
[ "glue.ligolw.ligolw.LIGO_LW", "glue.ligolw.table.get_next_id", "collections.namedtuple", "numpy.sqrt", "sqlite3.connect", "numpy.random.random", "glue.ligolw.utils.write_filename", "glue.ligolw.table.get_table", "glue.ligolw.ligolw.Document", "glue.ligolw.table.RowType", "numpy.exp", "numpy.array", "glue.ligolw.ilwd.ilwdchar", "glue.ligolw.utils.process.register_to_xmldoc", "glue.ligolw.lsctables.New" ]
[((282, 337), 'glue.ligolw.ilwd.ilwdchar', 'ilwd.ilwdchar', (["('sim_inspiral_table:sim_inspiral:%d' % i)"], {}), "('sim_inspiral_table:sim_inspiral:%d' % i)\n", (295, 337), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((2121, 2169), 'numpy.array', 'numpy.array', (['[sampdict[k] for k in keys]', 'object'], {}), '([sampdict[k] for k in keys], object)\n', (2132, 2169), False, 'import numpy\n'), ((4528, 4543), 'glue.ligolw.table.RowType', 'table.RowType', ([], {}), '()\n', (4541, 4543), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4568, 4587), 'glue.ligolw.table.get_next_id', 'table.get_next_id', ([], {}), '()\n', (4585, 4587), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4938, 4953), 'glue.ligolw.table.RowType', 'table.RowType', ([], {}), '()\n', (4951, 4953), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4973, 4992), 'glue.ligolw.table.get_next_id', 'table.get_next_id', ([], {}), '()\n', (4990, 4992), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((6443, 6469), 'collections.namedtuple', 'namedtuple', (['"""Sample"""', 'cols'], {}), "('Sample', cols)\n", (6453, 6469), False, 'from collections import namedtuple\n'), ((7400, 7421), 'numpy.array', 'numpy.array', (['[ar, ar]'], {}), '([ar, ar])\n', (7411, 7421), False, 'import numpy\n'), ((7621, 7638), 'glue.ligolw.ligolw.Document', 'ligolw.Document', ([], {}), '()\n', (7636, 7638), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((7684, 7735), 'glue.ligolw.utils.process.register_to_xmldoc', 'process.register_to_xmldoc', (['xmldoc', 'sys.argv[0]', '{}'], {}), '(xmldoc, sys.argv[0], {})\n', (7710, 7735), False, 'from glue.ligolw.utils import process\n'), ((8410, 8464), 'glue.ligolw.utils.write_filename', 'utils.write_filename', (['xmldoc', '"""iotest.xml.gz"""'], {'gz': '(True)'}), "(xmldoc, 'iotest.xml.gz', gz=True)\n", (8430, 8464), False, 'from glue.ligolw import utils\n'), ((1712, 1773), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.SimInspiralTable.tableName'], {}), '(xmldoc, lsctables.SimInspiralTable.tableName)\n', (1727, 1773), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((3415, 3477), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.SnglInspiralTable.tableName'], {}), '(xmldoc, lsctables.SnglInspiralTable.tableName)\n', (3430, 3477), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((5641, 5666), 'sqlite3.connect', 'sqlite3.connect', (['db_fname'], {}), '(db_fname)\n', (5656, 5666), False, 'import sqlite3\n'), ((6518, 6543), 'sqlite3.connect', 'sqlite3.connect', (['db_fname'], {}), '(db_fname)\n', (6533, 6543), False, 'import sqlite3\n'), ((7662, 7678), 'glue.ligolw.ligolw.LIGO_LW', 'ligolw.LIGO_LW', ([], {}), '()\n', (7676, 7678), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((1920, 1977), 'glue.ligolw.lsctables.New', 'lsctables.New', (['lsctables.SimInspiralTable', 'sim_valid_cols'], {}), '(lsctables.SimInspiralTable, sim_valid_cols)\n', (1933, 1977), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((2407, 2464), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.ProcessTable.tableName'], {}), '(xmldoc, lsctables.ProcessTable.tableName)\n', (2422, 2464), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4115, 4172), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.ProcessTable.tableName'], {}), 
'(xmldoc, lsctables.ProcessTable.tableName)\n', (4130, 4172), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((7866, 7906), 'numpy.exp', 'numpy.exp', (['(-(x - mu) ** 2 / 2 / std ** 2)'], {}), '(-(x - mu) ** 2 / 2 / std ** 2)\n', (7875, 7906), False, 'import numpy\n'), ((7837, 7861), 'numpy.sqrt', 'numpy.sqrt', (['(numpy.pi * 2)'], {}), '(numpy.pi * 2)\n', (7847, 7861), False, 'import numpy\n'), ((7935, 7960), 'numpy.random.random', 'numpy.random.random', (['(2000)'], {}), '(2000)\n', (7954, 7960), False, 'import numpy\n')]
import numpy as np
import fcl
import torch

# R = np.array([[0.0, -1.0, 0.0],
#               [1.0, 0.0, 0.0],
#               [0.0, 0.0, 1.0]])
R = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
T = np.array([1.0, 1.865, 0])

g1 = fcl.Box(1,2,3)
t1 = fcl.Transform()
o1 = fcl.CollisionObject(g1, t1)

# g2 = fcl.Cone(1,3)
g2 = fcl.Cylinder(0.01, 1000)
t2 = fcl.Transform()
o2 = fcl.CollisionObject(g2, t2)

# request = fcl.DistanceRequest(gjk_solver_type=fcl.GJKSolverType.GST_INDEP)
# result = fcl.DistanceResult()
request = fcl.CollisionRequest(enable_contact=True)
result = fcl.CollisionResult()

# ret = fcl.distance(o1, o2, request, result)
# ret = fcl.collide(o1, o2, request, result)

size = 50, 50
yy, xx = torch.meshgrid(torch.linspace(-5, 5, size[0]), torch.linspace(-5, 5, size[1]))
grid_points = torch.stack([xx, yy], axis=2).reshape((-1, 2))
grid_labels = torch.zeros_like(grid_points)[:, 0]

for i, (x, y) in enumerate(grid_points):
    print(x, y)
    o2.setTranslation([x, y, 0])
    fcl.update()
    ret = fcl.collide(o1, o2, request, result)
    grid_labels[i] = result.is_collision
    print(result.is_collision)

import matplotlib.pyplot as plt
plt.scatter(grid_points[grid_labels==True, 0], grid_points[grid_labels==True, 1])
plt.show()

# print(ret, result.contacts[0].penetration_depth)
[ "fcl.Cylinder", "fcl.update", "matplotlib.pyplot.show", "fcl.Transform", "fcl.collide", "torch.stack", "fcl.CollisionObject", "numpy.array", "matplotlib.pyplot.scatter", "fcl.CollisionRequest", "torch.zeros_like", "fcl.Box", "torch.linspace", "fcl.CollisionResult" ]
[((151, 212), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (159, 212), True, 'import numpy as np\n'), ((247, 272), 'numpy.array', 'np.array', (['[1.0, 1.865, 0]'], {}), '([1.0, 1.865, 0])\n', (255, 272), True, 'import numpy as np\n'), ((279, 295), 'fcl.Box', 'fcl.Box', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (286, 295), False, 'import fcl\n'), ((299, 314), 'fcl.Transform', 'fcl.Transform', ([], {}), '()\n', (312, 314), False, 'import fcl\n'), ((320, 347), 'fcl.CollisionObject', 'fcl.CollisionObject', (['g1', 't1'], {}), '(g1, t1)\n', (339, 347), False, 'import fcl\n'), ((375, 399), 'fcl.Cylinder', 'fcl.Cylinder', (['(0.01)', '(1000)'], {}), '(0.01, 1000)\n', (387, 399), False, 'import fcl\n'), ((405, 420), 'fcl.Transform', 'fcl.Transform', ([], {}), '()\n', (418, 420), False, 'import fcl\n'), ((426, 453), 'fcl.CollisionObject', 'fcl.CollisionObject', (['g2', 't2'], {}), '(g2, t2)\n', (445, 453), False, 'import fcl\n'), ((574, 615), 'fcl.CollisionRequest', 'fcl.CollisionRequest', ([], {'enable_contact': '(True)'}), '(enable_contact=True)\n', (594, 615), False, 'import fcl\n'), ((625, 646), 'fcl.CollisionResult', 'fcl.CollisionResult', ([], {}), '()\n', (644, 646), False, 'import fcl\n'), ((1212, 1301), 'matplotlib.pyplot.scatter', 'plt.scatter', (['grid_points[grid_labels == True, 0]', 'grid_points[grid_labels == True, 1]'], {}), '(grid_points[grid_labels == True, 0], grid_points[grid_labels ==\n True, 1])\n', (1223, 1301), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1302, 1304), True, 'import matplotlib.pyplot as plt\n'), ((778, 808), 'torch.linspace', 'torch.linspace', (['(-5)', '(5)', 'size[0]'], {}), '(-5, 5, size[0])\n', (792, 808), False, 'import torch\n'), ((810, 840), 'torch.linspace', 'torch.linspace', (['(-5)', '(5)', 'size[1]'], {}), '(-5, 5, size[1])\n', (824, 840), False, 'import torch\n'), ((917, 946), 'torch.zeros_like', 'torch.zeros_like', (['grid_points'], {}), '(grid_points)\n', (933, 946), False, 'import torch\n'), ((1047, 1059), 'fcl.update', 'fcl.update', ([], {}), '()\n', (1057, 1059), False, 'import fcl\n'), ((1070, 1106), 'fcl.collide', 'fcl.collide', (['o1', 'o2', 'request', 'result'], {}), '(o1, o2, request, result)\n', (1081, 1106), False, 'import fcl\n'), ((856, 885), 'torch.stack', 'torch.stack', (['[xx, yy]'], {'axis': '(2)'}), '([xx, yy], axis=2)\n', (867, 885), False, 'import torch\n')]
# -*- coding: utf-8 -*- import librosa.display import librosa as lb import os import numpy as np import pickle import matplotlib.pyplot as plt import time import multiprocessing import itertools import sys from collections import OrderedDict from more_itertools import unique_everseen from scipy.stats import skew from scipy.stats import kurtosis #This python script will load in songs and extract features from the waveform. It will then create a dictionary of all the results, ready for plotting in another script. #At the top we have a load of functions pre-defined, skip down to __main__ to see the steps we run #a function to split up a song into TIME chunks def splitT(mint,maxt,songdat): splittime=[] for i in range(mint,maxt): splittime.append(songdat[:,i]) # first axis is freq, second axis is time. Return all freq for specific time range. return (np.array(splittime)) #a function to split up a song into FREQ chunks def splitF(minv, maxv, songdat): splitfreq = [] for i in range(minv,maxv): splitfreq.append(songdat[i,:]) # first axis is freq, second axis is time. Return all time for specific freq range. return (np.array(splitfreq)) #This is the main function which gets features from the songs. Most values returned are the mean of the whole time series, hence '_a'. def get_features_mean(song,sr,hop_length,n_fft): try: print('extracting features...') y_harmonic, y_percussive = lb.effects.hpss(song) #split song into harmonic and percussive parts stft_harmonic=lb.core.stft(y_harmonic, n_fft=n_fft, hop_length=hop_length) #Compute power spectrogram. stft_percussive=lb.core.stft(y_percussive, n_fft=n_fft, hop_length=hop_length) #Compute power spectrogram. #stft_all=lb.core.stft(song, n_fft=n_fft, hop_length=hop_length) #Compute power spectrogram. band_resolution=[5] #[5,25] Choose number of bands, do low and high resolution? bands_dict=OrderedDict() for no_bands in band_resolution: bands=np.logspace(1.3,4,no_bands)/10 #note that as n_fft is 2050 (I've decided this is sensible resolution), bands/10=freq bands_int=bands.astype(int) bands_int_unique=list(unique_everseen(bands_int)) #removing double entries less than 100Hz, because logspace bunches up down there and we don't need doubles when rounding to the nearest 10 Hz. for i in range(0,len(bands_int_unique)-1): _h=lb.feature.rmse(y=(splitF(bands_int_unique[i],bands_int_unique[i+1],stft_harmonic))) _p=lb.feature.rmse(y=(splitF(bands_int_unique[i],bands_int_unique[i+1],stft_percussive))) #Calculate statistics for harmoinc and percussive over the time series. 
rms_h=np.mean(np.abs(_h)) std_h=np.std(np.abs(_h)) skew_h=skew(np.mean(np.abs(_h), axis=0)) #skew of the time series (avg along freq axis, axis=0) kurtosis_h=kurtosis(np.mean(np.abs(_h), axis=0), fisher=True, bias=True) #kurtosis of time series (avg along freq axis=0) rms_p=np.mean(np.abs(_p)) std_p=np.std(np.abs(_p)) skew_p=skew(np.mean(np.abs(_p), axis=0)) #skew of the time series (avg along freq axis, axis=0) kurtosis_p=kurtosis(np.mean(np.abs(_p), axis=0), fisher=True, bias=True) #kurtosis of time series (avg along freq axis=0) #Append results to dict, with numbers as band labels bands_dict.update({'{0}band_rms_h{1}'.format(no_bands,i):rms_h,'{0}band_rms_p{1}'.format(no_bands,i):rms_p}) bands_dict.update({'{0}band_std_h{1}'.format(no_bands,i):std_h,'{0}band_std_p{1}'.format(no_bands,i):std_p}) bands_dict.update({'{0}band_skew_h{1}'.format(no_bands,i):skew_h,'{0}band_skew_p{1}'.format(no_bands,i):skew_p}) bands_dict.update({'{0}band_kurtosis_h{1}'.format(no_bands,i):kurtosis_h,'{0}band_kurtosis_p{1}'.format(no_bands,i):kurtosis_p}) #stft=lb.feature.chroma_stft(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute a chromagram from a waveform or power spectrogram. #stft_a=np.mean(stft[0]) #stft_std=np.std(stft[0]) #rmse=lb.feature.rmse(y=song) #Compute root-mean-square (RMS) energy for each frame, either from the audio samples y or from a spectrogram S. #rmse_a=np.mean(rmse) #rmse_std=np.std(rmse) rmseH=np.abs(lb.feature.rmse(y=stft_harmonic)) #Compute root-mean-square (RMS) energy for harmonic rmseH_a=np.mean(rmseH) rmseH_std=np.std(rmseH) rmseH_skew=skew(np.mean(rmseH, axis=0)) rmseH_kurtosis=kurtosis(np.mean(rmseH, axis=0), fisher=True, bias=True) rmseP=np.abs(lb.feature.rmse(y=stft_percussive)) #Compute root-mean-square (RMS) energy for percussive rmseP_a=np.mean(rmseP) rmseP_std=np.std(rmseP) rmseP_skew=skew(np.mean(rmseP, axis=0)) rmseP_kurtosis=kurtosis(np.mean(rmseP, axis=0), fisher=True, bias=True) centroid=lb.feature.spectral_centroid(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute the spectral centroid. centroid_a=np.mean(centroid) centroid_std=np.std(centroid) bw=lb.feature.spectral_bandwidth(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute p’th-order spectral bandwidth: bw_a=np.mean(bw) bw_std=np.std(bw) contrast=lb.feature.spectral_contrast(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute spectral contrast [R16] contrast_a=np.mean(contrast) contrast_std=np.std(contrast) polyfeat=lb.feature.poly_features(y_harmonic, sr, n_fft=n_fft, hop_length=hop_length) #Get coefficients of fitting an nth-order polynomial to the columns of a spectrogram. polyfeat_a=np.mean(polyfeat[0]) polyfeat_std=np.std(polyfeat[0]) tonnetz=lb.feature.tonnetz(librosa.effects.harmonic(y_harmonic), sr) #Computes the tonal centroid features (tonnetz), following the method of [R17]. 
tonnetz_a=np.mean(tonnetz) tonnetz_std=np.std(tonnetz) zcr=lb.feature.zero_crossing_rate(song, sr, hop_length=hop_length) #zero crossing rate zcr_a=np.mean(zcr) zcr_std=np.std(zcr) onset_env=lb.onset.onset_strength(y_percussive, sr=sr) onset_a=np.mean(onset_env) onset_std=np.std(onset_env) D = librosa.stft(song) times = librosa.frames_to_time(np.arange(D.shape[1])) #not returned, but could be if you want to plot things as a time series bpm,beats=lb.beat.beat_track(y=y_percussive, sr=sr, onset_envelope=onset_env, units='time') beats_a=np.mean(beats) beats_std=np.std(beats) features_dict=OrderedDict({'rmseP_a':rmseP_a,'rmseP_std':rmseP_std,'rmseH_a':rmseH_a,'rmseH_std':rmseH_std,'centroid_a':centroid_a,'centroid_std':centroid_std,'bw_a':bw_a,'bw_std':bw_std,'contrast_a':contrast_a,'contrast_std':contrast_std,'polyfeat_a':polyfeat_a,'polyfeat_std':polyfeat_std,'tonnetz_a':tonnetz_a,'tonnetz_std':tonnetz_std,'zcr_a':zcr_a,'zcr_std':zcr_std,'onset_a':onset_a,'onset_std':onset_std,'bpm':bpm, 'rmseP_skew':rmseP_skew, 'rmseP_kurtosis':rmseP_kurtosis, 'rmseH_skew':rmseH_skew, 'rmseH_kurtosis':rmseH_kurtosis}) combine_features={**features_dict,**bands_dict} print('features extracted successfully') return combine_features except: print('.'*20+'FAILED'+'.'*20) print('.'*40) #a function to look at beat tracking... not used in machine learning yet, just random investigations. def beattrack(song,sr,hop_length,n_fft): y_harmonic, y_percussive = lb.effects.hpss(song) beattrack=lb.beat.beat_track(y=y_percussive, sr=sr, onset_envelope=None, hop_length=hop_length, start_bpm=120.0, tightness=100, trim=True, bpm=None, units='frames') #load music function, accepts any format i've encountered: mp3,wav,wma bla bla def load_music(songname1,songpath1): try: print('loading the song: {0} ......... located here: {1} '.format(songname1, songpath1)) songdata1, sr1 = lb.load(songpath1) #librosa library used to grab songdata and sample rate print ('done........ '+songname1) return [songname1,songdata1,sr1] except: #the song could be corrupt? you could be trying to load something which isn't a song? print('..............................FAILED...............................') print(songpath1) print('...................................................................') #functions for saving/loading the python dictionaries to disk def save_obj(obj, name ): with open(name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name ): with open(name + '.pkl', 'rb') as f: return pickle.load(f) #If you want a grid-plot to test anything out, this will help. Although I've made sure get_features returns only averaged values, not time-series data, so meh. def gridplot(data_dict,feature,size,N,ind): f, axarr = plt.subplots(size, size, sharey=True) i=0 j=0 for key in data_dict: #print (i,j) axarr[i,j].plot(np.convolve(data_dict[key][feature][ind],np.ones((N,))/N, mode='valid')) axarr[i, j].set_title(key[:3]) if j==size-1: i+=1 j=0 if j==size-1 else j+1 for i in range(1,size,1): plt.setp([a.get_yticklabels() for a in axarr[:, i]], visible=False) plt.savefig('test.png') #OK so here we go... if __name__ == "__main__": start_load=time.time() #we're going to want know how long this takes... num_workers = multiprocessing.cpu_count() #if we don't use multiple cores we may as well give up now. This is how many your computer has. 
print('you have {0} cores available to do your bidding...'.format(num_workers)) num_workers=32 #I was playing around with changing this #multi=int(sys.argv[1]) #at one point I was testing this as a command line input n_fft1=2050 #important parameter here; this is the size of the fft window. these are sensible values hop_length1=441 #n_fft/5 is a sensisble value. too large and you don't sample properly. #create song database, songdb: songname_tmp=[] songpath_tmp=[] #path='./audio_alex/' path=sys.argv[1] #the only command line input is the path to the folder of music print(path) savefile=str(path)+'_data' #it's saved with the same folder name but with _data.pkl on the end. #now load song data in for song in os.listdir(path): #print (song) songname_tmp.append(song) songpath_tmp.append(path+'/'+song) #print(songname) songname=songname_tmp #i'm just reassigning the name incase of tests with commented out lines... songpath=songpath_tmp #if you want to test this on a small number of songs first (e.g. 32), replace previous two lines with the following: #songname=songname_tmp[:31] #remember indices starts at zero. #songname=songname_tmp[:31] print('loading songs...') #Here we go with multi-processing, loading all our song data in with multiprocessing.Pool(processes=num_workers) as pool: songdb=pool.starmap(load_music,zip(songname,songpath)) #btw a starmap is a way to pass multiple arguments to a function using multi-process pool.close() pool.join() print('finished loading songs into songdb') #print (songdb) print ('loaded {0} songs into memory'.format(len(songdb))) songdb=[x for x in songdb if x is not None] #remove any entries where loading may have failed for any reason (rare cases) #parse song data to individual lists ready for feature extraction function (because we can't slice nested lists) song_name=[] #text song_data=[] #list of numbers song_sr=[] #sample rate for song1 in songdb: song_name.append(song1[0]) song_data.append(song1[1]) song_sr.append(song1[2]) start_feat = time.time() #note the time print("Data is all ready, now extracting features from the songs...") #extract features from songs with multiprocesssing with multiprocessing.Pool(processes=num_workers,maxtasksperchild=1) as pool: res=pool.starmap(get_features_mean,zip(song_data,song_sr,itertools.repeat(hop_length1),itertools.repeat(n_fft1))) pool.close() pool.join() #concatenate each songs features (res) into dictionary print('concatenating results into a massive dictionary...') data_dict_mean={} for i in range(0,len(songdb)): data_dict_mean.update({song_name[i]:res[i]}) #print features to screen to check print('The features extracted from the songs are: ') print(res[0].keys()) print('saving dictionary to disk...') save_obj(data_dict_mean,savefile) end_feat=time.time() #note finish time print("loading time: {0} seconds".format(start_feat-start_load)) print("feature extraction time: {0} seconds".format(end_feat-start_feat)) print("total time: {0} seconds".format(end_feat-start_load)) print('finished')
[ "librosa.feature.poly_features", "librosa.feature.zero_crossing_rate", "multiprocessing.cpu_count", "numpy.array", "librosa.feature.spectral_centroid", "librosa.feature.spectral_bandwidth", "librosa.feature.spectral_contrast", "librosa.load", "numpy.arange", "numpy.mean", "itertools.repeat", "os.listdir", "numpy.logspace", "librosa.effects.hpss", "numpy.abs", "collections.OrderedDict", "matplotlib.pyplot.savefig", "numpy.ones", "librosa.beat.beat_track", "pickle.load", "numpy.std", "more_itertools.unique_everseen", "time.time", "librosa.onset.onset_strength", "pickle.dump", "multiprocessing.Pool", "librosa.feature.rmse", "librosa.core.stft", "matplotlib.pyplot.subplots" ]
[((881, 900), 'numpy.array', 'np.array', (['splittime'], {}), '(splittime)\n', (889, 900), True, 'import numpy as np\n'), ((1169, 1188), 'numpy.array', 'np.array', (['splitfreq'], {}), '(splitfreq)\n', (1177, 1188), True, 'import numpy as np\n'), ((7649, 7670), 'librosa.effects.hpss', 'lb.effects.hpss', (['song'], {}), '(song)\n', (7664, 7670), True, 'import librosa as lb\n'), ((7685, 7849), 'librosa.beat.beat_track', 'lb.beat.beat_track', ([], {'y': 'y_percussive', 'sr': 'sr', 'onset_envelope': 'None', 'hop_length': 'hop_length', 'start_bpm': '(120.0)', 'tightness': '(100)', 'trim': '(True)', 'bpm': 'None', 'units': '"""frames"""'}), "(y=y_percussive, sr=sr, onset_envelope=None, hop_length=\n hop_length, start_bpm=120.0, tightness=100, trim=True, bpm=None, units=\n 'frames')\n", (7703, 7849), True, 'import librosa as lb\n'), ((9034, 9071), 'matplotlib.pyplot.subplots', 'plt.subplots', (['size', 'size'], {'sharey': '(True)'}), '(size, size, sharey=True)\n', (9046, 9071), True, 'import matplotlib.pyplot as plt\n'), ((9442, 9465), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.png"""'], {}), "('test.png')\n", (9453, 9465), True, 'import matplotlib.pyplot as plt\n'), ((9541, 9552), 'time.time', 'time.time', ([], {}), '()\n', (9550, 9552), False, 'import time\n'), ((9624, 9651), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9649, 9651), False, 'import multiprocessing\n'), ((10576, 10592), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (10586, 10592), False, 'import os\n'), ((12129, 12140), 'time.time', 'time.time', ([], {}), '()\n', (12138, 12140), False, 'import time\n'), ((13046, 13057), 'time.time', 'time.time', ([], {}), '()\n', (13055, 13057), False, 'import time\n'), ((1459, 1480), 'librosa.effects.hpss', 'lb.effects.hpss', (['song'], {}), '(song)\n', (1474, 1480), True, 'import librosa as lb\n'), ((1550, 1610), 'librosa.core.stft', 'lb.core.stft', (['y_harmonic'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(y_harmonic, n_fft=n_fft, hop_length=hop_length)\n', (1562, 1610), True, 'import librosa as lb\n'), ((1663, 1725), 'librosa.core.stft', 'lb.core.stft', (['y_percussive'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(y_percussive, n_fft=n_fft, hop_length=hop_length)\n', (1675, 1725), True, 'import librosa as lb\n'), ((1962, 1975), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1973, 1975), False, 'from collections import OrderedDict\n'), ((4557, 4571), 'numpy.mean', 'np.mean', (['rmseH'], {}), '(rmseH)\n', (4564, 4571), True, 'import numpy as np\n'), ((4590, 4603), 'numpy.std', 'np.std', (['rmseH'], {}), '(rmseH)\n', (4596, 4603), True, 'import numpy as np\n'), ((4860, 4874), 'numpy.mean', 'np.mean', (['rmseP'], {}), '(rmseP)\n', (4867, 4874), True, 'import numpy as np\n'), ((4893, 4906), 'numpy.std', 'np.std', (['rmseP'], {}), '(rmseP)\n', (4899, 4906), True, 'import numpy as np\n'), ((5053, 5127), 'librosa.feature.spectral_centroid', 'lb.feature.spectral_centroid', (['song', 'sr'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(song, sr, n_fft=n_fft, hop_length=hop_length)\n', (5081, 5127), True, 'import librosa as lb\n'), ((5179, 5196), 'numpy.mean', 'np.mean', (['centroid'], {}), '(centroid)\n', (5186, 5196), True, 'import numpy as np\n'), ((5218, 5234), 'numpy.std', 'np.std', (['centroid'], {}), '(centroid)\n', (5224, 5234), True, 'import numpy as np\n'), ((5246, 5321), 'librosa.feature.spectral_bandwidth', 'lb.feature.spectral_bandwidth', (['song', 'sr'], {'n_fft': 'n_fft', 'hop_length': 
'hop_length'}), '(song, sr, n_fft=n_fft, hop_length=hop_length)\n', (5275, 5321), True, 'import librosa as lb\n'), ((5375, 5386), 'numpy.mean', 'np.mean', (['bw'], {}), '(bw)\n', (5382, 5386), True, 'import numpy as np\n'), ((5402, 5412), 'numpy.std', 'np.std', (['bw'], {}), '(bw)\n', (5408, 5412), True, 'import numpy as np\n'), ((5430, 5504), 'librosa.feature.spectral_contrast', 'lb.feature.spectral_contrast', (['song', 'sr'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(song, sr, n_fft=n_fft, hop_length=hop_length)\n', (5458, 5504), True, 'import librosa as lb\n'), ((5557, 5574), 'numpy.mean', 'np.mean', (['contrast'], {}), '(contrast)\n', (5564, 5574), True, 'import numpy as np\n'), ((5596, 5612), 'numpy.std', 'np.std', (['contrast'], {}), '(contrast)\n', (5602, 5612), True, 'import numpy as np\n'), ((5630, 5706), 'librosa.feature.poly_features', 'lb.feature.poly_features', (['y_harmonic', 'sr'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(y_harmonic, sr, n_fft=n_fft, hop_length=hop_length)\n', (5654, 5706), True, 'import librosa as lb\n'), ((5812, 5832), 'numpy.mean', 'np.mean', (['polyfeat[0]'], {}), '(polyfeat[0])\n', (5819, 5832), True, 'import numpy as np\n'), ((5854, 5873), 'numpy.std', 'np.std', (['polyfeat[0]'], {}), '(polyfeat[0])\n', (5860, 5873), True, 'import numpy as np\n'), ((6049, 6065), 'numpy.mean', 'np.mean', (['tonnetz'], {}), '(tonnetz)\n', (6056, 6065), True, 'import numpy as np\n'), ((6086, 6101), 'numpy.std', 'np.std', (['tonnetz'], {}), '(tonnetz)\n', (6092, 6101), True, 'import numpy as np\n'), ((6114, 6176), 'librosa.feature.zero_crossing_rate', 'lb.feature.zero_crossing_rate', (['song', 'sr'], {'hop_length': 'hop_length'}), '(song, sr, hop_length=hop_length)\n', (6143, 6176), True, 'import librosa as lb\n'), ((6212, 6224), 'numpy.mean', 'np.mean', (['zcr'], {}), '(zcr)\n', (6219, 6224), True, 'import numpy as np\n'), ((6241, 6252), 'numpy.std', 'np.std', (['zcr'], {}), '(zcr)\n', (6247, 6252), True, 'import numpy as np\n'), ((6271, 6315), 'librosa.onset.onset_strength', 'lb.onset.onset_strength', (['y_percussive'], {'sr': 'sr'}), '(y_percussive, sr=sr)\n', (6294, 6315), True, 'import librosa as lb\n'), ((6332, 6350), 'numpy.mean', 'np.mean', (['onset_env'], {}), '(onset_env)\n', (6339, 6350), True, 'import numpy as np\n'), ((6369, 6386), 'numpy.std', 'np.std', (['onset_env'], {}), '(onset_env)\n', (6375, 6386), True, 'import numpy as np\n'), ((6570, 6656), 'librosa.beat.beat_track', 'lb.beat.beat_track', ([], {'y': 'y_percussive', 'sr': 'sr', 'onset_envelope': 'onset_env', 'units': '"""time"""'}), "(y=y_percussive, sr=sr, onset_envelope=onset_env, units=\n 'time')\n", (6588, 6656), True, 'import librosa as lb\n'), ((6668, 6682), 'numpy.mean', 'np.mean', (['beats'], {}), '(beats)\n', (6675, 6682), True, 'import numpy as np\n'), ((6701, 6714), 'numpy.std', 'np.std', (['beats'], {}), '(beats)\n', (6707, 6714), True, 'import numpy as np\n'), ((6738, 7336), 'collections.OrderedDict', 'OrderedDict', (["{'rmseP_a': rmseP_a, 'rmseP_std': rmseP_std, 'rmseH_a': rmseH_a,\n 'rmseH_std': rmseH_std, 'centroid_a': centroid_a, 'centroid_std':\n centroid_std, 'bw_a': bw_a, 'bw_std': bw_std, 'contrast_a': contrast_a,\n 'contrast_std': contrast_std, 'polyfeat_a': polyfeat_a, 'polyfeat_std':\n polyfeat_std, 'tonnetz_a': tonnetz_a, 'tonnetz_std': tonnetz_std,\n 'zcr_a': zcr_a, 'zcr_std': zcr_std, 'onset_a': onset_a, 'onset_std':\n onset_std, 'bpm': bpm, 'rmseP_skew': rmseP_skew, 'rmseP_kurtosis':\n rmseP_kurtosis, 'rmseH_skew': rmseH_skew, 'rmseH_kurtosis': 
rmseH_kurtosis}"], {}), "({'rmseP_a': rmseP_a, 'rmseP_std': rmseP_std, 'rmseH_a': rmseH_a,\n 'rmseH_std': rmseH_std, 'centroid_a': centroid_a, 'centroid_std':\n centroid_std, 'bw_a': bw_a, 'bw_std': bw_std, 'contrast_a': contrast_a,\n 'contrast_std': contrast_std, 'polyfeat_a': polyfeat_a, 'polyfeat_std':\n polyfeat_std, 'tonnetz_a': tonnetz_a, 'tonnetz_std': tonnetz_std,\n 'zcr_a': zcr_a, 'zcr_std': zcr_std, 'onset_a': onset_a, 'onset_std':\n onset_std, 'bpm': bpm, 'rmseP_skew': rmseP_skew, 'rmseP_kurtosis':\n rmseP_kurtosis, 'rmseH_skew': rmseH_skew, 'rmseH_kurtosis': rmseH_kurtosis}\n )\n", (6749, 7336), False, 'from collections import OrderedDict\n'), ((8088, 8106), 'librosa.load', 'lb.load', (['songpath1'], {}), '(songpath1)\n', (8095, 8106), True, 'import librosa as lb\n'), ((8676, 8720), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (8687, 8720), False, 'import pickle\n'), ((8799, 8813), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8810, 8813), False, 'import pickle\n'), ((11217, 11260), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_workers'}), '(processes=num_workers)\n', (11237, 11260), False, 'import multiprocessing\n'), ((12306, 12369), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_workers', 'maxtasksperchild': '(1)'}), '(processes=num_workers, maxtasksperchild=1)\n', (12326, 12369), False, 'import multiprocessing\n'), ((4455, 4487), 'librosa.feature.rmse', 'lb.feature.rmse', ([], {'y': 'stft_harmonic'}), '(y=stft_harmonic)\n', (4470, 4487), True, 'import librosa as lb\n'), ((4628, 4650), 'numpy.mean', 'np.mean', (['rmseH'], {'axis': '(0)'}), '(rmseH, axis=0)\n', (4635, 4650), True, 'import numpy as np\n'), ((4684, 4706), 'numpy.mean', 'np.mean', (['rmseH'], {'axis': '(0)'}), '(rmseH, axis=0)\n', (4691, 4706), True, 'import numpy as np\n'), ((4754, 4788), 'librosa.feature.rmse', 'lb.feature.rmse', ([], {'y': 'stft_percussive'}), '(y=stft_percussive)\n', (4769, 4788), True, 'import librosa as lb\n'), ((4931, 4953), 'numpy.mean', 'np.mean', (['rmseP'], {'axis': '(0)'}), '(rmseP, axis=0)\n', (4938, 4953), True, 'import numpy as np\n'), ((4987, 5009), 'numpy.mean', 'np.mean', (['rmseP'], {'axis': '(0)'}), '(rmseP, axis=0)\n', (4994, 5009), True, 'import numpy as np\n'), ((6457, 6478), 'numpy.arange', 'np.arange', (['D.shape[1]'], {}), '(D.shape[1])\n', (6466, 6478), True, 'import numpy as np\n'), ((2035, 2064), 'numpy.logspace', 'np.logspace', (['(1.3)', '(4)', 'no_bands'], {}), '(1.3, 4, no_bands)\n', (2046, 2064), True, 'import numpy as np\n'), ((2226, 2252), 'more_itertools.unique_everseen', 'unique_everseen', (['bands_int'], {}), '(bands_int)\n', (2241, 2252), False, 'from more_itertools import unique_everseen\n'), ((12447, 12476), 'itertools.repeat', 'itertools.repeat', (['hop_length1'], {}), '(hop_length1)\n', (12463, 12476), False, 'import itertools\n'), ((12477, 12501), 'itertools.repeat', 'itertools.repeat', (['n_fft1'], {}), '(n_fft1)\n', (12493, 12501), False, 'import itertools\n'), ((2780, 2790), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2786, 2790), True, 'import numpy as np\n'), ((2821, 2831), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2827, 2831), True, 'import numpy as np\n'), ((3114, 3124), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3120, 3124), True, 'import numpy as np\n'), ((3155, 3165), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3161, 3165), True, 'import numpy as np\n'), ((9200, 9213), 'numpy.ones', 'np.ones', 
(['(N,)'], {}), '((N,))\n', (9207, 9213), True, 'import numpy as np\n'), ((2869, 2879), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2875, 2879), True, 'import numpy as np\n'), ((2990, 3000), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2996, 3000), True, 'import numpy as np\n'), ((3203, 3213), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3209, 3213), True, 'import numpy as np\n'), ((3324, 3334), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3330, 3334), True, 'import numpy as np\n')]
import streamlit as st from streamlit_yellowbrick import st_yellowbrick def run_regression(): with st.sidebar.form(key="regression_form"): regression_visualizers = st.multiselect( "Choose Regression Visualizers", [ "Residuals Plot", "Prediction Error Plot", "Alpha Section", ], ) submit_button = st.form_submit_button(label="Show") if "Residuals Plot" in regression_visualizers: with st.beta_expander("Collapse", expanded=True): agree = st.checkbox("What is a Residuals Plot?", value=False) if agree: st.markdown( """ Residuals, in the context of regression models, are the difference between the observed value of the target variable (y) and the predicted value (ŷ), i.e. the error of the prediction. The residuals plot shows the difference between residuals on the vertical axis and the dependent variable on the horizontal axis, allowing you to detect regions within the target that may be susceptible to more or less error. """ ) col1, col2 = st.beta_columns(2) with col1: residuals_plot() col2.code( """ import streamlit as st from streamlit_yellowbrick import st_yellowbrick from sklearn.linear_model import Ridge from sklearn.model_selection import train_test_split from yellowbrick.datasets import load_concrete from yellowbrick.regressor import ResidualsPlot # Load a regression dataset X, y = load_concrete() # Create the train and test data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Instantiate the linear model and visualizer model = Ridge() visualizer = ResidualsPlot(model) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data st_yellowbrick(visualizer) # Finalize and render the figure """, language="python", ) if "Prediction Error Plot" in regression_visualizers: with st.beta_expander("Collapse", expanded=True): agree = st.checkbox("What is a Prediction Error Plot?", value=False) if agree: st.markdown( """ A prediction error plot shows the actual targets from the dataset against the predicted values generated by our model. This allows us to see how much variance is in the model. Data scientists can diagnose regression models using this plot by comparing against the 45 degree line, where the prediction exactly matches the model. """ ) col1, col2 = st.beta_columns(2) with col1: prediction_error() col2.code( """ import streamlit as st from streamlit_yellowbrick import st_yellowbrick from sklearn.linear_model import Lasso from sklearn.model_selection import train_test_split from yellowbrick.datasets import load_concrete from yellowbrick.regressor import PredictionError # Load a regression dataset X, y = load_concrete() # Create the train and test data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Instantiate the linear model and visualizer model = Lasso() visualizer = PredictionError(model) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data st_yellowbrick(visualizer) # Finalize and render the figure """, language="python", ) if "Alpha Section" in regression_visualizers: with st.beta_expander("Collapse", expanded=True): agree = st.checkbox("What is Alpha Section?", value=False) if agree: st.markdown( """ Regularization is designed to penalize model complexity, therefore the higher the alpha, the less complex the model, decreasing the error due to variance (overfit). 
Alphas that are too high on the other hand increase the error due to bias (underfit). It is important, therefore to choose an optimal alpha such that the error is minimized in both directions. The `AlphaSelection` Visualizer demonstrates how different values of alpha influence model selection during the regularization of linear models. Generally speaking, alpha increases the affect of regularization, e.g. if alpha is zero there is no regularization and the higher the alpha, the more the regularization parameter influences the final model. """ ) col1, col2 = st.beta_columns(2) with col1: alpha_selection() col2.code( """ import numpy as np from sklearn.linear_model import LassoCV from yellowbrick.datasets import load_concrete from yellowbrick.regressor import AlphaSelection # Load the regression dataset X, y = load_concrete() # Create a list of alphas to cross-validate against alphas = np.logspace(-10, 1, 400) # Instantiate the linear model and visualizer model = LassoCV(alphas=alphas) visualizer = AlphaSelection(model) visualizer.fit(X, y) st_yellowbrick(visualizer) """, language="python", ) return None def residuals_plot(): from sklearn.linear_model import Ridge from sklearn.model_selection import train_test_split from yellowbrick.datasets import load_concrete from yellowbrick.regressor import ResidualsPlot # Load a regression dataset X, y = load_concrete() # Create the train and test data X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Instantiate the linear model and visualizer model = Ridge() visualizer = ResidualsPlot(model) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data return st_yellowbrick(visualizer) # Finalize and render the figure def prediction_error(): from sklearn.linear_model import Lasso from sklearn.model_selection import train_test_split from yellowbrick.datasets import load_concrete from yellowbrick.regressor import PredictionError # Load a regression dataset X, y = load_concrete() # Create the train and test data X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Instantiate the linear model and visualizer model = Lasso() visualizer = PredictionError(model) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data return st_yellowbrick(visualizer) # Finalize and render the figure def alpha_selection(): import numpy as np from sklearn.linear_model import LassoCV from yellowbrick.datasets import load_concrete from yellowbrick.regressor import AlphaSelection # Load the regression dataset X, y = load_concrete() # Create a list of alphas to cross-validate against alphas = np.logspace(-10, 1, 400) # Instantiate the linear model and visualizer model = LassoCV(alphas=alphas) visualizer = AlphaSelection(model) visualizer.fit(X, y) return st_yellowbrick(visualizer)
[ "streamlit.checkbox", "yellowbrick.datasets.load_concrete", "yellowbrick.regressor.PredictionError", "yellowbrick.regressor.AlphaSelection", "streamlit.markdown", "sklearn.linear_model.LassoCV", "sklearn.linear_model.Lasso", "streamlit.beta_columns", "sklearn.model_selection.train_test_split", "yellowbrick.regressor.ResidualsPlot", "streamlit.sidebar.form", "sklearn.linear_model.Ridge", "streamlit_yellowbrick.st_yellowbrick", "streamlit.multiselect", "streamlit.form_submit_button", "numpy.logspace", "streamlit.beta_expander" ]
[((6089, 6104), 'yellowbrick.datasets.load_concrete', 'load_concrete', ([], {}), '()\n', (6102, 6104), False, 'from yellowbrick.datasets import load_concrete\n'), ((6182, 6236), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (6198, 6236), False, 'from sklearn.model_selection import train_test_split\n'), ((6314, 6321), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (6319, 6321), False, 'from sklearn.linear_model import Ridge\n'), ((6339, 6359), 'yellowbrick.regressor.ResidualsPlot', 'ResidualsPlot', (['model'], {}), '(model)\n', (6352, 6359), False, 'from yellowbrick.regressor import ResidualsPlot\n'), ((6528, 6554), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', (['visualizer'], {}), '(visualizer)\n', (6542, 6554), False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((6865, 6880), 'yellowbrick.datasets.load_concrete', 'load_concrete', ([], {}), '()\n', (6878, 6880), False, 'from yellowbrick.datasets import load_concrete\n'), ((6958, 7012), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (6974, 7012), False, 'from sklearn.model_selection import train_test_split\n'), ((7090, 7097), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (7095, 7097), False, 'from sklearn.linear_model import Lasso\n'), ((7115, 7137), 'yellowbrick.regressor.PredictionError', 'PredictionError', (['model'], {}), '(model)\n', (7130, 7137), False, 'from yellowbrick.regressor import PredictionError\n'), ((7306, 7332), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', (['visualizer'], {}), '(visualizer)\n', (7320, 7332), False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((7610, 7625), 'yellowbrick.datasets.load_concrete', 'load_concrete', ([], {}), '()\n', (7623, 7625), False, 'from yellowbrick.datasets import load_concrete\n'), ((7696, 7720), 'numpy.logspace', 'np.logspace', (['(-10)', '(1)', '(400)'], {}), '(-10, 1, 400)\n', (7707, 7720), True, 'import numpy as np\n'), ((7784, 7806), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {'alphas': 'alphas'}), '(alphas=alphas)\n', (7791, 7806), False, 'from sklearn.linear_model import LassoCV\n'), ((7824, 7845), 'yellowbrick.regressor.AlphaSelection', 'AlphaSelection', (['model'], {}), '(model)\n', (7838, 7845), False, 'from yellowbrick.regressor import AlphaSelection\n'), ((7882, 7908), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', (['visualizer'], {}), '(visualizer)\n', (7896, 7908), False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((106, 144), 'streamlit.sidebar.form', 'st.sidebar.form', ([], {'key': '"""regression_form"""'}), "(key='regression_form')\n", (121, 144), True, 'import streamlit as st\n'), ((179, 292), 'streamlit.multiselect', 'st.multiselect', (['"""Choose Regression Visualizers"""', "['Residuals Plot', 'Prediction Error Plot', 'Alpha Section']"], {}), "('Choose Regression Visualizers', ['Residuals Plot',\n 'Prediction Error Plot', 'Alpha Section'])\n", (193, 292), True, 'import streamlit as st\n'), ((411, 446), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Show"""'}), "(label='Show')\n", (432, 446), True, 'import streamlit as st\n'), ((513, 556), 'streamlit.beta_expander', 'st.beta_expander', (['"""Collapse"""'], {'expanded': '(True)'}), "('Collapse', expanded=True)\n", (529, 556), True, 'import streamlit as 
st\n'), ((579, 632), 'streamlit.checkbox', 'st.checkbox', (['"""What is a Residuals Plot?"""'], {'value': '(False)'}), "('What is a Residuals Plot?', value=False)\n", (590, 632), True, 'import streamlit as st\n'), ((1306, 1324), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (1321, 1324), True, 'import streamlit as st\n'), ((2303, 2346), 'streamlit.beta_expander', 'st.beta_expander', (['"""Collapse"""'], {'expanded': '(True)'}), "('Collapse', expanded=True)\n", (2319, 2346), True, 'import streamlit as st\n'), ((2369, 2429), 'streamlit.checkbox', 'st.checkbox', (['"""What is a Prediction Error Plot?"""'], {'value': '(False)'}), "('What is a Prediction Error Plot?', value=False)\n", (2380, 2429), True, 'import streamlit as st\n'), ((3001, 3019), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (3016, 3019), True, 'import streamlit as st\n'), ((3996, 4039), 'streamlit.beta_expander', 'st.beta_expander', (['"""Collapse"""'], {'expanded': '(True)'}), "('Collapse', expanded=True)\n", (4012, 4039), True, 'import streamlit as st\n'), ((4062, 4112), 'streamlit.checkbox', 'st.checkbox', (['"""What is Alpha Section?"""'], {'value': '(False)'}), "('What is Alpha Section?', value=False)\n", (4073, 4112), True, 'import streamlit as st\n'), ((5167, 5185), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (5182, 5185), True, 'import streamlit as st\n'), ((672, 1252), 'streamlit.markdown', 'st.markdown', (['"""\n Residuals, in the context of regression models, are the difference between\n the observed value of the target variable (y) and the predicted value (ŷ),\n i.e. the error of the prediction. The residuals plot shows the difference\n between residuals on the vertical axis and the dependent variable on the\n horizontal axis, allowing you to detect regions within the target that may\n be susceptible to more or less error.\n """'], {}), '(\n """\n Residuals, in the context of regression models, are the difference between\n the observed value of the target variable (y) and the predicted value (ŷ),\n i.e. the error of the prediction. The residuals plot shows the difference\n between residuals on the vertical axis and the dependent variable on the\n horizontal axis, allowing you to detect regions within the target that may\n be susceptible to more or less error.\n """\n )\n', (683, 1252), True, 'import streamlit as st\n'), ((2469, 2947), 'streamlit.markdown', 'st.markdown', (['"""\n A prediction error plot shows the actual targets from the dataset against\n the predicted values generated by our model. This allows us to see how\n much variance is in the model. Data scientists can diagnose regression\n models using this plot by comparing against the 45 degree line, where\n the prediction exactly matches the model.\n """'], {}), '(\n """\n A prediction error plot shows the actual targets from the dataset against\n the predicted values generated by our model. This allows us to see how\n much variance is in the model. Data scientists can diagnose regression\n models using this plot by comparing against the 45 degree line, where\n the prediction exactly matches the model.\n """\n )\n', (2480, 2947), True, 'import streamlit as st\n'), ((4152, 5113), 'streamlit.markdown', 'st.markdown', (['"""\n Regularization is designed to penalize model complexity, therefore the higher\n the alpha, the less complex the model, decreasing the error due to variance\n (overfit). Alphas that are too high on the other hand increase the error due\n to bias (underfit). 
It is important, therefore to choose an optimal alpha\n such that the error is minimized in both directions.\n\n The `AlphaSelection` Visualizer demonstrates how different values of alpha\n influence model selection during the regularization of linear models.\n Generally speaking, alpha increases the affect of regularization, e.g.\n if alpha is zero there is no regularization and the higher the alpha,\n the more the regularization parameter influences the final model.\n """'], {}), '(\n """\n Regularization is designed to penalize model complexity, therefore the higher\n the alpha, the less complex the model, decreasing the error due to variance\n (overfit). Alphas that are too high on the other hand increase the error due\n to bias (underfit). It is important, therefore to choose an optimal alpha\n such that the error is minimized in both directions.\n\n The `AlphaSelection` Visualizer demonstrates how different values of alpha\n influence model selection during the regularization of linear models.\n Generally speaking, alpha increases the affect of regularization, e.g.\n if alpha is zero there is no regularization and the higher the alpha,\n the more the regularization parameter influences the final model.\n """\n )\n', (4163, 5113), True, 'import streamlit as st\n')]
import numpy as np
from PIL import Image
from skimage import color
from skimage.feature import hog

from pelops.features.feature_producer import FeatureProducer


class HOGFeatureProducer(FeatureProducer):

    def __init__(self, chip_producer, image_size=(224,224), cells=(16, 16), orientations=8, histogram_bins_per_channel=256):
        self.image_size = image_size
        self.cells = cells
        self.orientations = orientations
        self.histogram_bins_per_channel = histogram_bins_per_channel
        super().__init__(chip_producer)

    def produce_features(self, chip):
        """Takes a chip object and returns a feature vector of size
        self.feat_size.
        """
        img = self.get_image(chip)
        img = img.resize(self.image_size, Image.BICUBIC)
        img_x, img_y = img.size

        # Calculate histogram of each channel
        channels = img.split()
        hist_features = np.full(shape=3 * self.histogram_bins_per_channel, fill_value=-1)
        # We expect RGB images. If something else is passed warn the user and
        # continue.
        if len(channels) < 3:
            print("Non-RBG image! Vector will be padded with -1!")
        if len(channels) > 3:
            print("Non-RBG image! Channels beyond the first three will be ignored!")
            channels = channel[:3]

        for i, channel in enumerate(channels):
            channel_array = np.array(channel)
            values, _ = np.histogram(channel_array.flat, bins=self.histogram_bins_per_channel)
            start = i * self.histogram_bins_per_channel
            end = (i+1) * self.histogram_bins_per_channel
            hist_features[start:end] = values

        # Calculate HOG features, which require a grayscale image
        img = color.rgb2gray(np.array(img))
        features = hog(
            img,
            orientations=self.orientations,
            pixels_per_cell=(img_x / self.cells[0], img_y / self.cells[1]),
            cells_per_block=self.cells,  # Normalize over the whole image
        )

        return np.concatenate((features, hist_features))

    def set_variables(self):
        hog_size = self.cells[0] * self.cells[1] * self.orientations
        hist_size = 3 * self.histogram_bins_per_channel
        self.feat_size = hog_size + hist_size
[ "numpy.histogram", "numpy.array", "numpy.concatenate", "numpy.full", "skimage.feature.hog" ]
[((907, 972), 'numpy.full', 'np.full', ([], {'shape': '(3 * self.histogram_bins_per_channel)', 'fill_value': '(-1)'}), '(shape=3 * self.histogram_bins_per_channel, fill_value=-1)\n', (914, 972), True, 'import numpy as np\n'), ((1798, 1935), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'self.orientations', 'pixels_per_cell': '(img_x / self.cells[0], img_y / self.cells[1])', 'cells_per_block': 'self.cells'}), '(img, orientations=self.orientations, pixels_per_cell=(img_x / self.\n cells[0], img_y / self.cells[1]), cells_per_block=self.cells)\n', (1801, 1935), False, 'from skimage.feature import hog\n'), ((2040, 2081), 'numpy.concatenate', 'np.concatenate', (['(features, hist_features)'], {}), '((features, hist_features))\n', (2054, 2081), True, 'import numpy as np\n'), ((1395, 1412), 'numpy.array', 'np.array', (['channel'], {}), '(channel)\n', (1403, 1412), True, 'import numpy as np\n'), ((1437, 1507), 'numpy.histogram', 'np.histogram', (['channel_array.flat'], {'bins': 'self.histogram_bins_per_channel'}), '(channel_array.flat, bins=self.histogram_bins_per_channel)\n', (1449, 1507), True, 'import numpy as np\n'), ((1764, 1777), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1772, 1777), True, 'import numpy as np\n')]
import numpy as np

arr = np.random.choice([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=(100))
print(arr)

arr = np.random.choice([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=100)
print(arr)

arr = np.random.choice([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=(5, 10))
print(arr)
[ "numpy.random.choice" ]
[((28, 100), 'numpy.random.choice', 'np.random.choice', (['[6, 8, 3, 1, 5]'], {'p': '[0.0, 0.5, 0.2, 0.2, 0.1]', 'size': '(100)'}), '([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=100)\n', (44, 100), True, 'import numpy as np\n'), ((121, 193), 'numpy.random.choice', 'np.random.choice', (['[6, 8, 3, 1, 5]'], {'p': '[0.0, 0.5, 0.2, 0.2, 0.1]', 'size': '(100)'}), '([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=100)\n', (137, 193), True, 'import numpy as np\n'), ((212, 288), 'numpy.random.choice', 'np.random.choice', (['[6, 8, 3, 1, 5]'], {'p': '[0.0, 0.5, 0.2, 0.2, 0.1]', 'size': '(5, 10)'}), '([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=(5, 10))\n', (228, 288), True, 'import numpy as np\n')]
""" Example of a simple genetic algorithm based on DeepNEAT Miikkulainen, Risto, et al. "Evolving deep neural networks." Artificial Intelligence in the Age of Neural Networks and Brain Computing. Academic Press, 2019. 293-312. """ import time import traceback import numpy as np import torch.optim from nord.design.metaheuristics.genetics.neat import Genome, Innovation from nord.neural_nets import LocalEvaluator from nord.utils import assure_reproducibility assure_reproducibility() # Genetic Algorithm Parameters add_node_rate = 0.03 add_connection_rate = 0.05 mutation_rate = 0.5 crossover_rate = 0.75 generations = 10 population_sz = 5 tournament_sz = 2 # Evaluation parameters EPOCHS = 1 dataset = 'cifar10' # can also be 'fashion-mnist' output_file = '../results/genetic_cifar10.out' def write_to_file(msg): with open(output_file, 'a') as f: f.write(msg) f.write('\n') write_to_file('Generation_No, Individual_No, Fitness, Genome') # no_filters, dropout_rate, weight_scaling, kernel_size, max_pooling layer_bound_types = [int, float, float, int, bool] layer_bounds = [[32, 0.0, 0, 1, 0], [256, 0.7, 2.0, 3, 1]] evaluator = LocalEvaluator(torch.optim.Adam, {}, False) cache = dict() i = Innovation() i.new_generation() population = [] # Population initialization for _ in range(population_sz): g = Genome(layer_bound_types, layer_bounds, add_node_rate, add_connection_rate, mutation_rate, i) population.append(g) for r in range(generations): t = time.time() i.new_generation() # Evaluation for j in range(len(population)): g = population[j] try: if g not in cache: print('Evaluating', g) d = g.to_descriptor(dimensions=2) loss, fitness, total_time = evaluator.descriptor_evaluate( d, EPOCHS, data_percentage=1, dataset=dataset) fitness = fitness['accuracy'] cache[g] = fitness else: fitness = cache[g] g.connections.fitness = fitness g.nodes.fitness = fitness write_to_file(str((r, j, fitness, g))) if fitness == 0: print(g.__repr__()) except Exception: traceback.print_exc() print(g.__repr__()) continue new_population = [] # Offspring Generation for _ in range(population_sz//2): pool_1 = np.random.choice( population, size=tournament_sz, replace=False) pool_2 = np.random.choice( population, size=tournament_sz, replace=False) parent_1 = np.argmax([f.nodes.fitness for f in pool_1]) parent_2 = np.argmax([f.nodes.fitness for f in pool_2]) parent_1 = pool_1[parent_1] parent_2 = pool_2[parent_2] offspring_1 = parent_1.crossover(parent_2) offspring_2 = parent_2.crossover(parent_1) new_population.append(offspring_1) new_population.append(offspring_2) population = new_population
[ "nord.neural_nets.LocalEvaluator", "nord.design.metaheuristics.genetics.neat.Innovation", "numpy.random.choice", "numpy.argmax", "nord.utils.assure_reproducibility", "nord.design.metaheuristics.genetics.neat.Genome", "traceback.print_exc", "time.time" ]
[((503, 527), 'nord.utils.assure_reproducibility', 'assure_reproducibility', ([], {}), '()\n', (525, 527), False, 'from nord.utils import assure_reproducibility\n'), ((1248, 1291), 'nord.neural_nets.LocalEvaluator', 'LocalEvaluator', (['torch.optim.Adam', '{}', '(False)'], {}), '(torch.optim.Adam, {}, False)\n', (1262, 1291), False, 'from nord.neural_nets import LocalEvaluator\n'), ((1313, 1325), 'nord.design.metaheuristics.genetics.neat.Innovation', 'Innovation', ([], {}), '()\n', (1323, 1325), False, 'from nord.design.metaheuristics.genetics.neat import Genome, Innovation\n'), ((1433, 1530), 'nord.design.metaheuristics.genetics.neat.Genome', 'Genome', (['layer_bound_types', 'layer_bounds', 'add_node_rate', 'add_connection_rate', 'mutation_rate', 'i'], {}), '(layer_bound_types, layer_bounds, add_node_rate, add_connection_rate,\n mutation_rate, i)\n', (1439, 1530), False, 'from nord.design.metaheuristics.genetics.neat import Genome, Innovation\n'), ((1646, 1657), 'time.time', 'time.time', ([], {}), '()\n', (1655, 1657), False, 'import time\n'), ((2622, 2685), 'numpy.random.choice', 'np.random.choice', (['population'], {'size': 'tournament_sz', 'replace': '(False)'}), '(population, size=tournament_sz, replace=False)\n', (2638, 2685), True, 'import numpy as np\n'), ((2718, 2781), 'numpy.random.choice', 'np.random.choice', (['population'], {'size': 'tournament_sz', 'replace': '(False)'}), '(population, size=tournament_sz, replace=False)\n', (2734, 2781), True, 'import numpy as np\n'), ((2818, 2862), 'numpy.argmax', 'np.argmax', (['[f.nodes.fitness for f in pool_1]'], {}), '([f.nodes.fitness for f in pool_1])\n', (2827, 2862), True, 'import numpy as np\n'), ((2883, 2927), 'numpy.argmax', 'np.argmax', (['[f.nodes.fitness for f in pool_2]'], {}), '([f.nodes.fitness for f in pool_2])\n', (2892, 2927), True, 'import numpy as np\n'), ((2431, 2452), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2450, 2452), False, 'import traceback\n')]
import pymysql
import pandas as pd
import pickle
import numpy as np
import databaseInfo as db

databaseName = 'root'
databasePasswd = '<PASSWORD>'


def user_info_query(user_id):
    conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)
    cur = conn.cursor()
    users = pd.read_sql("select * from User", conn)
    users = users.filter(regex='UserID|Gender|Age|JobID')

    # Remap gender and age values in the User data
    gender_map = {'F': 0, 'M': 1}
    users['Gender'] = users['Gender'].map(gender_map)
    age_map = {val: ii for ii, val in enumerate(set(users['Age']))}
    users['Age'] = users['Age'].map(age_map)

    users_list = users.values
    # print(users.head())

    cur.close()
    # Release resources
    conn.close()

    num_line = 0
    for index in range(len(users_list)):
        if int(users_list[index][0]) == user_id:
            num_line = index
            break

    #return users_list[user_id-1][0],users_list[user_id-1][1],users_list[user_id-1][2],users_list[user_id-1][3]
    return users_list[num_line][0],users_list[num_line][1],users_list[num_line][2],users_list[num_line][3]

# print(user_info_query(4))


def movie_info_query(user_id):
    conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)
    cur = conn.cursor()

    title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(
        open('preprocess.p', mode='rb'))
    sentences_size = title_count

    # Dictionary mapping movie ID to row index; movie IDs in the dataset do not
    # match row indices (e.g. the movie in row 5 does not necessarily have ID 5)
    movieid2idx = {val[0]: i for i, val in enumerate(movies.values)}

    movies_id_list = pd.read_sql("select MovieID from watch_history where UserID={}".format(user_id), conn)
    # print(movies.head())
    movies_id_list = movies_id_list.values
    # print(movies_id_list)

    history_movie_feature_list = []
    for i in range(len(movies_id_list)):
        movie_feature = []
        movie_id_val = movies_id_list[i][0]
        # print(movie_id_val)
        categories = np.zeros([1, 18])
        categories[0] = movies.values[movieid2idx[movie_id_val]][2]

        titles = np.zeros([1, sentences_size])
        titles[0] = movies.values[movieid2idx[movie_id_val]][1]

        movie_id = np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1])
        movie_categories = categories
        movie_titles = titles

        movie_feature.append(movie_id)
        movie_feature.append(movie_categories)
        movie_feature.append(movie_titles)

        history_movie_feature_list.append(movie_feature)

    cur.close()
    # Release resources
    conn.close()

    return history_movie_feature_list

# print(movie_info_query(1))
[ "pandas.read_sql", "numpy.zeros", "pymysql.connect", "numpy.reshape" ]
[((189, 314), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'db.databaseAddress', 'user': 'db.databaseLoginName', 'password': 'db.databasePasswd', 'database': 'db.databaseName'}), '(host=db.databaseAddress, user=db.databaseLoginName,\n password=db.databasePasswd, database=db.databaseName)\n', (204, 314), False, 'import pymysql\n'), ((347, 386), 'pandas.read_sql', 'pd.read_sql', (['"""select * from User"""', 'conn'], {}), "('select * from User', conn)\n", (358, 386), True, 'import pandas as pd\n'), ((1211, 1336), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'db.databaseAddress', 'user': 'db.databaseLoginName', 'password': 'db.databasePasswd', 'database': 'db.databaseName'}), '(host=db.databaseAddress, user=db.databaseLoginName,\n password=db.databasePasswd, database=db.databaseName)\n', (1226, 1336), False, 'import pymysql\n'), ((2091, 2108), 'numpy.zeros', 'np.zeros', (['[1, 18]'], {}), '([1, 18])\n', (2099, 2108), True, 'import numpy as np\n'), ((2194, 2223), 'numpy.zeros', 'np.zeros', (['[1, sentences_size]'], {}), '([1, sentences_size])\n', (2202, 2223), True, 'import numpy as np\n'), ((2307, 2367), 'numpy.reshape', 'np.reshape', (['movies.values[movieid2idx[movie_id_val]][0]', '[1]'], {}), '(movies.values[movieid2idx[movie_id_val]][0], [1])\n', (2317, 2367), True, 'import numpy as np\n')]
#
# locality sensitive hashing code
#

from collections import defaultdict

import numpy as np
import xxhash

import sys
import pyximport
pyximport.install()
sys.path.insert(0, 'tools')
import simcore as csimcore

# k-shingles: pairs of adjacent k-length substrings (in order)
def shingle(s, k=2):
    k = min(len(s), k)
    for i in range(len(s)-k+1):
        yield s[i:i+k]

# split into words
def tokenize(s):
    return s.split()

def murmur(x):
    return np.uint64(xxhash.xxh64_intdigest(x))

# compute actual simhash
class Simhash:
    def __init__(self):
        self.dim = 64
        self.unums = list(map(np.uint64,range(self.dim)))
        self.masks = [self.unums[1] << n for n in self.unums]

    def simhash(self, features, weights=None):
        if weights is None:
            weights = [1.0]*len(features)
        hashish = [murmur(f) for f in features]
        v = [0.0]*self.dim
        for h, w in zip(hashish, weights):
            for i in range(self.dim):
                v[i] += w if h & self.masks[i] else -w
        ans = self.unums[0]
        for i in range(self.dim):
            if v[i] >= 0:
                ans |= self.masks[i]
        return ans

# compute actual simhash with C - only 64 width
class CSimhash():
    def __init__(self):
        self.simcore = csimcore.simcore

    def simhash(self, features, weights=None):
        if weights is None:
            weights = [1.0]*len(features)
        hashish = [murmur(f) for f in features]
        ret = np.uint64(self.simcore(hashish, weights))
        return ret

class Cluster:
    # dim is the simhash width, k is the tolerance
    def __init__(self, dim=64, k=4, thresh=1):
        self.dim = dim
        self.k = k
        self.thresh = thresh
        self.unions = []
        self.hashmaps = [defaultdict(list) for _ in range(k)]  # defaultdict(list)
        self.offsets = [np.uint64(dim//k*i) for i in range(k)]
        self.bin_masks = [np.uint64(2**(dim-offset)-1) if (i == len(self.offsets)-1)
                          else np.uint64(2**(self.offsets[i+1]-offset)-1)
                          for i, offset in enumerate(self.offsets)]
        self.csim = CSimhash()
        self.hasher = self.csim.simhash

    # add item to the cluster
    def add(self, features, label, weights=None):
        # get subkeys
        sign = self.hasher(features, weights)
        keyvec = self.get_keys(sign)

        # Unite labels with the same keys in the same band
        matches = defaultdict(int)
        for idx, key in enumerate(keyvec):
            others = self.hashmaps[idx][key]
            for l in others:
                matches[l] += 1
            others.append(label)
        for out, val in matches.items():
            if val > self.thresh:
                self.unions.append((label, out))

    # bin simhash into chunks
    def get_keys(self, simhash):
        return [simhash >> offset & mask for offset, mask in zip(self.offsets, self.bin_masks)]
[ "sys.path.insert", "xxhash.xxh64_intdigest", "pyximport.install", "numpy.uint64", "collections.defaultdict" ]
[((136, 155), 'pyximport.install', 'pyximport.install', ([], {}), '()\n', (153, 155), False, 'import pyximport\n'), ((156, 183), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""tools"""'], {}), "(0, 'tools')\n", (171, 183), False, 'import sys\n'), ((469, 494), 'xxhash.xxh64_intdigest', 'xxhash.xxh64_intdigest', (['x'], {}), '(x)\n', (491, 494), False, 'import xxhash\n'), ((2415, 2431), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2426, 2431), False, 'from collections import defaultdict\n'), ((1784, 1801), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1795, 1801), False, 'from collections import defaultdict\n'), ((1865, 1888), 'numpy.uint64', 'np.uint64', (['(dim // k * i)'], {}), '(dim // k * i)\n', (1874, 1888), True, 'import numpy as np\n'), ((1930, 1964), 'numpy.uint64', 'np.uint64', (['(2 ** (dim - offset) - 1)'], {}), '(2 ** (dim - offset) - 1)\n', (1939, 1964), True, 'import numpy as np\n'), ((1994, 2044), 'numpy.uint64', 'np.uint64', (['(2 ** (self.offsets[i + 1] - offset) - 1)'], {}), '(2 ** (self.offsets[i + 1] - offset) - 1)\n', (2003, 2044), True, 'import numpy as np\n')]
""" File: sample_generator.py Author: Nrupatunga Email: <EMAIL> Github: https://github.com/nrupatunga Description: Generating samples from single frame """ import sys import cv2 import numpy as np from loguru import logger try: from goturn.helper.BoundingBox import BoundingBox from goturn.helper.image_proc import cropPadImage from goturn.helper.vis_utils import Visualizer from goturn.helper import image_io from goturn.helper.draw_util import draw except ImportError: logger.error('Please run $source settings.sh from root directory') sys.exit(1) class bbParams: """Docstring for bbParams. """ def __init__(self, lamda_shift, lamda_scale, min_scale, max_scale): """parameters for generating synthetic data""" self.lamda_shift = lamda_shift self.lamda_scale = lamda_scale self.min_scale = min_scale self.max_scale = max_scale def __repr__(self): return str({'lamda_shift': self.lamda_shift, 'lamda_scale': self.lamda_scale, 'min_scale': self.min_scale, 'max_scale': self.max_scale}) class sample_generator: """Generate samples from single frame""" def __init__(self, lamda_shift, lamda_scale, min_scale, max_scale, dbg=False, env='sample_generator'): """set parameters """ self._lamda_shift = lamda_shift self._lamda_scale = lamda_scale self._min_scale = min_scale self._max_scale = max_scale self._kSamplesPerImage = 10 # number of synthetic samples per image self._viz = None if dbg: self._env = env self._viz = Visualizer(env=self._env) self._dbg = dbg def make_true_sample(self): """Generate true target:search_region pair""" curr_prior_tight = self.bbox_prev_gt_ target_pad = self.target_pad_ # To find out the region in which we need to search in the # current frame, we use the previous frame bbox to get the # region in which we can make the search output = cropPadImage(curr_prior_tight, self.img_curr_, self._dbg, self._viz) curr_search_region, curr_search_location, edge_spacing_x, edge_spacing_y = output bbox_curr_gt = self.bbox_curr_gt_ bbox_curr_gt_recentered = BoundingBox(0, 0, 0, 0) bbox_curr_gt_recentered = bbox_curr_gt.recenter(curr_search_location, edge_spacing_x, edge_spacing_y, bbox_curr_gt_recentered) if self._dbg: env = self._env + '_make_true_sample' search_dbg = draw.bbox(self.img_curr_, curr_search_location) search_dbg = draw.bbox(search_dbg, bbox_curr_gt, color=(255, 255, 0)) self._viz.plot_image_opencv(search_dbg, 'search_region', env=env) recentered_img = draw.bbox(curr_search_region, bbox_curr_gt_recentered, color=(255, 255, 0)) self._viz.plot_image_opencv(recentered_img, 'cropped_search_region', env=env) del recentered_img del search_dbg bbox_curr_gt_recentered.scale(curr_search_region) return curr_search_region, target_pad, bbox_curr_gt_recentered def make_training_samples(self, num_samples, images, targets, bbox_gt_scales): """ @num_samples: number of samples @images: set of num_samples appended to images list @target: set of num_samples targets appended to targets list @bbox_gt_scales: bounding box to be regressed (scaled version) """ for i in range(num_samples): image_rand_focus, target_pad, bbox_gt_scaled = self.make_training_sample_BBShift() images.append(image_rand_focus) targets.append(target_pad) bbox_gt_scales.append(bbox_gt_scaled) if self._dbg: self.visualize(image_rand_focus, target_pad, bbox_gt_scaled, i) return images, targets, bbox_gt_scales def visualize(self, image, target, bbox, idx): """ sample generator prepares image and the respective targets (with bounding box). This function helps you to visualize it. 
The visualization is based on the Visdom server, please initialize the visdom server by running the command $ python -m visdom.server open http://localhost:8097 in your browser to visualize the images """ if image_io._is_pil_image(image): image = np.asarray(image) if image_io._is_pil_image(target): target = np.asarray(target) target = cv2.resize(target, (227, 227)) target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB) image = cv2.resize(image, (227, 227)) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) bbox.unscale(image) bbox.x1, bbox.x2, bbox.y1, bbox.y2 = int(bbox.x1), int(bbox.x2), int(bbox.y1), int(bbox.y2) image_bb = draw.bbox(image, bbox) out = np.concatenate((target[np.newaxis, ...], image_bb[np.newaxis, ...]), axis=0) out = np.transpose(out, [0, 3, 1, 2]) self._viz.plot_images_np(out, title='sample_{}'.format(idx), env=self._env + '_train') def get_default_bb_params(self): """default bb parameters""" default_params = bbParams(self._lamda_shift, self._lamda_scale, self._min_scale, self._max_scale) return default_params def make_training_sample_BBShift_(self, bbParams, dbg=False): """generate training samples based on bbparams""" bbox_curr_gt = self.bbox_curr_gt_ bbox_curr_shift = BoundingBox(0, 0, 0, 0) bbox_curr_shift = bbox_curr_gt.shift(self.img_curr_, bbParams.lamda_scale, bbParams.lamda_shift, bbParams.min_scale, bbParams.max_scale, True, bbox_curr_shift) rand_search_region, rand_search_location, edge_spacing_x, edge_spacing_y = cropPadImage(bbox_curr_shift, self.img_curr_, dbg=self._dbg, viz=self._viz) bbox_curr_gt = self.bbox_curr_gt_ bbox_gt_recentered = BoundingBox(0, 0, 0, 0) bbox_gt_recentered = bbox_curr_gt.recenter(rand_search_location, edge_spacing_x, edge_spacing_y, bbox_gt_recentered) if dbg: env = self._env + '_make_training_sample_bbshift' viz = self._viz curr_img_bbox = draw.bbox(self.img_curr_, bbox_curr_gt) recentered_img = draw.bbox(rand_search_region, bbox_gt_recentered) viz.plot_image_opencv(curr_img_bbox, 'curr shifted bbox', env=env) viz.plot_image_opencv(recentered_img, 'recentered shifted bbox', env=env) bbox_gt_recentered.scale(rand_search_region) bbox_gt_scaled = bbox_gt_recentered return rand_search_region, self.target_pad_, bbox_gt_scaled def make_training_sample_BBShift(self): """ bb_params consists of shift, scale, min-max scale for shifting the current bounding box """ default_bb_params = self.get_default_bb_params() image_rand_focus, target_pad, bbox_gt_scaled = self.make_training_sample_BBShift_(default_bb_params, self._dbg) return image_rand_focus, target_pad, bbox_gt_scaled def reset(self, bbox_curr, bbox_prev, img_curr, img_prev): """This prepares the target image with enough context (search region) @bbox_curr: current frame bounding box @bbox_prev: previous frame bounding box @img_curr: current frame @img_prev: previous frame """ target_pad, pad_image_location, _, _ = cropPadImage(bbox_prev, img_prev, dbg=self._dbg, viz=self._viz) self.img_curr_ = img_curr self.bbox_curr_gt_ = bbox_curr self.bbox_prev_gt_ = bbox_prev self.target_pad_ = target_pad # crop kContextFactor * bbox_curr copied if self._dbg: env = self._env + '_targetpad' search_dbg = draw.bbox(img_prev, bbox_prev, color=(0, 0, 255)) search_dbg = draw.bbox(search_dbg, pad_image_location) self._viz.plot_image_opencv(search_dbg, 'target_region', env=env) self._viz.plot_image_opencv(target_pad, 'cropped_target_region', env=env) del search_dbg
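A minimal usage sketch for the sample_generator class above, added for illustration and not part of the original sample_generator.py. It assumes the class is already in scope, that BoundingBox takes corner coordinates in (x1, y1, x2, y2) order, and it uses placeholder frame paths and illustrative hyper-parameter values rather than values taken from the repository.

import cv2

from goturn.helper.BoundingBox import BoundingBox

# Sampling hyper-parameters (illustrative values; see bbParams for their meaning)
gen = sample_generator(lamda_shift=5, lamda_scale=15,
                       min_scale=-0.4, max_scale=0.4, dbg=False)

img_prev = cv2.imread('frames/0001.jpg')    # placeholder path
img_curr = cv2.imread('frames/0002.jpg')    # placeholder path
bbox_prev = BoundingBox(50, 60, 150, 180)        # assumed (x1, y1, x2, y2) order
bbox_curr = BoundingBox(55, 62, 155, 184)

# Cache both frames and crop the padded target region from the previous frame
gen.reset(bbox_curr, bbox_prev, img_curr, img_prev)

# One true pair: search region from the current frame plus the padded target,
# with the ground-truth box recentered and scaled to the search region
search_region, target_pad, bbox_gt_scaled = gen.make_true_sample()

# Extra synthetic pairs obtained by randomly shifting/scaling the previous box
images, targets, bbox_gt_scales = gen.make_training_samples(
    num_samples=10, images=[], targets=[], bbox_gt_scales=[])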
[ "goturn.helper.image_io._is_pil_image", "goturn.helper.draw_util.draw.bbox", "goturn.helper.image_proc.cropPadImage", "goturn.helper.BoundingBox.BoundingBox", "numpy.asarray", "loguru.logger.error", "goturn.helper.vis_utils.Visualizer", "cv2.cvtColor", "sys.exit", "numpy.concatenate", "cv2.resize", "numpy.transpose" ]
[((498, 564), 'loguru.logger.error', 'logger.error', (['"""Please run $source settings.sh from root directory"""'], {}), "('Please run $source settings.sh from root directory')\n", (510, 564), False, 'from loguru import logger\n'), ((569, 580), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (577, 580), False, 'import sys\n'), ((2095, 2163), 'goturn.helper.image_proc.cropPadImage', 'cropPadImage', (['curr_prior_tight', 'self.img_curr_', 'self._dbg', 'self._viz'], {}), '(curr_prior_tight, self.img_curr_, self._dbg, self._viz)\n', (2107, 2163), False, 'from goturn.helper.image_proc import cropPadImage\n'), ((2361, 2384), 'goturn.helper.BoundingBox.BoundingBox', 'BoundingBox', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (2372, 2384), False, 'from goturn.helper.BoundingBox import BoundingBox\n'), ((4563, 4592), 'goturn.helper.image_io._is_pil_image', 'image_io._is_pil_image', (['image'], {}), '(image)\n', (4585, 4592), False, 'from goturn.helper import image_io\n'), ((4644, 4674), 'goturn.helper.image_io._is_pil_image', 'image_io._is_pil_image', (['target'], {}), '(target)\n', (4666, 4674), False, 'from goturn.helper import image_io\n'), ((4734, 4764), 'cv2.resize', 'cv2.resize', (['target', '(227, 227)'], {}), '(target, (227, 227))\n', (4744, 4764), False, 'import cv2\n'), ((4782, 4821), 'cv2.cvtColor', 'cv2.cvtColor', (['target', 'cv2.COLOR_BGR2RGB'], {}), '(target, cv2.COLOR_BGR2RGB)\n', (4794, 4821), False, 'import cv2\n'), ((4838, 4867), 'cv2.resize', 'cv2.resize', (['image', '(227, 227)'], {}), '(image, (227, 227))\n', (4848, 4867), False, 'import cv2\n'), ((4884, 4922), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4896, 4922), False, 'import cv2\n'), ((5072, 5094), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['image', 'bbox'], {}), '(image, bbox)\n', (5081, 5094), False, 'from goturn.helper.draw_util import draw\n'), ((5109, 5185), 'numpy.concatenate', 'np.concatenate', (['(target[np.newaxis, ...], image_bb[np.newaxis, ...])'], {'axis': '(0)'}), '((target[np.newaxis, ...], image_bb[np.newaxis, ...]), axis=0)\n', (5123, 5185), True, 'import numpy as np\n'), ((5200, 5231), 'numpy.transpose', 'np.transpose', (['out', '[0, 3, 1, 2]'], {}), '(out, [0, 3, 1, 2])\n', (5212, 5231), True, 'import numpy as np\n'), ((5798, 5821), 'goturn.helper.BoundingBox.BoundingBox', 'BoundingBox', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (5809, 5821), False, 'from goturn.helper.BoundingBox import BoundingBox\n'), ((6073, 6148), 'goturn.helper.image_proc.cropPadImage', 'cropPadImage', (['bbox_curr_shift', 'self.img_curr_'], {'dbg': 'self._dbg', 'viz': 'self._viz'}), '(bbox_curr_shift, self.img_curr_, dbg=self._dbg, viz=self._viz)\n', (6085, 6148), False, 'from goturn.helper.image_proc import cropPadImage\n'), ((6317, 6340), 'goturn.helper.BoundingBox.BoundingBox', 'BoundingBox', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (6328, 6340), False, 'from goturn.helper.BoundingBox import BoundingBox\n'), ((7913, 7976), 'goturn.helper.image_proc.cropPadImage', 'cropPadImage', (['bbox_prev', 'img_prev'], {'dbg': 'self._dbg', 'viz': 'self._viz'}), '(bbox_prev, img_prev, dbg=self._dbg, viz=self._viz)\n', (7925, 7976), False, 'from goturn.helper.image_proc import cropPadImage\n'), ((1672, 1697), 'goturn.helper.vis_utils.Visualizer', 'Visualizer', ([], {'env': 'self._env'}), '(env=self._env)\n', (1682, 1697), False, 'from goturn.helper.vis_utils import Visualizer\n'), ((2618, 2665), 'goturn.helper.draw_util.draw.bbox', 
'draw.bbox', (['self.img_curr_', 'curr_search_location'], {}), '(self.img_curr_, curr_search_location)\n', (2627, 2665), False, 'from goturn.helper.draw_util import draw\n'), ((2691, 2747), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['search_dbg', 'bbox_curr_gt'], {'color': '(255, 255, 0)'}), '(search_dbg, bbox_curr_gt, color=(255, 255, 0))\n', (2700, 2747), False, 'from goturn.helper.draw_util import draw\n'), ((2856, 2931), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['curr_search_region', 'bbox_curr_gt_recentered'], {'color': '(255, 255, 0)'}), '(curr_search_region, bbox_curr_gt_recentered, color=(255, 255, 0))\n', (2865, 2931), False, 'from goturn.helper.draw_util import draw\n'), ((4614, 4631), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (4624, 4631), True, 'import numpy as np\n'), ((4697, 4715), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (4707, 4715), True, 'import numpy as np\n'), ((6601, 6640), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['self.img_curr_', 'bbox_curr_gt'], {}), '(self.img_curr_, bbox_curr_gt)\n', (6610, 6640), False, 'from goturn.helper.draw_util import draw\n'), ((6708, 6757), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['rand_search_region', 'bbox_gt_recentered'], {}), '(rand_search_region, bbox_gt_recentered)\n', (6717, 6757), False, 'from goturn.helper.draw_util import draw\n'), ((8320, 8369), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['img_prev', 'bbox_prev'], {'color': '(0, 0, 255)'}), '(img_prev, bbox_prev, color=(0, 0, 255))\n', (8329, 8369), False, 'from goturn.helper.draw_util import draw\n'), ((8395, 8436), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['search_dbg', 'pad_image_location'], {}), '(search_dbg, pad_image_location)\n', (8404, 8436), False, 'from goturn.helper.draw_util import draw\n')]
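Each extract_api entry above appears to pair a character span in the row's code field with the reconstructed call, its argument list, and the import that call requires. The small helper below is an illustrative sketch added here, not part of the dump; the interpretation of the offsets and of the boolean flag is an assumption inferred from the entries, and the sample entry is copied verbatim from the list above.

# Sketch: pretty-print one extract_api entry (offset semantics are assumed,
# not documented in the dump itself).
def describe_entry(entry):
    (start, end), qualified_name, short_name, args_kwargs, call_text, inner_span, flag, import_stmt = entry
    print(f'{qualified_name}  (chars {start}-{end} of the code field, assumed)')
    print('  call   :', short_name + call_text.strip())
    print('  import :', import_stmt.strip())

# Entry copied verbatim from the extract_api list above
entry = ((4563, 4592), 'goturn.helper.image_io._is_pil_image',
         'image_io._is_pil_image', (['image'], {}), '(image)\n',
         (4585, 4592), False, 'from goturn.helper import image_io\n')
describe_entry(entry)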